From 49b4b1408a8a9066d96d0b7e3b7cd8527e278b77 Mon Sep 17 00:00:00 2001
From: LazyWolf Lin
Date: Mon, 26 Aug 2019 13:50:00 +0800
Subject: [PATCH 001/202] Translating Why const Doesn't Make C Code Faster.

---
 ...12 Why const Doesn-t Make C Code Faster.md | 402 ++++++++++++++++++
 1 file changed, 402 insertions(+)
 create mode 100644 translated/tech/20190812 Why const Doesn-t Make C Code Faster.md

diff --git a/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md b/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md
new file mode 100644
index 0000000000..b3aee8e7a4
--- /dev/null
+++ b/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md
@@ -0,0 +1,402 @@
+[#]: collector: (lujun9972)
+[#]: translator: (LazyWolfLin)
+[#]: reviewer: ( )
+[#]: publisher: ( )
+[#]: url: ( )
+[#]: subject: (Why const Doesn't Make C Code Faster)
+[#]: via: (https://theartofmachinery.com/2019/08/12/c_const_isnt_for_performance.html)
+[#]: author: (Simon Arneaud https://theartofmachinery.com)
+
+为什么 `const` 不能让 C 代码跑得更快?
+====== 
+
+在几个月前的一篇文章里,我曾说过“[有个一个流行的传言,`const` 可以帮助编译器优化 C 和 C++ 代码][1]”。我觉得我需要解释一下,尤其是曾经我自己也以为这是显然对的。我将会用一些理论和人工构造的例子论证,然后在一个真正的代码库 Sqlite 上做一些实验和基准测试。
+
+### 一个简单的测试
+
+让我们从一个最简单、最明显的例子开始,以前认为这是一个 `const` 让 C 代码跑得更快的例子。首先,假设我们有如下两个函数声明:
+
+```
+void func(int *x);
+void constFunc(const int *x);
+```
+
+然后假设我们有如下两份代码:
+
+```
+void byArg(int *x)
+{
+    printf("%d\n", *x);
+    func(x);
+    printf("%d\n", *x);
+}
+
+void constByArg(const int *x)
+{
+    printf("%d\n", *x);
+    constFunc(x);
+    printf("%d\n", *x);
+}
+```
+
+调用 `printf()` 时,CPU 会通过指针从 RAM 中取得 `*x` 的值。很显然,`constByArg()` 会稍微快一点,因为编译器知道 `*x` 是常量,因此不需要在调用 `constFunc()` 之后再次获取它的值。 It’s just printing the same thing. Right? Let’s see the assembly code generated by GCC with optimisations cranked up:
+
+```
+$ gcc -S -Wall -O3 test.c
+$ view test.s
+```
+
+Here’s the full assembly output for `byArg()`:
+
+```
+byArg:
+.LFB23:
+	.cfi_startproc
+	pushq	%rbx
+	.cfi_def_cfa_offset 16
+	.cfi_offset 3, -16
+	movl	(%rdi), %edx
+	movq	%rdi, %rbx
+	leaq	.LC0(%rip), %rsi
+	movl	$1, %edi
+	xorl	%eax, %eax
+	call	__printf_chk@PLT
+	movq	%rbx, %rdi
+	call	func@PLT  # The only instruction that's different in constFoo
+	movl	(%rbx), %edx
+	leaq	.LC0(%rip), %rsi
+	xorl	%eax, %eax
+	movl	$1, %edi
+	popq	%rbx
+	.cfi_def_cfa_offset 8
+	jmp	__printf_chk@PLT
+	.cfi_endproc
+```
+
+The only difference between the generated assembly code for `byArg()` and `constByArg()` is that `constByArg()` has a `call constFunc@PLT`, just like the source code asked. The `const` itself has literally made zero difference.
+
+Okay, that’s GCC. Maybe we just need a sufficiently smart compiler. Is Clang any better?
+
+```
+$ clang -S -Wall -O3 -emit-llvm test.c
+$ view test.ll
+```
+
+Here’s the IR. It’s more compact than assembly, so I’ll dump both functions so you can see what I mean by “literally zero difference except for the call”:
+
+```
+; Function Attrs: nounwind uwtable
+define dso_local void @byArg(i32*) local_unnamed_addr #0 {
+  %2 = load i32, i32* %0, align 4, !tbaa !2
+  %3 = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i64 0, i64 0), i32 %2)
+  tail call void @func(i32* %0) #4
+  %4 = load i32, i32* %0, align 4, !tbaa !2
+  %5 = tail call i32 (i8*, ...) 
@printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i64 0, i64 0), i32 %4) + ret void +} + +; Function Attrs: nounwind uwtable +define dso_local void @constByArg(i32*) local_unnamed_addr #0 { + %2 = load i32, i32* %0, align 4, !tbaa !2 + %3 = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i64 0, i64 0), i32 %2) + tail call void @constFunc(i32* %0) #4 + %4 = load i32, i32* %0, align 4, !tbaa !2 + %5 = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i64 0, i64 0), i32 %4) + ret void +} +``` + +### Something that (sort of) works + +Here’s some code where `const` actually does make a difference: + +``` +void localVar() +{ + int x = 42; + printf("%d\n", x); + constFunc(&x); + printf("%d\n", x); +} + +void constLocalVar() +{ + const int x = 42; // const on the local variable + printf("%d\n", x); + constFunc(&x); + printf("%d\n", x); +} +``` + +Here’s the assembly for `localVar()`, which has two instructions that have been optimised out of `constLocalVar()`: + +``` +localVar: +.LFB25: + .cfi_startproc + subq $24, %rsp + .cfi_def_cfa_offset 32 + movl $42, %edx + movl $1, %edi + movq %fs:40, %rax + movq %rax, 8(%rsp) + xorl %eax, %eax + leaq .LC0(%rip), %rsi + movl $42, 4(%rsp) + call __printf_chk@PLT + leaq 4(%rsp), %rdi + call constFunc@PLT + movl 4(%rsp), %edx # not in constLocalVar() + xorl %eax, %eax + movl $1, %edi + leaq .LC0(%rip), %rsi # not in constLocalVar() + call __printf_chk@PLT + movq 8(%rsp), %rax + xorq %fs:40, %rax + jne .L9 + addq $24, %rsp + .cfi_remember_state + .cfi_def_cfa_offset 8 + ret +.L9: + .cfi_restore_state + call __stack_chk_fail@PLT + .cfi_endproc +``` + +The LLVM IR is a little clearer. The `load` just before the second `printf()` call has been optimised out of `constLocalVar()`: + +``` +; Function Attrs: nounwind uwtable +define dso_local void @localVar() local_unnamed_addr #0 { + %1 = alloca i32, align 4 + %2 = bitcast i32* %1 to i8* + call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %2) #4 + store i32 42, i32* %1, align 4, !tbaa !2 + %3 = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i64 0, i64 0), i32 42) + call void @constFunc(i32* nonnull %1) #4 + %4 = load i32, i32* %1, align 4, !tbaa !2 + %5 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i64 0, i64 0), i32 %4) + call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %2) #4 + ret void +} +``` + +Okay, so, `constLocalVar()` has sucessfully elided the reloading of `*x`, but maybe you’ve noticed something a bit confusing: it’s the same `constFunc()` call in the bodies of `localVar()` and `constLocalVar()`. If the compiler can deduce that `constFunc()` didn’t modify `*x` in `constLocalVar()`, why can’t it deduce that the exact same function call didn’t modify `*x` in `localVar()`? + +The explanation gets closer to the heart of why C `const` is impractical as an optimisation aid. C `const` effectively has two meanings: it can mean the variable is a read-only alias to some data that may or may not be constant, or it can mean the variable is actually constant. If you cast away `const` from a pointer to a constant value and then write to it, the result is undefined behaviour. On the other hand, it’s okay if it’s just a `const` pointer to a value that’s not constant. 
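+
+Before the fuller `constFunc()` example below, here is a minimal sketch of those two meanings (a contrived illustration with invented names, not code from the article's benchmarks):
+
+```
+void twoMeanings(void)
+{
+    int n = 5;
+    const int *ro = &n;   /* const here: ro is only a read-only alias to n */
+    *(int *)ro = 6;       /* legal: the pointee was never actually constant */
+
+    const int k = 5;      /* const here: k really is constant */
+    /* *(int *)&k = 6; */ /* undefined behaviour: writing to a true constant */
+}
+```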
+ +This possible implementation of `constFunc()` shows what that means: + +``` +// x is just a read-only pointer to something that may or may not be a constant +void constFunc(const int *x) +{ + // local_var is a true constant + const int local_var = 42; + + // Definitely undefined behaviour by C rules + doubleIt((int*)&local_var); + // Who knows if this is UB? + doubleIt((int*)x); +} + +void doubleIt(int *x) +{ + *x *= 2; +} +``` + +`localVar()` gave `constFunc()` a `const` pointer to non-`const` variable. Because the variable wasn’t originally `const`, `constFunc()` can be a liar and forcibly modify it without triggering UB. So the compiler can’t assume the variable has the same value after `constFunc()` returns. The variable in `constLocalVar()` really is `const`, though, so the compiler can assume it won’t change — because this time it _would_ be UB for `constFunc()` to cast `const` away and write to it. + +The `byArg()` and `constByArg()` functions in the first example are hopeless because the compiler has no way of knowing if `*x` really is `const`. + +But why the inconsistency? If the compiler can assume that `constFunc()` doesn’t modify its argument when called in `constLocalVar()`, surely it can go ahead an apply the same optimisations to other `constFunc()` calls, right? Nope. The compiler can’t assume `constLocalVar()` is ever run at all. If it isn’t (say, because it’s just some unused extra output of a code generator or macro), `constFunc()` can sneakily modify data without ever triggering UB. + +You might want to read the above explanation and examples a few times, but don’t worry if it sounds absurd: it is. Unfortunately, writing to `const` variables is the worst kind of UB: most of the time the compiler can’t know if it even would be UB. So most of the time the compiler sees `const`, it has to assume that someone, somewhere could cast it away, which means the compiler can’t use it for optimisation. This is true in practice because enough real-world C code has “I know what I’m doing” casting away of `const`. + +In short, a whole lot of things can prevent the compiler from using `const` for optimisation, including receiving data from another scope using a pointer, or allocating data on the heap. Even worse, in most cases where `const` can be used by the compiler, it’s not even necessary. For example, any decent compiler can figure out that `x` is constant in the following code, even without `const`: + +``` +int x = 42, y = 0; +printf("%d %d\n", x, y); +y += x; +printf("%d %d\n", x, y); +``` + +TL;DR: `const` is almost useless for optimisation because + + 1. Except for special cases, the compiler has to ignore it because other code might legally cast it away + 2. In most of the exceptions to #1, the compiler can figure out a variable is constant, anyway + + + +### C++ + +There’s another way `const` can affect code generation if you’re using C++: function overloads. You can have `const` and non-`const` overloads of the same function, and maybe the non-`const` can be optimised (by the programmer, not the compiler) to do less copying or something. + +``` +void foo(int *p) +{ + // Needs to do more copying of data +} + +void foo(const int *p) +{ + // Doesn't need defensive copies +} + +int main() +{ + const int x = 42; + // const-ness affects which overload gets called + foo(&x); + return 0; +} +``` + +On the one hand, I don’t think this is exploited much in practical C++ code. 
On the other hand, to make a real difference, the programmer has to make assumptions that the compiler can’t make because they’re not guaranteed by the language. + +### An experiment with Sqlite3 + +That’s enough theory and contrived examples. How much effect does `const` have on a real codebase? I thought I’d do a test on the Sqlite database (version 3.30.0) because + + * It actually uses `const` + * It’s a non-trivial codebase (over 200KLOC) + * As a database, it includes a range of things from string processing to arithmetic to date handling + * It can be tested with CPU-bound loads + + + +Also, the author and contributors have put years of effort into performance optimisation already, so I can assume they haven’t missed anything obvious. + +#### The setup + +I made two copies of [the source code][2] and compiled one normally. For the other copy, I used this hacky preprocessor snippet to turn `const` into a no-op: + +``` +#define const +``` + +(GNU) `sed` can add that to the top of each file with something like `sed -i '1i#define const' *.c *.h`. + +Sqlite makes things slightly more complicated by generating code using scripts at build time. Fortunately, compilers make a lot of noise when `const` and non-`const` code are mixed, so it was easy to detect when this happened, and tweak the scripts to include my anti-`const` snippet. + +Directly diffing the compiled results is a bit pointless because a tiny change can affect the whole memory layout, which can change pointers and function calls throughout the code. Instead I took a fingerprint of the disassembly (`objdump -d libsqlite3.so.0.8.6`), using the binary size and mnemonic for each instruction. For example, this function: + +``` +000000000005d570 : + 5d570: 4c 8d 05 59 a2 ff ff lea -0x5da7(%rip),%r8 # 577d0 + 5d577: e9 04 fe ff ff jmpq 5d380 + 5d57c: 0f 1f 40 00 nopl 0x0(%rax) +``` + +would turn into something like this: + +``` +sqlite3_blob_read 7lea 5jmpq 4nopl +``` + +I left all the Sqlite build settings as-is when compiling anything. + +#### Analysing the compiled code + +The `const` version of libsqlite3.so was 4,740,704 bytes, about 0.1% larger than the 4,736,712 bytes of the non-`const` version. Both had 1374 exported functions (not including low-level helpers like stuff in the PLT), and a total of 13 had any difference in fingerprint. + +A few of the changes were because of the dumb preprocessor hack. For example, here’s one of the changed functions (with some Sqlite-specific definitions edited out): + +``` +#define LARGEST_INT64 (0xffffffff|(((int64_t)0x7fffffff)<<32)) +#define SMALLEST_INT64 (((int64_t)-1) - LARGEST_INT64) + +static int64_t doubleToInt64(double r){ + /* + ** Many compilers we encounter do not define constants for the + ** minimum and maximum 64-bit integers, or they define them + ** inconsistently. And many do not understand the "LL" notation. + ** So we define our own static constants here using nothing + ** larger than a 32-bit integer constant. + */ + static const int64_t maxInt = LARGEST_INT64; + static const int64_t minInt = SMALLEST_INT64; + + if( r<=(double)minInt ){ + return minInt; + }else if( r>=(double)maxInt ){ + return maxInt; + }else{ + return (int64_t)r; + } +} +``` + +Removing `const` makes those constants into `static` variables. I don’t see why anyone who didn’t care about `const` would make those variables `static`. Removing both `static` and `const` makes GCC recognise them as constants again, and we get the same output. 
Three of the 13 functions had spurious changes because of local `static const` variables like this, but I didn’t bother fixing any of them. + +Sqlite uses a lot of global variables, and that’s where most of the real `const` optimisations came from. Typically they were things like a comparison with a variable being replaced with a constant comparison, or a loop being partially unrolled a step. (The [Radare toolkit][3] was handy for figuring out what the optimisations did.) A few changes were underwhelming. `sqlite3ParseUri()` is 487 instructions, but the only difference `const` made was taking this pair of comparisons: + +``` +test %al, %al +je +cmp $0x23, %al +je +``` + +And swapping their order: + +``` +cmp $0x23, %al +je +test %al, %al +je +``` + +#### Benchmarking + +Sqlite comes with a performance regression test, so I tried running it a hundred times for each version of the code, still using the default Sqlite build settings. Here are the timing results in seconds: + +| const | No const +---|---|--- +Minimum | 10.658s | 10.803s +Median | 11.571s | 11.519s +Maximum | 11.832s | 11.658s +Mean | 11.531s | 11.492s + +Personally, I’m not seeing enough evidence of a difference worth caring about. I mean, I removed `const` from the entire program, so if it made a significant difference, I’d expect it to be easy to see. But maybe you care about any tiny difference because you’re doing something absolutely performance critical. Let’s try some statistical analysis. + +I like using the Mann-Whitney U test for stuff like this. It’s similar to the more-famous t test for detecting differences in groups, but it’s more robust to the kind of complex random variation you get when timing things on computers (thanks to unpredictable context switches, page faults, etc). Here’s the result: + +| const | No const +---|---|--- +N | 100 | 100 +Mean rank | 121.38 | 79.62 +Mann-Whitney U | 2912 +---|--- +Z | -5.10 +2-sided p value | <10-6 +HL median difference | -.056s +95% confidence interval | -.077s – -0.038s + +The U test has detected a statistically significant difference in performance. But, surprise, it’s actually the non-`const` version that’s faster — by about 60ms, or 0.5%. It seems like the small number of “optimisations” that `const` enabled weren’t worth the cost of extra code. It’s not like `const` enabled any major optimisations like auto-vectorisation. Of course, your mileage may vary with different compiler flags, or compiler versions, or codebases, or whatever, but I think it’s fair to say that if `const` were effective at improving C performance, we’d have seen it by now. + +### So, what’s `const` for? + +For all its flaws, C/C++ `const` is still useful for type safety. In particular, combined with C++ move semantics and `std::unique_pointer`s, `const` can make pointer ownership explicit. Pointer ownership ambiguity was a huge pain in old C++ codebases over ~100KLOC, so personally I’m grateful for that alone. + +However, I used to go beyond using `const` for meaningful type safety. I’d heard it was best practices to use `const` literally as much as possible for performance reasons. I’d heard that when performance really mattered, it was important to refactor code to add more `const`, even in ways that made it less readable. That made sense at the time, but I’ve since learned that it’s just not true. 
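+
+As a tiny, contrived sketch of that type-safety win (an invented example, not from the benchmarks above): with `const` in a prototype, an accidental write becomes a compile-time error instead of a silent bug:
+
+```
+#include <stdio.h>
+
+static void printValue(const int *x)
+{
+    printf("%d\n", *x);
+    /* (*x)++;  would be rejected at compile time */
+}
+
+int main(void)
+{
+    int counter = 42;
+    printValue(&counter);  /* const documents and enforces that counter won't change */
+    return 0;
+}
+```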
+ +-------------------------------------------------------------------------------- + +via: https://theartofmachinery.com/2019/08/12/c_const_isnt_for_performance.html + +作者:[Simon Arneaud][a] +选题:[lujun9972][b] +译者:[LazyWolfLin](https://github.com/LazyWolfLin) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://theartofmachinery.com +[b]: https://github.com/lujun9972 +[1]: https://theartofmachinery.com/2019/04/05/d_as_c_replacement.html#const-and-immutable +[2]: https://sqlite.org/src/doc/trunk/README.md +[3]: https://rada.re/r/ From d123a06338dd930b2b1addb783055d3c8ffdbf1a Mon Sep 17 00:00:00 2001 From: LazyWolf Lin Date: Tue, 27 Aug 2019 13:32:05 +0800 Subject: [PATCH 002/202] Translating Why const Doesn't Make C Code Faster. --- .../tech/20190812 Why const Doesn-t Make C Code Faster.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md b/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md index b3aee8e7a4..7af0560671 100644 --- a/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md +++ b/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md @@ -39,14 +39,14 @@ void constByArg(const int *x) } ``` -调用 `printf()` 时,CPU 会通过指针从 RAM 中取得 `*x` 的值。很显然,`constByArg()` 会稍微快一点,因为编译器知道 `*x` 是常量,因此不需要在调用 `constFunc()` 之后再次获取它的值。 It’s just printing the same thing. Right? Let’s see the assembly code generated by GCC with optimisations cranked up: +调用 `printf()` 时,CPU 会通过指针从 RAM 中取得 `*x` 的值。很显然,`constByArg()` 会稍微快一点,因为编译器知道 `*x` 是常量,因此不需要在调用 `constFunc()` 之后再次获取它的值。它仅是打印相同的东西。对吧?让我们来看下 GCC 在如下编译选项下生成的汇编代码: ``` $ gcc -S -Wall -O3 test.c $ view test.s ``` -Here’s the full assembly output for `byArg()`: +以下是函数 `byArg()` 的完整汇编代码: ``` byArg: @@ -73,7 +73,7 @@ byArg: .cfi_endproc ``` -The only difference between the generated assembly code for `byArg()` and `constByArg()` is that `constByArg()` has a `call constFunc@PLT`, just like the source code asked. The `const` itself has literally made zero difference. +函数 `byArg()` 和函数 `constByArg()` 生成的汇编代码中唯一的不同之处是 `constByArg()` 有一句汇编代码 `call constFunc@PLT`,这正是源码中的调用。关键字 `const` 本身并没有造成任何汇编代码上的不同。 Okay, that’s GCC. Maybe we just need a sufficiently smart compiler. Is Clang any better? From 8ed5788ba21d3aaef70b05468c095fbc66ebdbb3 Mon Sep 17 00:00:00 2001 From: LazyWolf Lin Date: Thu, 29 Aug 2019 13:29:06 +0800 Subject: [PATCH 003/202] Translating Why const Doesn't Make C Code Faster. --- .../tech/20190812 Why const Doesn-t Make C Code Faster.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md b/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md index 7af0560671..695fc162fd 100644 --- a/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md +++ b/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md @@ -73,16 +73,16 @@ byArg: .cfi_endproc ``` -函数 `byArg()` 和函数 `constByArg()` 生成的汇编代码中唯一的不同之处是 `constByArg()` 有一句汇编代码 `call constFunc@PLT`,这正是源码中的调用。关键字 `const` 本身并没有造成任何汇编代码上的不同。 +函数 `byArg()` 和函数 `constByArg()` 生成的汇编代码中唯一的不同之处是 `constByArg()` 有一句汇编代码 `call constFunc@PLT`,这正是源码中的调用。关键字 `const` 本身并没有造成任何字面上的不同。 -Okay, that’s GCC. Maybe we just need a sufficiently smart compiler. Is Clang any better? +好了,这是 GCC 的结果。或许我们需要一个更聪明的编译器。Clang 会有更好的表现吗? ``` $ clang -S -Wall -O3 -emit-llvm test.c $ view test.ll ``` -Here’s the IR. 
It’s more compact than assembly, so I’ll dump both functions so you can see what I mean by “literally zero difference except for the call”:
+这是 IR 代码(LLVM 的中间语言)。它比汇编代码更加紧凑,所以我可以把两个函数都导出来,让你可以看清楚我所说的“除了调用外,没有任何字面上的不同”是什么意思:

From 0852afd89aa0d8b79c5c9e62b8ef05d8eb6ef9e4 Mon Sep 17 00:00:00 2001
From: LazyWolf Lin
Date: Sat, 31 Aug 2019 12:58:45 +0800
Subject: [PATCH 004/202] Translating Why const Doesn't Make C Code Faster.

---
 ...12 Why const Doesn-t Make C Code Faster.md | 20 +++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md b/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md
index 695fc162fd..2ce4f7d0ee 100644
--- a/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md
+++ b/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md
@@ -108,7 +108,7 @@
 
 ### Something that (sort of) works
 
-Here's some code where `const` actually does make a difference:
+接下来是一组 `const` 能够真正产生作用的代码:
 
 ```
 void localVar()
@@ -128,7 +128,7 @@ void constLocalVar()
 }
 ```
 
-Here's the assembly for `localVar()`, which has two instructions that have been optimised out of `constLocalVar()`:
+下面是 `localVar()` 的汇编代码,其中有两条指令在 `constLocalVar()` 中被优化掉了:
 
 ```
 localVar:
@@ -164,7 +164,7 @@ localVar:
 .cfi_endproc
 ```
 
-The LLVM IR is a little clearer. The `load` just before the second `printf()` call has been optimised out of `constLocalVar()`:
+LLVM 生成的 IR 代码中更明显。在 `constLocalVar()` 中,第二次调用 `printf()` 之前的 `load` 会被优化掉:
 
 ```
 ; Function Attrs: nounwind uwtable
@@ -182,22 +182,22 @@ define dso_local void @localVar() local_unnamed_addr #0 {
 }
 ```
 
-Okay, so, `constLocalVar()` has sucessfully elided the reloading of `*x`, but maybe you've noticed something a bit confusing: it's the same `constFunc()` call in the bodies of `localVar()` and `constLocalVar()`. If the compiler can deduce that `constFunc()` didn't modify `*x` in `constLocalVar()`, why can't it deduce that the exact same function call didn't modify `*x` in `localVar()`?
+好吧,现在,`constLocalVar()` 成功地优化了 `*x` 的重新读取,但是可能你已经注意到一些问题:`localVar()` 和 `constLocalVar()` 在函数体中做了同样的 `constFunc()` 调用。如果编译器能够推断出 `constFunc()` 没有修改 `constLocalVar()` 中的 `*x`,那为什么不能推断出完全一样的函数调用也没有修改 `localVar()` 中的 `*x`?
 
-The explanation gets closer to the heart of why C `const` is impractical as an optimisation aid. C `const` effectively has two meanings: it can mean the variable is a read-only alias to some data that may or may not be constant, or it can mean the variable is actually constant. If you cast away `const` from a pointer to a constant value and then write to it, the result is undefined behaviour. On the other hand, it's okay if it's just a `const` pointer to a value that's not constant.
+这个解释更贴近于为什么 C 语言的 `const` 不能作为优化手段的核心。C 语言的 `const` 有两个有效的含义:它可以表示这个变量是某个可能是常数也可能不是常数的数据的一个只读别名,或者它可以表示这变量真正的常量。如果你移除了一个指向常量的指针的 `const` 属性并写入数据,那结果将是一个未定义行为。另一方面,如果是一个指向非常量值的 `const` 指针,将就没问题。
 
-This possible implementation of `constFunc()` shows what that means:
+这份 `constFunc()` 的可能实现揭示了这意味着什么:
 
 ```
-// x is just a read-only pointer to something that may or may not be a constant
+// x 是一个指向某个可能是常数也可能不是常数的数据的只读指针
 void constFunc(const int *x)
 {
-  // local_var is a true constant
+  // local_var 是一个真正的常数
  const int local_var = 42;
 
-  // Definitely undefined behaviour by C rules
+  // C 语言规定的未定义行为
  doubleIt((int*)&local_var);
-  // Who knows if this is UB?
+  // 谁知道这是不是一个未定义行为呢? 
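+  //(LCTT 译注:这一行是否为未定义行为取决于调用者:
+  // 若 x 指向 localVar() 中那样的普通变量则合法;
+  // 若 x 指向 constLocalVar() 中那样的真常量则是未定义行为。)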
 doubleIt((int*)x);
 }
 
 void doubleIt(int *x)
 {
  *x *= 2;
 }
 ```
 

From a905b55c781230e3bea8a620f3eeafae211a20ad Mon Sep 17 00:00:00 2001
From: LazyWolf Lin
Date: Tue, 3 Sep 2019 13:35:55 +0800
Subject: [PATCH 005/202] Translating Why const Doesn't Make C Code Faster.

---
 .../tech/20190812 Why const Doesn-t Make C Code Faster.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md b/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md
index 2ce4f7d0ee..d55e61e01b 100644
--- a/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md
+++ b/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md
@@ -207,7 +207,7 @@ void doubleIt(int *x)
 }
 ```
 
-`localVar()` gave `constFunc()` a `const` pointer to non-`const` variable. Because the variable wasn’t originally `const`, `constFunc()` can be a liar and forcibly modify it without triggering UB. So the compiler can’t assume the variable has the same value after `constFunc()` returns. The variable in `constLocalVar()` really is `const`, though, so the compiler can assume it won’t change — because this time it _would_ be UB for `constFunc()` to cast `const` away and write to it.
+`localVar()` 传递给 `constFunc()` 一个指向非 `const` 变量的 `const` 指针。因为这个变量并非常量,`constFunc()` 可以撒个谎并强行修改它而不触发未定义行为。所以,编译器不能断定变量在调用 `constFunc()` 后仍是同样的值。在 `constLocalVar()` 中的变量是真正的常量,因此,编译器可以断定它不会改变——因为在 `constFunc()` 去除变量的 `const` 属性并写入它*将*会是一个未定义行为。

From 93b00be5c3ada13860e77f460d4a294f94420afd Mon Sep 17 00:00:00 2001
From: LazyWolf Lin
Date: Wed, 4 Sep 2019 13:40:50 +0800
Subject: [PATCH 006/202] Translating Why const Doesn't Make C Code Faster.

---
 .../tech/20190812 Why const Doesn-t Make C Code Faster.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md b/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md
index d55e61e01b..a0ff95b5c7 100644
--- a/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md
+++ b/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md
@@ -209,9 +209,9 @@ void doubleIt(int *x)
 
 `localVar()` 传递给 `constFunc()` 一个指向非 `const` 变量的 `const` 指针。因为这个变量并非常量,`constFunc()` 可以撒个谎并强行修改它而不触发未定义行为。所以,编译器不能断定变量在调用 `constFunc()` 后仍是同样的值。在 `constLocalVar()` 中的变量是真正的常量,因此,编译器可以断定它不会改变——因为在 `constFunc()` 去除变量的 `const` 属性并写入它*将*会是一个未定义行为。
 
-The `byArg()` and `constByArg()` functions in the first example are hopeless because the compiler has no way of knowing if `*x` really is `const`.
+第一个例子中的函数 `byArg()` 和 `constByArg()` 则是没有希望被优化的,因为编译器没有任何方法可以知道 `*x` 是否真的是 `const` 常量。
 
-But why the inconsistency? If the compiler can assume that `constFunc()` doesn’t modify its argument when called in `constLocalVar()`, surely it can go ahead an apply the same optimisations to other `constFunc()` calls, right? Nope. The compiler can’t assume `constLocalVar()` is ever run at all. If it isn’t (say, because it’s just some unused extra output of a code generator or macro), `constFunc()` can sneakily modify data without ever triggering UB.
+但是为什么不一致呢?如果编译器能够推断出 `constLocalVar()` 中调用的 `constFunc()` 不会修改它的参数,那么肯定也能继续在其他 `constFunc()` 的调用上实施相同的优化,对吧?不。编译器不能假设 `constLocalVar()` 一定会被运行。 If it isn’t (say, because it’s just some unused extra output of a code generator or macro), `constFunc()` can sneakily modify data without ever triggering UB. 
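+
+下面用一个小例子来说明这种情形(LCTT 译注:假想的示意代码,仅用于说明,并非原文内容)。只要 `constLocalVar()` 这样包含真常量的代码从未被执行,`constFunc()` 就可以放心地写入它收到的指针而不触发未定义行为:
+
+```
+static int data = 1;        // 一个普通的可变全局变量
+
+void constFunc(const int *x)
+{
+    *(int *)x = 2;          // 只要 x 从不指向真常量,这样写就是合法的
+}
+
+int main(void)
+{
+    constFunc(&data);       // 合法:data 并不是常量
+    // constLocalVar();     // 从未被调用,也许只是代码生成器或宏多余的输出
+    return 0;
+}
+```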
You might want to read the above explanation and examples a few times, but don’t worry if it sounds absurd: it is. Unfortunately, writing to `const` variables is the worst kind of UB: most of the time the compiler can’t know if it even would be UB. So most of the time the compiler sees `const`, it has to assume that someone, somewhere could cast it away, which means the compiler can’t use it for optimisation. This is true in practice because enough real-world C code has “I know what I’m doing” casting away of `const`. From bf60790fcc51af6a3a8277b0d3d26fb3f7ef0838 Mon Sep 17 00:00:00 2001 From: LazyWolf Lin Date: Sun, 8 Sep 2019 20:57:39 +0800 Subject: [PATCH 007/202] Translating Why const Doesn't Make C Code Faster. --- .../tech/20190812 Why const Doesn-t Make C Code Faster.md | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md b/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md index a0ff95b5c7..78610c178e 100644 --- a/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md +++ b/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md @@ -224,12 +224,10 @@ y += x; printf("%d %d\n", x, y); ``` -TL;DR: `const` is almost useless for optimisation because - - 1. Except for special cases, the compiler has to ignore it because other code might legally cast it away - 2. In most of the exceptions to #1, the compiler can figure out a variable is constant, anyway - +总而言之,`const` 对优化而言几乎无用,因为: + 1. 除了特殊情况,编译器需要忽略它,因为其他代码可能合法地移除它 + 2. 在 #1 以外地大多数例外中,编译器无论如何都能推断出该变量是常量 ### C++ From ab40a5d68c0f39d41127fc0fd4402c3bb1483f79 Mon Sep 17 00:00:00 2001 From: LazyWolf Lin Date: Mon, 9 Sep 2019 13:30:50 +0800 Subject: [PATCH 008/202] Translating Why const Doesn't Make C Code Faster. --- ...12 Why const Doesn-t Make C Code Faster.md | 38 +++++++++---------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md b/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md index 78610c178e..c6d1ef436b 100644 --- a/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md +++ b/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md @@ -213,9 +213,9 @@ void doubleIt(int *x) 但是为什么不一致呢?如果编译器能够推断出 `constLocalVar()` 中调用的 `constFunc()` 不会修改它的参数,那么肯定也能继续在其他 `constFunc()` 的调用上实施相同的优化,对吧?不。编译器不能假设 `constLocalVar()` 根本没有运行。 If it isn’t (say, because it’s just some unused extra output of a code generator or macro), `constFunc()` can sneakily modify data without ever triggering UB. -You might want to read the above explanation and examples a few times, but don’t worry if it sounds absurd: it is. Unfortunately, writing to `const` variables is the worst kind of UB: most of the time the compiler can’t know if it even would be UB. So most of the time the compiler sees `const`, it has to assume that someone, somewhere could cast it away, which means the compiler can’t use it for optimisation. This is true in practice because enough real-world C code has “I know what I’m doing” casting away of `const`. +你可能需要重复阅读上述说明和示例,但不要担心它听起来很荒谬,它确实是的。不幸的是,对 `const` 变量进行写入是最糟糕的未定义行为:大多数情况下,编译器不知道它是否将会是未定义行为。所以,大多数情况下,编译器看见 `const` 时必须假设它未来可能会被移除掉,这意味着编译器不能使用它进行优化。这在实践中是正确的,因为真实的 C 代码会在“明确知道后果”下移除 `const`。 -In short, a whole lot of things can prevent the compiler from using `const` for optimisation, including receiving data from another scope using a pointer, or allocating data on the heap. 
Even worse, in most cases where `const` can be used by the compiler, it’s not even necessary. For example, any decent compiler can figure out that `x` is constant in the following code, even without `const`:

```
int x = 42, y = 0;
printf("%d %d\n", x, y);
y += x;
printf("%d %d\n", x, y);
```

TL;DR: `const` is almost useless for optimisation because

 1. Except for special cases, the compiler has to ignore it because other code might legally cast it away
 2. In most of the exceptions to #1, the compiler can figure out a variable is constant, anyway



From bf60790fcc51af6a3a8277b0d3d26fb3f7ef0838 Mon Sep 17 00:00:00 2001
From: LazyWolf Lin
Date: Sun, 8 Sep 2019 20:57:39 +0800
Subject: [PATCH 007/202] Translating Why const Doesn't Make C Code Faster.

---
 .../tech/20190812 Why const Doesn-t Make C Code Faster.md | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

diff --git a/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md b/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md
index a0ff95b5c7..78610c178e 100644
--- a/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md
+++ b/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md
@@ -224,12 +224,10 @@ y += x;
 printf("%d %d\n", x, y);
 ```
 
-TL;DR: `const` is almost useless for optimisation because
-
- 1. Except for special cases, the compiler has to ignore it because other code might legally cast it away
- 2. In most of the exceptions to #1, the compiler can figure out a variable is constant, anyway
-
+总而言之,`const` 对优化而言几乎无用,因为:
+ 1. 除了特殊情况,编译器需要忽略它,因为其他代码可能合法地移除它
+ 2. 在 #1 以外的大多数例外中,编译器无论如何都能推断出该变量是常量
 
 ### C++

From ab40a5d68c0f39d41127fc0fd4402c3bb1483f79 Mon Sep 17 00:00:00 2001
From: LazyWolf Lin
Date: Mon, 9 Sep 2019 13:30:50 +0800
Subject: [PATCH 008/202] Translating Why const Doesn't Make C Code Faster.

---
 ...12 Why const Doesn-t Make C Code Faster.md | 38 +++++++++----------
 1 file changed, 19 insertions(+), 19 deletions(-)

diff --git a/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md b/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md
index 78610c178e..c6d1ef436b 100644
--- a/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md
+++ b/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md
@@ -213,9 +213,9 @@ void doubleIt(int *x)
 
 但是为什么不一致呢?如果编译器能够推断出 `constLocalVar()` 中调用的 `constFunc()` 不会修改它的参数,那么肯定也能继续在其他 `constFunc()` 的调用上实施相同的优化,对吧?不。编译器不能假设 `constLocalVar()` 一定会被运行。 If it isn’t (say, because it’s just some unused extra output of a code generator or macro), `constFunc()` can sneakily modify data without ever triggering UB.
 
-You might want to read the above explanation and examples a few times, but don’t worry if it sounds absurd: it is. Unfortunately, writing to `const` variables is the worst kind of UB: most of the time the compiler can’t know if it even would be UB. So most of the time the compiler sees `const`, it has to assume that someone, somewhere could cast it away, which means the compiler can’t use it for optimisation. This is true in practice because enough real-world C code has “I know what I’m doing” casting away of `const`.
+你可能需要重复阅读上述说明和示例,但不要担心它听起来很荒谬,它确实是的。不幸的是,对 `const` 变量进行写入是最糟糕的未定义行为:大多数情况下,编译器不知道它是否将会是未定义行为。所以,大多数情况下,编译器看见 `const` 时必须假设它未来可能会被移除掉,这意味着编译器不能使用它进行优化。这在实践中是正确的,因为真实的 C 代码会在“明确知道后果”下移除 `const`。
 
-In short, a whole lot of things can prevent the compiler from using `const` for optimisation, including receiving data from another scope using a pointer, or allocating data on the heap. Even worse, in most cases where `const` can be used by the compiler, it’s not even necessary. For example, any decent compiler can figure out that `x` is constant in the following code, even without `const`:
+简而言之,很多事情都可以阻止编译器使用 `const` 进行优化,包括从另一个作用域通过指针接收数据,或者在堆空间上分配数据。更糟糕的是,在大部分编译器能够使用 `const` 的情况下,它都不是必须的。例如,任何像样的编译器都能推断出下面代码中的 `x` 是一个常量,甚至都不需要 `const`:
 
-总而言之,`const` 对优化而言几乎无用,因为:
+TL;DR,`const` 对优化而言几乎无用,因为:
 
  1. 除了特殊情况,编译器需要忽略它,因为其他代码可能合法地移除它
  2. 在 #1 以外的大多数例外中,编译器无论如何都能推断出该变量是常量
 
 ### C++
 
-There’s another way `const` can affect code generation if you’re using C++: function overloads. You can have `const` and non-`const` overloads of the same function, and maybe the non-`const` can be optimised (by the programmer, not the compiler) to do less copying or something.
+如果你在使用 `C++`,那么还有另外一个让 `const` 能够影响代码生成的方法:函数重载。你可以用 `const` 和非 `const` 的参数重载同一个函数,而非 `const` 版本的代码可能可以优化(由程序员优化而不是编译器)掉某些拷贝或者其他事情。
 
 ```
 void foo(int *p)
 {
-  // Needs to do more copying of data
+  // 需要做更多的数据拷贝
 }
 
 void foo(const int *p)
 {
-  // Doesn't need defensive copies
+  // 不需要防御性的拷贝
 }
 
 int main()
 {
   const int x = 42;
-  // const-ness affects which overload gets called
+  // const 影响被调用的是哪一个版本的重载
   foo(&x);
   return 0;
 }
 ```
 
-On the one hand, I don’t think this is exploited much in practical C++ code. On the other hand, to make a real difference, the programmer has to make assumptions that the compiler can’t make because they’re not guaranteed by the language.
+一方面,我不认为这会在实际的 `C++` 代码中大量使用。另一方面,要想产生真正的差异,程序员必须做出一些编译器无法做出的假设,因为语言并不保证这些假设成立。
 
-### An experiment with Sqlite3
+### 用 `Sqlite3` 进行实验
 
-That’s enough theory and contrived examples. How much effect does `const` have on a real codebase? I thought I’d do a test on the Sqlite database (version 3.30.0) because
+理论和人为构造的例子已经讲得够多了。那么 `const` 在一个真正的代码库中有多大的影响呢?我将会在 `Sqlite`(版本:3.30.0)的代码库上做一个测试,因为:
 
-  * It actually uses `const`
-  * It’s a non-trivial codebase (over 200KLOC)
-  * As a database, it includes a range of things from string processing to arithmetic to date handling
-  * It can be tested with CPU-bound loads
+  * 它真正地使用了 `const`
+  * 它不是一个简单的代码库(超过 20 万行代码)
+  * 作为一个数据库,它包括了字符串处理、数学计算、日期处理等一系列内容
+  * 它能够用 CPU 密集型负载进行测试
 
-Also, the author and contributors have put years of effort into performance optimisation already, so I can assume they haven’t missed anything obvious.
+此外,作者和贡献者们已经进行了多年的性能优化工作,因此我能断言他们没有错过任何明显的优化。
 
-#### The setup
+#### 配置
 
-I made two copies of [the source code][2] and compiled one normally. For the other copy, I used this hacky preprocessor snippet to turn `const` into a no-op:
+我做了两份[源码][2]拷贝,并且正常编译其中一份。而对于另一份拷贝,我插入了这个预处理代码段,将 `const` 变成一个空操作:
 
 ```
 #define const
 ```
 
-(GNU) `sed` can add that to the top of each file with something like `sed -i '1i#define const' *.c *.h`.
+(GNU) `sed` 可以将一些东西添加到每个文件的顶端,比如 `sed -i '1i#define const' *.c *.h`。
 
-Sqlite makes things slightly more complicated by generating code using scripts at build time. Fortunately, compilers make a lot of noise when `const` and non-`const` code are mixed, so it was easy to detect when this happened, and tweak the scripts to include my anti-`const` snippet.
+在编译期间使用脚本生成 `Sqlite` 代码稍微有点复杂。幸运的是,当 `const` 代码和非 `const` 代码混合时,编译器会产生大量的警告,因此很容易发现它并调整脚本来包含我的反 `const` 代码段。
 
-Directly diffing the compiled results is a bit pointless because a tiny change can affect the whole memory layout, which can change pointers and function calls throughout the code. 
Instead I took a fingerprint of the disassembly (`objdump -d libsqlite3.so.0.8.6`), using the binary size and mnemonic for each instruction. For example, this function:
+直接比较编译结果毫无意义,因为任意微小的改变就会影响整个内存布局,这可能会改变整个代码中的指针和函数调用。因此,我用每个指令的二进制大小和助记符作为识别码(`objdump -d libsqlite3.so.0.8.6`)。举个例子,这个函数:
 
 ```
 000000000005d570 <sqlite3_blob_read>:
    5d570:	4c 8d 05 59 a2 ff ff 	lea    -0x5da7(%rip),%r8        # 577d0 <sqlite3_blob_close>
    5d577:	e9 04 fe ff ff       	jmpq   5d380 <blobReadWrite>
    5d57c:	0f 1f 40 00          	nopl   0x0(%rax)
 ```
 
-would turn into something like this:
+将会变成这样:
 
 ```
 sqlite3_blob_read 7lea 5jmpq 4nopl
 ```
 
-I left all the Sqlite build settings as-is when compiling anything.
+在编译时,我保留了所有 `Sqlite` 的编译设置。
 
 #### Analysing the compiled code
 

From b6c2e55b59b1cb458835036c20c8172a66bb17ae Mon Sep 17 00:00:00 2001
From: LazyWolf Lin
Date: Tue, 10 Sep 2019 17:55:45 +0800
Subject: [PATCH 009/202] Translating Why const Doesn't Make C Code Faster.

---
 ...12 Why const Doesn-t Make C Code Faster.md | 46 +++++++++----------
 1 file changed, 23 insertions(+), 23 deletions(-)

diff --git a/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md b/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md
index c6d1ef436b..819f73440e 100644
--- a/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md
+++ b/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md
@@ -7,14 +7,14 @@
 [#]: via: (https://theartofmachinery.com/2019/08/12/c_const_isnt_for_performance.html)
 [#]: author: (Simon Arneaud https://theartofmachinery.com)
 
-为什么 `const` 不能让 C 代码跑得更快?
+为什么 `const` 不能让 `C` 代码跑得更快?
 ======
 
-在几个月前的一篇文章里,我曾说过“[有个一个流行的传言,`const` 可以帮助编译器优化 C 和 C++ 代码][1]”。我觉得我需要解释一下,尤其是曾经我自己也以为这是显然对的。我将会用一些理论和人工构造的例子论证,然后在一个真正的代码库 Sqlite 上做一些实验和基准测试。
+在几个月前的一篇文章里,我曾说过“[有一个流行的传言,`const` 有助于编译器优化 `C` 和 `C++` 代码][1]”。我觉得我需要解释一下,尤其是曾经我自己也以为这是显然对的。我将会用一些理论和人工构造的例子论证,然后在一个真正的代码库 `Sqlite` 上做一些实验和基准测试。
 
 ### 一个简单的测试
 
-让我们从一个最简单、最明显的例子开始,以前认为这是一个 `const` 让 C 代码跑得更快的例子。首先,假设我们有如下两个函数声明:
+让我们从一个最简单、最明显的例子开始,以前认为这是一个 `const` 让 `C` 代码跑得更快的例子。首先,假设我们有如下两个函数声明:
 
 ```
 void func(int *x);
@@ -39,14 +39,14 @@ void constByArg(const int *x)
 }
 ```
 
-调用 `printf()` 时,CPU 会通过指针从 RAM 中取得 `*x` 的值。很显然,`constByArg()` 会稍微快一点,因为编译器知道 `*x` 是常量,因此不需要在调用 `constFunc()` 之后再次获取它的值。它仅是打印相同的东西。对吧?让我们来看下 GCC 在如下编译选项下生成的汇编代码:
+调用 `printf()` 时,CPU 会通过指针从 RAM 中取得 `*x` 的值。很显然,`constByArg()` 会稍微快一点,因为编译器知道 `*x` 是常量,因此不需要在调用 `constFunc()` 之后再次获取它的值。它仅是打印相同的东西。对吧?让我们来看下 `GCC` 在如下编译选项下生成的汇编代码:
 
@@ -73,16 +73,16 @@ byArg:
 .cfi_endproc
 ```
 
-函数 `byArg()` 和函数 `constByArg()` 生成的汇编代码中唯一的不同之处是 `constByArg()` 有一句汇编代码 `call constFunc@PLT`,这正是源码中的调用。关键字 `const` 本身并没有造成任何字面上的不同。
+函数 `byArg()` 和函数 `constByArg()` 生成的汇编代码中唯一的不同之处是 `constByArg()` 有一句汇编代码 `call constFunc@PLT`,这正是源码中的调用。关键字 `const` 本身并没有造成任何字面上的不同。
 
-好了,这是 GCC 的结果。或许我们需要一个更聪明的编译器。Clang 会有更好的表现吗?
+好了,这是 `GCC` 的结果。或许我们需要一个更聪明的编译器。`Clang` 会有更好的表现吗?
 
-这是 IR 代码(LLVM 的中间语言)。它比汇编代码更加紧凑,所以我可以把两个函数都导出来,让你可以看清楚我所说的“除了调用外,没有任何字面上的不同”是什么意思:
+这是 `IR` 代码(`LLVM` 的中间语言)。它比汇编代码更加紧凑,所以我可以把两个函数都导出来,让你可以看清楚我所说的“除了调用外,没有任何字面上的不同”是什么意思:
 
@@ -106,7 +106,7 @@ define dso_local void @constByArg(i32*) local_unnamed_addr #0 {
 }
 ```
 
-### Something that (sort of) works
+### 某些有作用的东西
 
 接下来是一组 `const` 能够真正产生作用的代码:
 
@@ -164,7 +164,7 @@ localVar:
 .cfi_endproc
 ```
 
-LLVM 生成的 IR 代码中更明显。在 `constLocalVar()` 中,第二次调用 `printf()` 之前的 `load` 会被优化掉:
+`LLVM` 生成的 `IR` 代码中更明显。在 `constLocalVar()` 中,第二次调用 `printf()` 之前的 `load` 会被优化掉:
 
@@ -184,7 +184,7 @@ define dso_local void @localVar() local_unnamed_addr #0 {
 
 好吧,现在,`constLocalVar()` 成功地优化了 `*x` 的重新读取,但是可能你已经注意到一些问题:`localVar()` 和 `constLocalVar()` 在函数体中做了同样的 `constFunc()` 调用。如果编译器能够推断出 `constFunc()` 没有修改 `constLocalVar()` 中的 `*x`,那为什么不能推断出完全一样的函数调用也没有修改 `localVar()` 中的 `*x`?
 
-这个解释更贴近于为什么 C 语言的 `const` 不能作为优化手段的核心。C 语言的 `const` 有两个有效的含义:它可以表示这个变量是某个可能是常数也可能不是常数的数据的一个只读别名,或者它可以表示这变量真正的常量。如果你移除了一个指向常量的指针的 `const` 属性并写入数据,那结果将是一个未定义行为。另一方面,如果是一个指向非常量值的 `const` 指针,将就没问题。
+这个解释更贴近于为什么 `C` 语言的 `const` 不能作为优化手段的核心。`C` 语言的 `const` 有两个有效的含义:它可以表示这个变量是某个可能是常数也可能不是常数的数据的一个只读别名,或者它可以表示这个变量是真正的常量。如果你移除了一个指向常量的指针的 `const` 属性并写入数据,那结果将是一个未定义行为。另一方面,如果是一个指向非常量值的 `const` 指针,那就没问题。
 
 这份 `constFunc()` 的可能实现揭示了这意味着什么:
 
@@ -213,7 +213,7 @@ void doubleIt(int *x)
 
-你可能需要重复阅读上述说明和示例,但不要担心它听起来很荒谬,它确实是的。不幸的是,对 `const` 变量进行写入是最糟糕的未定义行为:大多数情况下,编译器不知道它是否将会是未定义行为。所以,大多数情况下,编译器看见 `const` 时必须假设它未来可能会被移除掉,这意味着编译器不能使用它进行优化。这在实践中是正确的,因为真实的 C 代码会在“明确知道后果”下移除 `const`。
+你可能需要重复阅读上述说明和示例,但不要担心它听起来很荒谬,它确实是的。不幸的是,对 `const` 变量进行写入是最糟糕的未定义行为:大多数情况下,编译器不知道它是否将会是未定义行为。所以,大多数情况下,编译器看见 `const` 时必须假设它未来可能会被移除掉,这意味着编译器不能使用它进行优化。这在实践中是正确的,因为真实的 `C` 代码会在“明确知道后果”下移除 `const`。
 
@@ -253,11 +253,11 @@ int main()
 }
 ```
 
-此外,作者和贡献者们已经进行了多年的性能优化工作,因此我能断言他们没有错过任何明显的优化。
+此外,作者和贡献者们已经进行了多年的性能优化工作,因此我能确定他们没有错过任何有显著效果的优化。
 
@@ -297,11 +297,11 @@ sqlite3_blob_read 7lea 5jmpq 4nopl
 
 在编译时,我保留了所有 `Sqlite` 的编译设置。
 
-#### Analysing the compiled code
+#### 分析编译结果
 
-The `const` version of `libsqlite3.so` was 4,740,704 bytes, about 0.1% larger than the 4,736,712 bytes of the non-`const` version. Both had 1374 exported functions (not including low-level helpers like stuff in the PLT), and a total of 13 had any difference in fingerprint.
+`const` 版本的 `libsqlite3.so` 的大小是 4,740,704 byte,大约比 4,736,712 byte 的非 `const` 版本大了 0.1%。在全部 1374 个导出函数(不包括类似 PLT 里的底层辅助函数)中,一共有 13 个函数的识别码不一致。
 
-A few of the changes were because of the dumb preprocessor hack. For example, here's one of the changed functions (with some Sqlite-specific definitions edited out):
+其中的一些改变是由于插入的预处理代码。举个例子,这里有一个发生了更改的函数(已经删去一些 `Sqlite` 特有的定义):
 
 ```
 #define LARGEST_INT64 (0xffffffff|(((int64_t)0x7fffffff)<<32))
 #define SMALLEST_INT64 (((int64_t)-1) - LARGEST_INT64)
 
 static int64_t doubleToInt64(double r){
   /*
   ** Many compilers we encounter do not define constants for the
   ** minimum and maximum 64-bit integers, or they define them
   ** inconsistently.  And many do not understand the "LL" notation.
   ** So we define our own static constants here using nothing
   ** larger than a 32-bit integer constant.
   */
   static const int64_t maxInt = LARGEST_INT64;
   static const int64_t minInt = SMALLEST_INT64;
 
   if( r<=(double)minInt ){
     return minInt;
   }else if( r>=(double)maxInt ){
     return maxInt;
   }else{
     return (int64_t)r;
   }
 }
 ```
 
-Removing `const` makes those constants into `static` variables. I don't see why anyone who didn't care about `const` would make those variables `static`. Removing both `static` and `const` makes GCC recognise them as constants again, and we get the same output. Three of the 13 functions had spurious changes because of local `static const` variables like this, but I didn't bother fixing any of them.
+删去 `const` 使得这些常量变成了 `static` 变量。我不明白不关心 `const` 的人为什么要把这些变量声明为 `static`。同时删去 `static` 和 `const` 会让 GCC 再次认为它们是常量,而我们将得到同样的编译输出。由于像这样的局部 `static const` 变量,13 个函数中有 3 个函数产生了假的变化,但我一个都不打算修复它们。
 
-`Sqlite` uses a lot of global variables, and that's where most of the real `const` optimisations came from. Typically they were things like a comparison with a variable being replaced with a constant comparison, or a loop being partially unrolled a step. (The [Radare toolkit][3] was handy for figuring out what the optimisations did.) A few changes were underwhelming. `sqlite3ParseUri()` is 487 instructions, but the only difference `const` made was taking this pair of comparisons:
 
 #### Benchmarking
 
-Sqlite comes with a performance regression test, so I tried running it a hundred times for each version of the code, still using the default Sqlite build settings.
+`Sqlite` comes with a performance regression test, so I tried running it a hundred times for each version of the code, still using the default `Sqlite` build settings. 
Here are the timing results in seconds: | const | No const ---|---|--- From 485fa2026c7ed5bf58cb0d45a3f0e07bd3759167 Mon Sep 17 00:00:00 2001 From: Xingyu Wang Date: Tue, 10 Sep 2019 23:29:08 +0800 Subject: [PATCH 010/202] TSL --- sources/talk/20190902 Why I use Java.md | 106 --------------------- translated/talk/20190902 Why I use Java.md | 105 ++++++++++++++++++++ 2 files changed, 105 insertions(+), 106 deletions(-) delete mode 100644 sources/talk/20190902 Why I use Java.md create mode 100644 translated/talk/20190902 Why I use Java.md diff --git a/sources/talk/20190902 Why I use Java.md b/sources/talk/20190902 Why I use Java.md deleted file mode 100644 index eb4bc0f2b3..0000000000 --- a/sources/talk/20190902 Why I use Java.md +++ /dev/null @@ -1,106 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: (wxy) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Why I use Java) -[#]: via: (https://opensource.com/article/19/9/why-i-use-java) -[#]: author: (Chris Hermansen https://opensource.com/users/clhermansen) - -Why I use Java -====== -There are probably better languages than Java, depending on work -requirements. But I haven't seen anything yet to pull me away. -![Coffee beans][1] - -I believe I started using Java in 1997, not long after [Java 1.1 saw the light of day][2]. Since that time, by and large, I've really enjoyed programming in Java; although I confess these days, I'm as likely to be found writing [Groovy][3] scripts as "serious code" in Java. - -Coming from a background in [FORTRAN][4], [PL/1][5], [Pascal][6], and finally [C][7], I found a lot of things to like about Java. Java was my first significant hands-on experience with [object-oriented programming][8]. By then, I had been programming for about 20 years, and it's probably safe to say I had some ideas about what mattered and what didn't. - -### Debugging as a key language feature - -I really hated wasting time tracking down obscure bugs caused by my code carelessly iterating off the end of an array, especially back in the days of programming in FORTRAN on IBM mainframes. Another subtle problem that cropped up from time to time was calling a subroutine with a four-byte integer argument that was expecting two bytes; on small-endian architecture, this was often a benign bug, but on big-endian machines, the value of the top two bytes was usually, but not always, zero. - -Debugging in that batch environment was pretty awkward, too—poring through core dumps or inserting print statements, which themselves could move bugs around or even make them disappear. - -So my early experiences with Pascal, first on [MTS][9], then using the same MTS compiler on [IBM OS/VS1][10], made my life a lot easier. Pascal's [strong and static typing][11] were a big part of the win here, and every Pascal compiler I have used inserts run-time checks on array bounds and ranges, so bugs are detected at the point of occurrence. When we moved most of our work to a Unix system in the early 1980s, porting the Pascal code was a straightforward task. - -### Finding the right amount of syntax - -But for all the things I liked about Pascal, my code was wordy, and the syntax seemed to have a tendency to slightly obscure the code; for example, using: - - -``` -`if … then begin … end else … end` -``` - -instead of: - - -``` -`if (…) { … } else { … }` -``` - -in C and similar languages. Also, some things were quite hard to do in Pascal and much easier to do in C. 
But, as I began to use C more and more, I found myself running into the same kind of errors I used to commit in FORTRAN—running off the end of arrays, for example—that were not detected at the point of the original error, but only through their adverse effects later in the program's execution. Fortunately, I was no longer living in the batch environment and had great debugging tools at hand. Still, C gave me a little too much flexibility for my own good. - -When I discovered [awk][12], I found I had a nice counterpoint to C. At that time, a lot of my work involved transforming field data and creating reports. I found I could do a surprising amount of that with awk, coupled with other Unix command-line tools like sort, sed, cut, join, paste, comm, and so on. Essentially, these tools gave me something a lot like a relational database manager for text files that had a column-oriented structure, which was the way a lot of our field data came in. Or, if not exactly in that format, most of the time the data could be unloaded from a relational database or from some kind of binary format into that column-oriented structure. - -String handling, [regular expressions][13], and [associative arrays][14] supported by awk, as well as the basic nature of awk (it's really a data-transformation pipeline), fit my needs very well. When confronted with binary data files, complicated data structuring, and absolute performance needs, I would still revert to C; but as I used awk more and more, I found C's very basic string support more and more frustrating. As time went on, more and more often I would end up using C only when I had to—and probably overusing awk the rest of the time. - -### Java is the right level of abstraction - -And then along came Java. It looked pretty good right out of the gate—a relatively terse syntax reminiscent of C, or at least, more so than Pascal or any of those other earlier experiences. It was strongly typed, so a lot of programming errors would get caught at compile time. It didn't seem to require too much object-oriented learning to get going, which was a good thing, as I was barely familiar with [OOP design patterns][15] at the time. But even in the earliest days, I liked the ideas behind its simplified [inheritance model][16]. (Java allows for single inheritance with interfaces provided to enrich the paradigm somewhat.) - -And it seemed to come with a rich library of functionality (the concept of "batteries included") that worked at the right level to directly meet my needs. Finally, I found myself rapidly coming to like the idea of both data and behavior being grouped together in objects. This seemed like a great way to explicitly control interactions among data—much better than enormous parameter lists or uncontrolled access to global variables. - -Since then, Java has grown to be the Helvetic military knife in my programming toolbox. I will still write stuff occasionally in awk or use Linux command-line utilities like cut, sort, or sed when they're obviously and precisely the straightforward way to solve the problem at hand. I doubt if I've written 50 lines of C in the last 20 years, though; Java has completely replaced C for my needs. - -In addition, Java has been improving over time. First of all, it's become much more performant. 
And it's added some really useful capabilities, like [try with resources][17], which very nicely cleans up verbose and somewhat messy code dealing with error handling during file I/O, for example; or [lambdas][18], which provide the ability to declare functions and pass them as parameters, instead of the old approach, which required creating classes or interfaces to "host" those functions; or [streams][19], which encapsulate iterative behavior in functions, creating an efficient data-transformation pipeline materialized in the form of chained function calls. - -### Java is getting better and better - -A number of language designers have looked at ways to radically improve the Java experience. For me, most of these aren't yet of great interest; again, that's more a reflection of my typical workflow and (much) less a function of the features those languages bring. But one of these evolutionary steps has become an indispensable part of my programming arsenal: [Groovy][20]. Groovy has become my go-to solution when I run into a small problem that needs a small solution. Moreover, it's highly compatible with Java. For me, Groovy fills the same niche that Python fills for a lot of other people—it's compact, DRY (don't repeat yourself), and expressive (lists and dictionaries have full language support). I also make use of [Grails][21], which uses Groovy to provide a streamlined web framework for very performant and useful Java web applications. - -### But is Java still open source? - -Recently, growing support for [OpenJDK][22] has further improved my comfort level with Java. A number of companies are supporting OpenJDK in various ways, including [AdoptOpenJDK, Amazon, and Red Hat][23]. In one of my bigger and longer-term projects, we use AdoptOpenJDK to [generate customized runtimes on several desktop platforms][24]. - -Are there better languages than Java? I'm sure there are, depending on your work needs. But I'm still a very happy Java user, and I haven't seen anything yet that threatens to pull me away. 
- --------------------------------------------------------------------------------- - -via: https://opensource.com/article/19/9/why-i-use-java - -作者:[Chris Hermansen][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://opensource.com/users/clhermansen -[b]: https://github.com/lujun9972 -[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/java-coffee-beans.jpg?itok=3hkjX5We (Coffee beans) -[2]: https://en.wikipedia.org/wiki/Java_version_history -[3]: https://en.wikipedia.org/wiki/Apache_Groovy -[4]: https://en.wikipedia.org/wiki/Fortran -[5]: https://en.wikipedia.org/wiki/PL/I -[6]: https://en.wikipedia.org/wiki/Pascal_(programming_language) -[7]: https://en.wikipedia.org/wiki/C_(programming_language) -[8]: https://en.wikipedia.org/wiki/Object-oriented_programming -[9]: https://en.wikipedia.org/wiki/Michigan_Terminal_System -[10]: https://en.wikipedia.org/wiki/OS/VS1 -[11]: https://stackoverflow.com/questions/11889602/difference-between-strong-vs-static-typing-and-weak-vs-dynamic-typing -[12]: https://en.wikipedia.org/wiki/AWK -[13]: https://en.wikipedia.org/wiki/Regular_expression -[14]: https://en.wikipedia.org/wiki/Associative_array -[15]: https://opensource.com/article/19/7/understanding-software-design-patterns -[16]: https://www.w3schools.com/java/java_inheritance.asp -[17]: https://www.baeldung.com/java-try-with-resources -[18]: https://www.baeldung.com/java-8-lambda-expressions-tips -[19]: https://www.tutorialspoint.com/java8/java8_streams -[20]: https://groovy-lang.org/ -[21]: https://grails.org/ -[22]: https://openjdk.java.net/ -[23]: https://en.wikipedia.org/wiki/OpenJDK -[24]: https://opensource.com/article/19/4/java-se-11-removing-jnlp diff --git a/translated/talk/20190902 Why I use Java.md b/translated/talk/20190902 Why I use Java.md new file mode 100644 index 0000000000..5cf445e8e2 --- /dev/null +++ b/translated/talk/20190902 Why I use Java.md @@ -0,0 +1,105 @@ +[#]: collector: (lujun9972) +[#]: translator: (wxy) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (Why I use Java) +[#]: via: (https://opensource.com/article/19/9/why-i-use-java) +[#]: author: (Chris Hermansen https://opensource.com/users/clhermansen) + +我为什么使用 Java +====== + +> 根据你的工作需要,可能有比 Java 更好的语言,但是我还没有看到任何能把我拉走的语言。 + +![Coffee beans][1] + +我记得我是从 1997 年开始使用 Java 的,没多久 [Java 1.1 就露出了曙光][2]。从那时起,总的来说,我非常喜欢用 Java 编程;虽然我得承认,这些日子我经常像在 Java 中编写“严肃的代码”一样编写 [Groovy][3] 脚本。 + +我的来自 [FORTRAN][4]、[PL/1][5]、[Pascal][6] 以及最后的 [C][7] 的背景,我发现了很多关于 Java 的东西。Java 是我[面向对象编程][8]的第一次重要实践经验。到那时,我已经编程了大约 20 年,而且可以说我对什么重要、什么不重要有了一些看法。 + +### 调试是一个关键的语言特性 + +我真的很讨厌浪费时间追踪由我的代码不小心迭代到数组末尾引起的模糊错误,特别是在 IBM 大型机上的 FORTRAN 编程时代。另一个不时出现的微妙问题是调用一个子程序,该子程序带有一个四字节整数参数,而预期有两个字节;在小端架构上,这通常是一个良性的错误,但在大端机器上,前两个字节的值通常但不总是为零。 + +在该批处理环境中进行调试非常尴尬,通过核心转储或插入打印语句进行调试,这些语句本身会移动错误的位置甚至使它们消失。 + +所以我早期使用 Pascal 的经验,先是在 [MTS][9] 上,然后是在 [IBM OS/VS1][10] 上使用相同的 MTS 编译器,让我的生活变得更加轻松。 Pascal 的[强类型和静态类型][11]是胜利的重要组成部分,我使用的每个 Pascal 编译器都在数组的边界和范围上插入运行时检查,因此错误可以在发生时检测到。当我们在 20 世纪 80 年代早期将大部分工作转移到 Unix 系统时,移植 Pascal 代码是一项简单的任务。 + +### 找到适量的语法 + +但是对于我喜欢的 Pascal 的所有事情,我的代码很冗长,而且语法似乎倾向于略微模糊代码;例如,使用: + +``` +if ... then begin ... end else ... end +``` + +而不是 C 或类似语言中的: + +``` +if (...) { ... } else { ... 
} +``` + +另外,有些事情在 Pascal 中很难完成,在 C 中更容易。但是,当我开始越来越多地使用 C 时,我发现自己遇到了我曾经在 FORTRAN 中遇到的同样类型的错误,例如,超出数组边界。在原始的错误点未检测到数组结束,而仅在程序执行后期才会检测到它们的不利影响。幸运的是,我不再生活在批处理环境中,并且手头有很好的调试工具。不过,为我自己好,C 给了我一点灵活性。 + +当我发现 [awk][12] 时,我对它与 C 做了一个很好的对比。那时,我的很多工作都涉及转换字段数据并创建报告。我发现用 `awk` 可以做到惊人的数量,加上其他 Unix 命令行工具,如 `sort`、`sed`、`cut`、`join`、`paste`、`comm` 等等。从本质上讲,这些工具给了我一个像是文本文件的关系数据库管理器,这种文本文件具有列式结构,是我们很多字段数据出现的方式。或者,如果不是那种格式,大部分时候该数据可以从关系数据库或某种二进制格式导出到列式的结构中。 + +`awk` 支持的字符串处理、[正则表达式][13]和[关联数组][14],以及 `awk`(它实际上是一个数据转换管道)的基本特性,非常符合我的需求。当面对二进制数据文件,复杂的数据结构和绝对性能需求时,我仍然会转回到 C;但随着我越来越多地使用 `awk`,我发现 C 的非常基础的字符串支持越来越令人沮丧。随着时间的推移,越来越多的时候我只会在必须时才使用 C,并且可能在其余的时候里大量使用 `awk`。 + +### Java 的抽象层级合适 + +然后是 Java。它看起来相当不错 —— 一个相对简洁的语法,让人联想到 C,或者这种相似性至少比 Pascal 或其他任何早期的语言更为明显。它是强类型的,因此很多编程错误会在编译时被捕获。它似乎并不需要过多的面向对象的学习就能开始,这是一件好事,因为我当时对 [OOP 设计模式][15]几乎完全不熟悉。但即使在最初的日子里,我也喜欢它的简化[继承模型][16]背后的思想。(Java 允许使用提供的接口进行单继承,以在某种程度上丰富范例。) + +它似乎带有丰富的功能库(即“自备电池”的概念),在适当的水平上直接满足了我的需求。最后,我发现自己很快就会想到数据和行为在对象中组合在一起的想法。这似乎是明确控制数据之间交互的好方法 —— 比大量的参数列表或对全局变量的不受控制的访问要好得多。 + +从那以后,Java 在我的编程工具箱中成为了 Helvetic 军刀。我仍然偶尔会在 `awk` 中编写内容,或者使用 Linux 命令行实用程序(如 `cut`、`sort` 或 `sed`),因为它们显然是解决手头问题的直接方法。我怀疑过去 20 年我是否写过 50 行 C 语言代码;Java 完全满足了我的需求。 + +此外,Java 一直在不断改进。首先,它变得更加高效。并且它添加了一些非常有用的功能,例如[可以 try 测试资源][17],例如它可以很好地清理在文件 I/O 期间冗长和有点混乱的错误处理代码;或 [lambdas] [18],它们提供了声明函数并将它们作为参数传递的能力,而旧方法需要创建类或接口来“托管”这些函数; 或[流][19],它封装了函数中的迭代行为,创建了以链式函数调用形式实现的高效数据转换管道。 + +### Java 越来越好 + +许多语言设计者研究了从根本上改善 Java 体验的方法。对我来说,其中大部分还没有引起人们的极大兴趣;再次,这更多地反映了我的典型工作流程,并且(更多地)减少了这些语言带来的功能。但其中一个演化步骤已经成为我的编程工具中不可或缺的一部分:[Groovy][20]。当我遇到一个需要小解决方案的小问题时,Groovy 已成为我的首选解决方案。而且,它与 Java 高度兼容。对我来说,Groovy 填补了 Python 为许多其他人填充的相同利基 —— 它紧凑、DRY(不要重复自己)和具有表达性(列表和词典有完整的语言支持)。我还使用了 [Grails][21],它使用 Groovy 为非常高性能和有用的 Java Web 应用程序提供简化的 Web 框架。 + +### Java 仍然开源吗? + +最近,对 [OpenJDK][22] 越来越多的支持进一步提高了我对 Java 的舒适度。许多公司以各种方式支持 OpenJDK,包括 [AdoptOpenJDK、Amazon 和 Red Hat][23]。在我的一个更大和更长期的项目中,我们使用 AdoptOpenJDK [来在几个桌面平台上生成自定义的运行时环境][24]。 + +有没有比 Java 更好的语言?我确信有,这取决于你的工作需要。但我仍然是一个非常高兴的 Java 用户,我还没有看到任何可能会让我失望的东西。 + +-------------------------------------------------------------------------------- + +via: https://opensource.com/article/19/9/why-i-use-java + +作者:[Chris Hermansen][a] +选题:[lujun9972][b] +译者:[wxy](https://github.com/wxy) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensource.com/users/clhermansen +[b]: https://github.com/lujun9972 +[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/java-coffee-beans.jpg?itok=3hkjX5We (Coffee beans) +[2]: https://en.wikipedia.org/wiki/Java_version_history +[3]: https://en.wikipedia.org/wiki/Apache_Groovy +[4]: https://en.wikipedia.org/wiki/Fortran +[5]: https://en.wikipedia.org/wiki/PL/I +[6]: https://en.wikipedia.org/wiki/Pascal_(programming_language) +[7]: https://en.wikipedia.org/wiki/C_(programming_language) +[8]: https://en.wikipedia.org/wiki/Object-oriented_programming +[9]: https://en.wikipedia.org/wiki/Michigan_Terminal_System +[10]: https://en.wikipedia.org/wiki/OS/VS1 +[11]: https://stackoverflow.com/questions/11889602/difference-between-strong-vs-static-typing-and-weak-vs-dynamic-typing +[12]: https://en.wikipedia.org/wiki/AWK +[13]: https://en.wikipedia.org/wiki/Regular_expression +[14]: https://en.wikipedia.org/wiki/Associative_array +[15]: https://opensource.com/article/19/7/understanding-software-design-patterns +[16]: https://www.w3schools.com/java/java_inheritance.asp +[17]: 
https://www.baeldung.com/java-try-with-resources +[18]: https://www.baeldung.com/java-8-lambda-expressions-tips +[19]: https://www.tutorialspoint.com/java8/java8_streams +[20]: https://groovy-lang.org/ +[21]: https://grails.org/ +[22]: https://openjdk.java.net/ +[23]: https://en.wikipedia.org/wiki/OpenJDK +[24]: https://opensource.com/article/19/4/java-se-11-removing-jnlp From e7cfe337f3c05496016ac196f1c388e7f0403695 Mon Sep 17 00:00:00 2001 From: LazyWolf Lin Date: Wed, 11 Sep 2019 13:38:56 +0800 Subject: [PATCH 011/202] Translating Why const Doesn't Make C Code Faster. --- ...12 Why const Doesn-t Make C Code Faster.md | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md b/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md index 819f73440e..6d884b10a6 100644 --- a/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md +++ b/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md @@ -106,7 +106,7 @@ define dso_local void @constByArg(i32*) local_unnamed_addr #0 { } ``` -### 一些有效的代码 +### 某些有作用的东西 接下来是一组 `const` 能够真正产生作用的代码: @@ -280,7 +280,7 @@ int main() 在编译期间使用脚本生成 `Sqlite` 代码稍微有点复杂。幸运的是当 `const` 代码和非 `const` 代码混合时,编译器会产生了大量的提醒,因此很容易发现它并调整脚本来包含我的反 `const` 代码段。 -直接比较编译结果毫无意义,因为任意微小的改变就会影响整个内存布局,这可能会改变整个代码中的指针和函数调用。因此,我用每个指令的二进制大小和汇编代码作为反汇编代码(`objdump -d libsqlite3.so.0.8.6`)。举个例子,这个函数: +直接比较编译结果毫无意义,因为任意微小的改变就会影响整个内存布局,这可能会改变整个代码中的指针和函数调用。因此,我用每个指令的二进制大小和汇编代码作为识别码(`objdump -d libsqlite3.so.0.8.6`)。举个例子,这个函数: ``` 000000000005d570 : @@ -297,11 +297,11 @@ sqlite3_blob_read 7lea 5jmpq 4nopl 在编译时,我保留了所有 `Sqlite` 的编译设置。 -#### 分析编译后的代码 +#### 分析编译结果 -The `const` version of `libsqlite3.so` was 4,740,704 bytes, about 0.1% larger than the 4,736,712 bytes of the non-`const` version. Both had 1374 exported functions (not including low-level helpers like stuff in the PLT), and a total of 13 had any difference in fingerprint. +`const` 版本的 `libsqlite3.so` 的大小是 4,740,704 byte,大约比 4,736,712 byte 的非 `const` 版本大了 0.1% 。在全部 1374 个导出函数(不包括类似 PLT 里的底层辅助函数)中,一共有 13 个函数的识别码不一致。 -A few of the changes were because of the dumb preprocessor hack. For example, here’s one of the changed functions (with some Sqlite-specific definitions edited out): +其中的一些改变是由于插入的预处理代码。举个例子,这里有一个发生了更改的函数(已经删去一些 `Sqlite` 特有的定义): ``` #define LARGEST_INT64 (0xffffffff|(((int64_t)0x7fffffff)<<32)) @@ -328,9 +328,9 @@ static int64_t doubleToInt64(double r){ } ``` -Removing `const` makes those constants into `static` variables. I don’t see why anyone who didn’t care about `const` would make those variables `static`. Removing both `static` and `const` makes GCC recognise them as constants again, and we get the same output. Three of the 13 functions had spurious changes because of local `static const` variables like this, but I didn’t bother fixing any of them. +删去 `const` 使得这些常量变成了 `static` 变量。我不明白为什么会有不了解 `const` 的人让这些变量加上 `static`。同时删去 `static` 和 `const` 会让 GCC 再次认为它们是常量,而我们将得到同样的编译输出。由于像这样子的局部的 `static const` 变量,使得 13 个函数中有 3 个函数产生假的变化,但我一个都不打算修复它们。 -`Sqlite` uses a lot of global variables, and that’s where most of the real `const` optimisations came from. Typically they were things like a comparison with a variable being replaced with a constant comparison, or a loop being partially unrolled a step. (The [Radare toolkit][3] was handy for figuring out what the optimisations did.) A few changes were underwhelming. 
`sqlite3ParseUri()` is 487 instructions, but the only difference `const` made was taking this pair of comparisons: +`Sqlite` 使用了很多全局变量,而这正是大多数真正的 `const` 优化产生的地方。通常情况下,它们类似于将一个变量比较代替成一个常量比较,或者一个循环在部分展开的一步。([Radare toolkit][3] 可以很方便的找出这些优化措施。)一些变化则令人失望。`sqlite3ParseUri()` 有 487 指令,但 `const` 产生的唯一区别是进行了这个比较: ``` test %al, %al @@ -339,7 +339,7 @@ cmp $0x23, %al je ``` -And swapping their order: +并交换了它们的顺序: ``` cmp $0x23, %al @@ -348,9 +348,9 @@ test %al, %al je ``` -#### Benchmarking +#### 基准测试 -`Sqlite` comes with a performance regression test, so I tried running it a hundred times for each version of the code, still using the default `Sqlite` build settings. Here are the timing results in seconds: +`Sqlite` 自带了一个性能回归测试,因此我尝试每个版本的代码执行一百次,仍然使用默认的 `Sqlite` 编译设置。以秒为单位的测试结果如下: | const | No const ---|---|--- @@ -359,7 +359,7 @@ Median | 11.571s | 11.519s Maximum | 11.832s | 11.658s Mean | 11.531s | 11.492s -Personally, I’m not seeing enough evidence of a difference worth caring about. I mean, I removed `const` from the entire program, so if it made a significant difference, I’d expect it to be easy to see. But maybe you care about any tiny difference because you’re doing something absolutely performance critical. Let’s try some statistical analysis. +就我个人看来,我没有发现足够的证据说明这个差异值得关注。我是说,我从整个程序中删去 `const`,所以如果它有明显的差别,那么我希望它是显而易见的。但也许你关心任何微小的差异,因为你正在做一些绝对性能非常重要的事。那让我们试一下统计分析。 I like using the Mann-Whitney U test for stuff like this. It’s similar to the more-famous t test for detecting differences in groups, but it’s more robust to the kind of complex random variation you get when timing things on computers (thanks to unpredictable context switches, page faults, etc). Here’s the result: From b31a78eab73f2bebc237335cbac7ffc4ac4b3ce6 Mon Sep 17 00:00:00 2001 From: DarkSun Date: Thu, 12 Sep 2019 00:52:14 +0800 Subject: [PATCH 012/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190909=20Firefo?= =?UTF-8?q?x=2069=20available=20in=20Fedora?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/tech/20190909 Firefox 69 available in Fedora.md --- ...20190909 Firefox 69 available in Fedora.md | 63 +++++++++++++++++++ 1 file changed, 63 insertions(+) create mode 100644 sources/tech/20190909 Firefox 69 available in Fedora.md diff --git a/sources/tech/20190909 Firefox 69 available in Fedora.md b/sources/tech/20190909 Firefox 69 available in Fedora.md new file mode 100644 index 0000000000..817d4f391e --- /dev/null +++ b/sources/tech/20190909 Firefox 69 available in Fedora.md @@ -0,0 +1,63 @@ +[#]: collector: (lujun9972) +[#]: translator: ( ) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (Firefox 69 available in Fedora) +[#]: via: (https://fedoramagazine.org/firefox-69-available-in-fedora/) +[#]: author: (Paul W. Frields https://fedoramagazine.org/author/pfrields/) + +Firefox 69 available in Fedora +====== + +![][1] + +When you install the Fedora Workstation, you’ll find the world-renowned Firefox browser included. The Mozilla Foundation underwrites work on Firefox, as well as other projects that promote an open, safe, and privacy respecting Internet. Firefox already features a fast browsing engine and numerous privacy features. + +A community of developers continues to improve and enhance Firefox. The latest version, Firefox 69, was released recently and you can get it for your stable Fedora system (30 and later). Read on for more details. + +### New features in Firefox 69 + +The newest version of Firefox includes [Enhanced Tracking Protection][2] (or ETP). 
When you use Firefox 69 with a new (or reset) settings profile, the browser makes it harder for sites to track your information or misuse your computer resources. + +For instance, less scrupulous websites use scripts that cause your system to do lots of intense calculations to produce cryptocurrency results, called _[cryptomining][3]_. Cryptomining happens without your knowledge or permission and is therefore a misuse of your system. The new standard setting in Firefox 69 prevents sites from this kind of abuse. + +Firefox 69 has additional settings to prevent sites from identifying or fingerprinting your browser for later use. These improvements give you additional protection from having your activities tracked online. + +Another common annoyance is videos that start in your browser without warning. Video playback also uses extra CPU power and you may not want this happening on your laptop without permission. Firefox already stops this from happening using the [Block Autoplay][4] feature. But Firefox 69 also lets you stop videos from playing even if they start without sound. This feature prevents unwanted sudden noise. It also solves more of the real problem — having your computer’s power used without permission. + +There are numerous other new features in the new release. Read more about them in the [Firefox release notes][5]. + +### How to get the update + +Firefox 69 is available in the stable Fedora 30 and pre-release Fedora 31 repositories, as well as Rawhide. The update is provided by Fedora’s maintainers of the Firefox package. The maintainers also ensured an update to Mozilla’s Network Security Services (the nss package). We appreciate the hard work of the Mozilla project and Firefox community in providing this new release. + +If you’re using Fedora 30 or later, use the _Software_ tool on Fedora Workstation, or run the following command on any Fedora system: + +``` +$ sudo dnf --refresh upgrade firefox +``` + +If you’re on Fedora 29, [help test the update][6] for that release so it can become stable and easily available for all users. + +Firefox may prompt you to upgrade your profile to use the new settings. To take advantage of new features, you should do this. + +-------------------------------------------------------------------------------- + +via: https://fedoramagazine.org/firefox-69-available-in-fedora/ + +作者:[Paul W. 
Frields][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://fedoramagazine.org/author/pfrields/ +[b]: https://github.com/lujun9972 +[1]: https://fedoramagazine.org/wp-content/uploads/2019/09/firefox-v69-816x345.jpg +[2]: https://blog.mozilla.org/blog/2019/09/03/todays-firefox-blocks-third-party-tracking-cookies-and-cryptomining-by-default/ +[3]: https://www.webopedia.com/TERM/C/cryptocurrency-mining.html +[4]: https://support.mozilla.org/kb/block-autoplay +[5]: https://www.mozilla.org/en-US/firefox/69.0/releasenotes/ +[6]: https://bodhi.fedoraproject.org/updates/FEDORA-2019-89ae5bb576 From 2cf5eee84570fccf409b159dea10193ad21fc1b3 Mon Sep 17 00:00:00 2001 From: "Xingyu.Wang" Date: Thu, 12 Sep 2019 08:38:22 +0800 Subject: [PATCH 013/202] Rename sources/tech/20190909 Firefox 69 available in Fedora.md to sources/news/20190909 Firefox 69 available in Fedora.md --- sources/{tech => news}/20190909 Firefox 69 available in Fedora.md | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename sources/{tech => news}/20190909 Firefox 69 available in Fedora.md (100%) diff --git a/sources/tech/20190909 Firefox 69 available in Fedora.md b/sources/news/20190909 Firefox 69 available in Fedora.md similarity index 100% rename from sources/tech/20190909 Firefox 69 available in Fedora.md rename to sources/news/20190909 Firefox 69 available in Fedora.md From 76daa8e808935b2af7f7f5896297699e3fa98730 Mon Sep 17 00:00:00 2001 From: geekpi Date: Thu, 12 Sep 2019 08:48:42 +0800 Subject: [PATCH 014/202] translating --- ...ash Script to Send a Mail About New User Account Creation.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sources/tech/20190912 Bash Script to Send a Mail About New User Account Creation.md b/sources/tech/20190912 Bash Script to Send a Mail About New User Account Creation.md index a65013ff04..e8e4d27a2c 100644 --- a/sources/tech/20190912 Bash Script to Send a Mail About New User Account Creation.md +++ b/sources/tech/20190912 Bash Script to Send a Mail About New User Account Creation.md @@ -1,5 +1,5 @@ [#]: collector: (lujun9972) -[#]: translator: ( ) +[#]: translator: (geekpi) [#]: reviewer: ( ) [#]: publisher: ( ) [#]: url: ( ) From 5b9c12fdd8e280cde9124e102e13537d5f30ff92 Mon Sep 17 00:00:00 2001 From: Xingyu Wang Date: Thu, 12 Sep 2019 09:26:58 +0800 Subject: [PATCH 015/202] TSL --- translated/talk/20190902 Why I use Java.md | 34 +++++++++++----------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/translated/talk/20190902 Why I use Java.md b/translated/talk/20190902 Why I use Java.md index 5cf445e8e2..ed98447c9c 100644 --- a/translated/talk/20190902 Why I use Java.md +++ b/translated/talk/20190902 Why I use Java.md @@ -1,6 +1,6 @@ [#]: collector: (lujun9972) [#]: translator: (wxy) -[#]: reviewer: ( ) +[#]: reviewer: (wxy) [#]: publisher: ( ) [#]: url: ( ) [#]: subject: (Why I use Java) @@ -14,21 +14,21 @@ ![Coffee beans][1] -我记得我是从 1997 年开始使用 Java 的,没多久 [Java 1.1 就露出了曙光][2]。从那时起,总的来说,我非常喜欢用 Java 编程;虽然我得承认,这些日子我经常像在 Java 中编写“严肃的代码”一样编写 [Groovy][3] 脚本。 +我记得我是从 1997 年开始使用 Java 的,就在 [Java 1.1 刚刚发布][2]不久之后。从那时起,总的来说,我非常喜欢用 Java 编程;虽然我得承认,这些日子我经常像在 Java 中编写“严肃的代码”一样编写 [Groovy][3] 脚本。 -我的来自 [FORTRAN][4]、[PL/1][5]、[Pascal][6] 以及最后的 [C][7] 的背景,我发现了很多关于 Java 的东西。Java 是我[面向对象编程][8]的第一次重要实践经验。到那时,我已经编程了大约 20 年,而且可以说我对什么重要、什么不重要有了一些看法。 +来自 [FORTRAN][4]、[PL/1][5]、[Pascal][6] 以及最后的 [C 语言][7] 背景,我发现了许多让我喜欢 Java 的东西。Java 
是我[面向对象编程][8]的第一次重要实践经验。到那时,我已经编程了大约 20 年,而且可以说我对什么重要、什么不重要有了一些看法。 ### 调试是一个关键的语言特性 -我真的很讨厌浪费时间追踪由我的代码不小心迭代到数组末尾引起的模糊错误,特别是在 IBM 大型机上的 FORTRAN 编程时代。另一个不时出现的微妙问题是调用一个子程序,该子程序带有一个四字节整数参数,而预期有两个字节;在小端架构上,这通常是一个良性的错误,但在大端机器上,前两个字节的值通常但不总是为零。 +我真的很讨厌浪费时间追踪由我的代码不小心迭代到数组末尾导致的模糊错误,特别是在 IBM 大型机上的 FORTRAN 编程时代。另一个不时出现的微妙问题是调用一个子程序,该子程序带有一个四字节整数参数,而预期有两个字节;在小端架构上,这通常是一个良性的错误,但在大端机器上,前两个字节的值通常并不总是为零。 -在该批处理环境中进行调试非常尴尬,通过核心转储或插入打印语句进行调试,这些语句本身会移动错误的位置甚至使它们消失。 +在那种批处理环境中进行调试也非常不便,通过核心转储或插入打印语句进行调试,这些语句本身会移动错误的位置甚至使它们消失。 -所以我早期使用 Pascal 的经验,先是在 [MTS][9] 上,然后是在 [IBM OS/VS1][10] 上使用相同的 MTS 编译器,让我的生活变得更加轻松。 Pascal 的[强类型和静态类型][11]是胜利的重要组成部分,我使用的每个 Pascal 编译器都在数组的边界和范围上插入运行时检查,因此错误可以在发生时检测到。当我们在 20 世纪 80 年代早期将大部分工作转移到 Unix 系统时,移植 Pascal 代码是一项简单的任务。 +所以我使用 Pascal 的早期体验,先是在 [MTS][9] 上,然后是在 [IBM OS/VS1][10] 上使用相同的 MTS 编译器,让我的生活变得更加轻松。Pascal 的[强类型和静态类型][11]是胜利的重要组成部分,我使用的每个 Pascal 编译器都在数组的边界和范围上插入运行时检查,因此错误可以在发生时检测到。当我们在 20 世纪 80 年代早期将大部分工作转移到 Unix 系统时,移植 Pascal 代码是一项简单的任务。 ### 找到适量的语法 -但是对于我喜欢的 Pascal 的所有事情,我的代码很冗长,而且语法似乎倾向于略微模糊代码;例如,使用: +但是对于我所喜欢的 Pascal 来说,我的代码很冗长,而且语法似乎要比代码还要多;例如,使用: ``` if ... then begin ... end else ... end @@ -40,31 +40,31 @@ if ... then begin ... end else ... end if (...) { ... } else { ... } ``` -另外,有些事情在 Pascal 中很难完成,在 C 中更容易。但是,当我开始越来越多地使用 C 时,我发现自己遇到了我曾经在 FORTRAN 中遇到的同样类型的错误,例如,超出数组边界。在原始的错误点未检测到数组结束,而仅在程序执行后期才会检测到它们的不利影响。幸运的是,我不再生活在批处理环境中,并且手头有很好的调试工具。不过,为我自己好,C 给了我一点灵活性。 +另外,有些事情在 Pascal 中很难完成,在 C 中更容易。但是,当我开始越来越多地使用 C 时,我发现自己遇到了我曾经在 FORTRAN 中遇到的同样类型的错误,例如,超出数组边界。在原始的错误点未检测到数组结束,而仅在程序执行后期才会检测到它们的不利影响。幸运的是,我不再生活在那种批处理环境中,并且手头有很好的调试工具。不过,C 给我的灵活性还是对我有好处的。 -当我发现 [awk][12] 时,我对它与 C 做了一个很好的对比。那时,我的很多工作都涉及转换字段数据并创建报告。我发现用 `awk` 可以做到惊人的数量,加上其他 Unix 命令行工具,如 `sort`、`sed`、`cut`、`join`、`paste`、`comm` 等等。从本质上讲,这些工具给了我一个像是文本文件的关系数据库管理器,这种文本文件具有列式结构,是我们很多字段数据出现的方式。或者,如果不是那种格式,大部分时候该数据可以从关系数据库或某种二进制格式导出到列式的结构中。 +当我发现 [awk][12] 时,我对它与 C 做了一个很好的对比。那时,我的很多工作都涉及转换字段数据并创建报告。我发现用 `awk` 加上其他 Unix 命令行工具,如 `sort`、`sed`、`cut`、`join`、`paste`、`comm` 等等,可以做到事情令人吃惊。从本质上讲,这些工具给了我一个像是文本文件的关系数据库管理器,这种文本文件具有列式结构,是我们很多字段数据保存的方式。或者,即便不是那种格式,大部分时候也可以从关系数据库或某种二进制格式导出到列式结构中。 -`awk` 支持的字符串处理、[正则表达式][13]和[关联数组][14],以及 `awk`(它实际上是一个数据转换管道)的基本特性,非常符合我的需求。当面对二进制数据文件,复杂的数据结构和绝对性能需求时,我仍然会转回到 C;但随着我越来越多地使用 `awk`,我发现 C 的非常基础的字符串支持越来越令人沮丧。随着时间的推移,越来越多的时候我只会在必须时才使用 C,并且可能在其余的时候里大量使用 `awk`。 +`awk` 支持的字符串处理、[正则表达式][13]和[关联数组][14],以及 `awk` 的基本特性(它实际上是一个数据转换管道),非常符合我的需求。当面对二进制数据文件、复杂的数据结构和对性能的绝对需求时,我仍然会转回到 C;但随着我越来越多地使用 `awk`,我发现 C 的非常基础的字符串支持越来越令人沮丧。随着时间的推移,更多的时候我只会在必须时才使用 C,并且在其余的时候里大量使用 `awk`。 ### Java 的抽象层级合适 -然后是 Java。它看起来相当不错 —— 一个相对简洁的语法,让人联想到 C,或者这种相似性至少比 Pascal 或其他任何早期的语言更为明显。它是强类型的,因此很多编程错误会在编译时被捕获。它似乎并不需要过多的面向对象的学习就能开始,这是一件好事,因为我当时对 [OOP 设计模式][15]几乎完全不熟悉。但即使在最初的日子里,我也喜欢它的简化[继承模型][16]背后的思想。(Java 允许使用提供的接口进行单继承,以在某种程度上丰富范例。) +然后是 Java。它看起来相当不错 —— 一个相对简洁的语法,让人联想到 C,或者这种相似性至少比 Pascal 或其他任何早期的语言更为明显。它是强类型的,因此很多编程错误会在编译时被捕获。它似乎并不需要过多的面向对象的知识就能开始,这是一件好事,因为我当时对 [OOP 设计模式][15]毫不熟悉。但即使在刚开始,我也喜欢它的简化[继承模型][16]背后的思想。(Java 允许使用提供的接口进行单继承,以在某种程度上丰富范例。) -它似乎带有丰富的功能库(即“自备电池”的概念),在适当的水平上直接满足了我的需求。最后,我发现自己很快就会想到数据和行为在对象中组合在一起的想法。这似乎是明确控制数据之间交互的好方法 —— 比大量的参数列表或对全局变量的不受控制的访问要好得多。 +它似乎带有丰富的功能库(即“自备电池”的概念),在适当的水平上直接满足了我的需求。最后,我发现自己很快就会想到将数据和行为在对象中组合在一起的想法。这似乎是明确控制数据之间交互的好方法 —— 比大量的参数列表或对全局变量的不受控制的访问要好得多。 -从那以后,Java 在我的编程工具箱中成为了 Helvetic 军刀。我仍然偶尔会在 `awk` 中编写内容,或者使用 Linux 命令行实用程序(如 `cut`、`sort` 或 `sed`),因为它们显然是解决手头问题的直接方法。我怀疑过去 20 年我是否写过 50 行 C 语言代码;Java 完全满足了我的需求。 +从那以后,Java 在我的编程工具箱中成为了 Helvetic 军刀。我仍然偶尔会在 `awk` 中编写内容,或者使用 Linux 命令行实用程序(如 `cut`、`sort` 或 `sed`),因为它们显然是解决手头问题的直接方法。我怀疑过去 20 年我有没有写过 50 行的 C 语言代码;Java 完全满足了我的需求。 -此外,Java 
一直在不断改进。首先,它变得更加高效。并且它添加了一些非常有用的功能,例如[可以 try 测试资源][17],例如它可以很好地清理在文件 I/O 期间冗长和有点混乱的错误处理代码;或 [lambdas] [18],它们提供了声明函数并将它们作为参数传递的能力,而旧方法需要创建类或接口来“托管”这些函数; 或[流][19],它封装了函数中的迭代行为,创建了以链式函数调用形式实现的高效数据转换管道。 +此外,Java 一直在不断改进。首先,它变得更加高效。并且它添加了一些非常有用的功能,例如[可以用 try 来测试资源][17],它可以很好地清理在文件 I/O 期间冗长而有点混乱的错误处理代码;或 [lambda][18],它提供了声明函数并将其作为参数传递的能力,而旧方法需要创建类或接口来“托管”这些函数;或[流][19],它封装了函数中的迭代行为,可以创建以链式函数调用形式实现的高效数据转换管道。 ### Java 越来越好 -许多语言设计者研究了从根本上改善 Java 体验的方法。对我来说,其中大部分还没有引起人们的极大兴趣;再次,这更多地反映了我的典型工作流程,并且(更多地)减少了这些语言带来的功能。但其中一个演化步骤已经成为我的编程工具中不可或缺的一部分:[Groovy][20]。当我遇到一个需要小解决方案的小问题时,Groovy 已成为我的首选解决方案。而且,它与 Java 高度兼容。对我来说,Groovy 填补了 Python 为许多其他人填充的相同利基 —— 它紧凑、DRY(不要重复自己)和具有表达性(列表和词典有完整的语言支持)。我还使用了 [Grails][21],它使用 Groovy 为非常高性能和有用的 Java Web 应用程序提供简化的 Web 框架。 +许多语言设计者研究了从根本上改善 Java 体验的方法。对我来说,其中大部分没有引起我的太多兴趣;再次,这更多地反映了我的典型工作流程,并且(更多地)减少了这些语言带来的功能。但其中一个演化步骤已经成为我的编程工具中不可或缺的一部分:[Groovy][20]。当我遇到一个小问题,需要一个简单的解决方案时,Groovy 已经成为了我的首选解决方案。而且,它与 Java 高度兼容。对我来说,Groovy 填补了 Python 为许多其他人所提供相同用处 —— 它紧凑、DRY(不要重复自己)和具有表达性(列表和词典有完整的语言支持)。我还使用了 [Grails][21],它使用 Groovy 为非常高性能和有用的 Java Web 应用程序提供简化的 Web 框架。 ### Java 仍然开源吗? -最近,对 [OpenJDK][22] 越来越多的支持进一步提高了我对 Java 的舒适度。许多公司以各种方式支持 OpenJDK,包括 [AdoptOpenJDK、Amazon 和 Red Hat][23]。在我的一个更大和更长期的项目中,我们使用 AdoptOpenJDK [来在几个桌面平台上生成自定义的运行时环境][24]。 +最近,对 [OpenJDK][22] 越来越多的支持进一步提高了我对 Java 的舒适度。许多公司以各种方式支持 OpenJDK,包括 [AdoptOpenJDK、Amazon 和 Red Hat][23]。在我的一个更大、更长期的项目中,我们使用 AdoptOpenJDK [来在几个桌面平台上生成自定义的运行时环境][24]。 -有没有比 Java 更好的语言?我确信有,这取决于你的工作需要。但我仍然是一个非常高兴的 Java 用户,我还没有看到任何可能会让我失望的东西。 +有没有比 Java 更好的语言?我确信有,这取决于你的工作需要。但我一直对 Java 非常满意,我还没有遇到任何可能会让我失望的东西。 -------------------------------------------------------------------------------- From 7d3dd081dcd6d428d1ca55a6db2d3a5bf23e6f60 Mon Sep 17 00:00:00 2001 From: heguangzhi <7731226@qq.com> Date: Thu, 12 Sep 2019 10:03:46 +0800 Subject: [PATCH 016/202] Update translator heguangzhi --- sources/tech/20190730 How to manage logs in Linux.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sources/tech/20190730 How to manage logs in Linux.md b/sources/tech/20190730 How to manage logs in Linux.md index cebfbc5f99..0449f0f048 100644 --- a/sources/tech/20190730 How to manage logs in Linux.md +++ b/sources/tech/20190730 How to manage logs in Linux.md @@ -1,5 +1,5 @@ [#]: collector: (lujun9972) -[#]: translator: ( ) +[#]: translator: (heguangzhi) [#]: reviewer: ( ) [#]: publisher: ( ) [#]: url: ( ) @@ -94,7 +94,7 @@ via: https://www.networkworld.com/article/3428361/how-to-manage-logs-in-linux.ht 作者:[Sandra Henry-Stocker][a] 选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) +译者:[译者ID](https://github.com/heguangzhi) 校对:[校对者ID](https://github.com/校对者ID) 本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 From 788e4001de6c4931e6ae3312a5d20cefc016b4c4 Mon Sep 17 00:00:00 2001 From: Xingyu Wang Date: Thu, 12 Sep 2019 10:29:42 +0800 Subject: [PATCH 017/202] PRF --- ...ing started with HTTPie for API testing.md | 172 +++++++++--------- 1 file changed, 83 insertions(+), 89 deletions(-) diff --git a/translated/tech/20190829 Getting started with HTTPie for API testing.md b/translated/tech/20190829 Getting started with HTTPie for API testing.md index 925cd6fe51..018ff81135 100644 --- a/translated/tech/20190829 Getting started with HTTPie for API testing.md +++ b/translated/tech/20190829 Getting started with HTTPie for API testing.md @@ -1,6 +1,6 @@ [#]: collector: (lujun9972) [#]: translator: (geekpi) -[#]: reviewer: ( ) +[#]: reviewer: (wxy) [#]: publisher: ( ) [#]: url: ( ) [#]: subject: (Getting 
started with HTTPie for API testing) @@ -9,22 +9,24 @@ 使用 HTTPie 进行 API 测试 ====== -使用 HTTPie 调试 API,一个用 Python 写的简易命令行工具。 -![Raspberry pie with slice missing][1] -[HTTPie][2] 是一个非常易于使用且易于升级的 HTTP 客户端。它的发音为 “aitch-tee-tee-pie” 并以 **http** 运行,它是一个用 Python 编写的命令行工具来用于访问 Web。 +> 使用 HTTPie 调试 API,这是一个用 Python 写的易用的命令行工具。 -由于这篇是关于 HTTP 客户端的,因此你需要一个 HTTP 服务器来试用它。在这里,访问 [httpbin.org] [3],它是一个简单的开源 HTTP 请求和响应服务。httpbin.org 网站是一种测试 Web API 的强大方式,并能仔细管理并显示请求和相应内容,但现在我们将专注于 HTTPie 的强大功能。 +![](https://img.linux.net.cn/data/attachment/album/201909/12/102919ry1ute1y9h991ftz.jpg) + +[HTTPie][2] 是一个非常易用、易于升级的 HTTP 客户端。它的发音为 “aitch-tee-tee-pie” 并以 `http` 命令运行,它是一个用 Python 编写的来用于访问 Web 的命令行工具。 + +由于这是一篇关于 HTTP 客户端的指导文章,因此你需要一个 HTTP 服务器来试用它。在这里,访问 [httpbin.org][3],它是一个简单的开源 HTTP 请求和响应服务。httpbin.org 网站是一种测试 Web API 的强大方式,并能仔细管理并显示请求和响应内容,不过现在让我们专注于 HTTPie 的强大功能。 ### Wget 和 cURL 的替代品 -你可能听说过古老的 [Wget][4] 或稍微更新的 [cURL][5] 工具,它们允许你从命令行访问 Web。它们是为访问网站而编写的,而 HTTPie 则用于访问 _Web API_。 +你可能听说过古老的 [Wget][4] 或稍微新一些的 [cURL][5] 工具,它们允许你从命令行访问 Web。它们是为访问网站而编写的,而 HTTPie 则用于访问 Web API。 -网站请求设计介于计算机和正在阅读并响应他们所看到的内容的最终用户之间。这并不太依赖于结构化的响应。但是,API 请求会在两台计算机之间进行_结构化_调用。人类不是图片的一部分,像 HTTPie 这样的命令行工具的参数可以有效地处理这个问题。 +网站请求发生在计算机和正在阅读并响应它所看到的内容的最终用户之间,这并不太依赖于结构化的响应。但是,API 请求会在两台计算机之间进行*结构化*调用,人并不是该流程内的一部分,像 HTTPie 这样的命令行工具的参数可以有效地处理这个问题。 ### 安装 HTTPie -有几种方法可以安装 HTTPie。你可以通过包管理器安装,无论你使用的是 **brew**、**apt**、**yum** 还是 **dnf**。但是,如果你已配置 [virtualenvwrapper] [6],那么你可以用自己的方式安装: +有几种方法可以安装 HTTPie。你可以通过包管理器安装,无论你使用的是 `brew`、`apt`、`yum` 还是 `dnf`。但是,如果你已配置 [virtualenvwrapper][6],那么你可以用自己的方式安装: ``` @@ -34,34 +36,33 @@ $ mkvirtualenv httpie ... (httpie) $ deactivate $ alias http=~/.virtualenvs/httpie/bin/http -$ http -b GET +$ http -b GET https://httpbin.org/get { -    "args": {}, -    "headers": { -        "Accept": "*/*", -        "Accept-Encoding": "gzip, deflate", -        "Host": "httpbin.org", -        "User-Agent": "HTTPie/1.0.2" -    }, -    "origin": "104.220.242.210, 104.220.242.210", -    "url": "" + "args": {}, + "headers": { + "Accept": "*/*", + "Accept-Encoding": "gzip, deflate", + "Host": "httpbin.org", + "User-Agent": "HTTPie/1.0.2" + }, + "origin": "104.220.242.210, 104.220.242.210", + "url": "https://httpbin.org/get" } ``` -通过直接将 **http** 设置为虚拟环境中的命令别名,即使虚拟环境在非活动状态,你也可以运行它。 你可以将 **alias** 命令放在 **.bash_profile** 或 **.bashrc** 中,这样你就可以使用以下命令升级 HTTPie: +通过将 `http` 别名指向为虚拟环境中的命令,即使虚拟环境在非活动状态,你也可以运行它。你可以将 `alias` 命令放在 `.bash_profile` 或 `.bashrc` 中,这样你就可以使用以下命令升级 HTTPie: ``` -`$ ~/.virtualenvs/httpie/bin/pip install -U pip` +$ ~/.virtualenvs/httpie/bin/pip install -U pip ``` ### 使用 HTTPie 查询网站 -HTTPie 可以简化查询和测试 API。 这里使用了一个选项, **-b**(也可以是 **\--body**)。 没有它,HTTPie 将默认打印整个响应,包括头: - +HTTPie 可以简化查询和测试 API。上面使用了一个选项,`-b`(即 `--body`)。没有它,HTTPie 将默认打印整个响应,包括响应头: ``` -$ http GET +$ http GET https://httpbin.org/get HTTP/1.1 200 OK Access-Control-Allow-Credentials: true Access-Control-Allow-Origin: * @@ -77,23 +78,22 @@ X-Frame-Options: DENY X-XSS-Protection: 1; mode=block { -    "args": {}, -    "headers": { -        "Accept": "*/*", -        "Accept-Encoding": "gzip, deflate", -        "Host": "httpbin.org", -        "User-Agent": "HTTPie/1.0.2" -    }, -    "origin": "104.220.242.210, 104.220.242.210", -    "url": "" + "args": {}, + "headers": { + "Accept": "*/*", + "Accept-Encoding": "gzip, deflate", + "Host": "httpbin.org", + "User-Agent": "HTTPie/1.0.2" + }, + "origin": "104.220.242.210, 104.220.242.210", + "url": "https://httpbin.org/get" } ``` -这在调试 API 服务时非常重要,因为大量信息在 HTTP 头中发送。 例如,查看发送的 cookie 通常很重要。Httpbin.org 提供了通过 URL 路径设置 
cookie(用于测试目的)的选项。 以下设置一个标题为 **opensource**, 值为 **awesome** 的 cookie: - +这在调试 API 服务时非常重要,因为大量信息在响应头中发送。例如,查看发送的 cookie 通常很重要。httpbin.org 提供了通过 URL 路径设置 cookie(用于测试目的)的方式。以下设置一个标题为 `opensource`, 值为 `awesome` 的 cookie: ``` -$ http GET +$ http GET https://httpbin.org/cookies/set/opensource/awesome HTTP/1.1 302 FOUND Access-Control-Allow-Credentials: true Access-Control-Allow-Origin: * @@ -116,11 +116,10 @@ X-XSS-Protection: 1; mode=block /cookies. If not click the link. ``` -注意 **Set-Cookie: opensource=awesome; Path=/** 的 HTTP 头。 这表明你预期设置的 cookie 已正确设置,路径为 **/**。 另请注意,即使你有 **302**重定向,**http** 也不会遵循它。 如果你想要遵循重定向,则需要使用 **\--follow** 标志请求: - +注意 `Set-Cookie: opensource=awesome; Path=/` 的响应头。这表明你预期设置的 cookie 已正确设置,路径为 `/`。另请注意,即使你得到了 `302` 重定向,`http` 也不会遵循它。如果你想要遵循重定向,则需要明确使用 `--follow` 标志请求: ``` -$ http --follow GET +$ http --follow GET https://httpbin.org/cookies/set/opensource/awesome HTTP/1.1 200 OK Access-Control-Allow-Credentials: true Access-Control-Allow-Origin: * @@ -136,18 +135,17 @@ X-Frame-Options: DENY X-XSS-Protection: 1; mode=block { -    "cookies": { -        "opensource": "awesome" -    } + "cookies": { + "opensource": "awesome" + } } ``` -但此时你无法看到原来的 **Set-Cookie** 头。为了看到中间响应,你需要使用 **\--all**: +但此时你无法看到原来的 `Set-Cookie` 头。为了看到中间响应,你需要使用 `--all`: ``` -$ http --headers --all --follow \ -GET +$ http --headers --all --follow GET https://httpbin.org/cookies/set/opensource/awesome HTTP/1.1 302 FOUND Access-Control-Allow-Credentials: true Access-Control-Allow-Origin: * @@ -178,12 +176,10 @@ Content-Length: 66 Connection: keep-alive ``` -打印 body 并不有趣,因为你大多数关心 cookie。如果你像看到中间请求的头,而不是最终请求中的 body,你可以使用: - +打印响应体并不有趣,因为你大多数时候只关心 cookie。如果你想看到中间请求的响应头,而不是最终请求中的响应体,你可以使用: ``` -$ http --print hb --history-print h --all --follow \ -GET +$ http --print hb --history-print h --all --follow GET https://httpbin.org/cookies/set/opensource/awesome HTTP/1.1 302 FOUND Access-Control-Allow-Credentials: true Access-Control-Allow-Origin: * @@ -214,21 +210,20 @@ Content-Length: 66 Connection: keep-alive { -  "cookies": { -    "opensource": "awesome" -  } + "cookies": { + "opensource": "awesome" + } } ``` -你可以使用 **\--print** 精确控制打印,并使用 **\--history-print** 覆盖中间请求的打印。 +你可以使用 `--print` 精确控制打印的内容(`h`:响应头;`b`:响应体),并使用 `--history-print` 覆盖中间请求的打印内容设置。 ### 使用 HTTPie 下载二进制文件 -有时 body 并不是文本形式,它需要发送到可被不同应用打开的文件: - +有时响应体并不是文本形式,它需要发送到可被不同应用打开的文件: ``` -$ http GET +$ http GET https://httpbin.org/image/jpeg HTTP/1.1 200 OK Access-Control-Allow-Credentials: true Access-Control-Allow-Origin: * @@ -242,6 +237,7 @@ X-Content-Type-Options: nosniff X-Frame-Options: DENY X-XSS-Protection: 1; mode=block + +-----------------------------------------+ | NOTE: binary data not shown in terminal | +-----------------------------------------+ @@ -249,9 +245,8 @@ X-XSS-Protection: 1; mode=block 要得到正确的图片,你需要保存到文件: - ``` -$ http --download GET +$ http --download GET https://httpbin.org/image/jpeg HTTP/1.1 200 OK Access-Control-Allow-Credentials: true Access-Control-Allow-Origin: * @@ -273,19 +268,18 @@ Done. 
34.75 kB in 0.00068s (50.05 MB/s) ### 使用 HTTPie 发送自定义请求 -你可以发送指定头。这对于需要非标准头的自定义 Web API 很有用: - +你可以发送指定的请求头。这对于需要非标准头的自定义 Web API 很有用: ``` -$ http GET X-Open-Source-Com:Awesome +$ http GET https://httpbin.org/headers X-Open-Source-Com:Awesome { -  "headers": { -    "Accept": "*/*", -    "Accept-Encoding": "gzip, deflate", -    "Host": "httpbin.org", -    "User-Agent": "HTTPie/1.0.2", -    "X-Open-Source-Com": "Awesome" -  } + "headers": { + "Accept": "*/*", + "Accept-Encoding": "gzip, deflate", + "Host": "httpbin.org", + "User-Agent": "HTTPie/1.0.2", + "X-Open-Source-Com": "Awesome" + } } ``` @@ -293,31 +287,31 @@ $ http GET X-Open-Source-Com:Awesome ``` -$ http --body PUT open-source=awesome author=moshez +$ http --body PUT https://httpbin.org/anything open-source=awesome author=moshez { -  "args": {}, -  "data": "{\"open-source\": \"awesome\", \"author\": \"moshez\"}", -  "files": {}, -  "form": {}, -  "headers": { -    "Accept": "application/json, */*", -    "Accept-Encoding": "gzip, deflate", -    "Content-Length": "46", -    "Content-Type": "application/json", -    "Host": "httpbin.org", -    "User-Agent": "HTTPie/1.0.2" -  }, -  "json": { -    "author": "moshez", -    "open-source": "awesome" -  }, -  "method": "PUT", -  "origin": "73.162.254.113, 73.162.254.113", -  "url": "" + "args": {}, + "data": "{\"open-source\": \"awesome\", \"author\": \"moshez\"}", + "files": {}, + "form": {}, + "headers": { + "Accept": "application/json, */*", + "Accept-Encoding": "gzip, deflate", + "Content-Length": "46", + "Content-Type": "application/json", + "Host": "httpbin.org", + "User-Agent": "HTTPie/1.0.2" + }, + "json": { + "author": "moshez", + "open-source": "awesome" + }, + "method": "PUT", + "origin": "73.162.254.113, 73.162.254.113", + "url": "https://httpbin.org/anything" } ``` -下次在调试 Web API 时,无论时你自己还是别人,记得放下 cURL,试试 HTTPie 这个命令行工具。 +下次在调试 Web API 时,无论是你自己的还是别人的,记得放下 cURL,试试 HTTPie 这个命令行工具。 -------------------------------------------------------------------------------- @@ -326,7 +320,7 @@ via: https://opensource.com/article/19/8/getting-started-httpie 作者:[Moshe Zadka][a] 选题:[lujun9972][b] 译者:[geekpi](https://github.com/geekpi) -校对:[校对者ID](https://github.com/校对者ID) +校对:[wxy](https://github.com/wxy) 本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 From f091f47f920fe31f174d0de7bf9ba6f7429544d4 Mon Sep 17 00:00:00 2001 From: Xingyu Wang Date: Thu, 12 Sep 2019 10:30:29 +0800 Subject: [PATCH 018/202] PUB @geekpi https://linux.cn/article-11333-1.html --- .../20190829 Getting started with HTTPie for API testing.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) rename {translated/tech => published}/20190829 Getting started with HTTPie for API testing.md (99%) diff --git a/translated/tech/20190829 Getting started with HTTPie for API testing.md b/published/20190829 Getting started with HTTPie for API testing.md similarity index 99% rename from translated/tech/20190829 Getting started with HTTPie for API testing.md rename to published/20190829 Getting started with HTTPie for API testing.md index 018ff81135..c85c165df5 100644 --- a/translated/tech/20190829 Getting started with HTTPie for API testing.md +++ b/published/20190829 Getting started with HTTPie for API testing.md @@ -1,8 +1,8 @@ [#]: collector: (lujun9972) [#]: translator: (geekpi) [#]: reviewer: (wxy) -[#]: publisher: ( ) -[#]: url: ( ) +[#]: publisher: (wxy) +[#]: url: (https://linux.cn/article-11333-1.html) [#]: subject: (Getting started with HTTPie for API testing) [#]: via: 
(https://opensource.com/article/19/8/getting-started-httpie) [#]: author: (Moshe Zadka https://opensource.com/users/moshezhttps://opensource.com/users/mkalindepauleduhttps://opensource.com/users/jamesf) From 1ee87094944b17e9058671ab05f54c48e85775fc Mon Sep 17 00:00:00 2001 From: heguangzhi <7731226@qq.com> Date: Thu, 12 Sep 2019 10:53:11 +0800 Subject: [PATCH 019/202] translated by heguangzhi --- .../20190730 How to manage logs in Linux.md | 110 ----------------- .../20190730 How to manage logs in Linux.md | 111 ++++++++++++++++++ 2 files changed, 111 insertions(+), 110 deletions(-) delete mode 100644 sources/tech/20190730 How to manage logs in Linux.md create mode 100644 translated/tech/20190730 How to manage logs in Linux.md diff --git a/sources/tech/20190730 How to manage logs in Linux.md b/sources/tech/20190730 How to manage logs in Linux.md deleted file mode 100644 index 0449f0f048..0000000000 --- a/sources/tech/20190730 How to manage logs in Linux.md +++ /dev/null @@ -1,110 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: (heguangzhi) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (How to manage logs in Linux) -[#]: via: (https://www.networkworld.com/article/3428361/how-to-manage-logs-in-linux.html) -[#]: author: (Sandra Henry-Stocker https://www.networkworld.com/author/Sandra-Henry_Stocker/) - -How to manage logs in Linux -====== -Log files on Linux systems contain a LOT of information — more than you'll ever have time to view. Here are some tips on how you can make use of it without ... drowning in it. -![Greg Lobinski \(CC BY 2.0\)][1] - -Managing log files on Linux systems can be incredibly easy or painful. It all depends on what you mean by log management. - -If all you mean is how you can go about ensuring that your log files don’t eat up all the disk space on your Linux server, the issue is generally quite straightforward. Log files on Linux systems will automatically roll over, and the system will only maintain a fixed number of the rolled-over logs. Even so, glancing over what can easily be a group of 100 files can be overwhelming. In this post, we'll take a look at how the log rotation works and some of the most relevant log files. - -**[ Two-Minute Linux Tips: [Learn how to master a host of Linux commands in these 2-minute video tutorials][2] ]** - -### Automatic log rotation - -Log files rotate frequently. What is the current log acquires a slightly different file name and a new log file is established. Take the syslog file as an example. This file is something of a catch-all for a lot of normal system messages. If you **cd** over to **/var/log** and take a look, you’ll probably see a series of syslog files like this: - -``` -$ ls -l syslog* --rw-r----- 1 syslog adm 28996 Jul 30 07:40 syslog --rw-r----- 1 syslog adm 71212 Jul 30 00:00 syslog.1 --rw-r----- 1 syslog adm 5449 Jul 29 00:00 syslog.2.gz --rw-r----- 1 syslog adm 6152 Jul 28 00:00 syslog.3.gz --rw-r----- 1 syslog adm 7031 Jul 27 00:00 syslog.4.gz --rw-r----- 1 syslog adm 5602 Jul 26 00:00 syslog.5.gz --rw-r----- 1 syslog adm 5995 Jul 25 00:00 syslog.6.gz --rw-r----- 1 syslog adm 32924 Jul 24 00:00 syslog.7.gz -``` - -Rolled over at midnight each night, the older syslog files are kept for a week and then the oldest is deleted. The syslog.7.gz file will be tossed off the system and syslog.6.gz will be renamed syslog.7.gz. The remainder of the log files will follow suit until syslog becomes syslog.1 and a new syslog file is created. 
Some syslog files will be larger than others, but in general, none will likely ever get very large and you’ll never see more than eight of them. This gives you just over a week to review any data they collect. - -The number of files maintained for any particular log file depends on the log file itself. For some, you may have as many as 13. Notice how the older files – both for syslog and dpkg – are gzipped to save space. The thinking here is likely that you’ll be most interested in the recent logs. Older logs can be unzipped with **gunzip** as needed. - -``` -# ls -t dpkg* -dpkg.log dpkg.log.3.gz dpkg.log.6.gz dpkg.log.9.gz dpkg.log.12.gz -dpkg.log.1 dpkg.log.4.gz dpkg.log.7.gz dpkg.log.10.gz -dpkg.log.2.gz dpkg.log.5.gz dpkg.log.8.gz dpkg.log.11.gz -``` - -Log files can be rotated based on age, as well as by size. Keep this in mind as you examine your log files. - -Log file rotation can be configured differently if you are so inclined, though the defaults work for most Linux sysadmins. Take a look at files like **/etc/rsyslog.conf** and **/etc/logrotate.conf** for some of the details. - -### Making use of your log files - -Managing log files should also include using them from time to time. The first step in making use of log files should probably include getting used to what each log file can tell you about how your system is working and what problems it might have run into. Reading log files from top to bottom is almost never a good option, but knowing how to pull information from them can be of great benefit when you want to get a sense of how well your system is working or need to track down a problem. This also suggests that you have a general idea what kind of information is stored in each file. For example: - -``` -$ who wtmp | tail -10 show the most recent logins -$ who wtmp | grep shark show recent logins for a particular user -$ grep "sudo:" auth.log see who is using sudo -$ tail dmesg look at kernel messages -$ tail dpkg.log see recently installed and updated packages -$ more ufw.log see firewall activity (i.e., if you are using ufw) -``` - -Some commands that you run will also extract information from your log files. If you want to see, for example, a list of system reboots, you can use a command like this: - -``` -$ last reboot -reboot system boot 5.0.0-20-generic Tue Jul 16 13:19 still running -reboot system boot 5.0.0-15-generic Sat May 18 17:26 - 15:19 (21+21:52) -reboot system boot 5.0.0-13-generic Mon Apr 29 10:55 - 15:34 (18+04:39) -``` - -### Using more advanced log managers - -While you can write scripts to make it easier to find interesting information in your log files, you should also be aware that there are some very sophisticated tools available for log file analysis. Some correlate information from multiple sources to get a fuller picture of what’s happening on your network. They may provide real-time monitoring, as well. Tools such as [Solarwinds Log & Event Manager][3] and [PRTG Network Monitor][4] (which includes log monitoring) come to mind. - -There are also some free tools that can help with analyzing log files. These include: - - * **Logwatch** — program to scan system logs for interesting lines - * **Logcheck** — system log analyzer and reporter - - - -I'll provide some insights and help on these tools in upcoming posts. - -**[ Also see: [Invaluable tips and tricks for troubleshooting Linux][5] ]** - -Join the Network World communities on [Facebook][6] and [LinkedIn][7] to comment on topics that are top of mind. 
- --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3428361/how-to-manage-logs-in-linux.html - -作者:[Sandra Henry-Stocker][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/heguangzhi) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Sandra-Henry_Stocker/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2019/07/logs-100806633-large.jpg -[2]: https://www.youtube.com/playlist?list=PL7D2RMSmRO9J8OTpjFECi8DJiTQdd4hua -[3]: https://www.esecurityplanet.com/products/solarwinds-log-event-manager-siem.html -[4]: https://www.paessler.com/prtg -[5]: https://www.networkworld.com/article/3242170/linux/invaluable-tips-and-tricks-for-troubleshooting-linux.html -[6]: https://www.facebook.com/NetworkWorld/ -[7]: https://www.linkedin.com/company/network-world diff --git a/translated/tech/20190730 How to manage logs in Linux.md b/translated/tech/20190730 How to manage logs in Linux.md new file mode 100644 index 0000000000..2d95d97e89 --- /dev/null +++ b/translated/tech/20190730 How to manage logs in Linux.md @@ -0,0 +1,111 @@ +[#]: collector: (lujun9972) +[#]: translator: (heguangzhi) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (How to manage logs in Linux) +[#]: via: (https://www.networkworld.com/article/3428361/how-to-manage-logs-in-linux.html) +[#]: author: (Sandra Henry-Stocker https://www.networkworld.com/author/Sandra-Henry_Stocker/) + +如何在 Linux 中管理日志 +====== +Linux 系统上的日志文件包含了很多信息——比您有时间查看的还要多。以下是一些建议,告诉你如何正确的使用它们...而不是淹没在其中。 +![Greg Lobinski \(CC BY 2.0\)][1] + +在 Linux 系统上管理日志文件可能是非常容易,也可能是非常痛苦。这完全取决于您所说的日志管理是什么意思。 + +如果您的意思是如何确保日志文件不会耗尽您的 Linux 服务器上的所有磁盘空间,那么这个问题通常很简单。Linux 系统上的日志文件将自动覆盖,系统将只维护固定数量的覆盖日志。即便如此,浏览一下一组100个文件可能会让人不知所措。在这篇文章中,我们将看看循环日志是如何工作的,以及一些最相关的日志文件。 + +**[两分钟 Linux 技巧:[在这些两分钟视频教程中学习如何掌握大量 Linux 命令][2] ]** + +### 自动日志轮换 + +日志文件经常是循环使用的。当前的日志会获得稍微不同的文件名,并建立一个新的日志文件。以系统日志文件为例。对于许多正常的系统消息来说,这个文件是一个包罗万象的东西。如果您 **cd** 转到 **/var/log** 并查看一下,您可能会看到一系列系统日志文件,如下所示: + +``` +$ ls -l syslog* +-rw-r----- 1 syslog adm 28996 Jul 30 07:40 syslog +-rw-r----- 1 syslog adm 71212 Jul 30 00:00 syslog.1 +-rw-r----- 1 syslog adm 5449 Jul 29 00:00 syslog.2.gz +-rw-r----- 1 syslog adm 6152 Jul 28 00:00 syslog.3.gz +-rw-r----- 1 syslog adm 7031 Jul 27 00:00 syslog.4.gz +-rw-r----- 1 syslog adm 5602 Jul 26 00:00 syslog.5.gz +-rw-r----- 1 syslog adm 5995 Jul 25 00:00 syslog.6.gz +-rw-r----- 1 syslog adm 32924 Jul 24 00:00 syslog.7.gz +``` + +每天午夜将旧系统日志文件轮换使用,保留一周,然后删除最早的系统日志文件。syslog.7.gz 文件将被从系统中删除,syslog.6.gz 将被重命名为 syslog.7.gz。日志文件的其余部分将继续运行,直到 syslog 成 syslog.1 并创建一个新的系统日志文件。有些系统日志文件会比其他文件大,但是一般来说,没有一个文件可能会变得非常大,并且您永远不会看到超过八个。这给了你一个多星期的时间来回顾它们收集的任何数据。 + +为任何特定日志文件维护的文件数量取决于日志文件本身。对一些人来说,你可能有13个。请注意系统日志和 dpkg 的旧文件是如何压缩以节省空间的。可能是您对最近的日志最感兴趣。旧日志可以根据需要用 **gunzip** 解压。 + +``` +# ls -t dpkg* +dpkg.log dpkg.log.3.gz dpkg.log.6.gz dpkg.log.9.gz dpkg.log.12.gz +dpkg.log.1 dpkg.log.4.gz dpkg.log.7.gz dpkg.log.10.gz +dpkg.log.2.gz dpkg.log.5.gz dpkg.log.8.gz dpkg.log.11.gz +``` + +日志文件可以根据时间和大小进行轮换。检查日志文件时请记住这一点。 + +Log file rotation can be configured differently if you are so inclined, though the defaults work for most Linux sysadmins. Take a look at files like **/etc/rsyslog.conf** and **/etc/logrotate.conf** for some of the details. 
+ +尽管默认值适用于大多数 Linux 系统管理员,如果您愿意,可以对日志文件轮换进行不同的配置。查看这些文件,如 **/etc/rsyslog.conf** 和 **/etc/logrotate.conf** 。 + +### 利用您的日志文件 + +管理日志文件包括时不时的使用它们。使用日志文件第一步是每个日志文件可以告诉您的系统如何工作以及可能遇到的问题。从上到下读取日志文件几乎不是一个好的选择,但是当您想了解您的系统运行的情况或者需要跟踪一个问题时,知道如何从日志文件中获取信息会是有很大的好处。这也表明您对每个文件中存储的信息有一个大致的了解了。例如: + +``` +$ who wtmp | tail -10 show the most recent logins +$ who wtmp | grep shark show recent logins for a particular user +$ grep "sudo:" auth.log see who is using sudo +$ tail dmesg look at kernel messages +$ tail dpkg.log see recently installed and updated packages +$ more ufw.log see firewall activity (i.e., if you are using ufw) +``` + +您运行的一些命令也会从日志文件中提取信息。例如,如果您想查看系统重新启动的列表,可以使用如下命令: + +``` +$ last reboot +reboot system boot 5.0.0-20-generic Tue Jul 16 13:19 still running +reboot system boot 5.0.0-15-generic Sat May 18 17:26 - 15:19 (21+21:52) +reboot system boot 5.0.0-13-generic Mon Apr 29 10:55 - 15:34 (18+04:39) +``` + +### 使用更高级的日志管理器 + +虽然您编写脚本来更容易地在日志文件中找到感兴趣的信息,但是您也应该知道有一些非常复杂的工具可用于日志文件分析。一些人把来自多个来源的信息联系起来,以便更全面地了解您的网络上发生了什么。它们也可以提供实时监控。这些工具,如[Solarwinds Log & Event Manager][3]和[PRTG 网络监视器][4](包括日志监视)浮现在脑海中。 + +还有一些免费工具可以帮助分析日志文件。其中包括: + + * **Logwatch** — 用于扫描系统日志中感兴趣的行的程序 + * **Logcheck** — 系统日志分析器和报告器 + + +在接下来的帖子中,我将提供一些关于这些工具的见解和帮助。 + +**[另请参阅:[排除 Linux 故障的宝贵技巧和诀窍][5] ]** + +加入[Facebook][6] 和[LinkedIn][7] 上的网络世界社区,就您最关心的话题发表评论。 + +-------------------------------------------------------------------------------- + +via: https://www.networkworld.com/article/3428361/how-to-manage-logs-in-linux.html + +作者:[Sandra Henry-Stocker][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/heguangzhi) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://www.networkworld.com/author/Sandra-Henry_Stocker/ +[b]: https://github.com/lujun9972 +[1]: https://images.idgesg.net/images/article/2019/07/logs-100806633-large.jpg +[2]: https://www.youtube.com/playlist?list=PL7D2RMSmRO9J8OTpjFECi8DJiTQdd4hua +[3]: https://www.esecurityplanet.com/products/solarwinds-log-event-manager-siem.html +[4]: https://www.paessler.com/prtg +[5]: https://www.networkworld.com/article/3242170/linux/invaluable-tips-and-tricks-for-troubleshooting-linux.html +[6]: https://www.facebook.com/NetworkWorld/ +[7]: https://www.linkedin.com/company/network-world From b7a987a5a0b0f7a23b7247915ec354356887fbe4 Mon Sep 17 00:00:00 2001 From: Xingyu Wang Date: Thu, 12 Sep 2019 11:31:40 +0800 Subject: [PATCH 020/202] PRF @geekpi --- ...Shutter Screenshot Tool in Ubuntu 19.04.md | 20 +++++++++---------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/translated/tech/20190909 How to Install Shutter Screenshot Tool in Ubuntu 19.04.md b/translated/tech/20190909 How to Install Shutter Screenshot Tool in Ubuntu 19.04.md index 62be6d205f..f802e60a46 100644 --- a/translated/tech/20190909 How to Install Shutter Screenshot Tool in Ubuntu 19.04.md +++ b/translated/tech/20190909 How to Install Shutter Screenshot Tool in Ubuntu 19.04.md @@ -1,6 +1,6 @@ [#]: collector: (lujun9972) [#]: translator: (geekpi) -[#]: reviewer: ( ) +[#]: reviewer: (wxy) [#]: publisher: ( ) [#]: url: ( ) [#]: subject: (How to Install Shutter Screenshot Tool in Ubuntu 19.04) @@ -10,19 +10,20 @@ 如何在 Ubuntu 19.04 中安装 Shutter 截图工具 ====== -Shutter 是我在 [Linux 中最喜欢的截图工具][1]。你可以使用它截图,还可以用它编辑截图或其他图像。它是一个在图像上添加箭头和文本的不错的工具。你也可以使用它在 Ubuntu 或其他你使用的发行版中[调整图像大小][2]。FOSS 上大多数截图教程都使用 Shutter 编辑。 +Shutter 是我在 [Linux 中最喜欢的截图工具][1]。你可以使用它截图,还可以用它编辑截图或其他图像。它是一个在图像上添加箭头和文本的不错的工具。你也可以使用它在 Ubuntu 
或其它你使用的发行版中[调整图像大小][2]。FOSS 上大多数截图教程都使用 Shutter 编辑。 + +![Install Shutter Ubuntu][8] -![][3] 虽然 [Shutter][4] 一直是一款很棒的工具,但它的开发却停滞了。这几年来一直没有新版本的 Shutter。甚至像 [Shutter 中编辑模式被禁用][5]这样的简单 bug 也没有修复。根本没有开发者的消息。 -也许这就是为什么 Ubuntu 的新版本放弃它的原因。在 Ubuntu 18.04 LTS 之前,你可以在软件中心,或者[启用 universe 仓库][7]来[使用 apt-get 命令][6]安装它。但是从 Ubuntu 18.10 及更高版本开始,你就不能再这样做了。 +也许这就是为什么新版本的 Ubuntu 放弃它的原因。在 Ubuntu 18.04 LTS 之前,你可以在软件中心,或者[启用 universe 仓库][7]来[使用 apt-get 命令][6]安装它。但是从 Ubuntu 18.10 及更高版本开始,你就不能再这样做了。 抛开这些缺点,Shutter 是一个很好的工具,我想继续使用它。也许你也是像我这样的 Shutter 粉丝,并且想要使用它。好的方面是你仍然可以在 Ubuntu 19.04 中安装 Shutter,这要归功于非官方 PPA。 ### 在 Ubuntu 19.04 上安装 Shutter -![Install Shutter Ubuntu][8] +![][3] 我希望你了解 PPA 的概念。如果不了解,我强烈建议阅读我的指南,以了解更多关于[什么是 PPA 以及如何使用它][9]。 @@ -32,9 +33,9 @@ Shutter 是我在 [Linux 中最喜欢的截图工具][1]。你可以使用它截 sudo add-apt-repository -y ppa:linuxuprising/shutter ``` -不需要再使用 apt update,因为从 Ubuntu 18.04 开始,仓库会在添加新条目后自动更新。 +不需要再使用 `apt update`,因为从 Ubuntu 18.04 开始,仓库会在添加新条目后自动更新。 -现在使用 apt 命令安装 Shutter: +现在使用 `apt` 命令安装 Shutter: ``` sudo apt install shutter @@ -42,7 +43,6 @@ sudo apt install shutter 完成。你应该已经安装 Shutter 截图工具。你可从菜单搜索并启动它。 - ### 删除通过非官方 PPA 安装的 Shutter 最后我以卸载 Shutter 以及删除添加的仓库来结束教程。 @@ -61,8 +61,6 @@ sudo add-apt-repository --remove ppa:linuxuprising/shutter 你或许还想了解 [Y PPA Manager][11],这是一款 PPA 图形管理工具。 -Shutter - Shutter 是一个很好的工具,我希望它能被积极开发。我希望它的开发人员没问题,他/她可以找一些时间来处理它。或者是时候让其他人分叉并继续让它变得更棒。 -------------------------------------------------------------------------------- @@ -72,7 +70,7 @@ via: https://itsfoss.com/install-shutter-ubuntu/ 作者:[Abhishek Prakash][a] 选题:[lujun9972][b] 译者:[geekpi](https://github.com/geekpi) -校对:[校对者ID](https://github.com/校对者ID) +校对:[wxy](https://github.com/wxy) 本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 From 64db15f91c875ff53a784bd829189f23a64657d4 Mon Sep 17 00:00:00 2001 From: Xingyu Wang Date: Thu, 12 Sep 2019 11:33:01 +0800 Subject: [PATCH 021/202] PUB @geekpi https://linux.cn/article-11335-1.html --- ... 
How to Install Shutter Screenshot Tool in Ubuntu 19.04.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) rename {translated/tech => published}/20190909 How to Install Shutter Screenshot Tool in Ubuntu 19.04.md (98%) diff --git a/translated/tech/20190909 How to Install Shutter Screenshot Tool in Ubuntu 19.04.md b/published/20190909 How to Install Shutter Screenshot Tool in Ubuntu 19.04.md similarity index 98% rename from translated/tech/20190909 How to Install Shutter Screenshot Tool in Ubuntu 19.04.md rename to published/20190909 How to Install Shutter Screenshot Tool in Ubuntu 19.04.md index f802e60a46..fd526ef267 100644 --- a/translated/tech/20190909 How to Install Shutter Screenshot Tool in Ubuntu 19.04.md +++ b/published/20190909 How to Install Shutter Screenshot Tool in Ubuntu 19.04.md @@ -1,8 +1,8 @@ [#]: collector: (lujun9972) [#]: translator: (geekpi) [#]: reviewer: (wxy) -[#]: publisher: ( ) -[#]: url: ( ) +[#]: publisher: (wxy) +[#]: url: (https://linux.cn/article-11335-1.html) [#]: subject: (How to Install Shutter Screenshot Tool in Ubuntu 19.04) [#]: via: (https://itsfoss.com/install-shutter-ubuntu/) [#]: author: (Abhishek Prakash https://itsfoss.com/author/abhishek/) From edd4b6e59deca225cb213197c94827bd515979fe Mon Sep 17 00:00:00 2001 From: heguangzhi <7731226@qq.com> Date: Thu, 12 Sep 2019 14:57:59 +0800 Subject: [PATCH 022/202] Update from heguangzhi How to Create and Swap file --- .../tech/20190830 How to Create and Use Swap File on Linux.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sources/tech/20190830 How to Create and Use Swap File on Linux.md b/sources/tech/20190830 How to Create and Use Swap File on Linux.md index bfda3bcdbe..ab9aef0afc 100644 --- a/sources/tech/20190830 How to Create and Use Swap File on Linux.md +++ b/sources/tech/20190830 How to Create and Use Swap File on Linux.md @@ -1,7 +1,7 @@ [#]: collector: (lujun9972) [#]: translator: (hello-wn) [#]: reviewer: ( ) -[#]: publisher: ( ) +[#]: publisher: (heguangzhi) [#]: url: ( ) [#]: subject: (How to Create and Use Swap File on Linux) [#]: via: (https://itsfoss.com/create-swap-file-linux/) @@ -243,7 +243,7 @@ via: https://itsfoss.com/create-swap-file-linux/ 作者:[Abhishek Prakash][a] 选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) +译者:[译者ID](https://github.com/heguangzhi) 校对:[校对者ID](https://github.com/校对者ID) 本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 From ba2cf1ea482f04f36d42ea01a16c01144f8f15e7 Mon Sep 17 00:00:00 2001 From: heguangzhi <7731226@qq.com> Date: Thu, 12 Sep 2019 15:53:06 +0800 Subject: [PATCH 023/202] translated by heguangzhi How to Create and Use Swap on Linux --- ...ow to Create and Use Swap File on Linux.md | 261 ----------------- ...ow to Create and Use Swap File on Linux.md | 263 ++++++++++++++++++ 2 files changed, 263 insertions(+), 261 deletions(-) delete mode 100644 sources/tech/20190830 How to Create and Use Swap File on Linux.md create mode 100644 translated/tech/20190830 How to Create and Use Swap File on Linux.md diff --git a/sources/tech/20190830 How to Create and Use Swap File on Linux.md b/sources/tech/20190830 How to Create and Use Swap File on Linux.md deleted file mode 100644 index ab9aef0afc..0000000000 --- a/sources/tech/20190830 How to Create and Use Swap File on Linux.md +++ /dev/null @@ -1,261 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: (hello-wn) -[#]: reviewer: ( ) -[#]: publisher: (heguangzhi) -[#]: url: ( ) -[#]: subject: (How to Create and Use Swap File on Linux) 
-[#]: via: (https://itsfoss.com/create-swap-file-linux/) -[#]: author: (Abhishek Prakash https://itsfoss.com/author/abhishek/) - -How to Create and Use Swap File on Linux -====== - -This tutorial discusses the concept of swap file in Linux, why it is used and its advantages over the traditional swap partition. You’ll learn how to create swap file or resize it. - -### What is a swap file in Linux? - -A swap file allows Linux to simulate the disk space as RAM. When your system starts running out of RAM, it uses the swap space to and swaps some content of the RAM on to the disk space. This frees up the RAM to serve more important processes. When the RAM is free again, it swaps back the data from the disk. I recommend [reading this article to learn more about swap on Linux][1]. - -Traditionally, swap space is used as a separate partition on the disk. When you install Linux, you create a separate partition just for swap. But this trend has changed in the recent years. - -With swap file, you don’t need a separate partition anymore. You create a file under root and tell your system to use it as the swap space. - -With dedicated swap partition, resizing the swap space is a nightmare and an impossible task in many cases. But with swap files, you can resize them as you like. - -Recent versions of Ubuntu and some other Linux distributions have started [using the swap file by default][2]. Even if you don’t create a swap partition, Ubuntu creates a swap file of around 1 GB on its own. - -Let’s see some more on swap files. - -![][3] - -### Check swap space in Linux - -Before you go and start adding swap space, it would be a good idea to check whether you have swap space already available in your system. - -You can check it with the [free command in Linux][4]. In my case, my [Dell XPS][5] has 14GB of swap. - -``` -free -h - total used free shared buff/cache available -Mem: 7.5G 4.1G 267M 971M 3.1G 2.2G -Swap: 14G 0B 14G -``` - -The free command gives you the size of the swap space but it doesn’t tell you if it’s a real swap partition or a swap file. The swapon command is better in this regard. - -``` -swapon --show -NAME TYPE SIZE USED PRIO -/dev/nvme0n1p4 partition 14.9G 0B -2 -``` - -As you can see, I have 14.9 GB of swap space and it’s on a separate partition. If it was a swap file, the type would have been file instead of partition. - -``` -swapon --show -NAME TYPE SIZE USED PRIO -/swapfile file 2G 0B -2 -``` - -If you don’ have a swap space on your system, it should show something like this: - -``` -free -h - total used free shared buff/cache available -Mem: 7.5G 4.1G 267M 971M 3.1G 2.2G -Swap: 0B 0B 0B -``` - -The swapon command won’t show any output. - -### Create swap file on Linux - -If your system doesn’t have swap space or if you think the swap space is not adequate enough, you can create swap file on Linux. You can create multiple swap files as well. - -[][6] - -Suggested read  Fix Missing System Settings In Ubuntu 14.04 [Quick Tip] - -Let’s see how to create swap file on Linux. I am using Ubuntu 18.04 in this tutorial but it should work on other Linux distributions as well. - -#### Step 1: Make a new swap file - -First thing first, create a file with the size of swap space you want. Let’s say that I want to add 1 GB of swap space to my system. Use the fallocate command to create a file of size 1 GB. - -``` -sudo fallocate -l 1G /swapfile -``` - -It is recommended to allow only root to read and write to the swap file. 
You'll even see a warning like "insecure permissions 0644, 0600 suggested" when you try to use this file for the swap area.

```
sudo chmod 600 /swapfile
```

Do note that the name of the swap file could be anything. If you need multiple swap spaces, you can give each one any appropriate name like swap_file_1, swap_file_2 etc. It's just a file with a predefined size.

#### Step 2: Mark the new file as swap space

You need to tell the Linux system that this file will be used as swap space. You can do that with the [mkswap][7] tool.

```
sudo mkswap /swapfile
```

You should see an output like this:

```
Setting up swapspace version 1, size = 1024 MiB (1073737728 bytes)
no label, UUID=7e1faacb-ea93-4c49-a53d-fb40f3ce016a
```

#### Step 3: Enable the swap file

Now your system knows that the file swapfile can be used as swap space. But it is not done yet. You need to enable the swap file so that your system can start using this file as swap.

```
sudo swapon /swapfile
```

Now if you check the swap space, you should see that your Linux system recognizes and uses it as the swap area:

```
swapon --show
NAME TYPE SIZE USED PRIO
/swapfile file 1024M 0B -2
```

#### Step 4: Make the changes permanent

Whatever you have done so far is temporary. Reboot your system and all the changes will disappear.

You can make the changes permanent by adding the newly created swap file to the /etc/fstab file.

It's always a good idea to make a backup before you make any changes to the /etc/fstab file.

```
sudo cp /etc/fstab /etc/fstab.back
```

Now you can add the following line to the end of the /etc/fstab file:

```
/swapfile none swap sw 0 0
```

You can do it manually using a [command line text editor][8], or just use the following command:

```
echo '/swapfile none swap sw 0 0' | sudo tee -a /etc/fstab
```

Now you have everything in place. Your swap file will be used even after you reboot your Linux system.

### Adjust swappiness

The swappiness parameter determines how often the swap space should be used. The swappiness value ranges from 0 to 100. A higher value means the swap space will be used more frequently.

The default swappiness in Ubuntu desktop is 60 while on servers it is 1. You can check the swappiness with the following command:

```
cat /proc/sys/vm/swappiness
```

Why should servers use a low swappiness? Because swap is slower than RAM, and for better performance, the RAM should be utilized as much as possible. On servers, the performance factor is crucial and hence the swappiness is as low as possible.

You can change the swappiness on the fly using the following sysctl command:

```
sudo sysctl vm.swappiness=25
```

This change is only temporary, though. If you want to make it permanent, you can edit the /etc/sysctl.conf file and add the swappiness value at the end of the file:

```
vm.swappiness=25
```

### Resizing swap space on Linux

There are a couple of ways you can resize the swap space on Linux. But before you see that, you should learn a few things around it.

When you ask your system to stop using a swap file for the swap area, it transfers all the data (pages, to be precise) back to RAM. So you should have enough free RAM before you swap off.

This is why a good practice is to create and enable another temporary swap file.
This way, when you swap off the original swap area, your system will use the temporary swap file. Now you can resize the original swap space. You can manually remove the temporary swap file or leave it as it is and it will be automatically deleted on the next boot. - -If you have enough free RAM or if you created a temporary swap space, swapoff your original file. - -``` -sudo swapoff /swapfile -``` - -Now you can use fallocate command to change the size of the file. Let’s say, you change it to 2 GB in size: - -``` -sudo fallocate -l 2G /swapfile -``` - -Now mark the file as swap space again: - -``` -sudo mkswap /swapfile -``` - -And turn the swap on again: - -``` -sudo swapon /swapfile -``` - -You may also choose to have multiple swap files at the same time. - -### Removing swap file in Linux - -You may have your reasons for not using swap file on Linux. If you want to remove it, the process is similar to what you just saw in resizing the swap. - -First, make sure that you have enough free RAM. Now swap off the file: - -``` -sudo swapoff /swapfile -``` - -The next step is to remove the respective entry from the /etc/fstab file. - -And in the end, you can remove the file to free up the space: - -``` -sudo rm /swapfile -``` - -**Do you swap?** - -I think you now have a good understanding of swap file concept in Linux. You can now easily create swap file or resize them as per your need. - -If you have anything to add on this topic or if you have any doubts, please leave a comment below. - --------------------------------------------------------------------------------- - -via: https://itsfoss.com/create-swap-file-linux/ - -作者:[Abhishek Prakash][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/heguangzhi) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://itsfoss.com/author/abhishek/ -[b]: https://github.com/lujun9972 -[1]: https://itsfoss.com/swap-size/ -[2]: https://help.ubuntu.com/community/SwapFaq -[3]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2019/08/swap-file-linux.png?resize=800%2C450&ssl=1 -[4]: https://linuxhandbook.com/free-command/ -[5]: https://itsfoss.com/dell-xps-13-ubuntu-review/ -[6]: https://itsfoss.com/fix-missing-system-settings-ubuntu-1404-quick-tip/ -[7]: http://man7.org/linux/man-pages/man8/mkswap.8.html -[8]: https://itsfoss.com/command-line-text-editors-linux/ -[9]: https://itsfoss.com/replace-linux-from-dual-boot/ diff --git a/translated/tech/20190830 How to Create and Use Swap File on Linux.md b/translated/tech/20190830 How to Create and Use Swap File on Linux.md new file mode 100644 index 0000000000..6c5e1561be --- /dev/null +++ b/translated/tech/20190830 How to Create and Use Swap File on Linux.md @@ -0,0 +1,263 @@ +[#]: collector: (lujun9972) +[#]: translator: (hello-wn) +[#]: reviewer: ( ) +[#]: publisher: (heguangzhi) +[#]: url: ( ) +[#]: subject: (How to Create and Use Swap File on Linux) +[#]: via: (https://itsfoss.com/create-swap-file-linux/) +[#]: author: (Abhishek Prakash https://itsfoss.com/author/abhishek/) + +如何在 Linux 上创建和使用交换文件 +====== + +本教程讨论了 Linux 中交换文件的概念,为什么使用它以及它相对于传统交换分区的优势。您将学习如何创建交换文件和调整其大小。 + +### 什么是 Linux 的交换文件? 
+
+交换文件允许 Linux 将磁盘空间模拟为内存。当您的系统开始耗尽内存时,它会使用交换空间将内存中的部分内容交换到磁盘空间上。这样就释放出了内存,可以服务于更重要的进程。当内存再次空闲时,它会从磁盘交换回数据。我建议[阅读这篇文章,了解更多关于 Linux 上交换空间的内容][1]。
+
+传统上,交换空间被用作磁盘上的一个独立分区。安装 Linux 时,只需创建一个单独的分区用于交换。但是这种趋势在最近几年发生了变化。
+
+使用交换文件,您不再需要单独的分区。您只需在根目录下创建一个文件,并告诉您的系统将其用作交换空间就行了。
+
+使用专用的交换分区,在许多情况下,调整交换空间的大小是一个噩梦,甚至是一项不可能完成的任务。但是有了交换文件,您可以随意调整它们的大小。
+
+最新版本的 Ubuntu 和其他一些 Linux 发行版已经开始[默认使用交换文件][2]。即使您没有创建交换分区,Ubuntu 也会自己创建一个 1GB 左右的交换文件。
+
+让我们看看交换文件的更多信息。
+
+![][3]
+
+### 检查 Linux 的交换空间
+
+在您开始添加交换空间之前,最好检查一下您的系统中是否已经有了交换空间。
+
+您可以用 [Linux 上的 free 命令][4]检查它。就我而言,我的[戴尔 XPS][5] 有 14GB 的交换容量。
+
+```
+free -h
+ total used free shared buff/cache available
+Mem: 7.5G 4.1G 267M 971M 3.1G 2.2G
+Swap: 14G 0B 14G
+```
+
+free 命令给出了交换空间的大小,但它并没有告诉您这是真正的交换分区还是交换文件。swapon 命令在这方面会更好。
+
+```
+swapon --show
+NAME TYPE SIZE USED PRIO
+/dev/nvme0n1p4 partition 14.9G 0B -2
+```
+
+如您所见,我有 14.9GB 的交换空间,它在一个单独的分区上。如果是交换文件,TYPE 一列将是 file 而不是 partition。
+
+```
+swapon --show
+NAME TYPE SIZE USED PRIO
+/swapfile file 2G 0B -2
+```
+
+如果您的系统上没有交换空间,它应该显示如下内容:
+
+```
+free -h
+ total used free shared buff/cache available
+Mem: 7.5G 4.1G 267M 971M 3.1G 2.2G
+Swap: 0B 0B 0B
+```
+
+而 swapon 命令不会显示任何输出。
+
+### 在 Linux 上创建交换文件
+
+如果您的系统没有交换空间,或者您认为交换空间不足,您可以在 Linux 上创建交换文件。您也可以创建多个交换文件。
+
+让我们看看如何在 Linux 上创建交换文件。我在本教程中使用 Ubuntu 18.04,但它也应该适用于其他 Linux 发行版。
+
+#### 步骤1:创建一个新的交换文件
+
+首先,创建一个具有所需交换空间大小的文件。假设我想给我的系统增加 1GB 的交换空间,就使用 fallocate 命令创建一个大小为 1GB 的文件。
+
+```
+sudo fallocate -l 1G /swapfile
+```
+
+建议只允许 root 用户读写该交换文件。当您尝试将此文件用作交换区域时,您甚至会看到类似“不安全的权限 0644,建议使用 0600”的警告。
+
+```
+sudo chmod 600 /swapfile
+```
+
+请注意,交换文件的名称可以是任意的。如果您需要多个交换空间,您可以给它任何合适的名称,如 swap_file_1、swap_file_2 等。它只是一个预定义大小的文件。
+
+#### 步骤2:将新文件标记为交换空间
+
+您需要告诉 Linux 系统该文件将被用作交换空间。您可以用 [mkswap][7] 工具做到这一点。
+
+```
+sudo mkswap /swapfile
+```
+
+您应该会看到这样的输出:
+
+```
+Setting up swapspace version 1, size = 1024 MiB (1073737728 bytes)
+no label, UUID=7e1faacb-ea93-4c49-a53d-fb40f3ce016a
+```
+
+#### 步骤3:启用交换文件
+
+现在,您的系统知道文件 /swapfile 可以用作交换空间了。但是还没有完成,您需要启用该交换文件,系统才能开始将这个文件用作交换空间。
+
+```
+sudo swapon /swapfile
+```
+
+现在,如果您检查交换空间,您应该会看到您的 Linux 系统已经识别并将它用作交换区域:
+
+```
+swapon --show
+NAME TYPE SIZE USED PRIO
+/swapfile file 1024M 0B -2
+```
+
+#### 步骤4:让更改持久化
+
+迄今为止您所做的一切都是暂时的。重新启动系统,所有更改都将消失。
+
+您可以通过将新创建的交换文件添加到 /etc/fstab 文件来使更改持久化。
+
+对 /etc/fstab 文件进行任何更改之前,最好先进行备份。
+
+```
+sudo cp /etc/fstab /etc/fstab.back
+```
+
+现在将以下行添加到 /etc/fstab 文件的末尾:
+
+```
+/swapfile none swap sw 0 0
+```
+
+您可以使用[命令行文本编辑器][8]手动添加,或者直接使用以下命令:
+
+```
+echo '/swapfile none swap sw 0 0' | sudo tee -a /etc/fstab
+```
+
+现在一切都准备好了。即使在重新启动您的 Linux 系统后,您的交换文件也会被使用。
+
+### 调整 swappiness
+
+swappiness 参数决定了交换空间的使用频率。swappiness 值的范围从 0 到 100,较高的值意味着交换空间将被更频繁地使用。
+
+Ubuntu 桌面的默认 swappiness 是 60,而服务器的默认 swappiness 是 1。您可以使用以下命令检查 swappiness 值:
+
+```
+cat /proc/sys/vm/swappiness
+```
+
+为什么服务器应该使用较低的 swappiness?因为交换空间比内存慢,为了获得更好的性能,应该尽可能多地使用内存。在服务器上,性能因素至关重要,因此 swappiness 要设置得尽可能低。
+
+您可以使用以下 sysctl 命令动态更改 swappiness 值:
+
+```
+sudo sysctl vm.swappiness=25
+```
+
+这种更改只是暂时的。如果要使其永久化,可以编辑 /etc/sysctl.conf 文件,并在文件末尾添加 swappiness 值:
+
+```
+vm.swappiness=25
+```
+
+### 在 Linux 上调整交换空间的大小
+
+在 Linux 上有几种方法可以调整交换空间的大小。但在动手之前,您应该先了解一些相关的事情。
+
+当您要求系统停止将交换文件用作交换区域时,它会将所有数据(确切地说是内存页)传输回内存。所以在停用交换文件之前,您应该有足够的空闲内存。
+
+这就是为什么一个好的做法是先创建并启用另一个临时交换文件。这样,当您停用原始交换区域时,您的系统将使用这个临时交换文件。之后您就可以调整原始交换空间的大小。您可以手动删除临时交换文件或保持原样,下次启动时会自动删除。
+
+如果您有足够的可用内存,或者已经创建了临时交换空间,那就停用(swapoff)您的原始交换文件。
+
+```
+sudo swapoff /swapfile
+```
+
+现在您可以使用 fallocate 命令来更改文件的大小。比方说,您将其大小更改为 2GB:
+
+```
+sudo fallocate -l 2G /swapfile
+```
+
+现在再次将文件标记为交换空间:
+
+```
+sudo mkswap /swapfile
+```
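+
+在重新启用之前,您可以顺便确认一下文件的新大小(示意命令,这里假设交换文件的名字仍然是 /swapfile):
+
+```
+ls -lh /swapfile
+```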
+ +并再次使交换文件上线: + +``` +sudo swapon /swapfile +``` +您也可以选择同时拥有多个交换文件。 + +### 删除 Linux 中的交换文件 + +您可能有不在 Linux 上使用交换文件的原因。如果您想删除它,该过程类似于您刚才看到的调整交换大小的过程。 + +首先,确保你有足够的空闲内存。现在使交换文件离线: + +``` +sudo swapoff /swapfile +``` + +下一步是从 /etc/fstab 文件中删除相应的条目。 + +最后,您可以删除文件来释放空间: + +``` +sudo rm /swapfile +``` + +**你交换吗?** + +我想您现在已经很好地理解了 Linux 中的交换文件概念。现在,您可以根据需要轻松创建交换文件或调整它们的大小。 + +如果你对这个话题有什么要补充的或者有任何疑问,请在下面留下评论。 + +-------------------------------------------------------------------------------- + +via: https://itsfoss.com/create-swap-file-linux/ + +作者:[Abhishek Prakash][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/heguangzhi) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://itsfoss.com/author/abhishek/ +[b]: https://github.com/lujun9972 +[1]: https://itsfoss.com/swap-size/ +[2]: https://help.ubuntu.com/community/SwapFaq +[3]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2019/08/swap-file-linux.png?resize=800%2C450&ssl=1 +[4]: https://linuxhandbook.com/free-command/ +[5]: https://itsfoss.com/dell-xps-13-ubuntu-review/ +[6]: https://itsfoss.com/fix-missing-system-settings-ubuntu-1404-quick-tip/ +[7]: http://man7.org/linux/man-pages/man8/mkswap.8.html +[8]: https://itsfoss.com/command-line-text-editors-linux/ +[9]: https://itsfoss.com/replace-linux-from-dual-boot/ From 2bf6f0201b226ff35fc9d3652d3e13831806b384 Mon Sep 17 00:00:00 2001 From: LazyWolf Lin Date: Thu, 12 Sep 2019 16:52:13 +0800 Subject: [PATCH 024/202] Translated Why const Doesn't Make C Code Faster. --- ...12 Why const Doesn-t Make C Code Faster.md | 402 ------------------ ...12 Why const Doesn-t Make C Code Faster.md | 42 +- 2 files changed, 22 insertions(+), 422 deletions(-) delete mode 100644 sources/tech/20190812 Why const Doesn-t Make C Code Faster.md diff --git a/sources/tech/20190812 Why const Doesn-t Make C Code Faster.md b/sources/tech/20190812 Why const Doesn-t Make C Code Faster.md deleted file mode 100644 index c30d5bddfe..0000000000 --- a/sources/tech/20190812 Why const Doesn-t Make C Code Faster.md +++ /dev/null @@ -1,402 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: (LazyWolfLin) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Why const Doesn't Make C Code Faster) -[#]: via: (https://theartofmachinery.com/2019/08/12/c_const_isnt_for_performance.html) -[#]: author: (Simon Arneaud https://theartofmachinery.com) - -Why const Doesn't Make C Code Faster -====== - -In a post a few months back I said [it’s a popular myth that `const` is helpful for enabling compiler optimisations in C and C++][1]. I figured I should explain that one, especially because I used to believe it was obviously true, myself. I’ll start off with some theory and artificial examples, then I’ll do some experiments and benchmarks on a real codebase: Sqlite. - -### A simple test - -Let’s start with what I used to think was the simplest and most obvious example of how `const` can make C code faster. First, let’s say we have these two function declarations: - -``` -void func(int *x); -void constFunc(const int *x); -``` - -And suppose we have these two versions of some code: - -``` -void byArg(int *x) -{ - printf("%d\n", *x); - func(x); - printf("%d\n", *x); -} - -void constByArg(const int *x) -{ - printf("%d\n", *x); - constFunc(x); - printf("%d\n", *x); -} -``` - -To do the `printf()`, the CPU has to fetch the value of `*x` from RAM through the pointer. 
Obviously, `constByArg()` can be made slightly faster because the compiler knows that `*x` is constant, so there’s no need to load its value a second time after `constFunc()` does its thing. It’s just printing the same thing. Right? Let’s see the assembly code generated by GCC with optimisations cranked up: - -``` -$ gcc -S -Wall -O3 test.c -$ view test.s -``` - -Here’s the full assembly output for `byArg()`: - -``` -byArg: -.LFB23: - .cfi_startproc - pushq %rbx - .cfi_def_cfa_offset 16 - .cfi_offset 3, -16 - movl (%rdi), %edx - movq %rdi, %rbx - leaq .LC0(%rip), %rsi - movl $1, %edi - xorl %eax, %eax - call __printf_chk@PLT - movq %rbx, %rdi - call func@PLT # The only instruction that's different in constFoo - movl (%rbx), %edx - leaq .LC0(%rip), %rsi - xorl %eax, %eax - movl $1, %edi - popq %rbx - .cfi_def_cfa_offset 8 - jmp __printf_chk@PLT - .cfi_endproc -``` - -The only difference between the generated assembly code for `byArg()` and `constByArg()` is that `constByArg()` has a `call constFunc@PLT`, just like the source code asked. The `const` itself has literally made zero difference. - -Okay, that’s GCC. Maybe we just need a sufficiently smart compiler. Is Clang any better? - -``` -$ clang -S -Wall -O3 -emit-llvm test.c -$ view test.ll -``` - -Here’s the IR. It’s more compact than assembly, so I’ll dump both functions so you can see what I mean by “literally zero difference except for the call”: - -``` -; Function Attrs: nounwind uwtable -define dso_local void @byArg(i32*) local_unnamed_addr #0 { - %2 = load i32, i32* %0, align 4, !tbaa !2 - %3 = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i64 0, i64 0), i32 %2) - tail call void @func(i32* %0) #4 - %4 = load i32, i32* %0, align 4, !tbaa !2 - %5 = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i64 0, i64 0), i32 %4) - ret void -} - -; Function Attrs: nounwind uwtable -define dso_local void @constByArg(i32*) local_unnamed_addr #0 { - %2 = load i32, i32* %0, align 4, !tbaa !2 - %3 = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i64 0, i64 0), i32 %2) - tail call void @constFunc(i32* %0) #4 - %4 = load i32, i32* %0, align 4, !tbaa !2 - %5 = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i64 0, i64 0), i32 %4) - ret void -} -``` - -### Something that (sort of) works - -Here’s some code where `const` actually does make a difference: - -``` -void localVar() -{ - int x = 42; - printf("%d\n", x); - constFunc(&x); - printf("%d\n", x); -} - -void constLocalVar() -{ - const int x = 42; // const on the local variable - printf("%d\n", x); - constFunc(&x); - printf("%d\n", x); -} -``` - -Here’s the assembly for `localVar()`, which has two instructions that have been optimised out of `constLocalVar()`: - -``` -localVar: -.LFB25: - .cfi_startproc - subq $24, %rsp - .cfi_def_cfa_offset 32 - movl $42, %edx - movl $1, %edi - movq %fs:40, %rax - movq %rax, 8(%rsp) - xorl %eax, %eax - leaq .LC0(%rip), %rsi - movl $42, 4(%rsp) - call __printf_chk@PLT - leaq 4(%rsp), %rdi - call constFunc@PLT - movl 4(%rsp), %edx # not in constLocalVar() - xorl %eax, %eax - movl $1, %edi - leaq .LC0(%rip), %rsi # not in constLocalVar() - call __printf_chk@PLT - movq 8(%rsp), %rax - xorq %fs:40, %rax - jne .L9 - addq $24, %rsp - .cfi_remember_state - .cfi_def_cfa_offset 8 - ret -.L9: - .cfi_restore_state - call __stack_chk_fail@PLT - .cfi_endproc -``` - -The LLVM IR is a little clearer. 
The `load` just before the second `printf()` call has been optimised out of `constLocalVar()`: - -``` -; Function Attrs: nounwind uwtable -define dso_local void @localVar() local_unnamed_addr #0 { - %1 = alloca i32, align 4 - %2 = bitcast i32* %1 to i8* - call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %2) #4 - store i32 42, i32* %1, align 4, !tbaa !2 - %3 = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i64 0, i64 0), i32 42) - call void @constFunc(i32* nonnull %1) #4 - %4 = load i32, i32* %1, align 4, !tbaa !2 - %5 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i64 0, i64 0), i32 %4) - call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %2) #4 - ret void -} -``` - -Okay, so, `constLocalVar()` has sucessfully elided the reloading of `*x`, but maybe you’ve noticed something a bit confusing: it’s the same `constFunc()` call in the bodies of `localVar()` and `constLocalVar()`. If the compiler can deduce that `constFunc()` didn’t modify `*x` in `constLocalVar()`, why can’t it deduce that the exact same function call didn’t modify `*x` in `localVar()`? - -The explanation gets closer to the heart of why C `const` is impractical as an optimisation aid. C `const` effectively has two meanings: it can mean the variable is a read-only alias to some data that may or may not be constant, or it can mean the variable is actually constant. If you cast away `const` from a pointer to a constant value and then write to it, the result is undefined behaviour. On the other hand, it’s okay if it’s just a `const` pointer to a value that’s not constant. - -This possible implementation of `constFunc()` shows what that means: - -``` -// x is just a read-only pointer to something that may or may not be a constant -void constFunc(const int *x) -{ - // local_var is a true constant - const int local_var = 42; - - // Definitely undefined behaviour by C rules - doubleIt((int*)&local_var); - // Who knows if this is UB? - doubleIt((int*)x); -} - -void doubleIt(int *x) -{ - *x *= 2; -} -``` - -`localVar()` gave `constFunc()` a `const` pointer to non-`const` variable. Because the variable wasn’t originally `const`, `constFunc()` can be a liar and forcibly modify it without triggering UB. So the compiler can’t assume the variable has the same value after `constFunc()` returns. The variable in `constLocalVar()` really is `const`, though, so the compiler can assume it won’t change — because this time it _would_ be UB for `constFunc()` to cast `const` away and write to it. - -The `byArg()` and `constByArg()` functions in the first example are hopeless because the compiler has no way of knowing if `*x` really is `const`. - -But why the inconsistency? If the compiler can assume that `constFunc()` doesn’t modify its argument when called in `constLocalVar()`, surely it can go ahead an apply the same optimisations to other `constFunc()` calls, right? Nope. The compiler can’t assume `constLocalVar()` is ever run at all. If it isn’t (say, because it’s just some unused extra output of a code generator or macro), `constFunc()` can sneakily modify data without ever triggering UB. - -You might want to read the above explanation and examples a few times, but don’t worry if it sounds absurd: it is. Unfortunately, writing to `const` variables is the worst kind of UB: most of the time the compiler can’t know if it even would be UB. 
So most of the time the compiler sees `const`, it has to assume that someone, somewhere could cast it away, which means the compiler can’t use it for optimisation. This is true in practice because enough real-world C code has “I know what I’m doing” casting away of `const`. - -In short, a whole lot of things can prevent the compiler from using `const` for optimisation, including receiving data from another scope using a pointer, or allocating data on the heap. Even worse, in most cases where `const` can be used by the compiler, it’s not even necessary. For example, any decent compiler can figure out that `x` is constant in the following code, even without `const`: - -``` -int x = 42, y = 0; -printf("%d %d\n", x, y); -y += x; -printf("%d %d\n", x, y); -``` - -TL;DR: `const` is almost useless for optimisation because - - 1. Except for special cases, the compiler has to ignore it because other code might legally cast it away - 2. In most of the exceptions to #1, the compiler can figure out a variable is constant, anyway - - - -### C++ - -There’s another way `const` can affect code generation if you’re using C++: function overloads. You can have `const` and non-`const` overloads of the same function, and maybe the non-`const` can be optimised (by the programmer, not the compiler) to do less copying or something. - -``` -void foo(int *p) -{ - // Needs to do more copying of data -} - -void foo(const int *p) -{ - // Doesn't need defensive copies -} - -int main() -{ - const int x = 42; - // const-ness affects which overload gets called - foo(&x); - return 0; -} -``` - -On the one hand, I don’t think this is exploited much in practical C++ code. On the other hand, to make a real difference, the programmer has to make assumptions that the compiler can’t make because they’re not guaranteed by the language. - -### An experiment with Sqlite3 - -That’s enough theory and contrived examples. How much effect does `const` have on a real codebase? I thought I’d do a test on the Sqlite database (version 3.30.0) because - - * It actually uses `const` - * It’s a non-trivial codebase (over 200KLOC) - * As a database, it includes a range of things from string processing to arithmetic to date handling - * It can be tested with CPU-bound loads - - - -Also, the author and contributors have put years of effort into performance optimisation already, so I can assume they haven’t missed anything obvious. - -#### The setup - -I made two copies of [the source code][2] and compiled one normally. For the other copy, I used this hacky preprocessor snippet to turn `const` into a no-op: - -``` -#define const -``` - -(GNU) `sed` can add that to the top of each file with something like `sed -i '1i#define const' *.c *.h`. - -Sqlite makes things slightly more complicated by generating code using scripts at build time. Fortunately, compilers make a lot of noise when `const` and non-`const` code are mixed, so it was easy to detect when this happened, and tweak the scripts to include my anti-`const` snippet. - -Directly diffing the compiled results is a bit pointless because a tiny change can affect the whole memory layout, which can change pointers and function calls throughout the code. Instead I took a fingerprint of the disassembly (`objdump -d libsqlite3.so.0.8.6`), using the binary size and mnemonic for each instruction. 
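-
-One rough way such a fingerprint could be computed is sketched below (purely illustrative; this is not the author’s actual script, and it assumes the objdump output format shown in the next example):
-
-```
-# print "funcname NbytesMnemonic ..." for each function in the disassembly
-objdump -d libsqlite3.so.0.8.6 | awk '
-  /^[0-9a-f]+ </ {                  # function header line
-    if (line != "") print line
-    name = $2; gsub(/[<>:]/, "", name); line = name
-  }
-  /^ +[0-9a-f]+:/ {                 # instruction line
-    n = 0
-    for (i = 2; i <= NF; i++) {
-      if ($i ~ /^[0-9a-f][0-9a-f]$/) n++        # count the opcode bytes
-      else { line = line " " n $i; break }      # first non-byte field is the mnemonic
-    }
-  }
-  END { if (line != "") print line }'
-```
-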
For example, this function: - -``` -000000000005d570 : - 5d570: 4c 8d 05 59 a2 ff ff lea -0x5da7(%rip),%r8 # 577d0 - 5d577: e9 04 fe ff ff jmpq 5d380 - 5d57c: 0f 1f 40 00 nopl 0x0(%rax) -``` - -would turn into something like this: - -``` -sqlite3_blob_read 7lea 5jmpq 4nopl -``` - -I left all the Sqlite build settings as-is when compiling anything. - -#### Analysing the compiled code - -The `const` version of libsqlite3.so was 4,740,704 bytes, about 0.1% larger than the 4,736,712 bytes of the non-`const` version. Both had 1374 exported functions (not including low-level helpers like stuff in the PLT), and a total of 13 had any difference in fingerprint. - -A few of the changes were because of the dumb preprocessor hack. For example, here’s one of the changed functions (with some Sqlite-specific definitions edited out): - -``` -#define LARGEST_INT64 (0xffffffff|(((int64_t)0x7fffffff)<<32)) -#define SMALLEST_INT64 (((int64_t)-1) - LARGEST_INT64) - -static int64_t doubleToInt64(double r){ - /* - ** Many compilers we encounter do not define constants for the - ** minimum and maximum 64-bit integers, or they define them - ** inconsistently. And many do not understand the "LL" notation. - ** So we define our own static constants here using nothing - ** larger than a 32-bit integer constant. - */ - static const int64_t maxInt = LARGEST_INT64; - static const int64_t minInt = SMALLEST_INT64; - - if( r<=(double)minInt ){ - return minInt; - }else if( r>=(double)maxInt ){ - return maxInt; - }else{ - return (int64_t)r; - } -} -``` - -Removing `const` makes those constants into `static` variables. I don’t see why anyone who didn’t care about `const` would make those variables `static`. Removing both `static` and `const` makes GCC recognise them as constants again, and we get the same output. Three of the 13 functions had spurious changes because of local `static const` variables like this, but I didn’t bother fixing any of them. - -Sqlite uses a lot of global variables, and that’s where most of the real `const` optimisations came from. Typically they were things like a comparison with a variable being replaced with a constant comparison, or a loop being partially unrolled a step. (The [Radare toolkit][3] was handy for figuring out what the optimisations did.) A few changes were underwhelming. `sqlite3ParseUri()` is 487 instructions, but the only difference `const` made was taking this pair of comparisons: - -``` -test %al, %al -je -cmp $0x23, %al -je -``` - -And swapping their order: - -``` -cmp $0x23, %al -je -test %al, %al -je -``` - -#### Benchmarking - -Sqlite comes with a performance regression test, so I tried running it a hundred times for each version of the code, still using the default Sqlite build settings. Here are the timing results in seconds: - -| const | No const ----|---|--- -Minimum | 10.658s | 10.803s -Median | 11.571s | 11.519s -Maximum | 11.832s | 11.658s -Mean | 11.531s | 11.492s - -Personally, I’m not seeing enough evidence of a difference worth caring about. I mean, I removed `const` from the entire program, so if it made a significant difference, I’d expect it to be easy to see. But maybe you care about any tiny difference because you’re doing something absolutely performance critical. Let’s try some statistical analysis. - -I like using the Mann-Whitney U test for stuff like this. 
It’s similar to the more-famous t test for detecting differences in groups, but it’s more robust to the kind of complex random variation you get when timing things on computers (thanks to unpredictable context switches, page faults, etc). Here’s the result: - -| const | No const ----|---|--- -N | 100 | 100 -Mean rank | 121.38 | 79.62 -Mann-Whitney U | 2912 ----|--- -Z | -5.10 -2-sided p value | <10-6 -HL median difference | -.056s -95% confidence interval | -.077s – -0.038s - -The U test has detected a statistically significant difference in performance. But, surprise, it’s actually the non-`const` version that’s faster — by about 60ms, or 0.5%. It seems like the small number of “optimisations” that `const` enabled weren’t worth the cost of extra code. It’s not like `const` enabled any major optimisations like auto-vectorisation. Of course, your mileage may vary with different compiler flags, or compiler versions, or codebases, or whatever, but I think it’s fair to say that if `const` were effective at improving C performance, we’d have seen it by now. - -### So, what’s `const` for? - -For all its flaws, C/C++ `const` is still useful for type safety. In particular, combined with C++ move semantics and `std::unique_pointer`s, `const` can make pointer ownership explicit. Pointer ownership ambiguity was a huge pain in old C++ codebases over ~100KLOC, so personally I’m grateful for that alone. - -However, I used to go beyond using `const` for meaningful type safety. I’d heard it was best practices to use `const` literally as much as possible for performance reasons. I’d heard that when performance really mattered, it was important to refactor code to add more `const`, even in ways that made it less readable. That made sense at the time, but I’ve since learned that it’s just not true. - --------------------------------------------------------------------------------- - -via: https://theartofmachinery.com/2019/08/12/c_const_isnt_for_performance.html - -作者:[Simon Arneaud][a] -选题:[lujun9972][b] -译者:[LazyWolfLin](https://github.com/LazyWolfLin) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://theartofmachinery.com -[b]: https://github.com/lujun9972 -[1]: https://theartofmachinery.com/2019/04/05/d_as_c_replacement.html#const-and-immutable -[2]: https://sqlite.org/src/doc/trunk/README.md -[3]: https://rada.re/r/ diff --git a/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md b/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md index 6d884b10a6..7ecc3e7386 100644 --- a/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md +++ b/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md @@ -7,10 +7,10 @@ [#]: via: (https://theartofmachinery.com/2019/08/12/c_const_isnt_for_performance.html) [#]: author: (Simon Arneaud https://theartofmachinery.com) -为什么 `const` 不能让 `C` 代码跑得更快? +为什么 `const` 无法让 `C` 代码跑得更快? 
====== -在几个月前的一篇文章里,我曾说过“[有个一个流行的传言,`const` 有助于编译器优化 `C` 和 `C++` 代码][1]”。我觉得我需要解释一下,尤其是曾经我自己也以为这是显然对的。我将会用一些理论和人工构造的例子论证,然后在一个真正的代码库 `Sqlite` 上做一些实验和基准测试。 +在几个月前的一篇文章里,我曾说过“[有个一个流行的传言,`const` 有助于编译器优化 `C` 和 `C++` 代码][1]”。我觉得我需要解释一下,尤其是曾经我自己也以为这是显然对的。我将会用一些理论并构造一些例子来论证,然后在一个真正的代码库,`Sqlite`,上做一些实验和基准测试。 ### 一个简单的测试 @@ -39,7 +39,7 @@ void constByArg(const int *x) } ``` -调用 `printf()` 时,CPU 会通过指针从 RAM 中取得 `*x` 的值。很显然,`constByArg()` 会稍微快一点,因为编译器知道 `*x` 是常量,因此不需要在调用 `constFunc()` 之后再次获取它的值。它仅是打印相同的东西。对吧?让我们来看下 `GCC` 在如下编译选项下生成的汇编代码: +调用 `printf()` 时,CPU 会通过指针从 RAM 中取得 `*x` 的值。很显然,`constByArg()` 会稍微快一点,因为编译器知道 `*x` 是常量,因此不需要在调用 `constFunc()` 之后再次获取它的值。它仅是打印相同的东西。没问题吧?让我们来看下 `GCC` 在如下编译选项下生成的汇编代码: ``` $ gcc -S -Wall -O3 test.c @@ -209,13 +209,13 @@ void doubleIt(int *x) `localVar()` 传递给 `constFunc()` 一个指向非 `const` 变量的 `const` 指针。因为这个变量并非常量,`constFunc()` 可以撒个谎并强行修改它而不触发而不触发未定义行为。所以,编译器不能断定变量在调用 `constFunc()` 后仍是同样的值。在 `constLocalVar()` 中的变量是真正的常量,因此,编译器可以断定它不会改变——因为在 `constFunc()` 去除变量的 `const` 属性并写入它*将*会是一个未定义行为。 -第一个例子中的函数 `byArg()` 和 `constByArg()` 是没有可能的,因为编译器没有任何方法可以知道 `*x` 是否真的是 `const` 常量。 +第一个例子中的函数 `byArg()` 和 `constByArg()` 是没有可能优化的,因为编译器没有任何方法能够知道 `*x` 是否真的是 `const` 常量。 -但是为什么不一致呢?如果编译器能够推断出 `constLocalVar()` 中调用的 `constFunc()` 不会修改它的参数,那么肯定也能继续在其他 `constFunc()` 的调用上实施相同的优化,对吧?不。编译器不能假设 `constLocalVar()` 根本没有运行。 If it isn’t (say, because it’s just some unused extra output of a code generator or macro), `constFunc()` can sneakily modify data without ever triggering UB. +但是为什么不一致呢?如果编译器能够推断出 `constLocalVar()` 中调用的 `constFunc()` 不会修改它的参数,那么肯定也能继续在其他 `constFunc()` 的调用上实施相同的优化,是吗?并不。编译器不能假设 `constLocalVar()` 根本没有运行。 如果不是这样(例如,它只是代码生成器或者宏的一些未使用的额外输出),`constFunc()` 就能偷偷地修改数据而不触发未定义行为。 -你可能需要重复阅读上述说明和示例,但不要担心它听起来很荒谬,它确实是的。不幸的是,对 `const` 变量进行写入是最糟糕的未定义行为:大多数情况下,编译器不知道它是否将会是未定义行为。所以,大多数情况下,编译器看见 `const` 时必须假设它未来可能会被移除掉,这意味着编译器不能使用它进行优化。这在实践中是正确的,因为真实的 `C` 代码会在“明确知道后果”下移除 `const`。 +你可能需要重复阅读上述说明和示例,但不要担心它听起来很荒谬,它确实是正确的。不幸的是,对 `const` 变量进行写入是最糟糕的未定义行为:大多数情况下,编译器不知道它是否将会是未定义行为。所以,大多数情况下,编译器看见 `const` 时必须假设它未来可能会被移除掉,这意味着编译器不能使用它进行优化。这在实践中是正确的,因为真实的 `C` 代码会在“深思熟虑”后移除 `const`。 -简而言之,很多事情都可以阻止编译器使用 `const` 进行优化,包括使用指针从另一内存空间接受数据,或者在堆空间上分配数据。更糟糕的是,在大部分编译器能够使用 `const` 的情况,它都不是必须的。例如,任何像样的编译器都能推断出下面代码中的 `x` 是一个常量,甚至都不需要 `const`: +简而言之,很多事情都可以阻止编译器使用 `const` 进行优化,包括使用指针从另一内存空间接受数据,或者在堆空间上分配数据。更糟糕的是,在大部分编译器能够使用 `const` 进行优化的情况,它都不是必须的。例如,任何像样的编译器都能推断出下面代码中的 `x` 是一个常量,甚至都不需要 `const`: ``` int x = 42, y = 0; @@ -247,7 +247,7 @@ void foo(const int *p) int main() { const int x = 42; - // const 影响被调用的是哪一个版本的重载 + // const 影响被调用的是哪一个版本的重载函数 foo(&x); return 0; } @@ -257,7 +257,7 @@ int main() ### 用 `Sqlite3` 进行实验 -有了足够的理论和例子。那么 `const` 在一个真正的代码库中有多大的影响呢?我将会在 `Sqlite`(版本:3.30.0)的代码库上做一个测试,因为: +有了足够的理论和例子。那么 `const` 在一个真正的代码库中有多大的影响呢?我将会在代码库 `Sqlite`(版本:3.30.0)上做一个测试,因为: * 它真正地使用了 `const` * 它不是一个简单的代码库(超过 20 万行代码) @@ -328,7 +328,7 @@ static int64_t doubleToInt64(double r){ } ``` -删去 `const` 使得这些常量变成了 `static` 变量。我不明白为什么会有不了解 `const` 的人让这些变量加上 `static`。同时删去 `static` 和 `const` 会让 GCC 再次认为它们是常量,而我们将得到同样的编译输出。由于像这样子的局部的 `static const` 变量,使得 13 个函数中有 3 个函数产生假的变化,但我一个都不打算修复它们。 +删去 `const` 使得这些常量变成了 `static` 变量。我不明白为什么会有不了解 `const` 的人让这些变量加上 `static`。同时删去 `static` 和 `const` 会让 GCC 再次认为它们是常量,而我们将得到同样的编译输出。由于类似这样的局部的 `static const` 变量,使得 13 个函数中有 3 个函数产生假的变化,但我一个都不打算修复它们。 `Sqlite` 使用了很多全局变量,而这正是大多数真正的 `const` 优化产生的地方。通常情况下,它们类似于将一个变量比较代替成一个常量比较,或者一个循环在部分展开的一步。([Radare toolkit][3] 可以很方便的找出这些优化措施。)一些变化则令人失望。`sqlite3ParseUri()` 有 487 指令,但 `const` 产生的唯一区别是进行了这个比较: @@ -361,26 +361,28 @@ Mean | 11.531s | 11.492s 
就我个人看来,我没有发现足够的证据说明这个差异值得关注。我是说,我从整个程序中删去 `const`,所以如果它有明显的差别,那么我希望它是显而易见的。但也许你关心任何微小的差异,因为你正在做一些绝对性能非常重要的事。那让我们试一下统计分析。 -I like using the Mann-Whitney U test for stuff like this. It’s similar to the more-famous t test for detecting differences in groups, but it’s more robust to the kind of complex random variation you get when timing things on computers (thanks to unpredictable context switches, page faults, etc). Here’s the result: +我喜欢使用类似 Mann-Whitney U 检验这样的东西。它类似于更著名的 T 检验,但对你在机器上计时时产生的复杂随机变量(由于不可预测的上下文切换,页错误等)更加鲁棒。以下是结果: -| const | No const +|| const | No const| ---|---|--- N | 100 | 100 Mean rank | 121.38 | 79.62 -Mann-Whitney U | 2912 + +||| ---|--- +Mann-Whitney U | 2912 Z | -5.10 -2-sided p value | <10-6 -HL median difference | -.056s -95% confidence interval | -.077s – -0.038s +2-sided p value | <10-6 +HL median difference | -0.056s +95% confidence interval | -0.077s – -0.038s -The U test has detected a statistically significant difference in performance. But, surprise, it’s actually the non-`const` version that’s faster — by about 60ms, or 0.5%. It seems like the small number of “optimisations” that `const` enabled weren’t worth the cost of extra code. It’s not like `const` enabled any major optimisations like auto-vectorisation. Of course, your mileage may vary with different compiler flags, or compiler versions, or codebases, or whatever, but I think it’s fair to say that if `const` were effective at improving C performance, we’d have seen it by now. +U 检验已经发现统计意义上具有显著的性能差异。但是,令人惊讶的是,实际上是非 `const` 版本更快——大约 60ms,0.5%。似乎 `const` 启用的少量“优化”不值得额外代码的开销。这不像是 `const` 启用了任何类似于自动矢量化的重要的优化。当然,你的结果可能因为编译器配置、编译器版本或者代码库等等而有所不同,但是我觉得这已经说明了 `const` 是否能够有效地提高 `C` 的性能,我们现在已经看到答案了。 -### So, what’s `const` for? +### 那么,`const` 有什么用呢? -For all its flaws, C/C++ `const` is still useful for type safety. In particular, combined with C++ move semantics and `std::unique_pointer`s, `const` can make pointer ownership explicit. Pointer ownership ambiguity was a huge pain in old C++ codebases over ~100KLOC, so personally I’m grateful for that alone. +尽管存在缺陷,`C/C++` 的 `const` 仍有助于类型安全。特别是,结合 `C++` 的移动语义和 `std::unique_pointer`,`const` 可以使指针所有权显式化。在超过十万行代码的 `C++` 旧代码库里,指针所有权模糊是一个大难题,我对此深有感触。 -However, I used to go beyond using `const` for meaningful type safety. I’d heard it was best practices to use `const` literally as much as possible for performance reasons. I’d heard that when performance really mattered, it was important to refactor code to add more `const`, even in ways that made it less readable. That made sense at the time, but I’ve since learned that it’s just not true. +但是,我以前常常使用 `const` 来实现有意义的类型安全。我曾听说过基于性能上的原因,最好是尽可能多地使用 `const`。我曾听说过当性能很重要时,重构代码并添加更多的 `const` 非常重要,即使以降低代码可读性的方式。当时觉得这没问题,但后来我才知道这并不对。 -------------------------------------------------------------------------------- From b529c9f2dc2096c30b14a9adda8383535300b6cf Mon Sep 17 00:00:00 2001 From: DarkSun Date: Fri, 13 Sep 2019 00:51:50 +0800 Subject: [PATCH 025/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190913=20How=20?= =?UTF-8?q?to=20Find=20and=20Replace=20a=20String=20in=20File=20Using=20th?= =?UTF-8?q?e=20sed=20Command=20in=20Linux?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/tech/20190913 How to Find and Replace a String in File Using the sed Command in Linux.md --- ... 
in File Using the sed Command in Linux.md | 352 ++++++++++++++++++
 1 file changed, 352 insertions(+)
 create mode 100644 sources/tech/20190913 How to Find and Replace a String in File Using the sed Command in Linux.md

diff --git a/sources/tech/20190913 How to Find and Replace a String in File Using the sed Command in Linux.md b/sources/tech/20190913 How to Find and Replace a String in File Using the sed Command in Linux.md
new file mode 100644
index 0000000000..bfb85529d4
--- /dev/null
+++ b/sources/tech/20190913 How to Find and Replace a String in File Using the sed Command in Linux.md
@@ -0,0 +1,352 @@
+[#]: collector: (lujun9972)
+[#]: translator: ( )
+[#]: reviewer: ( )
+[#]: publisher: ( )
+[#]: url: ( )
+[#]: subject: (How to Find and Replace a String in File Using the sed Command in Linux)
+[#]: via: (https://www.2daygeek.com/linux-sed-to-find-and-replace-string-in-files/)
+[#]: author: (Magesh Maruthamuthu https://www.2daygeek.com/author/magesh/)
+
+How to Find and Replace a String in File Using the sed Command in Linux
+======
+
+When you are working on text files, you may need to find and replace a string in the file.
+
+The sed command is mostly used to replace text in a file.
+
+This can be done using the sed command or the awk command in Linux.
+
+In this tutorial, we will show you how to do this using the sed command, and then we will discuss the awk command.
+
+### What is the sed Command
+
+The sed command stands for Stream EDitor. It is used to perform basic text manipulation in Linux. It can perform various functions, such as searching, finding, modifying, inserting or deleting text in files.
+
+It also performs complex regular expression pattern matching.
+
+It can be used for the following purposes:
+
+  * To find and replace matches with a given format.
+  * To find and replace specific lines that match a given format.
+  * To find and replace the entire line that matches the given format.
+  * To search and replace two different patterns simultaneously.
+
+
+
+The sixteen examples listed in this article will help you master the sed command.
+
+If you want to remove a line from a file using the sed command, go to the following article.
+
+**`Note:`** Since this is a demonstration article, we use the sed command without the `-i` option, so it only prints the modified contents of the file in the Linux terminal and does not change the file itself.
+
+But if you want to change the source file in a real environment, use the `-i` option with the sed command.
+
+Common syntax for sed to replace a string:
+
+```
+sed -i 's/Search_String/Replacement_String/g' Input_File
+```
+
+First we need to understand the sed syntax to do this. See the details below; a short standalone example follows the list.
+
+  * `sed:` It’s a Linux command.
+  * `-i:` By default, sed prints the results to the standard output. When you add this option, sed edits the files in place. A backup of the original file will be created when you add a suffix (for example, -i.bak).
+  * `s:` The s is the substitute command.
+  * `Search_String:` The string or regular expression to search for.
+  * `Replacement_String:` The replacement string.
+  * `g:` Global replacement flag. By default, the sed command replaces only the first occurrence of the pattern in each line and won’t replace the other occurrences in the line. All occurrences will be replaced when this flag is provided.
+  * `/:` The delimiter character.
+  * `Input_File:` The file on which you want to perform the action.
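+
+To make the syntax concrete, here is a minimal, self-contained example (the file name greetings.txt and its contents are made up purely for illustration):
+
+```
+$ echo "hello world, hello sed" > greetings.txt
+
+# without -i, sed only prints the result and leaves the file untouched
+$ sed 's/hello/goodbye/g' greetings.txt
+goodbye world, goodbye sed
+
+# with -i.bak, sed edits the file in place and keeps greetings.txt.bak as a backup
+$ sed -i.bak 's/hello/goodbye/g' greetings.txt
+```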
+
+
+Let us look at some examples of how the sed command is commonly used to search and convert text in files.
+
+We have created the below file for demonstration purposes.
+
+```
+# cat sed-test.txt
+
+1 Unix unix unix 23
+2 linux Linux 34
+3 linuxunix UnixLinux
+linux /bin/bash CentOS Linux OS
+Linux is free and opensource operating system
+```
+
+### 1) How to Find and Replace the “First” Occurrence of the Pattern on a Line
+
+The below sed command replaces the word **unix** with **linux** in the file. This only changes the first instance of the pattern on each line.
+
+```
+# sed 's/unix/linux/' sed-test.txt
+
+1 Unix linux unix 23
+2 linux Linux 34
+3 linuxlinux UnixLinux
+linux /bin/bash CentOS Linux OS
+Linux is free and opensource operating system
+```
+
+### 2) How to Find and Replace the “Nth” Occurrence of the Pattern on a Line
+
+Use the 1,2,...,n flags to replace the corresponding occurrence of a pattern in a line.
+
+The below sed command replaces the second instance of the “unix” pattern with “linux” in a line.
+
+```
+# sed 's/unix/linux/2' sed-test.txt
+
+1 Unix unix linux 23
+2 linux Linux 34
+3 linuxunix UnixLinux
+linux /bin/bash CentOS Linux OS
+Linux is free and opensource operating system
+```
+
+### 3) How to Search and Replace all Instances of the Pattern in a Line
+
+The below sed command replaces all instances of the “unix” pattern with “linux” on the line, because “g” means a global replacement.
+
+```
+# sed 's/unix/linux/g' sed-test.txt
+
+1 Unix linux linux 23
+2 linux Linux 34
+3 linuxlinux UnixLinux
+linux /bin/bash CentOS Linux OS
+Linux is free and opensource operating system
+```
+
+### 4) How to Find and Replace the Pattern for all Instances in a Line from the “Nth” Occurrence
+
+The below sed command replaces all the occurrences of a pattern in a line, starting from the “Nth” instance.
+
+```
+# sed 's/unix/linux/2g' sed-test.txt
+
+1 Unix unix linux 23
+2 linux Linux 34
+3 linuxunix UnixLinux
+linux /bin/bash CentOS Linux OS
+Linux is free and opensource operating system
+```
+
+### 5) Search and Replace the pattern on a specific line number
+
+You are able to replace the string on a specific line number. The below sed command replaces the pattern “unix” with “linux” only on the 3rd line.
+
+```
+# sed '3 s/unix/linux/' sed-test.txt
+
+1 Unix unix unix 23
+2 linux Linux 34
+3 linuxlinux UnixLinux
+linux /bin/bash CentOS Linux OS
+Linux is free and opensource operating system
+```
+
+### 6) How to Find and Replace Pattern in a Range of Lines
+
+You can specify a range of line numbers in which to replace the string.
+
+The below sed command replaces the “unix” pattern with “linux” on lines 1 through 3.
+
+```
+# sed '1,3 s/unix/linux/' sed-test.txt
+
+1 Unix linux unix 23
+2 linux Linux 34
+3 linuxlinux UnixLinux
+linux /bin/bash CentOS Linux OS
+Linux is free and opensource operating system
+```
+
+### 7) How to Find and Change the pattern in the Last Line
+
+The below sed command allows you to replace the matching string only in the last line.
+
+It replaces the “Linux” pattern with “Unix” only on the last line.
+
+```
+# sed '$ s/Linux/Unix/' sed-test.txt
+
+1 Unix unix unix 23
+2 linux Linux 34
+3 linuxunix UnixLinux
+linux /bin/bash CentOS Linux OS
+Unix is free and opensource operating system
+```
+
+### 8) How to Find and Replace Only the Right Matching Word in a Line
+
+As you might have noticed, the substring “linuxunix” is replaced with “linuxlinux” in the 6th example.
If you want to replace only the right matching word, use the word-boundary expression “\b” on both ends of the search string.
+
+```
+# sed '1,3 s/\bunix\b/linux/' sed-test.txt
+
+1 Unix linux unix 23
+2 linux Linux 34
+3 linuxunix UnixLinux
+linux /bin/bash CentOS Linux OS
+Linux is free and opensource operating system
+```
+
+### 9) How to Search and Replace the pattern case-insensitively
+
+Everyone knows that Linux is case sensitive. To make the pattern match case-insensitive, use the I flag.
+
+```
+# sed 's/unix/linux/gI' sed-test.txt
+
+1 linux linux linux 23
+2 linux Linux 34
+3 linuxlinux linuxLinux
+linux /bin/bash CentOS Linux OS
+Linux is free and opensource operating system
+```
+
+### 10) How to Find and Replace a String that Contains the Delimiter Character
+
+When you search and replace a string that contains the delimiter character, you need to use the backslash “\” to escape the slash.
+
+In this example, we are going to replace “/bin/bash” with “/usr/bin/fish”.
+
+```
+# sed 's/\/bin\/bash/\/usr\/bin\/fish/g' sed-test.txt
+
+1 Unix unix unix 23
+2 linux Linux 34
+3 linuxunix UnixLinux
+linux /usr/bin/fish CentOS Linux OS
+Linux is free and opensource operating system
+```
+
+The above sed command works as expected, but it looks bad. To simplify this, most people use the vertical bar “|” as the delimiter instead. So, I advise you to go with it.
+
+```
+# sed 's|/bin/bash|/usr/bin/fish/|g' sed-test.txt
+
+1 Unix unix unix 23
+2 linux Linux 34
+3 linuxunix UnixLinux
+linux /usr/bin/fish/ CentOS Linux OS
+Linux is free and opensource operating system
+```
+
+### 11) How to Find and Replace Digits with a Given Pattern
+
+Similarly, digits can be replaced with a pattern. The below sed command uses the “[0-9]” pattern to replace all digits with “number”.
+
+```
+# sed 's/[0-9]/number/g' sed-test.txt
+
+number Unix unix unix numbernumber
+number linux Linux numbernumber
+number linuxunix UnixLinux
+linux /bin/bash CentOS Linux OS
+Linux is free and opensource operating system
+```
+
+### 12) How to Find and Replace only Two-Digit Numbers with a Pattern
+
+If you want to replace only the two-digit numbers with the pattern, use the sed command below.
+
+```
+# sed 's/\b[0-9]\{2\}\b/number/g' sed-test.txt
+
+1 Unix unix unix number
+2 linux Linux number
+3 linuxunix UnixLinux
+linux /bin/bash CentOS Linux OS
+Linux is free and opensource operating system
+```
+
+### 13) How to Print only Replaced Lines with the sed Command
+
+If you want to display only the changed lines, use the below sed command.
+
+  * p – It prints the replaced line twice on the terminal.
+  * n – It suppresses the duplicate output generated by the “p” flag.
+
+
+
+```
+# sed -n 's/Unix/Linux/p' sed-test.txt
+
+1 Linux unix unix 23
+3 linuxunix LinuxLinux
+```
+
+### 14) How to Run Multiple sed Commands at Once
+
+The following sed command detects and replaces two different patterns simultaneously.
+
+It searches for the “linuxunix” and “CentOS” patterns, replacing them with “LINUXUNIX” and “RHEL8” in one go.
+
+```
+# sed -e 's/linuxunix/LINUXUNIX/g' -e 's/CentOS/RHEL8/g' sed-test.txt
+
+1 Unix unix unix 23
+2 linux Linux 34
+3 LINUXUNIX UnixLinux
+linux /bin/bash RHEL8 Linux OS
+Linux is free and opensource operating system
+```
+
+The following sed command searches for two different patterns and replaces them with one string in a single pass.
+
+It searches for the “linuxunix” and “CentOS” patterns, replacing both with “Fedora30”.
+ +``` +# sed -e 's/\(linuxunix\|CentOS\)/Fedora30/g' sed-test.txt + +1 Unix unix unix 23 +2 linux Linux 34 +3 Fedora30 UnixLinux +linux /bin/bash Fedora30 Linux OS +Linux is free and opensource operating system +``` + +### 15) How to Find and Replace the Entire Line if the Given Pattern Matches + +If the pattern matches, you can use the sed command to replace the entire line with the new line. This can be done using the “C” flag. + +``` +# sed '/OS/ c New Line' sed-test.txt + +1 Unix unix unix 23 +2 linux Linux 34 +3 linuxunix UnixLinux +New Line +Linux is free and opensource operating system +``` + +### 16) How to Search and Replace lines that Matches a Pattern + +You can specify a pattern for the sed command to fit on a line. In the event of pattern matching, the sed command searches for the string to be replaced. + +The below sed command first looks for lines that have the “OS” pattern, then replaces the word “Linux” with “ArchLinux”. + +``` +# sed '/OS/ s/Linux/ArchLinux/' sed-test.txt + +1 Unix unix unix 23 +2 linux Linux 34 +3 linuxunix UnixLinux +linux /bin/bash CentOS ArchLinux OS +Linux is free and opensource operating system +``` +-------------------------------------------------------------------------------- + +via: https://www.2daygeek.com/linux-sed-to-find-and-replace-string-in-files/ + +作者:[Magesh Maruthamuthu][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://www.2daygeek.com/author/magesh/ +[b]: https://github.com/lujun9972 From 6fa887537c256cbb9b9f626cbf381e4eacef07be Mon Sep 17 00:00:00 2001 From: MjSeven Date: Fri, 13 Sep 2019 14:01:11 +0800 Subject: [PATCH 026/202] Translating by MjSeven --- sources/tech/20190409 Working with variables on Linux.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sources/tech/20190409 Working with variables on Linux.md b/sources/tech/20190409 Working with variables on Linux.md index da4fec5ea9..a926fe67b4 100644 --- a/sources/tech/20190409 Working with variables on Linux.md +++ b/sources/tech/20190409 Working with variables on Linux.md @@ -1,5 +1,5 @@ [#]: collector: (lujun9972) -[#]: translator: ( ) +[#]: translator: (MjSeven) [#]: reviewer: ( ) [#]: publisher: ( ) [#]: url: ( ) From 08accab319788003b6373437452ef9be4ec885df Mon Sep 17 00:00:00 2001 From: Xingyu Wang Date: Fri, 13 Sep 2019 16:18:30 +0800 Subject: [PATCH 027/202] PRF @heguangzhi --- .../20190730 How to manage logs in Linux.md | 57 ++++++++----------- 1 file changed, 25 insertions(+), 32 deletions(-) diff --git a/translated/tech/20190730 How to manage logs in Linux.md b/translated/tech/20190730 How to manage logs in Linux.md index 2d95d97e89..02f067cbb5 100644 --- a/translated/tech/20190730 How to manage logs in Linux.md +++ b/translated/tech/20190730 How to manage logs in Linux.md @@ -1,6 +1,6 @@ [#]: collector: (lujun9972) [#]: translator: (heguangzhi) -[#]: reviewer: ( ) +[#]: reviewer: (wxy) [#]: publisher: ( ) [#]: url: ( ) [#]: subject: (How to manage logs in Linux) @@ -9,18 +9,18 @@ 如何在 Linux 中管理日志 ====== -Linux 系统上的日志文件包含了很多信息——比您有时间查看的还要多。以下是一些建议,告诉你如何正确的使用它们...而不是淹没在其中。 + +> Linux 系统上的日志文件包含了**很多**信息——比你有时间查看的还要多。以下是一些建议,告诉你如何正确的使用它们……而不是淹没在其中。 + ![Greg Lobinski \(CC BY 2.0\)][1] -在 Linux 系统上管理日志文件可能是非常容易,也可能是非常痛苦。这完全取决于您所说的日志管理是什么意思。 +在 Linux 系统上管理日志文件可能非常容易,也可能非常痛苦。这完全取决于你所认为的日志管理是什么。 -如果您的意思是如何确保日志文件不会耗尽您的 Linux 服务器上的所有磁盘空间,那么这个问题通常很简单。Linux 
系统上的日志文件将自动覆盖,系统将只维护固定数量的覆盖日志。即便如此,浏览一下一组100个文件可能会让人不知所措。在这篇文章中,我们将看看循环日志是如何工作的,以及一些最相关的日志文件。 - -**[两分钟 Linux 技巧:[在这些两分钟视频教程中学习如何掌握大量 Linux 命令][2] ]** +如果你认为是如何确保日志文件不会耗尽你的 Linux 服务器上的所有磁盘空间,那么这个问题通常很简单。Linux 系统上的日志文件会自动翻转,系统将只维护固定数量的翻转日志。即便如此,一眼看去一组上百个文件可能会让人不知所措。在这篇文章中,我们将看看日志轮换是如何工作的,以及一些最相关的日志文件。 ### 自动日志轮换 -日志文件经常是循环使用的。当前的日志会获得稍微不同的文件名,并建立一个新的日志文件。以系统日志文件为例。对于许多正常的系统消息来说,这个文件是一个包罗万象的东西。如果您 **cd** 转到 **/var/log** 并查看一下,您可能会看到一系列系统日志文件,如下所示: +日志文件是经常轮转的。当前的日志会获得稍微不同的文件名,并建立一个新的日志文件。以系统日志文件为例。对于许多正常的系统 messages 文件来说,这个文件是一个包罗万象的东西。如果你 `cd` 转到 `/var/log` 并查看一下,你可能会看到一系列系统日志文件,如下所示: ``` $ ls -l syslog* @@ -34,9 +34,9 @@ $ ls -l syslog* -rw-r----- 1 syslog adm 32924 Jul 24 00:00 syslog.7.gz ``` -每天午夜将旧系统日志文件轮换使用,保留一周,然后删除最早的系统日志文件。syslog.7.gz 文件将被从系统中删除,syslog.6.gz 将被重命名为 syslog.7.gz。日志文件的其余部分将继续运行,直到 syslog 成 syslog.1 并创建一个新的系统日志文件。有些系统日志文件会比其他文件大,但是一般来说,没有一个文件可能会变得非常大,并且您永远不会看到超过八个。这给了你一个多星期的时间来回顾它们收集的任何数据。 +轮换发生在每天午夜,旧的日志文件会保留一周,然后删除最早的系统日志文件。`syslog.7.gz` 文件将被从系统中删除,`syslog.6.gz` 将被重命名为 `syslog.7.gz`。日志文件的其余部分将依次改名,直到 `syslog` 变成 `syslog.1` 并创建一个新的 `syslog` 文件。有些系统日志文件会比其他文件大,但是一般来说,没有一个文件可能会变得非常大,并且你永远不会看到超过八个的文件。这给了你一个多星期的时间来回顾它们收集的任何数据。 -为任何特定日志文件维护的文件数量取决于日志文件本身。对一些人来说,你可能有13个。请注意系统日志和 dpkg 的旧文件是如何压缩以节省空间的。可能是您对最近的日志最感兴趣。旧日志可以根据需要用 **gunzip** 解压。 +某种特定日志文件维护的文件数量取决于日志文件本身。有些文件可能有 13 个。请注意 `syslog` 和 `dpkg` 的旧文件是如何压缩以节省空间的。这里的考虑是你对最近的日志最感兴趣,而更旧的日志可以根据需要用 `gunzip` 解压。 ``` # ls -t dpkg* @@ -47,24 +47,22 @@ dpkg.log.2.gz dpkg.log.5.gz dpkg.log.8.gz dpkg.log.11.gz 日志文件可以根据时间和大小进行轮换。检查日志文件时请记住这一点。 -Log file rotation can be configured differently if you are so inclined, though the defaults work for most Linux sysadmins. Take a look at files like **/etc/rsyslog.conf** and **/etc/logrotate.conf** for some of the details. +尽管默认值适用于大多数 Linux 系统管理员,但如果你愿意,可以对日志文件轮换进行不同的配置。查看这些文件,如 `/etc/rsyslog.conf` 和 `/etc/logrotate.conf`。 -尽管默认值适用于大多数 Linux 系统管理员,如果您愿意,可以对日志文件轮换进行不同的配置。查看这些文件,如 **/etc/rsyslog.conf** 和 **/etc/logrotate.conf** 。 +### 使用日志文件 -### 利用您的日志文件 - -管理日志文件包括时不时的使用它们。使用日志文件第一步是每个日志文件可以告诉您的系统如何工作以及可能遇到的问题。从上到下读取日志文件几乎不是一个好的选择,但是当您想了解您的系统运行的情况或者需要跟踪一个问题时,知道如何从日志文件中获取信息会是有很大的好处。这也表明您对每个文件中存储的信息有一个大致的了解了。例如: +对日志文件的管理也包括时不时的使用它们。使用日志文件的第一步可能包括:习惯每个日志文件可以告诉你有关系统如何工作以及系统可能会遇到哪些问题。从头到尾读取日志文件几乎不是一个好的选择,但是当你想了解你的系统运行的情况或者需要跟踪一个问题时,知道如何从日志文件中获取信息会是有很大的好处。这也表明你对每个文件中存储的信息有一个大致的了解了。例如: ``` -$ who wtmp | tail -10 show the most recent logins -$ who wtmp | grep shark show recent logins for a particular user -$ grep "sudo:" auth.log see who is using sudo -$ tail dmesg look at kernel messages -$ tail dpkg.log see recently installed and updated packages -$ more ufw.log see firewall activity (i.e., if you are using ufw) +$ who wtmp | tail -10 显示最近的登录信息 +$ who wtmp | grep shark 显示特定用户的最近登录信息 +$ grep "sudo:" auth.log 查看谁在使用 sudo +$ tail dmesg 查看(最近的)内核日志 +$ tail dpkg.log 查看最近安装和更新的软件包 +$ more ufw.log 查看防火墙活动(假如你使用 ufw) ``` -您运行的一些命令也会从日志文件中提取信息。例如,如果您想查看系统重新启动的列表,可以使用如下命令: +你运行的一些命令也会从日志文件中提取信息。例如,如果你想查看系统重新启动的列表,可以使用如下命令: ``` $ last reboot @@ -75,19 +73,14 @@ reboot system boot 5.0.0-13-generic Mon Apr 29 10:55 - 15:34 (18+04:39) ### 使用更高级的日志管理器 -虽然您编写脚本来更容易地在日志文件中找到感兴趣的信息,但是您也应该知道有一些非常复杂的工具可用于日志文件分析。一些人把来自多个来源的信息联系起来,以便更全面地了解您的网络上发生了什么。它们也可以提供实时监控。这些工具,如[Solarwinds Log & Event Manager][3]和[PRTG 网络监视器][4](包括日志监视)浮现在脑海中。 +虽然你可以编写脚本来更容易地在日志文件中找到感兴趣的信息,但是你也应该知道有一些非常复杂的工具可用于日志文件分析。一些可以把来自多个来源的信息联系起来,以便更全面地了解你的网络上发生了什么。它们也可以提供实时监控。这些工具,如 [Solarwinds Log & Event Manager][3] 和 [PRTG 网络监视器][4](包括日志监视)浮现在脑海中。 还有一些免费工具可以帮助分析日志文件。其中包括: - * **Logwatch** — 用于扫描系统日志中感兴趣的行的程序 - * **Logcheck** — 系统日志分析器和报告器 + * Logwatch 
— 用于扫描系统日志中感兴趣的日志行的程序 + * Logcheck — 系统日志分析器和报告器 - -在接下来的帖子中,我将提供一些关于这些工具的见解和帮助。 - -**[另请参阅:[排除 Linux 故障的宝贵技巧和诀窍][5] ]** - -加入[Facebook][6] 和[LinkedIn][7] 上的网络世界社区,就您最关心的话题发表评论。 +在接下来的文章中,我将提供一些关于这些工具的见解和帮助。 -------------------------------------------------------------------------------- @@ -95,8 +88,8 @@ via: https://www.networkworld.com/article/3428361/how-to-manage-logs-in-linux.ht 作者:[Sandra Henry-Stocker][a] 选题:[lujun9972][b] -译者:[译者ID](https://github.com/heguangzhi) -校对:[校对者ID](https://github.com/校对者ID) +译者:[heguangzhi](https://github.com/heguangzhi) +校对:[wxy](https://github.com/wxy) 本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 From e150f0da27eec0519118f9ad08df2b3ce9138a3c Mon Sep 17 00:00:00 2001 From: Xingyu Wang Date: Fri, 13 Sep 2019 16:20:26 +0800 Subject: [PATCH 028/202] PUB @heguangzhi https://linux.cn/article-11336-1.html --- .../20190730 How to manage logs in Linux.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) rename {translated/tech => published}/20190730 How to manage logs in Linux.md (98%) diff --git a/translated/tech/20190730 How to manage logs in Linux.md b/published/20190730 How to manage logs in Linux.md similarity index 98% rename from translated/tech/20190730 How to manage logs in Linux.md rename to published/20190730 How to manage logs in Linux.md index 02f067cbb5..b3d448a305 100644 --- a/translated/tech/20190730 How to manage logs in Linux.md +++ b/published/20190730 How to manage logs in Linux.md @@ -1,8 +1,8 @@ [#]: collector: (lujun9972) [#]: translator: (heguangzhi) [#]: reviewer: (wxy) -[#]: publisher: ( ) -[#]: url: ( ) +[#]: publisher: (wxy) +[#]: url: (https://linux.cn/article-11336-1.html) [#]: subject: (How to manage logs in Linux) [#]: via: (https://www.networkworld.com/article/3428361/how-to-manage-logs-in-linux.html) [#]: author: (Sandra Henry-Stocker https://www.networkworld.com/author/Sandra-Henry_Stocker/) From 2c5e051891e74391e1c06a1b85fa3322fe13b09a Mon Sep 17 00:00:00 2001 From: Xingyu Wang Date: Fri, 13 Sep 2019 17:12:48 +0800 Subject: [PATCH 029/202] PRF @wxy --- translated/talk/20190902 Why I use Java.md | 24 +++++++++++----------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/translated/talk/20190902 Why I use Java.md b/translated/talk/20190902 Why I use Java.md index ed98447c9c..d7ff16abc6 100644 --- a/translated/talk/20190902 Why I use Java.md +++ b/translated/talk/20190902 Why I use Java.md @@ -12,7 +12,7 @@ > 根据你的工作需要,可能有比 Java 更好的语言,但是我还没有看到任何能把我拉走的语言。 -![Coffee beans][1] +![](https://img.linux.net.cn/data/attachment/album/201909/13/171223bf7noo4bbnkxbkdk.jpg) 我记得我是从 1997 年开始使用 Java 的,就在 [Java 1.1 刚刚发布][2]不久之后。从那时起,总的来说,我非常喜欢用 Java 编程;虽然我得承认,这些日子我经常像在 Java 中编写“严肃的代码”一样编写 [Groovy][3] 脚本。 @@ -20,13 +20,13 @@ ### 调试是一个关键的语言特性 -我真的很讨厌浪费时间追踪由我的代码不小心迭代到数组末尾导致的模糊错误,特别是在 IBM 大型机上的 FORTRAN 编程时代。另一个不时出现的微妙问题是调用一个子程序,该子程序带有一个四字节整数参数,而预期有两个字节;在小端架构上,这通常是一个良性的错误,但在大端机器上,前两个字节的值通常并不总是为零。 +我真的很讨厌浪费时间追踪由我的代码不小心迭代到数组末尾而导致的模糊错误,特别是在 IBM 大型机上的 FORTRAN 编程时代。另一个不时出现的隐晦问题是调用一个子程序时,该子程序带有一个四字节整数参数,而预期有两个字节;在小端架构上,这通常是一个良性的错误,但在大端机器上,前两个字节的值通常并不总是为零。 在那种批处理环境中进行调试也非常不便,通过核心转储或插入打印语句进行调试,这些语句本身会移动错误的位置甚至使它们消失。 -所以我使用 Pascal 的早期体验,先是在 [MTS][9] 上,然后是在 [IBM OS/VS1][10] 上使用相同的 MTS 编译器,让我的生活变得更加轻松。Pascal 的[强类型和静态类型][11]是胜利的重要组成部分,我使用的每个 Pascal 编译器都在数组的边界和范围上插入运行时检查,因此错误可以在发生时检测到。当我们在 20 世纪 80 年代早期将大部分工作转移到 Unix 系统时,移植 Pascal 代码是一项简单的任务。 +所以我使用 Pascal 的早期体验,先是在 [MTS][9] 上,然后是在 [IBM OS/VS1][10] 上使用相同的 MTS 编译器,让我的生活变得更加轻松。Pascal 的[强类型和静态类型][11]是取得这种胜利的重要组成部分,我使用的每个 Pascal 
编译器都会在数组的边界和范围上插入运行时检查,因此错误可以在发生时检测到。当我们在 20 世纪 80 年代早期将大部分工作转移到 Unix 系统时,移植 Pascal 代码是一项简单的任务。 -### 找到适量的语法 +### 适量的语法 但是对于我所喜欢的 Pascal 来说,我的代码很冗长,而且语法似乎要比代码还要多;例如,使用: @@ -40,25 +40,25 @@ if ... then begin ... end else ... end if (...) { ... } else { ... } ``` -另外,有些事情在 Pascal 中很难完成,在 C 中更容易。但是,当我开始越来越多地使用 C 时,我发现自己遇到了我曾经在 FORTRAN 中遇到的同样类型的错误,例如,超出数组边界。在原始的错误点未检测到数组结束,而仅在程序执行后期才会检测到它们的不利影响。幸运的是,我不再生活在那种批处理环境中,并且手头有很好的调试工具。不过,C 给我的灵活性还是对我有好处的。 +另外,有些事情在 Pascal 中很难完成,在 C 中更容易。但是,当我开始越来越多地使用 C 时,我发现自己遇到了我曾经在 FORTRAN 中遇到的同样类型的错误,例如,超出数组边界。在原始的错误点未检测到数组结束,而仅在程序执行后期才会检测到它们的不利影响。幸运的是,我不再生活在那种批处理环境中,并且手头有很好的调试工具。不过,C 对于我来说有点太灵活了。 -当我发现 [awk][12] 时,我对它与 C 做了一个很好的对比。那时,我的很多工作都涉及转换字段数据并创建报告。我发现用 `awk` 加上其他 Unix 命令行工具,如 `sort`、`sed`、`cut`、`join`、`paste`、`comm` 等等,可以做到事情令人吃惊。从本质上讲,这些工具给了我一个像是文本文件的关系数据库管理器,这种文本文件具有列式结构,是我们很多字段数据保存的方式。或者,即便不是那种格式,大部分时候也可以从关系数据库或某种二进制格式导出到列式结构中。 +当我遇到 [awk][12] 时,我发现它与 C 相比又是另外一种样子。那时,我的很多工作都涉及转换字段数据并创建报告。我发现用 `awk` 加上其他 Unix 命令行工具,如 `sort`、`sed`、`cut`、`join`、`paste`、`comm` 等等,可以做到事情令人吃惊。从本质上讲,这些工具给了我一个像是基于文本文件的关系数据库管理器,这种文本文件具有列式结构,是我们很多字段数据的保存方式。或者,即便不是这种格式,大部分时候也可以从关系数据库或某种二进制格式导出到列式结构中。 -`awk` 支持的字符串处理、[正则表达式][13]和[关联数组][14],以及 `awk` 的基本特性(它实际上是一个数据转换管道),非常符合我的需求。当面对二进制数据文件、复杂的数据结构和对性能的绝对需求时,我仍然会转回到 C;但随着我越来越多地使用 `awk`,我发现 C 的非常基础的字符串支持越来越令人沮丧。随着时间的推移,更多的时候我只会在必须时才使用 C,并且在其余的时候里大量使用 `awk`。 +`awk` 支持的字符串处理、[正则表达式][13]和[关联数组][14],以及 `awk` 的基本特性(它实际上是一个数据转换管道),非常符合我的需求。当面对二进制数据文件、复杂的数据结构和关键性能需求时,我仍然会转回到 C;但随着我越来越多地使用 `awk`,我发现 C 的非常基础的字符串支持越来越令人沮丧。随着时间的推移,更多的时候我只会在必须时才使用 C,并且在其余的时候里大量使用 `awk`。 ### Java 的抽象层级合适 -然后是 Java。它看起来相当不错 —— 一个相对简洁的语法,让人联想到 C,或者这种相似性至少比 Pascal 或其他任何早期的语言更为明显。它是强类型的,因此很多编程错误会在编译时被捕获。它似乎并不需要过多的面向对象的知识就能开始,这是一件好事,因为我当时对 [OOP 设计模式][15]毫不熟悉。但即使在刚开始,我也喜欢它的简化[继承模型][16]背后的思想。(Java 允许使用提供的接口进行单继承,以在某种程度上丰富范例。) +然后是 Java。它看起来相当不错 —— 相对简洁的语法,让人联想到 C,或者这种相似性至少要比 Pascal 或其他任何早期的语言更为明显。它是强类型的,因此很多编程错误会在编译时被捕获。它似乎并不需要过多的面向对象的知识就能起步,这是一件好事,因为我当时对 [OOP 设计模式][15]毫不熟悉。但即使在刚刚开始,我也喜欢它的简化[继承模型][16]背后的思想。(Java 允许使用提供的接口进行单继承,以在某种程度上丰富范例。) 它似乎带有丰富的功能库(即“自备电池”的概念),在适当的水平上直接满足了我的需求。最后,我发现自己很快就会想到将数据和行为在对象中组合在一起的想法。这似乎是明确控制数据之间交互的好方法 —— 比大量的参数列表或对全局变量的不受控制的访问要好得多。 -从那以后,Java 在我的编程工具箱中成为了 Helvetic 军刀。我仍然偶尔会在 `awk` 中编写内容,或者使用 Linux 命令行实用程序(如 `cut`、`sort` 或 `sed`),因为它们显然是解决手头问题的直接方法。我怀疑过去 20 年我有没有写过 50 行的 C 语言代码;Java 完全满足了我的需求。 +从那以后,Java 在我的编程工具箱中成为了 Helvetic 军刀。我仍然偶尔会在 `awk` 中编写程序,或者使用 Linux 命令行实用程序(如 `cut`、`sort` 或 `sed`),因为它们显然是解决手头问题的直接方法。我怀疑过去 20 年我可能没写过 50 行的 C 语言代码;Java 完全满足了我的需求。 -此外,Java 一直在不断改进。首先,它变得更加高效。并且它添加了一些非常有用的功能,例如[可以用 try 来测试资源][17],它可以很好地清理在文件 I/O 期间冗长而有点混乱的错误处理代码;或 [lambda][18],它提供了声明函数并将其作为参数传递的能力,而旧方法需要创建类或接口来“托管”这些函数;或[流][19],它封装了函数中的迭代行为,可以创建以链式函数调用形式实现的高效数据转换管道。 +此外,Java 一直在不断改进。首先,它变得更加高效。并且它添加了一些非常有用的功能,例如[可以用 try 来测试资源][17],它可以很好地清理在文件 I/O 期间冗长而有点混乱的错误处理代码;或 [lambda][18],它提供了声明函数并将其作为参数传递的能力,而旧方法需要创建类或接口来“托管”这些函数;或[流][19],它在函数中封装了迭代行为,可以创建以链式函数调用形式实现的高效数据转换管道。 ### Java 越来越好 -许多语言设计者研究了从根本上改善 Java 体验的方法。对我来说,其中大部分没有引起我的太多兴趣;再次,这更多地反映了我的典型工作流程,并且(更多地)减少了这些语言带来的功能。但其中一个演化步骤已经成为我的编程工具中不可或缺的一部分:[Groovy][20]。当我遇到一个小问题,需要一个简单的解决方案时,Groovy 已经成为了我的首选解决方案。而且,它与 Java 高度兼容。对我来说,Groovy 填补了 Python 为许多其他人所提供相同用处 —— 它紧凑、DRY(不要重复自己)和具有表达性(列表和词典有完整的语言支持)。我还使用了 [Grails][21],它使用 Groovy 为非常高性能和有用的 Java Web 应用程序提供简化的 Web 框架。 +许多语言设计者研究了从根本上改善 Java 体验的方法。对我来说,其中大部分没有引起我的太多兴趣;再次,这更多地反映了我的典型工作流程,并且(更多地)减少了这些语言带来的功能。但其中一个演化步骤已经成为我的编程工具中不可或缺的一部分:[Groovy][20]。当我遇到一个小问题,需要一个简单的解决方案时,Groovy 已经成为了我的首选。而且,它与 Java 高度兼容。对我来说,Groovy 填补了 Python 为许多其他人所提供的相同用处 —— 它紧凑、DRY(不要重复自己)和具有表达性(列表和词典有完整的语言支持)。我还使用了 [Grails][21],它使用 Groovy 为非常高性能和有用的 Java Web 应用程序提供简化的 Web 框架。 ### Java 仍然开源吗? 
@@ -73,7 +73,7 @@ via: https://opensource.com/article/19/9/why-i-use-java 作者:[Chris Hermansen][a] 选题:[lujun9972][b] 译者:[wxy](https://github.com/wxy) -校对:[校对者ID](https://github.com/校对者ID) +校对:[wxy](https://github.com/wxy) 本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 From 4e820fd42a704f6f5efefd648a0a2cfffb180ea9 Mon Sep 17 00:00:00 2001 From: Xingyu Wang Date: Fri, 13 Sep 2019 17:13:14 +0800 Subject: [PATCH 030/202] PUB @wxy https://linux.cn/article-11337-1.html --- {translated/talk => published}/20190902 Why I use Java.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) rename {translated/talk => published}/20190902 Why I use Java.md (99%) diff --git a/translated/talk/20190902 Why I use Java.md b/published/20190902 Why I use Java.md similarity index 99% rename from translated/talk/20190902 Why I use Java.md rename to published/20190902 Why I use Java.md index d7ff16abc6..d4ec8d6570 100644 --- a/translated/talk/20190902 Why I use Java.md +++ b/published/20190902 Why I use Java.md @@ -1,8 +1,8 @@ [#]: collector: (lujun9972) [#]: translator: (wxy) [#]: reviewer: (wxy) -[#]: publisher: ( ) -[#]: url: ( ) +[#]: publisher: (wxy) +[#]: url: (https://linux.cn/article-11337-1.html) [#]: subject: (Why I use Java) [#]: via: (https://opensource.com/article/19/9/why-i-use-java) [#]: author: (Chris Hermansen https://opensource.com/users/clhermansen) From 1dd90296a8495aff6236406792ed2f4d57700d6d Mon Sep 17 00:00:00 2001 From: DarkSun Date: Sat, 14 Sep 2019 00:52:35 +0800 Subject: [PATCH 031/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190914=20GNOME?= =?UTF-8?q?=203.34=20Released=20With=20New=20Features=20&=20Performance=20?= =?UTF-8?q?Improvements?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/tech/20190914 GNOME 3.34 Released With New Features - Performance Improvements.md --- ...New Features - Performance Improvements.md | 89 +++++++++++++++++++ 1 file changed, 89 insertions(+) create mode 100644 sources/tech/20190914 GNOME 3.34 Released With New Features - Performance Improvements.md diff --git a/sources/tech/20190914 GNOME 3.34 Released With New Features - Performance Improvements.md b/sources/tech/20190914 GNOME 3.34 Released With New Features - Performance Improvements.md new file mode 100644 index 0000000000..898f3763ca --- /dev/null +++ b/sources/tech/20190914 GNOME 3.34 Released With New Features - Performance Improvements.md @@ -0,0 +1,89 @@ +[#]: collector: (lujun9972) +[#]: translator: ( ) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (GNOME 3.34 Released With New Features & Performance Improvements) +[#]: via: (https://itsfoss.com/gnome-3-34-release/) +[#]: author: (Ankush Das https://itsfoss.com/author/ankush/) + +GNOME 3.34 Released With New Features & Performance Improvements +====== + +The latest version of GNOME dubbed “Thessaloniki” is here. It is an impressive upgrade over [GNOME 3.32][1] considering 6 months of work there. + +With this release, there’s a lot of new features and significant performance improvements. In addition to the new features, the level of customization has also improved. + +Here’s what’s new: + +### GNOME 3.34 Key Improvements + +You may watch this video to have a look at what’s new in GNOME 3.34: + +#### Drag and drop app icons into a folder + +The new shell theme lets you drag and drop the icons in the app drawer to re-arrange them or compile them into a folder. 
You may have already used a feature like this in your Android or iOS smartphone.
+
+![You can now drag and drop icons into a folder][2]
+
+#### Improved Calendar Manager
+
+The improved calendar manager integrates easily with 3rd party services and gives you the ability to manage your schedule right from your Linux system – without using a separate app.
+
+![GNOME Calendar Improvements][3]
+
+#### Background selection settings
+
+It’s now easier to select a custom background for the main screen and lock screen because all the available backgrounds are displayed on the same screen. Saves you at least one mouse click.
+
+![It’s easier to select backgrounds now][4]
+
+#### Re-arranging search options
+
+The search options/results can be re-arranged manually. So, you can decide what comes first when you search for something.
+
+#### Responsive design for ‘Settings’ app
+
+The settings menu UI is now responsive – so that you can easily access all the options no matter what type (or size) of device you’re on. This is surely going to help GNOME on [Linux smartphones like Librem 5][5].
+
+In addition to all these, the [official announcement][6] also notes useful additions for developers (additions to the system profiler and virtualization improvements):
+
+> For developers, GNOME 3.34 includes more data sources in Sysprof, making performance profiling an application even easier. Multiple improvements to Builder include an integrated D-Bus inspector.
+
+![Improved Sysprof tool in GNOME 3.34][7]
+
+### How to get GNOME 3.34?
+
+Even though the new release is live – it hasn’t yet reached the official repositories of your Linux distribution. So, we recommend waiting and upgrading when it’s available as an update package. In either case, you can explore the [source code][8] – if you want to build it.
+
+Well, that’s about it. If you’re curious, you may check out the [full release notes][10] for technical details.
+
+What do you think about the new GNOME 3.34?
+ +-------------------------------------------------------------------------------- + +via: https://itsfoss.com/gnome-3-34-release/ + +作者:[Ankush Das][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://itsfoss.com/author/ankush/ +[b]: https://github.com/lujun9972 +[1]: https://www.gnome.org/news/2019/03/gnome-3-32-released/ +[2]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2019/09/icon-grid-drag-gnome.png?ssl=1 +[3]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2019/09/gnome-calendar-improvements.jpg?ssl=1 +[4]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2019/09/background-panel-GNOME.png?resize=800%2C555&ssl=1 +[5]: https://itsfoss.com/librem-linux-phone/ +[6]: https://www.gnome.org/press/2019/09/gnome-3-34-released/ +[7]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2019/09/sysprof-gnome.jpg?resize=800%2C493&ssl=1 +[8]: https://download.gnome.org/ +[9]: https://itsfoss.com/fedora-26-release/ +[10]: https://help.gnome.org/misc/release-notes/3.34/ From d9eda20c7d22d0766203b5975dfc26066888f347 Mon Sep 17 00:00:00 2001 From: DarkSun Date: Sat, 14 Sep 2019 01:08:11 +0800 Subject: [PATCH 032/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190914=20Manjar?= =?UTF-8?q?o=20Linux=20Graduates=20From=20A=20Hobby=20Project=20To=20A=20P?= =?UTF-8?q?rofessional=20Project?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/tech/20190914 Manjaro Linux Graduates From A Hobby Project To A Professional Project.md --- ...Hobby Project To A Professional Project.md | 84 +++++++++++++++++++ 1 file changed, 84 insertions(+) create mode 100644 sources/tech/20190914 Manjaro Linux Graduates From A Hobby Project To A Professional Project.md diff --git a/sources/tech/20190914 Manjaro Linux Graduates From A Hobby Project To A Professional Project.md b/sources/tech/20190914 Manjaro Linux Graduates From A Hobby Project To A Professional Project.md new file mode 100644 index 0000000000..1431b9f76f --- /dev/null +++ b/sources/tech/20190914 Manjaro Linux Graduates From A Hobby Project To A Professional Project.md @@ -0,0 +1,84 @@ +[#]: collector: (lujun9972) +[#]: translator: ( ) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (Manjaro Linux Graduates From A Hobby Project To A Professional Project) +[#]: via: (https://itsfoss.com/manjaro-linux-business-formation/) +[#]: author: (Ankush Das https://itsfoss.com/author/ankush/) + +Manjaro Linux Graduates From A Hobby Project To A Professional Project +====== + +_**Brief: Manjaro is taking things professionally. While the Manjaro community will be responsible for the development of the project and other related activities, a company has been formed to work as its legal entity and handle the commercial agreements and professional services.**_ + +Manjaro is a quite popular Linux distribution considering that it was just a passion project by three people, Bernhard, Jonathan and Philip, which came into existence in 2011. Now that it’s one of the [best Linux distros][1] out there, this can’t really remain a hobby project, right? + +Well, here’s good news: Manjaro has established a new company “**Manjaro GmbH & Co. KG**” with [Blue Systems][2] as an advisor to enable full-time employment of maintainers and exploration of future commercial opportunities. + +![][3] + +### What is exactly the change here? 
+
+As per the [official announcement][4], the Manjaro project will stay as-is. However, a new company has been formed to secure the project and allow them to make legal contracts, official agreements, and other potential commercial activities. So, this makes the “hobby project” a professional endeavor.
+
+In addition to this, the donation funds will be transferred to non-profit [fiscal hosts][5] ([CommunityBridge][6] and [OpenCollective][7]) which will then accept and administer the funds on behalf of the project. Do note that the donations haven’t been used to create the company – so the transfer of funds to a non-profit fiscal host will ensure transparency while securing the donations.
+
+### How does this improve things?
+
+With the company formed, the new structure will help Manjaro in the following ways (as mentioned by the developers):
+
+  * enable developers to commit full time to Manjaro and its related projects;
+  * interact with other developers in sprints and events around Linux;
+  * protect the independence of Manjaro as a community-driven project, as well as protect its brand;
+  * provide faster security updates and a more efficient reaction to the needs of users;
+  * provide the means to act as a company on a professional level.
+
+
+
+The Manjaro team also shed some light on how it’s going to stay committed to the community:
+
+> The mission and goals of Manjaro will remain the same as before – to support the collaborative development of Manjaro and its widespread use. This effort will continue to be supported through donations and sponsorship and these will not, under any circumstances, be used by the established company.
+
+### More about Manjaro as a company
+
+Even though they mentioned that the project will remain independent of the company, not everyone is clear about the involvement of Manjaro with the “community” while having a company with commercial interests. So, the team also clarified their plans as a company in the announcement.
+
+Manjaro GmbH & Co. KG has been formed to effectively engage in commercial agreements, form partnerships, and offer professional services. With this, Manjaro devs Bernhard and Philip will now be able to commit full-time to Manjaro, while Blue Systems will take a role as an advisor.
+
+The company will be able to sign contracts and cover duties and guarantees officially, which the community cannot take on or be held responsible for.
+
+**Wrapping Up**
+
+So, with this move, along with commercial opportunities, they plan to go full-time and also hire contributors.
+
+Of course, now they mean – “business” (not as the bad guys, I hope). Most of the reactions to this announcement are positive and we all wish them good luck with this. While some might be skeptical about a “community” project having “commercial” interests (remember the [FreeOffice and Manjaro fiasco][9]?), I see this as an interesting move.
+
+What do you think? Feel free to let us know your thoughts in the comments below.
+
+--------------------------------------------------------------------------------
+
+via: https://itsfoss.com/manjaro-linux-business-formation/
+
+作者:[Ankush Das][a]
+选题:[lujun9972][b]
+译者:[译者ID](https://github.com/译者ID)
+校对:[校对者ID](https://github.com/校对者ID)
+
+本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
+
+[a]: https://itsfoss.com/author/ankush/
+[b]: https://github.com/lujun9972
+[1]: https://itsfoss.com/best-linux-distributions/
+[2]: https://www.blue-systems.com/
+[3]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2019/09/manjaro-gmbh.jpg?ssl=1
+[4]: https://forum.manjaro.org/t/manjaro-is-taking-the-next-step/102105
+[5]: https://en.wikipedia.org/wiki/Fiscal_sponsorship
+[6]: https://communitybridge.org/
+[7]: https://opencollective.com/
+[8]: https://itsfoss.com/linux-mint-hacked/
+[9]: https://itsfoss.com/libreoffice-freeoffice-manjaro-linux/

From 22dd43b5f296ef2ed9e7a8abf3eb1dda2d9f8cac Mon Sep 17 00:00:00 2001
From: DarkSun
Date: Sat, 14 Sep 2019 01:08:40 +0800
Subject: [PATCH 033/202]
 =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190911=20How=20?=
 =?UTF-8?q?to=20Collect=20System=20and=20Application=20Metrics=20using=20M?=
 =?UTF-8?q?etricbeat?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

sources/tech/20190911 How to Collect System and Application Metrics using Metricbeat.md
---
 ...nd Application Metrics using Metricbeat.md | 161 ++++++++++++++++++
 1 file changed, 161 insertions(+)
 create mode 100644 sources/tech/20190911 How to Collect System and Application Metrics using Metricbeat.md

diff --git a/sources/tech/20190911 How to Collect System and Application Metrics using Metricbeat.md b/sources/tech/20190911 How to Collect System and Application Metrics using Metricbeat.md
new file mode 100644
index 0000000000..194fd077e6
--- /dev/null
+++ b/sources/tech/20190911 How to Collect System and Application Metrics using Metricbeat.md
@@ -0,0 +1,161 @@
+[#]: collector: (lujun9972)
+[#]: translator: ( )
+[#]: reviewer: ( )
+[#]: publisher: ( )
+[#]: url: ( )
+[#]: subject: (How to Collect System and Application Metrics using Metricbeat)
+[#]: via: (https://www.linuxtechi.com/collect-system-application-metrics-metricbeat/)
+[#]: author: (Pradeep Kumar https://www.linuxtechi.com/author/pradeep/)
+
+How to Collect System and Application Metrics using Metricbeat
+======
+
+**Metricbeat** is a lightweight shipper (or agent) which is used to collect system metrics and application metrics and send them to an Elastic Stack server (i.e., **Elasticsearch**). Here, system metrics refers to CPU, memory, disk and network stats (IOPS), while application metrics means monitoring and collecting metrics of applications like **Apache**, **NGINX**, **Docker**, **Kubernetes**, **Redis**, etc. For metricbeat to work, we must first make sure that we have a healthy Elastic Stack setup up and running. Please refer to the below url to set up an Elastic Stack:
+
+**[How to Setup Multi Node Elastic Stack Cluster on RHEL 8 / CentOS 8][1]**
+
+In this article we will demonstrate how to install metricbeat on Linux servers, how metricbeat sends data to the Elastic Stack server (i.e., Elasticsearch), and then how to verify from the Kibana GUI whether the metrics data is visible or not.
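+
+Throughout this article, the examples assume the three-node Elasticsearch cluster from the guide linked above (the hostnames below come from that setup). Before installing anything, a quick curl against any of the nodes is a simple way to confirm that the stack is healthy:
+
+```
+[root@linuxtechi ~]# curl http://elasticsearch1.linuxtechi.local:9200/_cluster/health?pretty
+```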
+
+### Install Metricbeat on CentOS / RHEL Servers
+
+On CentOS / RHEL servers, metricbeat is installed using the following rpm command,
+
+```
+[root@linuxtechi ~]# rpm -ivh https://artifacts.elastic.co/downloads/beats/metricbeat/metricbeat-7.3.1-x86_64.rpm
+```
+
+For Debian based systems, use the below commands to install metricbeat.
+
+```
+~]# curl -L -O https://artifacts.elastic.co/downloads/beats/metricbeat/metricbeat-7.3.1-amd64.deb
+~]# dpkg -i metricbeat-7.3.1-amd64.deb
+```
+
+Add the following lines to the /etc/hosts file, as we will be using the FQDNs of Elasticsearch and Kibana in the metricbeat config file and commands,
+
+**Note:** Change the IPs and hostnames as per your setup
+
+```
+192.168.56.40 elasticsearch1.linuxtechi.local
+192.168.56.50 elasticsearch2.linuxtechi.local
+192.168.56.60 elasticsearch3.linuxtechi.local
+192.168.56.10 kibana.linuxtechi.local
+```
+
+### Configure Metricbeat on Linux Server (CentOS / RHEL / Debian)
+
+Whenever the metricbeat rpm or deb package is installed, its configuration file (**metricbeat.yml**) is created under “**/etc/metricbeat/**“. Let’s edit this configuration file and tell the system to send system and application metrics data to the Elasticsearch servers.
+
+```
+[root@linuxtechi ~]# vi /etc/metricbeat/metricbeat.yml
+
+setup.kibana:
+  host: "kibana.linuxtechi.local:5601"
+output.elasticsearch:
+  hosts: ["elasticsearch1.linuxtechi.local:9200","elasticsearch2.linuxtechi.local:9200","elasticsearch3.linuxtechi.local:9200"]
+```
+
+Save and exit the file.
+
+**Note:** Replace the Elasticsearch and Kibana details to suit your environment.
+
+Run the following metricbeat command so that the metrics dashboards become available in the Kibana GUI.
+
+```
+[root@linuxtechi ~]# metricbeat setup -e -E output.elasticsearch.hosts=['elasticsearch1.linuxtechi.local:9200','elasticsearch2.linuxtechi.local:9200','elasticsearch3.linuxtechi.local:9200'] -E setup.kibana.host=kibana.linuxtechi.local:5601
+```
+
+The output of the above command would be something like below:
+
+![metricbeat-command-output-linuxserver][2]
+
+The above output confirms that the metrics dashboards were loaded successfully into the Kibana GUI. Metricbeat will now send metrics data to the Elastic Stack server every 10 seconds.
+
+Let’s start and enable the metricbeat service using the following commands,
+
+```
+[root@linuxtechi ~]# systemctl start metricbeat
+[root@linuxtechi ~]# systemctl enable metricbeat
+```
+
+Now go to the Kibana GUI and click on Dashboard from the left side bar,
+
+[![Kibana-GUI-Dashbaord-Option][2]][3]
+
+In the next window we will see the available metrics dashboards; search for ‘**system**’ and then choose the System Metrics Dashboard,
+
+[![Choose-Metric-Dashbaord-Kibana][2]][4]
+
+[![Metricbeat-System-Overview-ECS-Kibana][2]][5]
+
+As we can see, the system’s metrics data is available on the dashboard. These metrics are collected based on the entries mentioned in the file “**/etc/metricbeat/modules.d/system.yml**”.
+
+Suppose we want to collect application metrics data as well; then we first have to enable the respective modules. To enable the Apache and MySQL metric modules, run the following command on the client machine,
+
+```
+[root@linuxtechi ~]# metricbeat modules enable apache mysql
+Enabled apache
+Enabled mysql
+[root@linuxtechi ~]#
+```
+
+Once we enable the modules, we can edit their yml files,
+
+```
+[root@linuxtechi ~]# vi /etc/metricbeat/modules.d/apache.yml
+…
+- module: apache
+  period: 10s
+  hosts: ["http://192.168.56.70"]
+…
+```
+
+Change the IP in the hosts parameter to suit your environment.
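+
+Before moving on, it is worth confirming that what we have configured so far is valid. Recent Metricbeat releases ship a `test` subcommand for exactly this purpose; assuming the 7.x package installed above, the following optional sanity checks confirm that the configuration parses and that the Elasticsearch output defined in metricbeat.yml is reachable:
+
+```
+[root@linuxtechi ~]# metricbeat test config
+[root@linuxtechi ~]# metricbeat test output
+```
+
+If either command reports an error, fix the yml files before restarting the service.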
+
+Similarly, edit the mysql yml file and change the mysql root credentials to suit your environment,
+
+```
+[root@linuxtechi ~]# vi /etc/metricbeat/modules.d/mysql.yml
+………
+- module: mysql
+  metricsets:
+    - status
+    - galera_status
+  period: 10s
+  hosts: ["root:root@tcp(127.0.0.1:3306)/"]
+………
+```
+
+After making the changes, restart the metricbeat service,
+
+```
+[root@linuxtechi ~]# systemctl restart metricbeat
+```
+
+Now go to the Kibana GUI and, under the Dashboard option, look for the MySQL metrics,
+
+[![Metricbeat-MySQL-Overview-ECS-Kibana][2]][6]
+
+As we can see above, the MySQL metrics data is visible, which confirms that we have successfully installed and configured metricbeat.
+
+That’s all from this tutorial. If these steps helped you to set up metricbeat, please do share your feedback and comments.
+
+--------------------------------------------------------------------------------
+
+via: https://www.linuxtechi.com/collect-system-application-metrics-metricbeat/
+
+作者:[Pradeep Kumar][a]
+选题:[lujun9972][b]
+译者:[译者ID](https://github.com/译者ID)
+校对:[校对者ID](https://github.com/校对者ID)
+
+本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出

+[a]: https://www.linuxtechi.com/author/pradeep/
+[b]: https://github.com/lujun9972
+[1]: https://www.linuxtechi.com/setup-multinode-elastic-stack-cluster-rhel8-centos8/
+[2]: data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7
+[3]: https://www.linuxtechi.com/wp-content/uploads/2019/09/Kibana-GUI-Dashbaord-Option.jpg
+[4]: https://www.linuxtechi.com/wp-content/uploads/2019/09/Choose-Metric-Dashbaord-Kibana.jpg
+[5]: https://www.linuxtechi.com/wp-content/uploads/2019/09/Metricbeat-System-Overview-ECS-Kibana.jpg
+[6]: https://www.linuxtechi.com/wp-content/uploads/2019/09/Metricbeat-MySQL-Overview-ECS-Kibana.jpg

From d9eda20c7d22d0766203b5975dfc26066888f347 Mon Sep 17 00:00:00 2001
From: DarkSun
Date: Sat, 14 Sep 2019 01:08:57 +0800
Subject: [PATCH 034/202]
 =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190909=20How=20?=
 =?UTF-8?q?to=20Setup=20Multi=20Node=20Elastic=20Stack=20Cluster=20on=20RH?=
 =?UTF-8?q?EL=208=20/=20CentOS=208?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

sources/tech/20190909 How to Setup Multi Node Elastic Stack Cluster on RHEL 8 - CentOS 8.md
---
 ...stic Stack Cluster on RHEL 8 - CentOS 8.md | 476 ++++++++++++++++++
 1 file changed, 476 insertions(+)
 create mode 100644 sources/tech/20190909 How to Setup Multi Node Elastic Stack Cluster on RHEL 8 - CentOS 8.md

diff --git a/sources/tech/20190909 How to Setup Multi Node Elastic Stack Cluster on RHEL 8 - CentOS 8.md b/sources/tech/20190909 How to Setup Multi Node Elastic Stack Cluster on RHEL 8 - CentOS 8.md
new file mode 100644
index 0000000000..f56e708426
--- /dev/null
+++ b/sources/tech/20190909 How to Setup Multi Node Elastic Stack Cluster on RHEL 8 - CentOS 8.md
@@ -0,0 +1,476 @@
+[#]: collector: (lujun9972)
+[#]: translator: ( )
+[#]: reviewer: ( )
+[#]: publisher: ( )
+[#]: url: ( )
+[#]: subject: (How to Setup Multi Node Elastic Stack Cluster on RHEL 8 / CentOS 8)
+[#]: via: (https://www.linuxtechi.com/setup-multinode-elastic-stack-cluster-rhel8-centos8/)
+[#]: author: (Pradeep Kumar https://www.linuxtechi.com/author/pradeep/)
+
+How to Setup Multi Node Elastic Stack Cluster on RHEL 8 / CentOS 8
+======
+
+The Elastic Stack, widely known as the **ELK stack**, is a group of open-source products like **Elasticsearch**, **Logstash** and **Kibana**.
Elastic Stack is developed and maintained by the Elastic company. Using the Elastic Stack, one can feed system logs to Logstash, a data collection engine which accepts logs or data from all sources, normalizes them, and then forwards them to Elasticsearch for **analyzing**, **indexing**, **searching** and **storing**. Finally, using Kibana, one can visualize the data and also create interactive graphs and diagrams based on user queries.
+
+[![Elastic-Stack-Cluster-RHEL8-CentOS8][1]][2]
+
+In this article we will demonstrate how to set up a multi node Elastic Stack cluster on RHEL 8 / CentOS 8 servers. Following are the details for my Elastic Stack cluster:
+
+### Elasticsearch:
+
+  * Three Servers with Minimal RHEL 8 / CentOS 8
+  * IPs & Hostnames – 192.168.56.40 (elasticsearch1.linuxtechi.local), 192.168.56.50 (elasticsearch2.linuxtechi.local), 192.168.56.60 (elasticsearch3.linuxtechi.local)
+
+
+
+### Logstash:
+
+  * Two Servers with minimal RHEL 8 / CentOS 8
+  * IPs & Hostnames – 192.168.56.20 (logstash1.linuxtechi.local), 192.168.56.30 (logstash2.linuxtechi.local)
+
+
+
+### Kibana:
+
+  * One Server with minimal RHEL 8 / CentOS 8
+  * Hostname – kibana.linuxtechi.local
+  * IP – 192.168.56.10
+
+
+
+### Filebeat:
+
+  * One Server with minimal CentOS 7
+  * IP & hostname – 192.168.56.70 (web-server)
+
+
+
+Let’s start with the Elasticsearch cluster setup,
+
+#### Setup 3 node Elasticsearch cluster
+
+As already stated, I have kept three nodes for the Elasticsearch cluster. Log in to each node, set the hostname and configure the yum/dnf repositories.
+
+Use the below hostnamectl command to set the hostname on the respective nodes,
+
+```
+[root@linuxtechi ~]# hostnamectl set-hostname "elasticsearch1.linuxtechi.local"
+[root@linuxtechi ~]# exec bash
+[root@linuxtechi ~]#
+[root@linuxtechi ~]# hostnamectl set-hostname "elasticsearch2.linuxtechi.local"
+[root@linuxtechi ~]# exec bash
+[root@linuxtechi ~]#
+[root@linuxtechi ~]# hostnamectl set-hostname "elasticsearch3.linuxtechi.local"
+[root@linuxtechi ~]# exec bash
+[root@linuxtechi ~]#
+```
+
+For a CentOS 8 system we don’t need to configure any OS package repository, while a RHEL 8 server needs a valid subscription and must be registered with Red Hat to get its package repositories.
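+
+If your RHEL 8 nodes are not registered yet, something along the following lines should do it (a rough sketch; the username and password below are placeholders for your own Red Hat account credentials):
+
+```
+~]# subscription-manager register --username your_rh_user --password your_rh_password --auto-attach
+```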
In case you want to configure a local yum/dnf repository for OS packages, refer to the below url:
+
+[How to Setup Local Yum/DNF Repository on RHEL 8 Server Using DVD or ISO File][3]
+
+Configure the Elasticsearch package repository on all the nodes; create a file elastic.repo under the /etc/yum.repos.d/ folder with the following content
+
+```
+~]# vi /etc/yum.repos.d/elastic.repo
+[elasticsearch-7.x]
+name=Elasticsearch repository for 7.x packages
+baseurl=https://artifacts.elastic.co/packages/7.x/yum
+gpgcheck=1
+gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch
+enabled=1
+autorefresh=1
+type=rpm-md
+```
+
+Save and exit the file.
+
+Use the below rpm command on all three nodes to import Elastic’s public signing key
+
+```
+~]# rpm --import https://artifacts.elastic.co/GPG-KEY-elasticsearch
+```
+
+Add the following lines to the /etc/hosts file on all three nodes,
+
+```
+192.168.56.40 elasticsearch1.linuxtechi.local
+192.168.56.50 elasticsearch2.linuxtechi.local
+192.168.56.60 elasticsearch3.linuxtechi.local
+```
+
+Install Java on all three nodes using the dnf command,
+
+```
+[root@linuxtechi ~]# dnf install java-openjdk -y
+[root@linuxtechi ~]# dnf install java-openjdk -y
+[root@linuxtechi ~]# dnf install java-openjdk -y
+```
+
+Install Elasticsearch using the following dnf command on all three nodes,
+
+```
+[root@linuxtechi ~]# dnf install elasticsearch -y
+[root@linuxtechi ~]# dnf install elasticsearch -y
+[root@linuxtechi ~]# dnf install elasticsearch -y
+```
+
+**Note:** In case the OS firewall is enabled and running on each Elasticsearch node, allow the following ports using the below firewall-cmd commands,
+
+```
+~]# firewall-cmd --permanent --add-port=9300/tcp
+~]# firewall-cmd --permanent --add-port=9200/tcp
+~]# firewall-cmd --reload
+```
+
+Configure Elasticsearch: edit the file “**/etc/elasticsearch/elasticsearch.yml**” on all three nodes and add the following,
+
+```
+~]# vim /etc/elasticsearch/elasticsearch.yml
+…………………………………………
+cluster.name: opn-cluster
+node.name: elasticsearch1.linuxtechi.local
+network.host: 192.168.56.40
+http.port: 9200
+discovery.seed_hosts: ["elasticsearch1.linuxtechi.local", "elasticsearch2.linuxtechi.local", "elasticsearch3.linuxtechi.local"]
+cluster.initial_master_nodes: ["elasticsearch1.linuxtechi.local", "elasticsearch2.linuxtechi.local", "elasticsearch3.linuxtechi.local"]
+……………………………………………
+```
+
+**Note:** On each node, add the correct hostname in the node.name parameter and the IP address in the network.host parameter; the other parameters will remain the same.
+
+Now start and enable the Elasticsearch service on all three nodes using the following systemctl commands,
+
+```
+~]# systemctl daemon-reload
+~]# systemctl enable elasticsearch.service
+~]# systemctl start elasticsearch.service
+```
+
+Use the below ‘ss’ command to verify whether each elasticsearch node has started listening on port 9200,
+
+```
+[root@linuxtechi ~]# ss -tunlp | grep 9200
+tcp LISTEN 0 128 [::ffff:192.168.56.40]:9200 *:* users:(("java",pid=2734,fd=256))
+[root@linuxtechi ~]#
+```
+
+Use the following curl commands to verify the Elasticsearch cluster status
+
+```
+[root@linuxtechi ~]# curl http://elasticsearch1.linuxtechi.local:9200
+[root@linuxtechi ~]# curl -X GET http://elasticsearch2.linuxtechi.local:9200/_cluster/health?pretty
+```
+
+The output of the above commands would be something like below,
+
+![Elasticsearch-cluster-status-rhel8][1]
+
+The above output confirms that we have successfully created a 3 node Elasticsearch cluster and that the cluster status is green.
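+
+As an additional check, you can ask any node for the cluster's member list via the stock _cat API (shown here against the first node; in the output, the elected master is marked with a `*` in the master column):
+
+```
+[root@linuxtechi ~]# curl -X GET http://elasticsearch1.linuxtechi.local:9200/_cat/nodes?v
+```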
+
+**Note:** If you want to modify the JVM heap size, edit the file “**/etc/elasticsearch/jvm.options**” and change the below parameters to suit your environment,
+
+  * -Xms1g
+  * -Xmx1g
+
+
+
+Now let’s move to the Logstash nodes,
+
+#### Install and Configure Logstash
+
+Perform the following steps on both Logstash nodes,
+
+Log in to both the nodes and set the hostname using the following hostnamectl command,
+
+```
+[root@linuxtechi ~]# hostnamectl set-hostname "logstash1.linuxtechi.local"
+[root@linuxtechi ~]# exec bash
+[root@linuxtechi ~]#
+[root@linuxtechi ~]# hostnamectl set-hostname "logstash2.linuxtechi.local"
+[root@linuxtechi ~]# exec bash
+[root@linuxtechi ~]#
+```
+
+Add the following entries to the /etc/hosts file on both logstash nodes
+
+```
+~]# vi /etc/hosts
+192.168.56.40 elasticsearch1.linuxtechi.local
+192.168.56.50 elasticsearch2.linuxtechi.local
+192.168.56.60 elasticsearch3.linuxtechi.local
+```
+
+Save and exit the file.
+
+Configure the Logstash repository on both the nodes; create a file **logstash.repo** under the folder /etc/yum.repos.d/ with the following content,
+
+```
+~]# vi /etc/yum.repos.d/logstash.repo
+[elasticsearch-7.x]
+name=Elasticsearch repository for 7.x packages
+baseurl=https://artifacts.elastic.co/packages/7.x/yum
+gpgcheck=1
+gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch
+enabled=1
+autorefresh=1
+type=rpm-md
+```
+
+Save and exit the file, then run the following rpm command to import the signing key
+
+```
+~]# rpm --import https://artifacts.elastic.co/GPG-KEY-elasticsearch
+```
+
+Install Java OpenJDK on both the nodes using the following dnf command,
+
+```
+~]# dnf install java-openjdk -y
+```
+
+Run the following dnf command on both the nodes to install logstash,
+
+```
+[root@linuxtechi ~]# dnf install logstash -y
+[root@linuxtechi ~]# dnf install logstash -y
+```
+
+Now configure logstash; perform the below steps on both logstash nodes,
+
+Create a logstash conf file; for that, first copy the sample logstash file under ‘/etc/logstash/conf.d/’
+
+```
+# cd /etc/logstash/
+# cp logstash-sample.conf conf.d/logstash.conf
+```
+
+Edit the conf file and update the following content,
+
+```
+# vi conf.d/logstash.conf
+
+input {
+  beats {
+    port => 5044
+  }
+}
+
+output {
+  elasticsearch {
+    hosts => ["http://elasticsearch1.linuxtechi.local:9200", "http://elasticsearch2.linuxtechi.local:9200", "http://elasticsearch3.linuxtechi.local:9200"]
+    index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}"
+    #user => "elastic"
+    #password => "changeme"
+  }
+}
+```
+
+Under the output section, specify the FQDNs of all three Elasticsearch nodes in the hosts parameter; leave the other parameters as they are.
+
+Allow the logstash port “5044” in the OS firewall using the following firewall-cmd commands,
+
+```
+~ # firewall-cmd --permanent --add-port=5044/tcp
+~ # firewall-cmd --reload
+```
+
+Now start and enable the Logstash service; run the following systemctl commands on both the nodes
+
+```
+~]# systemctl start logstash
+~]# systemctl enable logstash
+```
+
+Use the below ss command to verify that the logstash service has started listening on port 5044,
+
+```
+[root@linuxtechi ~]# ss -tunlp | grep 5044
+tcp LISTEN 0 128 *:5044 *:* users:(("java",pid=2416,fd=96))
+[root@linuxtechi ~]#
+```
+
+The above output confirms that logstash has been installed and configured successfully. Let’s move to Kibana installation.
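+
+Before doing so, you can optionally have Logstash validate the pipeline file itself; the `--config.test_and_exit` flag (available in Logstash 5 and later) parses the configuration and exits without starting the pipeline:
+
+```
+[root@linuxtechi ~]# /usr/share/logstash/bin/logstash --config.test_and_exit -f /etc/logstash/conf.d/logstash.conf
+```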
+
+#### Install and Configure Kibana
+
+Log in to the Kibana node and set the hostname with the **hostnamectl** command,
+
+```
+[root@linuxtechi ~]# hostnamectl set-hostname "kibana.linuxtechi.local"
+[root@linuxtechi ~]# exec bash
+[root@linuxtechi ~]#
+```
+
+Edit the /etc/hosts file and add the following lines
+
+```
+192.168.56.40 elasticsearch1.linuxtechi.local
+192.168.56.50 elasticsearch2.linuxtechi.local
+192.168.56.60 elasticsearch3.linuxtechi.local
+```
+
+Set up the Kibana repository using the following,
+
+```
+[root@linuxtechi ~]# vi /etc/yum.repos.d/kibana.repo
+[elasticsearch-7.x]
+name=Elasticsearch repository for 7.x packages
+baseurl=https://artifacts.elastic.co/packages/7.x/yum
+gpgcheck=1
+gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch
+enabled=1
+autorefresh=1
+type=rpm-md
+
+[root@linuxtechi ~]# rpm --import https://artifacts.elastic.co/GPG-KEY-elasticsearch
+```
+
+Execute the below command to install kibana,
+
+```
+[root@linuxtechi ~]# yum install kibana -y
+```
+
+Configure Kibana by editing the file “**/etc/kibana/kibana.yml**”
+
+```
+[root@linuxtechi ~]# vim /etc/kibana/kibana.yml
+…………
+server.host: "kibana.linuxtechi.local"
+server.name: "kibana.linuxtechi.local"
+elasticsearch.hosts: ["http://elasticsearch1.linuxtechi.local:9200", "http://elasticsearch2.linuxtechi.local:9200", "http://elasticsearch3.linuxtechi.local:9200"]
+…………
+```
+
+Start and enable the kibana service
+
+```
+[root@linuxtechi ~]# systemctl start kibana
+[root@linuxtechi ~]# systemctl enable kibana
+```
+
+Allow the Kibana port ‘5601’ in the OS firewall,
+
+```
+[root@linuxtechi ~]# firewall-cmd --permanent --add-port=5601/tcp
+success
+[root@linuxtechi ~]# firewall-cmd --reload
+success
+[root@linuxtechi ~]#
+```
+
+Access the Kibana portal / GUI using the following URL:
+
+http://kibana.linuxtechi.local:5601
+
+[![Kibana-Dashboard-rhel8][1]][4]
+
+From the dashboard, we can also check our Elastic Stack cluster status
+
+[![Stack-Monitoring-Overview-RHEL8][1]][5]
+
+This confirms that we have successfully set up a multi node Elastic Stack cluster on RHEL 8 / CentOS 8.
+
+Now let’s send some logs to the logstash nodes via filebeat from another Linux server. In my case, I have one CentOS 7 server; I will push all the important logs of this server to logstash via filebeat.
+
+Log in to the CentOS 7 server and install the filebeat package using the following rpm command,
+
+```
+[root@linuxtechi ~]# rpm -ivh https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-7.3.1-x86_64.rpm
+Retrieving https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-7.3.1-x86_64.rpm
+Preparing... ################################# [100%]
+Updating / installing...
+ 1:filebeat-7.3.1-1 ################################# [100%]
+[root@linuxtechi ~]#
+```
+
+Edit the /etc/hosts file and add the following entries,
+
+```
+192.168.56.20 logstash1.linuxtechi.local
+192.168.56.30 logstash2.linuxtechi.local
+```
+
+Now configure filebeat so that it can send logs to the logstash nodes using a load balancing technique; edit the file “**/etc/filebeat/filebeat.yml**” and set the following parameters,
+
+Under the ‘**filebeat.inputs:**’ section, change ‘**enabled: false**‘ to ‘**enabled: true**‘ and, under the “**paths**” parameter, specify the locations of the log files that we want to send to logstash. In the output Elasticsearch section, comment out “**output.elasticsearch**” and the **hosts** parameter. In the Logstash output section, remove the comments for “**output.logstash:**” and “**hosts:**”, add both logstash nodes in the hosts parameter, and also set “**loadbalance: true**”.
+ +``` +[root@linuxtechi ~]# vi /etc/filebeat/filebeat.yml +………………………. +filebeat.inputs: +- type: log + enabled: true + paths: + - /var/log/messages + - /var/log/dmesg + - /var/log/maillog + - /var/log/boot.log +#output.elasticsearch: + # hosts: ["localhost:9200"] + +output.logstash: + hosts: ["logstash1.linuxtechi.local:5044", "logstash2.linuxtechi.local:5044"] + loadbalance: true +……………………………………… +``` + +Start and enable filebeat service using beneath systemctl commands, + +``` +[root@linuxtechi ~]# systemctl start filebeat +[root@linuxtechi ~]# systemctl enable filebeat +``` + +Now go to Kibana GUI, verify whether new indices are visible or not, + +Choose Management option from Left side bar and then click on Index Management under Elasticsearch, + +[![Elasticsearch-index-management-Kibana][1]][6] + +As we can see above, indices are visible now, let’s create index pattern, + +Click on “Index Patterns” from Kibana Section, it will prompt us to create a new pattern, click on “**Create Index Pattern**” and specify the pattern name as “**filebeat**” + +[![Define-Index-Pattern-Kibana-RHEL8][1]][7] + +Click on Next Step + +Choose “**Timestamp**” as time filter for index pattern and then click on “Create index pattern” + +[![Time-Filter-Index-Pattern-Kibana-RHEL8][1]][8] + +[![filebeat-index-pattern-overview-Kibana][1]][9] + +Now Click on Discover to see real time filebeat index pattern, + +[![Discover-Kibana-REHL8][1]][10] + +This confirms that Filebeat agent has been configured successfully and we are able to see real time logs on Kibana dashboard. + +That’s all from this article, please don’t hesitate to share your feedback and comments in case these steps help you to setup multi node Elastic Stack Cluster on RHEL 8 / CentOS 8 system. + +-------------------------------------------------------------------------------- + +via: https://www.linuxtechi.com/setup-multinode-elastic-stack-cluster-rhel8-centos8/ + +作者:[Pradeep Kumar][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://www.linuxtechi.com/author/pradeep/ +[b]: https://github.com/lujun9972 +[1]: data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7 +[2]: https://www.linuxtechi.com/wp-content/uploads/2019/09/Elastic-Stack-Cluster-RHEL8-CentOS8.jpg +[3]: https://www.linuxtechi.com/setup-local-yum-dnf-repository-rhel-8/ +[4]: https://www.linuxtechi.com/wp-content/uploads/2019/09/Kibana-Dashboard-rhel8.jpg +[5]: https://www.linuxtechi.com/wp-content/uploads/2019/09/Stack-Monitoring-Overview-RHEL8.jpg +[6]: https://www.linuxtechi.com/wp-content/uploads/2019/09/Elasticsearch-index-management-Kibana.jpg +[7]: https://www.linuxtechi.com/wp-content/uploads/2019/09/Define-Index-Pattern-Kibana-RHEL8.jpg +[8]: https://www.linuxtechi.com/wp-content/uploads/2019/09/Time-Filter-Index-Pattern-Kibana-RHEL8.jpg +[9]: https://www.linuxtechi.com/wp-content/uploads/2019/09/filebeat-index-pattern-overview-Kibana.jpg +[10]: https://www.linuxtechi.com/wp-content/uploads/2019/09/Discover-Kibana-REHL8.jpg From 50a689cb607c5132f6155765c1f3d814f6860f3c Mon Sep 17 00:00:00 2001 From: DarkSun Date: Sat, 14 Sep 2019 01:09:49 +0800 Subject: [PATCH 035/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190913=20Why=20?= =?UTF-8?q?the=20founder=20of=20Apache=20is=20all-in=20on=20blockchain?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/tech/20190913 
Why the founder of Apache is all-in on blockchain.md --- ...under of Apache is all-in on blockchain.md | 88 +++++++++++++++++++ 1 file changed, 88 insertions(+) create mode 100644 sources/tech/20190913 Why the founder of Apache is all-in on blockchain.md diff --git a/sources/tech/20190913 Why the founder of Apache is all-in on blockchain.md b/sources/tech/20190913 Why the founder of Apache is all-in on blockchain.md new file mode 100644 index 0000000000..7487ee3626 --- /dev/null +++ b/sources/tech/20190913 Why the founder of Apache is all-in on blockchain.md @@ -0,0 +1,88 @@ +[#]: collector: (lujun9972) +[#]: translator: ( ) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (Why the founder of Apache is all-in on blockchain) +[#]: via: (https://opensource.com/article/19/9/podcast-interview-brian-behlendorf) +[#]: author: (Gordon Haff https://opensource.com/users/ghaffhttps://opensource.com/users/wonderchookhttps://opensource.com/users/sachinpbhttps://opensource.com/users/ron-mcfarland) + +Why the founder of Apache is all-in on blockchain +====== +Brian Behlendorf talks about starting Apache, the tension between +pragmatism and idealism, and why he’s excited about blockchain. +![Data container block with hexagons][1] + +Brian Behlendorf is perhaps best known for being a co-founder of the Apache Project, which became the Apache Software Foundation. Today, he's the executive director of the Hyperledger Foundation, an organization focused on enterprise-grade, open source, distributed ledgers (better known as blockchains). He also says he "put the first ad banner online and have been apologizing ever since." + +In a recent [conversation on my podcast][2], Behlendorf talks about the goals of the Apache Project, the role of foundations generally, and his hopes for blockchain. + +### Starting Apache + +As Behlendorf tells the story, [Apache][3] came out of an environment when "we might have had a more beneficent view of technology companies. We still thought of them as leading the fight for individual empowerment." + +At the same time, Behlendorf adds, "there was still a concern that, as the web grew, it would lose its character and its soul as this kind of funky domain, very flat space, supportive of freedoms of speech, freedoms of thought, freedoms of association that were completely novel to us at the time, but now we take for granted—or even we have found weaponized against us." + +This led him to want Apache to address concerns that were both pragmatic in nature and more idealistic. + +The pragmatic aspect stemmed from the fact that "iteratively improving upon the [NCSA web server][4] was just easier and certainly a lot cheaper than buying Netscape's commercial web server or thinking about [IIS][5] or any of the other commercial options at the time." Behlendorf also acknowledges, "it's nice to have other people out there who can review my code and [to] work together with." + +There was also an "idealistic notion that tapped into that zeitgeist in the '90s," Behlendorf says. "This is a printing press. We can help people publish their own blogs, help people publish their own websites, and get as much content liberated as possible and digitized as possible. That was kind of the web movement. In particular, we felt it would be important to make sure that the printing presses remained in the hands of the people." 
+ +### Founding the Apache Software Foundation + +Once the [Apache HTTPD][6] web server project grew to the point that 70% of the web was running on top of Apache HTTPD, it was clear to the project's participants that more structure was needed. + +As Behlendorf describes it: "It was still being built by a group of people whose only connection to each other was that they were all on an email mailing list. All had commit to a CVS repository. All had shell on a Unix box that I maintained off of _Wired_'s internet connection. And otherwise [we] had no formalism between us. In a way, that was liberating; in a way, we were like, 'yeah, you know, we don't need overhead, we don't need stuffy bureaucrats.'" + +Behlendorf and the others weren't interested in incorporating a for-profit company, given that they all had other projects and startups and weren't looking to make Apache HTTPD a full-time job. However, they recognized the legal risks of not having some sort of corporate shield, especially as the portfolio of Apache projects grew. + +As Behlendorf puts it: "What happens if somebody who owned a patent decided to file a patent lawsuit against the developers of Apache and wanted something as simple and modest as a dollar per copy? If they won—and given patent laws, they certainly could win—they'd seek those tens or hundreds of millions of dollars from the Apache developers. For that crime of giving away free software, we could lose our homes." + +In response, the Apache Software Foundation was incorporated in 1999 as a US 501(c)(3) charitable organization that was explicitly membership-based, in contrast to foundations like the Linux Foundation that are organized more along the lines of industry consortia. (The Linux Foundation is a US 501(c)(6) nonprofit mutual benefit corporation.) + +Behlendorf observes that there are a lot of different models out there and he's happy "to see quite a few foundations out there and new ones showing up." Whatever the specific approach, however, he argues, "in general, if you're doing anything meaningful in open source software, your activities should be parked somewhere where there is a protective structure around it that helps answer the questions and the needs of the broader user community." + +### Joining blockchain + +Today, Behlendorf is executive director of the [Hyperledger Foundation][7], which he joined about three years ago, a few months after the first Hyperledger Fabric code drop in late 2015. He says, "with Hyperledger, one thing that pulled me in and got me excited was this notion that there are some really important problems we can solve with distributed systems, with distributed ledgers, and smart contract techniques. It wasn't programmable money, it wasn't regulatory arbitrage. It wasn't … the things people associate with cryptocurrencies that was the driver here. It was the sense that the digitalization of society had led to a future that looked a lot more like big, central systems. It was a very un-internet kind of worldview, but it seemed to be the trend line we were on." + +As a result, "blockchain technology seemed urgent to get involved in [and] that lined up with these idealistic and pragmatic impulses that I've had—and I think other people in open source have had," he adds. + +Specifically, it was the emergence of a set of use cases beyond programmable money that drew in Behlendorf. "I think the one that pulled me in was land titles and emerging markets," he recalls. It wasn't just about having a distributed database. 
It was about having a distributed ledger that "actually supported consensus, one that actually had the network enforcing rules about valid transactions versus invalid transactions. One that was programmable, with smart contracts on top. This started to make sense to me, and [it] was something that was appealing to me in a way that financial instruments and proof-of-work was not."
+
+Behlendorf makes the point that for blockchain technology to have a purpose, the network has to be decentralized. For example, you probably want "nodes that are being run by different technology partners or … nodes being run by end-user organizations themselves because otherwise, why not just use a central database run by a single vendor or a single technology partner?" he argues.
+
+### Growing open source today
+
+Behlendorf rounds out our interview by discussing how open source software has continued to grow in importance, often for totally pragmatic reasons. "I think there's an entirely rational, non-idealistic business argument for why we're seeing more and more companies, even the ones we traditionally associated with very proprietary business models, be it Microsoft, be it Uber, be it Facebook, actually recognizing open source is strategically interesting," Behlendorf says.
+
+He feels as if this is a continuation of the thinking the Apache Software Foundation had 20 years ago. "We thought that, if we just involved some of these parties in our projects and kept to our core principles of how to build software, of how our licenses work, how our development processes work publicly, if we made them play by our rules—we may still end up in a much better place and move further faster. I think that's been the story of the last 20 years," Behlendorf concludes.
+
+* * *
+
+**Listen to the [original podcast audio][8] [MP3, 28:42 minutes]. Download below.**
+ +-------------------------------------------------------------------------------- + +via: https://opensource.com/article/19/9/podcast-interview-brian-behlendorf + +作者:[Gordon Haff][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensource.com/users/ghaffhttps://opensource.com/users/wonderchookhttps://opensource.com/users/sachinpbhttps://opensource.com/users/ron-mcfarland +[b]: https://github.com/lujun9972 +[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/data_container_block.png?itok=S8MbXEYw (Data container block with hexagons) +[2]: https://bitmason.blogspot.com/2019/08/hyperledgers-brian-behlendorf-on.html +[3]: https://www.apache.org/ +[4]: https://en.wikipedia.org/wiki/NCSA_HTTPd +[5]: https://en.wikipedia.org/wiki/Internet_Information_Services +[6]: https://en.wikipedia.org/wiki/Apache_HTTP_Server +[7]: https://www.hyperledger.org/ +[8]: https://grhpodcasts.s3.amazonaws.com/behlendorf_1908.mp3 From d5d55caf265e6ce0f149b3f66d7079a0bf4bdf4e Mon Sep 17 00:00:00 2001 From: DarkSun Date: Sat, 14 Sep 2019 01:10:17 +0800 Subject: [PATCH 036/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190913=20An=20i?= =?UTF-8?q?ntroduction=20to=20Virtual=20Machine=20Manager?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/tech/20190913 An introduction to Virtual Machine Manager.md --- ...introduction to Virtual Machine Manager.md | 102 ++++++++++++++++++ 1 file changed, 102 insertions(+) create mode 100644 sources/tech/20190913 An introduction to Virtual Machine Manager.md diff --git a/sources/tech/20190913 An introduction to Virtual Machine Manager.md b/sources/tech/20190913 An introduction to Virtual Machine Manager.md new file mode 100644 index 0000000000..9c2ae81643 --- /dev/null +++ b/sources/tech/20190913 An introduction to Virtual Machine Manager.md @@ -0,0 +1,102 @@ +[#]: collector: (lujun9972) +[#]: translator: ( ) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (An introduction to Virtual Machine Manager) +[#]: via: (https://opensource.com/article/19/9/introduction-virtual-machine-manager) +[#]: author: (Alan Formy-Duval https://opensource.com/users/alanfdosshttps://opensource.com/users/alanfdosshttps://opensource.com/users/bgamrathttps://opensource.com/users/marcobravo) + +An introduction to Virtual Machine Manager +====== +Virt-manager provides a full range of options for spinning up virtual +machines on Linux. +![A person programming][1] + +In my [series][2] about [GNOME Boxes][3], I explained how Linux users can quickly spin up virtual machines on their desktop without much fuss. Boxes is ideal for creating virtual machines in a pinch when a simple configuration is all you need. + +But if you need to configure more detail in your virtual machine, you need a tool that provides a full range of options for disks, network interface cards (NICs), and other hardware. This is where [Virtual Machine Manager][4] (virt-manager) comes in. If you don't see it in your applications menu, you can install it from your package manager or via the command line: + + * On Fedora: **sudo dnf install virt-manager** + * On Ubuntu: **sudo apt install virt-manager** + + + +Once it's installed, you can launch it from its application menu icon or from the command line by entering **virt-manager**. 
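+
+Note that virt-manager is a front end to the libvirt daemon, which does the actual work of managing virtual machines. If the application starts up with a connection error, make sure that daemon is running first; on most distributions the service is named libvirtd (adjust the name if your distribution differs):
+
+```
+sudo systemctl enable --now libvirtd
+```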
+
+![Virtual Machine Manager's main screen][5]
+
+To demonstrate how to create a virtual machine using virt-manager, I'll go through the steps to set one up for Red Hat Enterprise Linux 8.
+
+To start, click **File** then **New Virtual Machine**. Virt-manager's developers have thoughtfully titled each step of the process (e.g., Step 1 of 5) to make it easy. Click **Local install media** and **Forward**.
+
+![Step 1 virtual machine creation][6]
+
+On the next screen, browse to select the ISO file for the operating system you want to install. (My RHEL 8 image is located in my Downloads directory.) Virt-manager automatically detects the operating system.
+
+![Step 2 Choose the ISO File][7]
+
+In Step 3, you can specify the virtual machine's memory and CPU. The defaults are 1,024MB memory and one CPU.
+
+![Step 3 Set CPU and Memory][8]
+
+I want to give RHEL ample room to run—and the hardware I'm using can accommodate it—so I'll increase them (respectively) to 4,096MB and two CPUs.
+
+The next step configures storage for the virtual machine; the default setting is a 10GB disk image. (I'll keep this setting, but you can adjust it for your needs.) You can also choose an existing disk image or create one in a custom location.
+
+![Step 4 Configure VM Storage][9]
+
+Step 5 is the place to name your virtual machine and click Finish. This is equivalent to creating a virtual machine or a Box in GNOME Boxes. While it's technically the last step, you have several options (as you can see in the screenshot below). Since the advantage of virt-manager is the ability to customize a virtual machine, I'll check the box labeled **Customize configuration before install** before I click **Finish**.
+
+Since I chose to customize the configuration, virt-manager opens a screen displaying a bunch of devices and settings. This is the fun part!
+
+Here you have another chance to name the virtual machine. In the list on the left, you can view details on various aspects, such as CPU, memory, disks, controllers, and many other items. For example, I can click on **CPUs** to verify the change I made in Step 3.
+
+![Changing the CPU count][10]
+
+I can also confirm the amount of memory I set.
+
+When installing a VM to run as a server, I usually disable or remove its sound capability. To do so, select **Sound** and click **Remove** or right-click on **Sound** and choose **Remove Hardware**.
+
+You can also add hardware with the **Add Hardware** button at the bottom. This brings up the **Add New Virtual Hardware** screen where you can add additional storage devices, memory, sound, etc. It's like having access to a very well-stocked (if virtual) computer hardware warehouse.
+
+![The Add New Hardware screen][11]
+
+Once you are happy with your VM configuration, click **Begin Installation**, and the system will boot and begin installing your specified operating system from the ISO.
+
+![Begin installing the OS][12]
+
+Once it completes, it reboots, and your new VM is ready for use.
+
+![Red Hat Enterprise Linux 8 running in VMM][13]
+
+Virtual Machine Manager is a powerful tool for desktop Linux users. It is open source and an excellent alternative to proprietary and closed virtualization products.
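+
+Everything the wizard above does can also be scripted: the virt-install tool drives the same libvirt stack from the command line. As a rough sketch (the VM name and ISO path below are illustrative; the memory, CPU, and disk values mirror the walkthrough), the RHEL 8 machine could be created with:
+
+```
+virt-install \
+  --name rhel8 \
+  --memory 4096 \
+  --vcpus 2 \
+  --disk size=10 \
+  --cdrom ~/Downloads/rhel-8-x86_64-dvd.iso \
+  --os-variant rhel8.0
+```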
+ +-------------------------------------------------------------------------------- + +via: https://opensource.com/article/19/9/introduction-virtual-machine-manager + +作者:[Alan Formy-Duval][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensource.com/users/alanfdosshttps://opensource.com/users/alanfdosshttps://opensource.com/users/bgamrathttps://opensource.com/users/marcobravo +[b]: https://github.com/lujun9972 +[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/computer_keyboard_laptop_development_code_woman.png?itok=vbYz6jjb (A person programming) +[2]: https://opensource.com/sitewide-search?search_api_views_fulltext=GNOME%20Box +[3]: https://wiki.gnome.org/Apps/Boxes +[4]: https://virt-manager.org/ +[5]: https://opensource.com/sites/default/files/1-vmm_main_0.png (Virtual Machine Manager's main screen) +[6]: https://opensource.com/sites/default/files/2-vmm_step1_0.png (Step 1 virtual machine creation) +[7]: https://opensource.com/sites/default/files/3-vmm_step2.png (Step 2 Choose the ISO File) +[8]: https://opensource.com/sites/default/files/4-vmm_step3default.png (Step 3 Set CPU and Memory) +[9]: https://opensource.com/sites/default/files/6-vmm_step4.png (Step 4 Configure VM Storage) +[10]: https://opensource.com/sites/default/files/9-vmm_customizecpu.png (Changing the CPU count) +[11]: https://opensource.com/sites/default/files/11-vmm_addnewhardware.png (The Add New Hardware screen) +[12]: https://opensource.com/sites/default/files/12-vmm_rhelbegininstall.png +[13]: https://opensource.com/sites/default/files/13-vmm_rhelinstalled_0.png (Red Hat Enterprise Linux 8 running in VMM) From 231453714a65ca9cc16f2cbe86e4cba738c423dd Mon Sep 17 00:00:00 2001 From: DarkSun Date: Sat, 14 Sep 2019 01:11:33 +0800 Subject: [PATCH 037/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190912=20How=20?= =?UTF-8?q?to=20fix=20common=20pitfalls=20with=20the=20Python=20ORM=20tool?= =?UTF-8?q?=20SQLAlchemy?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/tech/20190912 How to fix common pitfalls with the Python ORM tool SQLAlchemy.md --- ...lls with the Python ORM tool SQLAlchemy.md | 208 ++++++++++++++++++ 1 file changed, 208 insertions(+) create mode 100644 sources/tech/20190912 How to fix common pitfalls with the Python ORM tool SQLAlchemy.md diff --git a/sources/tech/20190912 How to fix common pitfalls with the Python ORM tool SQLAlchemy.md b/sources/tech/20190912 How to fix common pitfalls with the Python ORM tool SQLAlchemy.md new file mode 100644 index 0000000000..b9bb3e51ec --- /dev/null +++ b/sources/tech/20190912 How to fix common pitfalls with the Python ORM tool SQLAlchemy.md @@ -0,0 +1,208 @@ +[#]: collector: (lujun9972) +[#]: translator: ( ) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (How to fix common pitfalls with the Python ORM tool SQLAlchemy) +[#]: via: (https://opensource.com/article/19/9/common-pitfalls-python) +[#]: author: (Zach Todd https://opensource.com/users/zchtoddhttps://opensource.com/users/lauren-pritchetthttps://opensource.com/users/liranhaimovitchhttps://opensource.com/users/moshez) + +How to fix common pitfalls with the Python ORM tool SQLAlchemy +====== +Seemingly small choices made when using SQLAlchemy can have important +implications on the object-relational mapping toolkit's performance. 
+![A python with a package.][1]
+
+Object-relational mapping ([ORM][2]) makes life easier for application developers, in no small part because it lets you interact with a database in a language you may know (such as Python) instead of raw SQL queries. [SQLAlchemy][3] is a Python ORM toolkit that provides access to SQL databases using Python. It is a mature ORM tool that adds the benefit of model relationships, a powerful query construction paradigm, easy serialization, and much more. Its ease of use, however, makes it easy to forget what is going on behind the scenes. Seemingly small choices made using SQLAlchemy can have important performance implications.
+
+This article explains some of the top performance issues developers encounter when using SQLAlchemy and how to fix them.
+
+### Retrieving an entire result set when you only need the count
+
+Sometimes a developer just needs a count of results, but instead of utilizing a database count, all the results are fetched and the count is done with **len** in Python.
+
+```
+count = len(User.query.filter_by(acct_active=True).all())
+```
+
+Using SQLAlchemy's **count** method instead will do the count on the server side, resulting in far less data sent to the client. Calling **all()** in the prior example also results in the instantiation of model objects, which can become expensive quickly, given enough rows.
+
+Unless more than the count is required, just use the **count** method.
+
+```
+count = User.query.filter_by(acct_active=True).count()
+```
+
+### Retrieving entire models when you only need a few columns
+
+In many cases, only a few columns are needed when issuing a query. Instead of returning entire model instances, SQLAlchemy can fetch only the columns you're interested in. This not only reduces the amount of data sent but also avoids the need to instantiate entire objects. Working with tuples of column data instead of models can be quite a bit faster.
+
+```
+result = User.query.all()
+for user in result:
+    print(user.name, user.email)
+```
+
+Instead, select only what is needed using the **with_entities** method.
+
+```
+result = User.query.with_entities(User.name, User.email).all()
+for (username, email) in result:
+    print(username, email)
+```
+
+### Updating one object at a time inside a loop
+
+Avoid using loops to update collections individually. While the database may execute a single update very quickly, the roundtrip time between the application and database servers will quickly add up. In general, strive for fewer queries where reasonable.
+
+```
+for user in users_to_update:
+    user.acct_active = True
+    db.session.add(user)
+```
+
+Use the bulk update method instead.
+
+```
+query = User.query.filter(User.id.in_([user.id for user in users_to_update]))
+query.update({"acct_active": True}, synchronize_session=False)
+```
+
+### Triggering cascading deletes
+
+ORM allows easy configuration of relationships on models, but there are some subtle behaviors that can be surprising. Most databases maintain relational integrity through foreign keys and various cascade options. SQLAlchemy allows you to define models with foreign keys and cascade options, but the ORM has its own cascade logic that can preempt the database.
+
+Consider the following models.
+
+

```
class Artist(Base):
    __tablename__ = "artist"

    id = Column(Integer, primary_key=True)
    songs = relationship("Song", cascade="all, delete")

class Song(Base):
    __tablename__ = "song"

    id = Column(Integer, primary_key=True)
    artist_id = Column(Integer, ForeignKey("artist.id", ondelete="CASCADE"))
```

Deleting artists will cause the ORM to issue **delete** queries on the Song table, thus preventing the deletes from being handled by the database's foreign key **ON DELETE CASCADE**. This behavior can become a bottleneck with complex relationships and a large number of records.

Include the **passive_deletes** option to ensure that the database is managing relationships. Be sure, however, that your database is capable of this. SQLite, for example, does not manage foreign keys by default.


```
songs = relationship("Song", cascade="all, delete", passive_deletes=True)
```

### Relying on lazy loading when eager loading should be used

Lazy loading is the default SQLAlchemy approach to relationships. Building from the last example, this implies that loading an artist does not simultaneously load his or her songs. This is usually a good idea, but the separate queries can be wasteful if certain relationships always need to be loaded.

Popular serialization frameworks like [Marshmallow][4] can trigger a cascade of queries if relationships are allowed to load in a lazy fashion.

There are a few ways to control this behavior. The simplest method is through the relationship function itself.


```
songs = relationship("Song", lazy="joined", cascade="all, delete")
```

This will cause a left join to be added to any query for artists, and as a result, the **songs** collection will be immediately available. Although more data is returned to the client, there are potentially far fewer roundtrips.

SQLAlchemy offers finer-grained control for situations where such a blanket approach can't be taken. The **joinedload()** function can be used to toggle joined loading on a per-query basis. Note that the query must be executed (here with **all()**) before the eagerly loaded relationship can be accessed.


```
from sqlalchemy.orm import joinedload

artists = Artist.query.options(joinedload(Artist.songs)).all()
for artist in artists:
    print(artist.songs)  # Does not incur a roundtrip to load
```

### Using the ORM for a bulk record import

The overhead of constructing full model instances becomes a major bottleneck when importing thousands of records. Imagine, for example, loading thousands of song records from a file where each song has first been converted to a dictionary.


```
for song in songs:
    db.session.add(Song(**song))
```

Instead, bypass the ORM and use just the parameter binding functionality of core SQLAlchemy.


```
batch = []
insert_stmt = Song.__table__.insert()
for song in songs:
    if len(batch) > 1000:
        db.session.execute(insert_stmt, batch)
        batch.clear()
    batch.append(song)
if batch:
    db.session.execute(insert_stmt, batch)
```

Keep in mind that this method naturally skips any client-side ORM logic you might depend on, such as Python-based column defaults. While this method is faster than loading objects as full model instances, your database may have bulk loading methods that are faster. PostgreSQL, for example, has the **COPY** command that offers perhaps the best performance for loading large numbers of records.

### Calling commit or flush prematurely

There are many occasions when you need to associate a child record to its parent, or vice versa.
One obvious way of doing this is to flush the session so that the record in question will be assigned an ID. + + +``` +artist = Artist(name="Bob Dylan") +song = Song(title="Mr. Tambourine Man") + +db.session.add(artist) +db.session.flush() + +song.artist_id = artist.id +``` + +Committing or flushing more than once per request is usually unnecessary and undesirable. A database flush involves forcing disk writes on the database server, and in most circumstances, the client will block until the server can acknowledge that the data has been written. + +SQLAlchemy can track relationships and manage keys behind the scenes. + + +``` +artist = Artist(name="Bob Dylan") +song = Song(title="Mr. Tambourine Man") + +artist.songs.append(song) +``` + +### Wrapping up + +I hope this list of common pitfalls can help you avoid these issues and keep your application running smoothly. As always, when diagnosing a performance problem, measurement is key. Most databases offer performance diagnostics that can help you pinpoint issues, such as the PostgreSQL **pg_stat_statements** module. + +* * * + +-------------------------------------------------------------------------------- + +via: https://opensource.com/article/19/9/common-pitfalls-python + +作者:[Zach Todd][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensource.com/users/zchtoddhttps://opensource.com/users/lauren-pritchetthttps://opensource.com/users/liranhaimovitchhttps://opensource.com/users/moshez +[b]: https://github.com/lujun9972 +[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/python_snake_file_box.jpg?itok=UuDVFLX- (A python with a package.) +[2]: https://en.wikipedia.org/wiki/Object-relational_mapping +[3]: https://www.sqlalchemy.org/ +[4]: https://marshmallow.readthedocs.io/en/stable/ From 985e85759fbe6e678112df4cca003ceeff9bd4ff Mon Sep 17 00:00:00 2001 From: DarkSun Date: Sat, 14 Sep 2019 01:11:54 +0800 Subject: [PATCH 038/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190912=203=20wa?= =?UTF-8?q?ys=20to=20handle=20transient=20faults=20for=20DevOps?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/tech/20190912 3 ways to handle transient faults for DevOps.md --- ...s to handle transient faults for DevOps.md | 147 ++++++++++++++++++ 1 file changed, 147 insertions(+) create mode 100644 sources/tech/20190912 3 ways to handle transient faults for DevOps.md diff --git a/sources/tech/20190912 3 ways to handle transient faults for DevOps.md b/sources/tech/20190912 3 ways to handle transient faults for DevOps.md new file mode 100644 index 0000000000..d2b112c656 --- /dev/null +++ b/sources/tech/20190912 3 ways to handle transient faults for DevOps.md @@ -0,0 +1,147 @@ +[#]: collector: (lujun9972) +[#]: translator: ( ) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (3 ways to handle transient faults for DevOps) +[#]: via: (https://opensource.com/article/19/9/transient-faults-devops) +[#]: author: (Willy-Peter Schaub https://opensource.com/users/wpschaubhttps://opensource.com/users/wpschaubhttps://opensource.com/users/wpschaub) + +3 ways to handle transient faults for DevOps +====== +DevOps is about delighting our stakeholders with continuous business +value, and how we manage transient faults is part of that. 
+
![Bright gears connecting][1]

In electrical engineering, a _transient fault_ is defined as an error condition that vanishes after the power is disconnected and restored. This is also a workaround many of us unconsciously use when we forcefully power our physical devices off and on when they're performing poorly or frozen on a blue crash screen filled with gibberish.

In cloud computing, we are faced with increased complexity, known unknowns, and, worse, unknown unknowns: infrastructure we will never touch, technology that's evolving at an exponential rate, and disparate solutions connecting an expanding digital world. Today's virtual users' tolerance for unresponsive, unreliable, and underperforming products is zero—everyone wants 24x7 uptime and solutions that evolve and blend into their lifestyle.

In this new virtual world, we cannot just walk up and reboot a machine, at least not without impacting hundreds, thousands, or even millions of users. And loyalty to brands and products is dwindling fast in today's competitive world; users would probably look for an alternative service with a single keystroke and never return rather than put up with any measurable amount of downtime.

Let's take a quick look at two events that were _humbling_ reminders that today's transient faults can occur in a heartbeat, are complex to identify and resolve, and have a profound impact on our stakeholders.

  * [**A Rough Patch**][2]: "_We've had some issues with the service over the past week and a half. I feel terrible about it, and I can't apologize enough. It's the biggest incident we've had since the instability created by our service refactoring,"_ wrote Microsoft corporate VP of cloud developer services Brian Harry on his blog. After weeks of sleepless nights, the root cause was identified as a storm of requests to the access control service (ACS) that exhausted Source Network Address Translation (SNAT) ports, prevented authentications, and impacted our stakeholders.
  * **[503 Error][3]:** _"Setting up monitoring from the beginning of the implementation of our Azure function confirms the importance of monitoring in the DevOps process,"_ reported Cellenza's Mikael Krief on the ALM DevOps Rangers blog. Again, we spent sleepless nights finding the root cause of why our refactored extension spawned a storm of connections and threads, imploded our Azure service, and frustrated our stakeholders with 503 Service Unavailable errors.



We can set up failure and disaster recovery for our cloud applications to help minimize, not eliminate, the impact of an outage caused by resource failures or natural disasters. However, for solutions that use remote resources or communicate with remote services, we need to add a pinch of sensitivity to transient faults. Well-designed solutions detect and attempt to self-correct transient faults before sounding an alarm—or worse, becoming unresponsive and failing.

There are a handful of transient fault handling patterns, including the three shown on the following whiteboard: **retry**, **throttling**, and **circuit breaker**.

![transient fault handling patterns][4]

### Retry pattern

The retry pattern is the simplest of the three transient fault handling patterns and something we do naturally in everyday life. It can be effective in solutions that communicate across distributed networks to handle transient faults caused by issues such as network latency, service overloads, and power outages.
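
The diagram and pseudocode below describe the pattern; as a concrete companion, here is a minimal Python sketch of the same idea. The `call_service` and `is_transient` callables are hypothetical stand-ins for your own service call and fault classifier, and the limits are illustrative, not prescriptive.


```
import random
import time

def call_with_retry(call_service, is_transient, retry_limit=5, base_delay=0.5):
    """Retry a flaky call, backing off between attempts."""
    delay = base_delay
    for attempt in range(1, retry_limit + 1):
        try:
            return call_service()
        except Exception as fault:
            # Give up on permanent faults or when the retry budget is spent
            if not is_transient(fault) or attempt == retry_limit:
                raise
            # Sleep with jitter so many clients don't retry in lockstep,
            # then grow the delay for the next attempt
            time.sleep(delay + random.uniform(0, delay))
            delay *= 2
```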
+
![Retry pattern][5]

**Pseudocode**

Set failure_count = 0
**Call** the [micro] service
If (fail) failure_count++
If (failure_count > retry_limit) or (not transient failure) FAIL
**Delay** (delay_time)
Increase delay_time by factor of failure_count
**Retry** step 2

The pattern ensures that a user's request eventually succeeds during less-than-ideal situations where transient failures would otherwise result in immediate and frequent failures. See open source implementations such as [java-design-patterns][6] and [transient-fault-handling-application-block][7] for details.

### Throttling pattern

We need to protect our services against clients that are overutilizing our solution or have gone rogue due to a system or logic failure. Like a four-lane tunnel servicing a six-lane freeway, we must manage the flow of requests (cars) and throttle endpoints (lanes) that are exceeding the maximum throughput (tunnel).

![Throttling pattern][8]

**Pseudocode**

Increment request_count
// Limit – Maximum requests within an interval
// Degrade – Fail with “slow down” error or pause operation
If (request_count > limit) **degrade** service
**Call** the [micro] service

The pattern helps us meet service level agreements, prevent a single user from overutilizing the system, optimize request flows, and deal with bursts of unexpected requests. One of the reasons we need to increase the delay between retries in the previous pattern is to ensure that we do not inadvertently exceed the throughput of the system and trigger degradation of service. See open source implementations such as [WebApiThrottle][9] and [Core.Throttling][10] for more details.

### Circuit breaker pattern

Like circuit breakers in your home, the circuit breaker pattern is your last defense. While the retry pattern helps to autocorrect brief transient faults, this pattern is more suited for transient faults that take a longer period to resolve. When dealing with network or service outages, such as the [Rough Patch][2] event, retrying a failing service operation could worsen the situation, lead to cascading failures, and eventually trigger a solution meltdown. The hypothesis of the circuit breaker pattern is that the failed service call is likely to succeed if (and only if) it is automatically retried after a significant delay.

Like when you stagger into your basement in the dark to find your circuit breaker cabinet, you are allowing the electrical system and potential static charges to recover before you flip the switch.

![Circuit breaker pattern][11]

**Pseudocode**

// Circuit breaker has not tripped
If (circuit_state == open)

**Call** the [micro] service
If (fail) fail_count++
If (fail_count > limit) circuit_state = **closed**

// Circuit breaker tripped
Else

If (circuit_state == closed) Start Timer

// Call back for timer event
On Timer timeout

**Call** the [micro] service
If (success) circuit_state = **open**

(Note that this pseudocode uses **open** for the normal, untripped state and **closed** for the tripped state, which is the reverse of the usual electrical convention.)

See open source implementations such as [Hystrix][12], [circuit-breaker][13], and [Polly][14] for more details.

### Don't fear faults

Remember to include unit and integration tests for all known faults and implemented handling patterns. Your unit tests must validate that your solution reacts appropriately when your fault-handling logic is triggered. On the other hand, your integration tests must simulate resilient faults to verify that your solution of collective services can deal with the fault effectively.
You can simulate services, transient faults, and degrading services by using service virtualization such as [Hoverfly][15]. Your stakeholders will not be amused if your solution and associated fault handling patterns fail to deliver the promise of self-healing and avoidance of catastrophic meltdowns. + +So, faults, like failures, are a [feature in blameless DevOps][16] and we should [not fear them][17]. To remain competitive, we must raise the quality bar of our infrastructures, solutions, and accountability to detect, remediate at the root-cause level, and self-correct to maintain an acceptable service level. + +For example, in the following illustration, microservice #7 has imploded, triggering circuit breakers and traffic throttling and allowing the system to recover while continuing to service the users. What is evident from this simple illustration is that the combination of faults and the difficulty of dealing with them can become complex at the flip of a switch of a feature flag. + +![Transient fault example][18] + +These and other patterns are powerful allies for one of the [core values of a healthy DevOps mindset][19] to "_improve beyond the limits of today's processes—strive to always innovate and improve beyond repeatable processes and frameworks._" They help us raise the quality bar and continuously deliver business value and delight our stakeholders. + +* * * + +_Special thanks to [Brent Reed][20] for his candid review and feedback that help us improve and share our insights._ + +-------------------------------------------------------------------------------- + +via: https://opensource.com/article/19/9/transient-faults-devops + +作者:[Willy-Peter Schaub][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensource.com/users/wpschaubhttps://opensource.com/users/wpschaubhttps://opensource.com/users/wpschaub +[b]: https://github.com/lujun9972 +[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/devop.png?itok=Yicb2nnZ (Bright gears connecting) +[2]: https://aka.ms/bh-ff-sos +[3]: https://aka.ms/vsar-ff-sos +[4]: https://opensource.com/sites/default/files/uploads/handlingtransientfaults_1.jpg (transient fault handling patterns) +[5]: https://opensource.com/sites/default/files/uploads/handlingtransientfaults_2.png (Retry pattern) +[6]: https://github.com/iluwatar/java-design-patterns +[7]: https://github.com/microsoftarchive/transient-fault-handling-application-block +[8]: https://opensource.com/sites/default/files/uploads/handlingtransientfaults_3.png (Throttling pattern) +[9]: https://github.com/stefanprodan/WebApiThrottle +[10]: https://github.com/SharePoint/PnP/tree/master/Samples/Core.Throttling +[11]: https://opensource.com/sites/default/files/uploads/handlingtransientfaults_4.png (Circuit breaker pattern) +[12]: https://github.com/Netflix/Hystrix/wiki +[13]: https://github.com/josephwilk/circuit-breaker +[14]: https://github.com/App-vNext/Polly +[15]: https://github.com/SpectoLabs/hoverfly +[16]: https://opensource.com/article/19/8/failure-feature-blameless-devops +[17]: https://opensource.com/article/19/8/why-fear-failure-silent-devops-virus +[18]: https://opensource.com/sites/default/files/uploads/handlingtransientfaults_5.png (Transient fault example) +[19]: https://agents-of-chaos.org/2019/05/12/five-essential-values-for-the-devops-mindset/ +[20]: 
https://opensource.com/users/brentaaronreed

From 9672344781e1c6259413ee91e18f5c8b5a298c5d Mon Sep 17 00:00:00 2001
From: DarkSun
Date: Sat, 14 Sep 2019 01:12:17 +0800
Subject: [PATCH 039/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190912=20An=20i?=
 =?UTF-8?q?ntroduction=20to=20Markdown?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

sources/tech/20190912 An introduction to Markdown.md
---
 .../20190912 An introduction to Markdown.md   | 166 ++++++++++++++++++
 1 file changed, 166 insertions(+)
 create mode 100644 sources/tech/20190912 An introduction to Markdown.md

diff --git a/sources/tech/20190912 An introduction to Markdown.md b/sources/tech/20190912 An introduction to Markdown.md
new file mode 100644
index 0000000000..df13f64f6d
--- /dev/null
+++ b/sources/tech/20190912 An introduction to Markdown.md
@@ -0,0 +1,166 @@
[#]: collector: (lujun9972)
[#]: translator: ( )
[#]: reviewer: ( )
[#]: publisher: ( )
[#]: url: ( )
[#]: subject: (An introduction to Markdown)
[#]: via: (https://opensource.com/article/19/9/introduction-markdown)
[#]: author: (Juan Islas https://opensource.com/users/xislashttps://opensource.com/users/mbbroberghttps://opensource.com/users/scottnesbitthttps://opensource.com/users/scottnesbitthttps://opensource.com/users/f%C3%A1bio-emilio-costahttps://opensource.com/users/don-watkinshttps://opensource.com/users/greg-phttps://opensource.com/users/marcobravohttps://opensource.com/users/alanfdosshttps://opensource.com/users/scottnesbitthttps://opensource.com/users/jamesf)

An introduction to Markdown
====== 
Write once and convert your text into multiple formats. Here's how to
get started with Markdown.
![Woman programming][1]

For a long time, I thought all the files I saw on GitLab and GitHub with an **.md** extension were written in a file type exclusively for developers. That changed a few weeks ago when I started using Markdown. It quickly became the most important tool in my daily work.

Markdown makes my life easier. I just need to add a few symbols to what I'm already writing and, with the help of a browser extension or an open source program, I can transform my text into a variety of commonly used formats such as ODT, email (more on that later), PDF, and EPUB.

### What is Markdown?

A friendly reminder from [Wikipedia][2]:

> Markdown is a lightweight markup language with plain text formatting syntax.

What this means to you is that by using just a few extra symbols in your text, Markdown helps you create a document with an explicit structure. When you take notes in plain text (in a notepad application, for example), there's nothing to indicate which text is meant to be bold or italic. In ordinary text, you might write a link as **http://example.com** one time, then as just **example.com**, and later **go to the website (example.com)**. There's no internal consistency.

But if you write the way Markdown prescribes, your text has internal consistency. Computers like consistency because it enables them to follow strict instructions without worrying about exceptions.

Trust me; once you learn to use Markdown, every writing task will be, in some way, easier and better than before. So let's learn it.

### Markdown basics

The following rules are the basics for writing in Markdown.

  1. Create a text file with an **.md** extension (for example, **example.md**). You can use any text editor (even a word processor like LibreOffice or Microsoft Word), as long as you remember to save it as a _text_ file.
+
+

![Names of Markdown files][3]

  2. Write whatever you want, just as you usually do:


```
Lorem ipsum

Consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.
Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur.
Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.

De Finibus Bonorum et Malorum

Sed ut perspiciatis unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt explicabo.
Nemo enim ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt.

  Neque porro quisquam est, qui dolorem ipsum quia dolor sit amet, consectetur, adipisci velit, sed quia non numquam eius modi tempora incidunt ut labore et dolore magnam aliquam quaerat voluptatem.
```

  3. Make sure to place an empty line between paragraphs. That might feel unnatural if you're used to writing business letters or traditional prose, where paragraphs have only one new line and maybe even an indentation before the first word. For Markdown, an empty line (some word processors mark this with **¶**, called a Pilcrow symbol) guarantees a new paragraph is created when you convert it to another format like HTML.

  4. Designate titles and subtitles. For the document's title, add a pound or hash (**#**) symbol and a space before the text (e.g., **# Lorem ipsum**). The first subtitle level uses two (**## De Finibus Bonorum et Malorum**), the next level gets three (**### Third Subtitle**), and so on. Note that there is a space between the pound sign and the first word.


```
# Lorem ipsum

Consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.
Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur.
Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.

## De Finibus Bonorum et Malorum

Sed ut perspiciatis unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt explicabo.
Nemo enim ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt.

  Neque porro quisquam est, qui dolorem ipsum quia dolor sit amet, consectetur, adipisci velit, sed quia non numquam eius modi tempora incidunt ut labore et dolore magnam aliquam quaerat voluptatem.
```

  5. If you want **bold** letters, just place the letters between two asterisks (stars) with no spaces: ****This will be in bold****.




![Bold text in Markdown][4]

  6. For _italics_, put the text between underscore symbols with no spaces: **_I want this text to be in italics_**.



![Italics text in Markdown][5]

  7. To insert a link (like [Markdown Tutorial][6]), put the text you want to link in brackets and the URL in parentheses with no spaces between them:
**[Markdown Tutorial](https://www.markdowntutorial.com/)**.
+ + + +![Hyperlinks in Markdown][7] + + 8. Blockquotes are written with a greater-than (**>**) symbol and a space before the text you want to quote: **> A famous quote**. + + + +![Blockquote text in Markdown][8] + +### Markdown tutorials and tip sheets + +These tips will get you started writing in Markdown, but it has a lot more functions than just bold and italics and links. The best way to learn Markdown is to use it, but I recommend investing 15 minutes stepping through the simple [Markdown Tutorial][6] to practice these rules and learn a couple more. + +Because modern Markdown is an amalgamation of many different interpretations of the idea of structured text, the [CommonMark][9] project defines a spec with a rigid set of rules to bring clarity to Markdown. It might be helpful to keep a [CommonMark-compliant cheatsheet][10] on hand when writing. + +### What you can do with Markdown + +Markdown lets you write anything you want—once—and transform it into almost any kind of format you want to use. The following examples show how to turn simple text written in MD into different formats. You don't need multiple formats of your text—you can start from a single source and then… rule the world! + + 1. **Simple note-taking:** You can write your notes in Markdown and, the moment you save them, the open source note application [Turtl][11] interprets your text file and shows you the formatted result. You can have your notes anywhere! + + + +![Turtl application][12] + + 2. **PDF files:** With the [Pandoc][13] application, you can convert your Markdown into a PDF with one simple command: **pandoc <file.md> -o <file.pdf>**. + + + +![Markdown text converted to PDF with Pandoc][14] + + 3. **Email:** You can also convert Markdown text into an HTML-formatted email by installing the browser extension [Markdown Here][15]. To use it, just select your Markdown text, use Markdown Here to translate it into HTML, and send your message using your favorite email client. + + + +![Markdown text converted to email with Markdown Here][16] + +### Start using it + +You don't need a special application to use Markdown—you just need a text editor and the tips above. It's compatible with how you already write; all you need to do is use it, so give it a try. 
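
For instance, assuming you have [Pandoc][13] installed, a complete round trip can be as small as saving the following text as **notes.md** (the file name is just an example):


```
# Meeting notes

Markdown is **easy** to learn.

> Write once, convert anywhere.
```

Then run **pandoc notes.md -o notes.pdf** to get a PDF, or change the output extension (**notes.odt**, **notes.epub**) to target another format.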
+ +-------------------------------------------------------------------------------- + +via: https://opensource.com/article/19/9/introduction-markdown + +作者:[Juan Islas][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensource.com/users/xislashttps://opensource.com/users/mbbroberghttps://opensource.com/users/scottnesbitthttps://opensource.com/users/scottnesbitthttps://opensource.com/users/f%C3%A1bio-emilio-costahttps://opensource.com/users/don-watkinshttps://opensource.com/users/greg-phttps://opensource.com/users/marcobravohttps://opensource.com/users/alanfdosshttps://opensource.com/users/scottnesbitthttps://opensource.com/users/jamesf +[b]: https://github.com/lujun9972 +[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/programming-code-keyboard-laptop-music-headphones.png?itok=EQZ2WKzy (Woman programming) +[2]: https://en.wikipedia.org/wiki/Markdown +[3]: https://opensource.com/sites/default/files/uploads/markdown_names_md-1.png (Names of Markdown files) +[4]: https://opensource.com/sites/default/files/uploads/markdown_bold.png (Bold text in Markdown) +[5]: https://opensource.com/sites/default/files/uploads/markdown_italic.png (Italics text in Markdown) +[6]: https://www.markdowntutorial.com/ +[7]: https://opensource.com/sites/default/files/uploads/markdown_link.png (Hyperlinks in Markdown) +[8]: https://opensource.com/sites/default/files/uploads/markdown_blockquote.png (Blockquote text in Markdown) +[9]: https://commonmark.org/help/ +[10]: https://opensource.com/downloads/cheat-sheet-markdown +[11]: https://turtlapp.com/ +[12]: https://opensource.com/sites/default/files/uploads/markdown_turtl_02.png (Turtl application) +[13]: https://opensource.com/article/19/5/convert-markdown-to-word-pandoc +[14]: https://opensource.com/sites/default/files/uploads/markdown_pdf.png (Markdown text converted to PDF with Pandoc) +[15]: https://markdown-here.com/ +[16]: https://opensource.com/sites/default/files/uploads/markdown_mail_02.png (Markdown text converted to email with Markdown Here) From ef317882e199305957f6f86581562ca1e6854fde Mon Sep 17 00:00:00 2001 From: DarkSun Date: Sat, 14 Sep 2019 01:12:42 +0800 Subject: [PATCH 040/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190911=2010=20A?= =?UTF-8?q?nsible=20modules=20you=20need=20to=20know?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/tech/20190911 10 Ansible modules you need to know.md --- ...911 10 Ansible modules you need to know.md | 381 ++++++++++++++++++ 1 file changed, 381 insertions(+) create mode 100644 sources/tech/20190911 10 Ansible modules you need to know.md diff --git a/sources/tech/20190911 10 Ansible modules you need to know.md b/sources/tech/20190911 10 Ansible modules you need to know.md new file mode 100644 index 0000000000..51b0078f86 --- /dev/null +++ b/sources/tech/20190911 10 Ansible modules you need to know.md @@ -0,0 +1,381 @@ +[#]: collector: (lujun9972) +[#]: translator: ( ) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (10 Ansible modules you need to know) +[#]: via: (https://opensource.com/article/19/9/must-know-ansible-modules) +[#]: author: (DirectedSoul 
https://opensource.com/users/directedsoulhttps://opensource.com/users/markphhttps://opensource.com/users/rich-butkevichttps://opensource.com/users/jairojuniorhttps://opensource.com/users/marcobravohttps://opensource.com/users/johnsimcall)

10 Ansible modules you need to know
====== 
See examples and learn the most important modules for automating
everyday tasks with Ansible.
![Text editor on a browser, in blue][1]

[Ansible][2] is an open source IT configuration management and automation platform. It uses human-readable YAML templates so users can program repetitive tasks to happen automatically without having to learn an advanced programming language.

Ansible is agentless, which means the nodes it manages do not require any software to be installed on them. This eliminates potential security vulnerabilities and makes overall management smoother.

Ansible [modules][3] are standalone scripts that can be used inside an Ansible playbook. A playbook consists of a play, and a play consists of tasks. These concepts may seem confusing if you're new to Ansible, but as you begin writing and working more with playbooks, they will become familiar.

There are some modules that are frequently used in automating everyday tasks; those are the ones that we will cover in this article.

Ansible has three main files that you need to consider:

  * **Host/inventory file:** Contains the entries for the nodes that need to be managed
  * **Ansible.cfg file:** Located by default at **/etc/ansible/ansible.cfg**, it has the necessary privilege escalation options and the location of the inventory file
  * **Main file:** A playbook that has modules that perform various tasks on a host listed in an inventory or host file



### Module 1: Package management

There is a module for most popular package managers, such as DNF and APT, to enable you to install any package on a system. Functionality depends entirely on the package manager, but usually these modules can install, upgrade, downgrade, remove, and list packages. The names of relevant modules are easy to guess. For example, the DNF module is [dnf_module][4], the old YUM module (required for Python 2 compatibility) is [yum_module][5], while the APT module is [apt_module][6], the Slackpkg module is [slackpkg_module][7], and so on.

#### Example 1:


```
- name: Install the latest version of Apache and MariaDB
  dnf:
    name:
      - httpd
      - mariadb-server
    state: latest
```

This installs the Apache web server and the MariaDB SQL database.

#### Example 2:


```
- name: Install a list of packages
  yum:
    name:
      - nginx
      - postgresql
      - postgresql-server
    state: present
```

This installs the whole list of packages in a single task.

### Module 2: Service

After installing a package, you need a module to start it. The [service module][8] enables you to start, stop, and reload installed services; this comes in pretty handy.

#### Example 1:


```
- name: Start service foo, based on running process /usr/bin/foo
  service:
    name: foo
    pattern: /usr/bin/foo
    state: started
```

This starts the service **foo**.

#### Example 2:


```
- name: Restart network service for interface eth0
  service:
    name: network
    state: restarted
    args: eth0
```

This restarts the network service of the interface **eth0**.

### Module 3: Copy

The [copy module][9] copies a file from the local or remote machine to a location on the remote machine.
+

#### Example 1:


```
- name: Copy a new "ntp.conf" file into place, backing up the original if it differs from the copied version
  copy:
    src: /mine/ntp.conf
    dest: /etc/ntp.conf
    owner: root
    group: root
    mode: '0644'
    backup: yes
```

#### Example 2:


```
- name: Copy file with owner and permission, using symbolic representation
  copy:
    src: /srv/myfiles/foo.conf
    dest: /etc/foo.conf
    owner: foo
    group: foo
    mode: u=rw,g=r,o=r
```

### Module 4: Debug

The [debug module][10] prints statements during execution and can be useful for debugging variables or expressions without having to halt the playbook.

#### Example 1:


```
- name: Display all variables/facts known for a host
  debug:
    var: hostvars[inventory_hostname]
    verbosity: 4
```

This displays all the variable information for a host that is defined in the inventory file.

#### Example 2:


```
- name: Write some content in a file /tmp/foo.txt
  copy:
    dest: /tmp/foo.txt
    content: |
      Good Morning!
      Awesome sunshine today.
  register: display_file_content

- name: Debug display_file_content
  debug:
    var: display_file_content
    verbosity: 2
```

This registers the content of the copy module output and displays it only when you specify verbosity as 2. For example:


```
ansible-playbook demo.yaml -vv
```

### Module 5: File

The [file module][11] manages the file and its properties.

  * It sets attributes of files, symlinks, or directories.
  * It also removes files, symlinks, or directories.



#### Example 1:


```
- name: Change file ownership, group and permissions
  file:
    path: /etc/foo.conf
    owner: foo
    group: foo
    mode: '0644'
```

This sets the owner and group of **foo.conf** to **foo** and its permissions to **0644**.

#### Example 2:


```
- name: Create a directory if it does not exist
  file:
    path: /etc/some_directory
    state: directory
    mode: '0755'
```

This creates a directory named **some_directory** and sets the permission to **0755**.

### Module 6: Lineinfile

The [lineinfile module][12] manages lines in a text file.

  * It ensures a particular line is in a file or replaces an existing line using a back-referenced regular expression.
  * It's primarily useful when you want to change just a single line in a file.



#### Example 1:


```
- name: Ensure SELinux is set to enforcing mode
  lineinfile:
    path: /etc/selinux/config
    regexp: '^SELINUX='
    line: SELINUX=enforcing
```

This sets the value of **SELINUX=enforcing**.

#### Example 2:


```
- name: Add a line to a file if the file does not exist, without passing regexp
  lineinfile:
    path: /etc/resolv.conf
    line: 192.168.1.99 foo.lab.net foo
    create: yes
```

This adds an entry for the IP and hostname in the **resolv.conf** file.

### Module 7: Git

The [git module][13] manages git checkouts of repositories to deploy files or software.

#### Example 1:


```
# Example Create git archive from repo
- git:
    repo:
    dest: /src/ansible-examples
    archive: /tmp/ansible-examples.zip
```

#### Example 2:


```
- git:
    repo:
    dest: /src/ansible-examples
    separate_git_dir: /src/ansible-examples.git
```

This clones a repo with a separate Git directory.
+

### Module 8: Cli_command

The [cli_command module][14], first available in Ansible 2.7, provides a platform-agnostic way of pushing text-based configurations to network devices over the **network_cli** connection plugin. The examples below use the closely related **cli_config** module, which applies the same platform-agnostic approach to declarative configuration changes.

#### Example 1:


```
- name: commit with comment
  cli_config:
    config: set system host-name foo
    commit_comment: this is a test
```

This sets the hostname for a switch and exits with a commit message.

#### Example 2:


```
- name: configurable backup path
  cli_config:
    config: "{{ lookup('template', 'basic/config.j2') }}"
    backup: yes
    backup_options:
      filename: backup.cfg
      dir_path: /home/user
```

This backs up a config to a different destination file.

### Module 9: Archive

The [archive module][15] creates a compressed archive of one or more files. By default, it assumes the compression source exists on the target.

#### Example 1:


```
- name: Compress directory /path/to/foo/ into /path/to/foo.tgz
  archive:
    path: /path/to/foo
    dest: /path/to/foo.tgz
```

#### Example 2:


```
- name: Create a bz2 archive of multiple files, rooted at /path
  archive:
    path:
      - /path/to/foo
      - /path/wong/foo
    dest: /path/file.tar.bz2
    format: bz2
```

### Module 10: Command

One of the most basic but useful modules, the [command module][16] takes the command name followed by a list of space-delimited arguments.

#### Example 1:


```
- name: return motd to registered var
  command: cat /etc/motd
  register: mymotd
```

#### Example 2:


```
- name: Change the working directory to somedir/ and run the command as db_owner if /path/to/database does not exist.
  command: /usr/bin/make_database.sh db_user db_name
  become: yes
  become_user: db_owner
  args:
    chdir: somedir/
    creates: /path/to/database
```

### Conclusion

There are tons of modules available in Ansible, but these ten are the most basic and powerful ones you can use for an automation job. As your requirements change, you can learn about other useful modules by entering **ansible-doc <module-name>** on the command line or refer to the [official documentation][17].
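
To see how these pieces fit the playbook structure described at the start (a playbook contains plays, and plays contain tasks), here is a minimal sketch of a complete playbook that combines the package and service modules. The **webservers** host group and the privilege escalation settings are assumptions you would adapt to your own inventory and **ansible.cfg**:


```
---
- name: Install and start a web server
  hosts: webservers        # assumed group from your inventory file
  become: yes              # assumes privilege escalation is configured
  tasks:
    - name: Install the latest version of Apache
      dnf:
        name: httpd
        state: latest

    - name: Ensure Apache is started
      service:
        name: httpd
        state: started
```

Saved as, say, **site.yaml**, it would run with **ansible-playbook site.yaml**.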
+ +-------------------------------------------------------------------------------- + +via: https://opensource.com/article/19/9/must-know-ansible-modules + +作者:[DirectedSoul][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensource.com/users/directedsoulhttps://opensource.com/users/markphttps://opensource.com/users/rich-butkevichttps://opensource.com/users/jairojuniorhttps://opensource.com/users/marcobravohttps://opensource.com/users/johnsimcall +[b]: https://github.com/lujun9972 +[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/browser_blue_text_editor_web.png?itok=lcf-m6N7 (Text editor on a browser, in blue) +[2]: https://www.ansible.com/ +[3]: https://docs.ansible.com/ansible/latest/user_guide/modules.html +[4]: https://docs.ansible.com/ansible/latest/modules/dnf_module.html +[5]: https://docs.ansible.com/ansible/latest/modules/yum_module.html +[6]: https://docs.ansible.com/ansible/latest/modules/apt_module.html +[7]: https://docs.ansible.com/ansible/latest/modules/slackpkg_module.html +[8]: https://docs.ansible.com/ansible/latest/modules/service_module.html +[9]: https://docs.ansible.com/ansible/latest/modules/copy_module.html +[10]: https://docs.ansible.com/ansible/latest/modules/debug_module.html +[11]: https://docs.ansible.com/ansible/latest/modules/file_module.html +[12]: https://docs.ansible.com/ansible/latest/modules/lineinfile_module.html +[13]: https://docs.ansible.com/ansible/latest/modules/git_module.html#git-module +[14]: https://docs.ansible.com/ansible/latest/modules/cli_command_module.html +[15]: https://docs.ansible.com/ansible/latest/modules/archive_module.html +[16]: https://docs.ansible.com/ansible/latest/modules/command_module.html +[17]: https://docs.ansible.com/ From 13c1987cd4f1e91af804c0bce81efcfa285875f8 Mon Sep 17 00:00:00 2001 From: DarkSun Date: Sat, 14 Sep 2019 01:13:06 +0800 Subject: [PATCH 041/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190911=20How=20?= =?UTF-8?q?Linux=20came=20to=20the=20mainframe?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/tech/20190911 How Linux came to the mainframe.md --- ...0190911 How Linux came to the mainframe.md | 67 +++++++++++++++++++ 1 file changed, 67 insertions(+) create mode 100644 sources/tech/20190911 How Linux came to the mainframe.md diff --git a/sources/tech/20190911 How Linux came to the mainframe.md b/sources/tech/20190911 How Linux came to the mainframe.md new file mode 100644 index 0000000000..c83cc7ae91 --- /dev/null +++ b/sources/tech/20190911 How Linux came to the mainframe.md @@ -0,0 +1,67 @@ +[#]: collector: (lujun9972) +[#]: translator: ( ) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (How Linux came to the mainframe) +[#]: via: (https://opensource.com/article/19/9/linux-mainframes-part-1) +[#]: author: (Elizabeth K. Joseph https://opensource.com/users/pleia2https://opensource.com/users/sethhttps://opensource.com/users/sethhttps://opensource.com/users/jhibbetshttps://opensource.com/users/jimmyolano) + +How Linux came to the mainframe +====== +Linux's emergence on the mainframe is indebted to the external developer +community working with IBM. 
+![Person typing on a 1980's computer][1] + +Despite my 15 years of experience in the Linux infrastructure space, if you had asked me a year ago what a mainframe was, I'd be hard-pressed to give a satisfying technical answer. I was surprised to learn that the entire time I'd been toiling away on x86 machines in various systems administration roles, Linux was running on the s390x architecture for mainframes. In fact, 2019 marks 20 years of IBM's involvement in Linux on the mainframe, with purely community efforts predating that by a year. + +### What is a mainframe? + +You can think of a mainframe as a big, enterprise-grade server. Everything from the custom s390x CPUs to memory and power, coupled with external storage arrays, is highly redundant and even built to survive earthquakes (check out the [z13 Earthquake Shake Test][2] on YouTube). Built-in hardware-based encryption allows for end-to-end encryption of your data without taking general processing power away from your workloads. Essentially, if you want your workloads and data to be safe and secure, it still makes sense to invest in a mainframe. + +The traditional mainframe operating system is z/OS, which has a long history and still gets regular releases to add functionality, security, and stability. The [Open Mainframe Project][3] hosts a collection of open source projects, largely built around z/OS, to further modernize the platform. This includes [Zowe][4], which brings modern API, shell, automation, and scripting tooling to the mainframe. Systems administrators who are looking at mainframes now have a modern, familiar toolset to work with. Add support for Linux in the mix? Suddenly you have an incredibly powerful addition to infrastructure, and it supports all the tools you're familiar with. + +### The origins of Linux on the mainframe + +To get a real picture of how Linux got to the mainframe, I'll take you back in time. The first IBM mainframe, the IBM System/360, was introduced in 1964. Before this, computers were purpose-built for specific clients and industries. The S/360 was built to be a multi-purpose computer, able to cover a variety of computing needs across industries. Fast-forward to 1972, and we have what's really a pivotal moment in modern computing: the introduction of VM/370. + +VM stands for the same thing it means today, virtual machine, and this was the first virtualization technology for the mainframe. Time-sharing on the mainframe had been an idea in whitepapers dating back to the 1950s, but VM/370 brought that to another level. If you're interested in why this was revolutionary and the technical history of the components and teams that made this happen, read Melinda Varian's fascinating article _[VM and the VM Community: Past, Present, and Future][5]_. + +As a long-time open source enthusiast, I found one of the most interesting things about the development of the VM was how community-driven it was. People from various universities and companies outside IBM were driving the efforts and giving their feedback to IBM to push development forward. While learning about the history, I identified key people inside IBM—I would call them the first developer advocates—who went out of their way to support external developers on the platform and advocate for their needs internally to drive change at IBM. + +Fast-forward to 1994, and we reach another milestone that helped pave the way for Linux on the mainframe: experimental TCP/IP support in VMs. 
Mainframes have supported a series of network-type interfaces; TCP/IP was just one, but for the purposes of porting Linux to the mainframe, it was a key one.

It was 1998 when a fellow named Linas Vepstas started the "Bigfoot" effort to see if he could get Linux running on a mainframe using a VM. I love this part of the history, partially because it goes back to the community driving development, but also because of his [_Why port Linux to the mainframe?_][6] page. His first four points really spoke to me as an infrastructure geek:

  * Stunt
  * To learn
  * Because it's there
  * Because it's knarly, duude!



Linas fleshes these points out—and reading them showed me immediately that I had found a kindred spirit. His list continues into more practical things: I/O, address spaces and access lists, VM, and the business model. But given how he orders his points, I think I know where his real motivations are.

But when did IBM join in and offer official support? I'll explain next week in part two of this history.

--------------------------------------------------------------------------------

via: https://opensource.com/article/19/9/linux-mainframes-part-1

作者:[Elizabeth K. Joseph][a]
选题:[lujun9972][b]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)

本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出

[a]: https://opensource.com/users/pleia2https://opensource.com/users/sethhttps://opensource.com/users/sethhttps://opensource.com/users/jhibbetshttps://opensource.com/users/jimmyolano
[b]: https://github.com/lujun9972
[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/1980s-computer-yearbook.png?itok=eGOYEKK- (Person typing on a 1980's computer)
[2]: https://www.youtube.com/watch?v=kmMn5Q_lnkk
[3]: https://www.openmainframeproject.org/
[4]: https://www.openmainframeproject.org/projects/zowe
[5]: http://www.leeandmelindavarian.com/Melinda/25paper.pdf
[6]: https://linas.org/linux/i370-why.html

From 844e566d837e5367deb5eb67aa37f50e15414606 Mon Sep 17 00:00:00 2001
From: DarkSun
Date: Sat, 14 Sep 2019 01:13:24 +0800
Subject: [PATCH 042/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190911=204=20op?=
 =?UTF-8?q?en=20source=20cloud=20security=20tools?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

sources/tech/20190911 4 open source cloud security tools.md
---
 ...0911 4 open source cloud security tools.md | 90 +++++++++++++++++++
 1 file changed, 90 insertions(+)
 create mode 100644 sources/tech/20190911 4 open source cloud security tools.md

diff --git a/sources/tech/20190911 4 open source cloud security tools.md b/sources/tech/20190911 4 open source cloud security tools.md
new file mode 100644
index 0000000000..5d14a725df
--- /dev/null
+++ b/sources/tech/20190911 4 open source cloud security tools.md
@@ -0,0 +1,90 @@
[#]: collector: (lujun9972)
[#]: translator: ( )
[#]: reviewer: ( )
[#]: publisher: ( )
[#]: url: ( )
[#]: subject: (4 open source cloud security tools)
[#]: via: (https://opensource.com/article/19/9/open-source-cloud-security)
[#]: author: (Alison NaylorAaron Rinehart https://opensource.com/users/asnaylorhttps://opensource.com/users/ansilvahttps://opensource.com/users/sethhttps://opensource.com/users/bretthunoldtcomhttps://opensource.com/users/aaronrineharthttps://opensource.com/users/marcobravo)

4 open
source cloud security tools +====== +Find and eliminate vulnerabilities in the data you store in AWS and +GitHub. +![Tools in a cloud][1] + +If your day-to-day as a developer, system administrator, full-stack engineer, or site reliability engineer involves Git pushes, commits, and pulls to and from GitHub and deployments to Amazon Web Services (AWS), security is a persistent concern. Fortunately, open source tools are available to help your team avoid common mistakes that could cost your organization thousands of dollars. + +This article describes four open source tools that can help improve your security practices when you're developing on GitHub and AWS. Also, in the spirit of open source, I've joined forces with three security experts—[Travis McPeak][2], senior cloud security engineer at Netflix; [Rich Monk][3], senior principal information security analyst at Red Hat; and [Alison Naylor][4], principal information security analyst at Red Hat—to contribute to this article. + +We've separated each tool by scenario, but they are not mutually exclusive. + +### 1\. Find sensitive data with Gitrob + +You need to find any potentially sensitive information present in your team's Git repos so you can remove it. It may make sense for you to use tools that are focused towards attacking an application or a system using a red/blue team model, in which an infosec team is divided in two: an attack team (a.k.a. a red team) and a defense team (a.k.a. a blue team). Having a red team to try to penetrate your systems and applications is lots better than waiting for an adversary to do so. Your red team might try using [Gitrob][5], a tool that can clone and crawl through your Git repositories looking for credentials and sensitive files. + +Even though tools like Gitrob could be used for harm, the idea here is for your infosec team to use it to find inadvertently disclosed sensitive data that belongs to your organization (such as AWS keypairs or other credentials that were committed by mistake). That way, you can get your repositories fixed and sensitive data expunged—hopefully before an adversary finds them. Remember to remove not only the affected files but [also their history][6]! + +### 2\. Avoid committing sensitive data with git-secrets + +While it's important to find and remove sensitive information in your Git repos, wouldn't it be better to avoid committing those secrets in the first place? Mistakes happen, but you can protect yourself from public embarrassment by using [git-secrets][7]. This tool allows you to set up hooks that scan your commits, commit messages, and merges looking for common patterns for secrets. Choose patterns that match the credentials your team uses, such as AWS access keys and secret keys. If it finds a match, your commit is rejected and a potential crisis averted. + +It's simple to set up git-secrets for your existing repos, and you can apply a global configuration to protect all future repositories you initialize or clone. You can also use git-secrets to scan your repos (and all previous revisions) to search for secrets before making them public. + +### 3\. Create temporary credentials with Key Conjurer + +It's great to have a little extra insurance to prevent inadvertently publishing stored secrets, but maybe we can do even better by not storing credentials at all. Keeping track of credentials generally—including who has access to them, where they are stored, and when they were last rotated—is a hassle. 
However, programmatically generating temporary credentials can avoid a lot of those issues altogether, neatly side-stepping the issue of storing secrets in Git repos. Enter [Key Conjurer][8], which was created to address this need. For more on why Riot Games created Key Conjurer and how they developed it, read _[Key conjurer: our policy of least privilege][9]_. + +### 4\. Apply least privilege automatically with Repokid + +Anyone who has taken a security 101 course knows that least privilege is the best practice for role-based access control configuration. Sadly, outside school, it becomes prohibitively difficult to apply least-privilege policies manually. An application's access requirements change over time, and developers are too busy to trim back their permissions manually. [Repokid][10] uses data that AWS provides about identity and access management (IAM) use to automatically right-size policies. Repokid helps even the largest organizations apply least privilege automatically in AWS. + +### Tools, not silver bullets + +These tools are by no means silver bullets, but they are just that: tools! So, make sure you work with the rest of your organization to understand the use cases and usage patterns for your cloud services before trying to implement any of these tools or other controls. + +Becoming familiar with the best practices documented by all your cloud and code repository services should be taken seriously as well. The following articles will help you do so. + +**For AWS:** + + * [Best practices for managing AWS access keys][11] + * [AWS security audit guidelines][12] + + + +**For GitHub:** + + * [Introducing new ways to keep your code secure][13] + * [GitHub Enterprise security best practices][14] + + + +Last but not least, reach out to your infosec team; they should be able to provide you with ideas, recommendations, and guidelines for your team's success. Always remember: security is everyone's responsibility, not just theirs. 
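
As a small, concrete starting point, wiring git-secrets into an existing repository looks roughly like the following sketch; the repository path is an assumption, and the subcommands are the ones the project documents:


```
cd ~/src/my-repo            # assumed path to an existing Git repository
git secrets --install       # install the hooks that check each commit
git secrets --register-aws  # add common AWS credential patterns
git secrets --scan          # scan the working tree for matches
```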
+ +-------------------------------------------------------------------------------- + +via: https://opensource.com/article/19/9/open-source-cloud-security + +作者:[Alison NaylorAaron Rinehart][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensource.com/users/asnaylorhttps://opensource.com/users/ansilvahttps://opensource.com/users/sethhttps://opensource.com/users/bretthunoldtcomhttps://opensource.com/users/aaronrineharthttps://opensource.com/users/marcobravo +[b]: https://github.com/lujun9972 +[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/cloud_tools_hardware.png?itok=PGjJenqT (Tools in a cloud) +[2]: https://twitter.com/travismcpeak?lang=en +[3]: https://github.com/rmonk +[4]: https://www.linkedin.com/in/alperkins/ +[5]: https://github.com/michenriksen/gitrob +[6]: https://help.github.com/en/articles/removing-sensitive-data-from-a-repository +[7]: https://github.com/awslabs/git-secrets +[8]: https://github.com/RiotGames/key-conjurer +[9]: https://technology.riotgames.com/news/key-conjurer-our-policy-least-privilege +[10]: https://github.com/Netflix/repokid +[11]: https://docs.aws.amazon.com/general/latest/gr/aws-access-keys-best-practices.html +[12]: https://docs.aws.amazon.com/general/latest/gr/aws-security-audit-guide.html +[13]: https://github.blog/2019-05-23-introducing-new-ways-to-keep-your-code-secure/ +[14]: https://github.blog/2015-10-09-github-enterprise-security-best-practices/ From a4cfb8387db2763661259b36bc3155214cbdc07b Mon Sep 17 00:00:00 2001 From: DarkSun Date: Sat, 14 Sep 2019 01:13:45 +0800 Subject: [PATCH 043/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190913=20How=20?= =?UTF-8?q?6G=20will=20work:=20Terahertz-to-fiber=20conversion?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/talk/20190913 How 6G will work- Terahertz-to-fiber conversion.md --- ...ill work- Terahertz-to-fiber conversion.md | 62 +++++++++++++++++++ 1 file changed, 62 insertions(+) create mode 100644 sources/talk/20190913 How 6G will work- Terahertz-to-fiber conversion.md diff --git a/sources/talk/20190913 How 6G will work- Terahertz-to-fiber conversion.md b/sources/talk/20190913 How 6G will work- Terahertz-to-fiber conversion.md new file mode 100644 index 0000000000..9dad34a936 --- /dev/null +++ b/sources/talk/20190913 How 6G will work- Terahertz-to-fiber conversion.md @@ -0,0 +1,62 @@ +[#]: collector: (lujun9972) +[#]: translator: ( ) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (How 6G will work: Terahertz-to-fiber conversion) +[#]: via: (https://www.networkworld.com/article/3438337/how-6g-will-work-terahertz-to-fiber-conversion.html) +[#]: author: (Patrick Nelson https://www.networkworld.com/author/Patrick-Nelson/) + +How 6G will work: Terahertz-to-fiber conversion +====== +For 6G wireless to become a reality, it must overcome a few technical hurdles, such as connecting terahertz spectrum to hard, optical transmission lines. Researchers at the Karlsruhe Institute of Technology say they have solved the problem. +Nelli Velichko / Getty Images + +Upcoming 6G wireless, superseding 5G and arriving possibly by 2030, is envisaged to function at hundreds of gigabits per second. Slowly, the technical advances needed are being made. 
+
+A hole in the tech development thus far has been at the interface between terahertz spectrum and hard, optical transmission lines. How does one connect terahertz (THz), which is basically through-the-air spectrum found between microwave and infrared, to the transmission lines that will be needed for longer-distance data hauls? The curvature of the Earth, for one thing, limits line of sight, so hard-wiring is necessary over longer distances. Short distances, too, can be impeded by environmental obstructions: blocking by objects, even rain or fog, becomes more apparent the higher in spectrum one goes, as wavelengths get shorter.
+
+Researchers at the Karlsruhe Institute of Technology (KIT) say they know how to make the fiber link. They say, in a [press release][1], that one must develop modulators that operate on plasmonic nanophotonics, which is nano-scale, light-trapping technology (in this case, made with silicon) that will “directly couple the receiver antenna to a glass fiber.” The radio becomes part of the cable, in other words. It will “enable terahertz connections with very high data rates. Several hundred gigabits per second are feasible,” the researchers say.
+
+**Read also: [6G will achieve terabits-per-second speeds][2] | Get regularly scheduled insights: [Sign up for Network World newsletters][3] **
+
+In tests, the research team demonstrated a terahertz link that was “seamlessly integrated” into fiber using a link at the terahertz receiver. They achieved a terahertz-to-optical transmission rate of 50 gigabits per second. For comparison, current over-air wireless data rates with LTE radio technology are often around 20 megabits per second (Mbps), nowhere near what the team produced. Verizon, now launching millimeter wave 5G in the U.S., [says typical speeds for its fixed 5G service will be around 300 Mbps][4].
+
+### Other 6G challenges
+
+The fiber-terahertz connection in 6G, though, isn’t the only area that must be addressed over the next few years. [Spatial multiplexing also needs to be mastered at terahertz to get the kinds of throughputs desired][5], experts say. Spatial multiplexing is where individual data signals are beamed out in separate streams. Every bit of the bandwidth thus gets used and reused continually, improving bandwidth efficiency.
+
+Efficiency gains will also need to be obtained with more advanced MIMO antennas. That’s where antennas take advantage of multipath: signals sent over more than one route.
+
+Penetration loss also needs to be addressed. That’s the drop in signal strength as a signal passes from open air into a building or structure. The loss increases at higher frequencies, such as terahertz; however, the amount of loss depends on the material being penetrated. Clear glass, for example, has less penetration loss overall than drywall. That means construction materials used for upcoming buildings could be reimagined with new materials science to take advantage of 6G data throughput.
+
+**[ [Take this mobile device management course from PluralSight and learn how to secure devices in your company without degrading the user experience.][6] ]**
+
+In March, [the FCC announced a new category of experimental spectrum licenses][7] for frequencies between 95 GHz and 3 THz. That’s so that telcos and scientists can work on the spectrum.
+
+We think “6G will emerge around 2030,” Ari Pouttu, a professor at the University of Oulu and a 5G system architect, [told me when I met him in Finland last year][2]. 
“It will eventually offer terabits per second,” along with millionth-of-a-second (microsecond) latency.
+
+Join the Network World communities on [Facebook][8] and [LinkedIn][9] to comment on topics that are top of mind.
+
+--------------------------------------------------------------------------------
+
+via: https://www.networkworld.com/article/3438337/how-6g-will-work-terahertz-to-fiber-conversion.html
+
+作者:[Patrick Nelson][a]
+选题:[lujun9972][b]
+译者:[译者ID](https://github.com/译者ID)
+校对:[校对者ID](https://github.com/校对者ID)
+
+本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
+
+[a]: https://www.networkworld.com/author/Patrick-Nelson/
+[b]: https://github.com/lujun9972
+[1]: http://www.kit.edu/kit/english/pi_2019_095_technologies-for-the-sixth-generation-cellular-network.php
+[2]: https://www.networkworld.com/article/3305359/6g-will-achieve-terabits-per-second-speeds.html
+[3]: https://www.networkworld.com/newsletters/signup.html
+[4]: https://www.verizonwireless.com/support/5g-home-faqs/
+[5]: https://www.networkworld.com/article/3285112/get-ready-for-upcoming-6g-wireless-too.html
+[6]: https://pluralsight.pxf.io/c/321564/424552/7490?u=https%3A%2F%2Fwww.pluralsight.com%2Fcourses%2Fmobile-device-management-big-picture
+[7]: https://www.fcc.gov/document/fcc-opens-spectrum-horizons-new-services-technologies
+[8]: https://www.facebook.com/NetworkWorld/
+[9]: https://www.linkedin.com/company/network-world

From c4e028637497d113aa2e791a5c74d26f4d836c9a Mon Sep 17 00:00:00 2001
From: DarkSun
Date: Sat, 14 Sep 2019 01:14:57 +0800
Subject: [PATCH 044/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190912=20A=20Vi?=
 =?UTF-8?q?rtual=20WAN:=20Moving=20closer=20to=20the=20enterprise?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

sources/talk/20190912 A Virtual WAN- Moving closer to the enterprise.md
---
 ...al WAN- Moving closer to the enterprise.md | 120 ++++++++++++++++++
 1 file changed, 120 insertions(+)
 create mode 100644 sources/talk/20190912 A Virtual WAN- Moving closer to the enterprise.md

diff --git a/sources/talk/20190912 A Virtual WAN- Moving closer to the enterprise.md b/sources/talk/20190912 A Virtual WAN- Moving closer to the enterprise.md
new file mode 100644
index 0000000000..b589bd5e2a
--- /dev/null
+++ b/sources/talk/20190912 A Virtual WAN- Moving closer to the enterprise.md
@@ -0,0 +1,120 @@
+[#]: collector: (lujun9972)
+[#]: translator: ( )
+[#]: reviewer: ( )
+[#]: publisher: ( )
+[#]: url: ( )
+[#]: subject: (A Virtual WAN: Moving closer to the enterprise)
+[#]: via: (https://www.networkworld.com/article/3438357/a-virtual-wan-moving-closer-to-the-enterprise.html)
+[#]: author: (Matt Conran https://www.networkworld.com/author/Matt-Conran/)
+
+A Virtual WAN: Moving closer to the enterprise
+======
+Virtual WAN will radically change the telecom environment, including how circuits are procured and who procures them.
+BlueBay2014 / Getty Images
+
+Microsoft has introduced a new virtual WAN as a competitive differentiator and is getting enough traction that AWS and Google may follow. At present, Microsoft is the only company to offer a virtual WAN of this kind. This made me curious to discover the highs and lows of this technology. So I sat down with [Sorell Slaymaker][1], Principal Consulting Analyst at TechVision Research, to discuss it. The following is a summary of our discussion.
+
+But before we proceed, let’s gain some understanding of cloud connectivity.
+
+Cloud connectivity has evolved over time. 
When the cloud was introduced about a decade ago, an enterprise would connect to what's known as a cloud service provider (CSP). However, over the last 10 years, many providers like Equinix have started to offer carrier-neutral colocations. Now, there is the opportunity to meet a variety of cloud companies in a carrier-neutral colocation. On the other hand, this model of cloud connectivity has certain limitations as well.
+
+[NEWSLETTERS: Get the latest tech news sent directly to your in-box][2]
+
+Undoubtedly, carrier-neutral colocation has brought many advantages. However, even with this enhanced and flexible connectivity model, the pain-point for the majority of cloud companies is still the network connectivity. The network presents a number of challenges. Firstly, it can slow you down, and secondly, from a security perspective, it makes you susceptible to vulnerabilities.
+
+### The connectivity transition
+
+We are now entering a new phase, taking cloud connectivity to the next level. Primarily, the cloud is moving closer to the enterprise, as opposed to the enterprise moving closer to the cloud. So, where will this approach lead us?
+
+Eventually, we will see the removal of the CSP, with the connectivity serviced directly into the enterprise. We have already witnessed this transition with AWS. AWS now allows companies to run AWS infrastructure in their own private data centers. This, ultimately, enables the organization to connect the various VPCs together.
+
+With this paradigm, the cloud meets the customer in the enterprise as opposed to the enterprise coming to the cloud, or to a carrier-neutral colocation. This will radically change the telecom environment, especially how circuits are procured and who procures them.
+
+### Cloud connectivity
+
+First, let’s recap some basics. The connectivity to any cloud can be done via the internet or a private direct connection. The internet is untrusted and public, so an IPsec tunnel is typically created across it. It may be cheap and quick to provision logical tunnels, but this comes with certain drawbacks around security, uptime, latency, packet loss and jitter.
+
+All such hindrances can degrade an application’s performance severely. This is critical for sensitive and hybrid applications requiring real-time back-end, on-premise communications. For direct connectivity, most cloud providers offer a more stable solution than relying solely on the internet.
+
+For example, AWS has a product called AWS Direct Connect, whereas Microsoft has a product known as Azure ExpressRoute. Both products have the same end goal: cloud and on-premise endpoint connectivity that is not over the internet.
+
+With Microsoft's Azure ExpressRoute, you get a private connection with a guaranteed service level agreement (SLA). It's like a natural extension to the on-premise data center that offers lower latency, higher throughput and better reliability than the internet has to offer. However, there are some drawbacks to this mechanism.
+
+Even though ExpressRoute provides a private connection, enforcing end-to-end QoS is quite challenging. How Microsoft labels a packet will differ from how the service providers label it on their standard MPLS links. The other challenge is the lack of efficient load balancing due to separate BGP domains. 
Since ExpressRoute is not delivered end-to-end, you are basically doing a cross-connect, or meeting the service provider at a point where there is already another BGP domain available.
+
+As a result, if you want to do equal-cost multipath (ECMP), complex failover, or dynamic routing around congestion, the configuration can get complicated. This surfaces the demand for a change. Microsoft is aware of this need and is therefore evolving ExpressRoute into what it calls the “Azure Virtual WAN.” A virtual WAN, as you might expect, provides massive scale through software-defined connectivity.
+
+### What does the virtual WAN offer?
+
+The Azure Virtual WAN brings together many Azure cloud connectivity services, such as site-to-site VPN and ExpressRoute, into a single operational interface. Connectivity can now leverage the Azure backbone to connect the branches and provide branch-to-VNet connectivity. We will learn more about the new connectivity options later. The virtual WAN is purposely designed to provide large-scale site-to-site connectivity. It is built to offer throughput, scalability and ease of use.
+
+Microsoft has virtual WAN locations that connect to the Microsoft virtual network. This network consists of 130 connections in 50 countries. This pushes the cloud connectivity model closer to the edge. Therefore, if you are using Azure or Office 365, connectivity is closer, compared to doing a global backhaul. Microsoft's primary objective is to be within 30 ms of the consumer.
+
+Microsoft also gives the option to do hop-on and hop-off, so that you can use the Microsoft global backbone as your WAN. In the case of remote offices in different locations, you only need to provide local circuits to the Microsoft edge point in that country. This eliminates the need for purchasing large WAN circuits.
+
+This allows you to have a global WAN where you don't have to pay for expensive WAN circuits between or within countries.
+
+Virtual WAN offers the following advantages:
+
+  * **Integrated connectivity solutions in the hub-and-spoke:** You can automate the site-to-site configuration and connectivity between the on-premises sites and an Azure hub.
+  * **Automated spoke setup and configuration:** You can connect your virtual networks and workloads to the Azure hub seamlessly.
+  * **Intuitive troubleshooting:** You can see the end-to-end flow within Azure, and then use this information to take action when required.
+
+### The partners and the virtual hub
+
+Typically, the virtual WAN lets you connect and configure the branch devices to communicate with Azure. This can be done in two ways: either manually, or by using the provider devices offered through a virtual WAN partner.
+
+The partner devices allow ease of use, simplification of connectivity and configuration management. The connectivity from the on-premise device is established to the virtual hub in an automated way. Fundamentally, a virtual hub is a Microsoft-managed virtual network.
+
+### The global transit network
+
+The virtual WAN helps in laying the foundation of a global transit network architecture by enabling ubiquitous, any-to-any connectivity between distributed VNets, sites, applications and users.
+
+In the WAN architecture, Azure regions serve as hubs, from which you can choose to connect your branches. Once the branches are connected, you can leverage the Azure backbone to establish, for example, branch-to-VNet and branch-to-branch connectivity.
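+
+To make the hub idea concrete, the sketch below shows roughly how a virtual WAN and a regional hub can be stood up with the Azure CLI. It assumes the virtual-wan CLI extension is available; the resource group, names, region and address prefix are hypothetical placeholders, and the exact syntax may differ across CLI versions, so treat it as an outline of the workflow rather than a recipe:
+
+```
+# Install Virtual WAN support for the Azure CLI (assumed prerequisite)
+$ az extension add --name virtual-wan
+
+# Create the virtual WAN itself (resource group, name and region are placeholders)
+$ az network vwan create --name demoVwan --resource-group demoRG --location westus
+
+# Create a regional hub inside that WAN; branches and VNets attach to this hub
+$ az network vhub create --name demoHub --resource-group demoRG \
+    --vwan demoVwan --address-prefix 10.0.1.0/24 --location westus
+```
+
+Once a hub like this exists, connecting a branch or a VNet to it is what lets the Azure backbone carry the transit traffic described above.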
+
+The virtual WAN supports the following global transit connectivity paths: branch to VNet, branch to branch, remote user to VNet, remote user to branch, and VNet to VNet by using VNet peering and ExpressRoute Global Reach.
+
+### The WAN architecture
+
+The architecture is based on a hub-and-spoke model where the Microsoft cloud-hosted network acts as the hub. This enables transitive connectivity between endpoints that may be distributed across different types of spokes.
+
+A spoke can be a VNet, a physical branch site, a remote user or the internet. The global transit network architecture enables any-to-any connectivity via a central network hub. Largely, this architecture eliminates or reduces the need for full-mesh or partial-mesh connectivity models that are complex to build and maintain.
+
+Routing control is easier to configure and maintain through the hub-and-spoke model than in mesh networks. The Microsoft any-to-any connectivity enables an enterprise with globally distributed users, branches, data centers, VNets and applications to connect to each other through the Microsoft transit hub. In essence, it is this transit hub that acts as the global system.
+
+Mainly, you can establish a virtual WAN by creating a single virtual WAN hub in the region that has the largest number of spokes, which can take the form of branches, VNets and users. Then you can connect spokes in other regions to that hub. An alternative design, if the spokes are geographically distributed, is to instantiate regional hubs and interconnect them. The hubs are all part of the same virtual WAN, but the best part is that they can be associated with different regional policies.
+
+### Virtual WAN SD-WAN capabilities
+
+Currently, SD-WAN is not fully integrated. Microsoft offers SD-WAN services from a couple of SD-WAN vendors, including Citrix, CloudGenix and 128 Technology, as part of the overall virtual WAN offering.
+
+Under this model, you could, for example, have a 1G connection to Azure and run the SD-WAN vendor software within the location. You can use that software to route to other Azure locations. Since you are using the SD-WAN, you get all the SD-WAN services on top of the Microsoft virtual WAN, i.e., routing around congestion and brownouts. This is contrary to typical protocols, which route only around blackouts.
+
+Citrix WAN optimization features allow you to tune and configure the WAN to have more control, as opposed to relying on the underlying network. Although Citrix is very strong with WAN optimization, its offering is IPsec-based. Therefore, its overall scaling is eventually limited by the scaling limitations of IPsec. On the other hand, the SD-WAN offering from 128 Technology is not IPsec tunnel-based and doesn't have that overhead. Hence, it can route traffic more intelligently in a one-to-many fashion versus point-to-point tunnels.
+
+Regarding in-built security on the WAN backbone, Microsoft provides the connectivity and lets you integrate the security solution of your choice. If you want security over and above that, you can service-chain a firewall vendor's solution or leverage the security built into an SD-WAN service. The majority of SD-WAN players have layer 1 to layer 4 built into them. They don't have the proxies or the Layer 5 and higher capabilities of the security solutions that you get from security companies, such as Palo Alto, but you always have the option to service-chain.
+
+**This article is published as part of the IDG Contributor Network. 
[Want to Join?][3]** + +Join the Network World communities on [Facebook][4] and [LinkedIn][5] to comment on topics that are top of mind. + +-------------------------------------------------------------------------------- + +via: https://www.networkworld.com/article/3438357/a-virtual-wan-moving-closer-to-the-enterprise.html + +作者:[Matt Conran][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://www.networkworld.com/author/Matt-Conran/ +[b]: https://github.com/lujun9972 +[1]: https://techvisionresearch.com/sorell-slaymaker/ +[2]: https://www.networkworld.com/newsletters/signup.html +[3]: https://www.networkworld.com/contributor-network/signup.html +[4]: https://www.facebook.com/NetworkWorld/ +[5]: https://www.linkedin.com/company/network-world From 333f4ff0c266c9c959be7d2d2d39d5edbc73fd58 Mon Sep 17 00:00:00 2001 From: DarkSun Date: Sat, 14 Sep 2019 01:16:40 +0800 Subject: [PATCH 045/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190911=20To=20s?= =?UTF-8?q?ecure=20industrial=20IoT,=20use=20segmentation=20instead=20of?= =?UTF-8?q?=20firewalls?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/talk/20190911 To secure industrial IoT, use segmentation instead of firewalls.md --- ..., use segmentation instead of firewalls.md | 66 +++++++++++++++++++ 1 file changed, 66 insertions(+) create mode 100644 sources/talk/20190911 To secure industrial IoT, use segmentation instead of firewalls.md diff --git a/sources/talk/20190911 To secure industrial IoT, use segmentation instead of firewalls.md b/sources/talk/20190911 To secure industrial IoT, use segmentation instead of firewalls.md new file mode 100644 index 0000000000..9d5e300b47 --- /dev/null +++ b/sources/talk/20190911 To secure industrial IoT, use segmentation instead of firewalls.md @@ -0,0 +1,66 @@ +[#]: collector: (lujun9972) +[#]: translator: ( ) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (To secure industrial IoT, use segmentation instead of firewalls) +[#]: via: (https://www.networkworld.com/article/3437956/to-secure-industrial-iot-use-segmentation-instead-of-firewalls.html) +[#]: author: (Zeus Kerravala https://www.networkworld.com/author/Zeus-Kerravala/) + +To secure industrial IoT, use segmentation instead of firewalls +====== +Firewalls have been the de facto standard for securing internal devices for years, but the industrial internet of things (IIoT) will change that. +Jiraroj Praditcharoenkul / Getty Images + +The internet of things (IoT) has been top of mind for network and security professionals for the better part of the past five years. This has been particularly true for the area of industrial IoT (IIoT). Connected industrial devices are nothing new, but most IT people aren’t familiar with them because they have been managed by operational technology (OT) teams. More and more, though, business leaders want to bring OT and IT together to drive better insights from the combined data set. + +While there are many advantages to merging IT and OT and having IIoT fall under IT ownership, it has a profound impact on the cybersecurity team because it introduces several new security threats. Each connected endpoint, if breached, creates a backdoor into the other systems. 
+
+**[ Also read: [A corporate guide to addressing IoT security][1] ]**
+
+### Internal firewalls: an expensive, complex option for IIoT
+
+One way to protect IIoT environments is to use internal firewalls. This may seem like an obvious choice because internal firewalls have become the de facto standard for securing almost anything. However, in an IIoT environment, firewalls are perhaps the worst choice because of cost and complexity.
+
+Historically, internal firewalls were deployed where traffic moved in a “north-south” direction and would pass through a single ingress/egress point, such as a core switch. Also, the devices connected were all known and managed by IT. With IIoT, connections can be much more dynamic, and traffic can flow between devices in an “east-west” pattern, bypassing where the firewalls are located. That means security teams would need to deploy an internal firewall at every possible IIoT connection point and then manage the policies and configurations across hundreds, possibly thousands, of firewalls, creating an almost unmanageable situation.
+
+To get a better understanding of the magnitude of this problem, I talked with Jeff Hussey, president and CEO of Tempered Networks, which specializes in IIoT security solutions, and he told me about one of the company’s customers that explored using internal firewalls. After doing an extensive evaluation of where all the internal firewalls would need to go, the business estimated that the total cost of the firewalls would be about $100 million. Even if a business could afford that, there’s another layer of challenges associated with the operational side.
+
+Hussey then told me about a healthcare customer that’s trying to use a combination of firewall rules, ACLs, VLANs, and VPNs to secure its environment, but, as he put it, “the complexity was killing them” and made it impossible to get anything done because of the operational overhead.
+
+I also spoke with Derek Harp, founder and chairman of the [Control System Cyber Security Association International][2] (CS2AI), who does a lot of work in the IIoT area. He described current IIoT environments as getting “more porous” as networks continue to evolve and become more open, with third parties needing access to data from internal systems. Toss in the advanced skill level of threat actors, and it’s easy to see why this isn’t a fight that cybersecurity teams can win with traditional network security.
+
+**[ [Prepare to become a Certified Information Security Systems Professional with this comprehensive online course from PluralSight. Now offering a 10-day free trial!][3] ]**
+
+### Micro-segmentation preferred over internal firewalls for IIoT
+
+Instead of using internal firewalls, security professionals should turn towards IIoT micro-segmentation. Segmentation is similar to the use of VLANs and ACLs, but the separation is done at the device level and managed with rules instead of at the network layer. With VLANs and ACLs, all devices, including IIoT endpoints, would need to be assigned to a VLAN. If an endpoint moves, the network then needs to be reconfigured to accommodate it. If it’s not, the device either can’t connect or winds up on the same network as devices where bad things could happen if it were breached.
+
+The Target breach of a few years ago is an excellent example of this, where the retailer's HVAC system was compromised, and that created a back door into the point-of-sale (PoS) system. 
Traditional security works great in highly static environments, but IIoT can be highly dynamic with devices routinely joining and leaving the network. + +### Segmentation operates at the device layer + +The benefit of segmentation is that it’s done in software and operates at the device connectivity layer, so policies follow the endpoints. For example, a rule could be created where all medical devices are in a particular segment and isolated from the rest of the connected nodes. If a medical device moves, the policy goes with it and there’s no need to reconfigure things. If Target had been using IIoT micro-segmentation and the HVAC and PoS systems were in separate segments (which they should be from a best practice standpoint), the worst that could have happened is the stores got too warm. + +Micro-segmentation has been used in data centers to secure lateral traffic that flows between virtual machines and containers. Cybersecurity teams should now look to extend the technology out to the broader network, with the first use case being to secure IIoT endpoints. This will let businesses move forward with digital transformation plans without putting their companies at risk. + +Join the Network World communities on [Facebook][4] and [LinkedIn][5] to comment on topics that are top of mind. + +-------------------------------------------------------------------------------- + +via: https://www.networkworld.com/article/3437956/to-secure-industrial-iot-use-segmentation-instead-of-firewalls.html + +作者:[Zeus Kerravala][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://www.networkworld.com/author/Zeus-Kerravala/ +[b]: https://github.com/lujun9972 +[1]: https://www.networkworld.com/article/3269165/internet-of-things/a-corporate-guide-to-addressing-iot-security-concerns.html +[2]: https://www.cs2ai.org/ +[3]: https://pluralsight.pxf.io/c/321564/424552/7490?u=https%3A%2F%2Fwww.pluralsight.com%2Fpaths%2Fcertified-information-systems-security-professional-cisspr +[4]: https://www.facebook.com/NetworkWorld/ +[5]: https://www.linkedin.com/company/network-world From 88f2c9320710470650ba483b3cf59e17020360a7 Mon Sep 17 00:00:00 2001 From: DarkSun Date: Sat, 14 Sep 2019 01:16:56 +0800 Subject: [PATCH 046/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190911=20Can=20?= =?UTF-8?q?AMD=20convert=20its=20growing=20GPU=20presence=20into=20a=20dat?= =?UTF-8?q?a=20center=20play=3F?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/talk/20190911 Can AMD convert its growing GPU presence into a data center play.md --- ...ng GPU presence into a data center play.md | 64 +++++++++++++++++++ 1 file changed, 64 insertions(+) create mode 100644 sources/talk/20190911 Can AMD convert its growing GPU presence into a data center play.md diff --git a/sources/talk/20190911 Can AMD convert its growing GPU presence into a data center play.md b/sources/talk/20190911 Can AMD convert its growing GPU presence into a data center play.md new file mode 100644 index 0000000000..2f38c9b2a0 --- /dev/null +++ b/sources/talk/20190911 Can AMD convert its growing GPU presence into a data center play.md @@ -0,0 +1,64 @@ +[#]: collector: (lujun9972) +[#]: translator: ( ) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (Can AMD convert its growing GPU presence into a data center play?) 
+[#]: via: (https://www.networkworld.com/article/3438098/can-amd-convert-its-growing-gpu-presence-into-a-data-center-play.html)
+[#]: author: (Andy Patrizio https://www.networkworld.com/author/Andy-Patrizio/)
+
+Can AMD convert its growing GPU presence into a data center play?
+======
+AMD has scored high-performance computing deals recently, but to truly compete with Nvidia it needs to develop an alternative to Nvidia’s CUDA language.
+AMD
+
+AMD's $5.4 billion purchase of ATI Technologies in 2006 seemed like an odd match. Not only were the companies in separate markets, but they were on separate coasts, with ATI in the Toronto, Canada, region and AMD in Sunnyvale, California.
+
+They made it work, and arguably it saved AMD from extinction because it was the graphics business that kept the company afloat while the Athlon/Opteron business was going nowhere. There were many quarters where graphics brought in more revenue than CPUs and likely saved the company from bankruptcy.
+
+But those days are over, and AMD is once again a highly competitive CPU company, and quarterly sales are getting very close to the $2 billion mark. While the CPU business is on fire, the GPU business continues to do well.
+
+**Also read: [AI boosts data-center availability and efficiency][1]**
+
+For the second quarter of 2019, AMD's GPU shipments increased 9.8% vs. Q1, while Nvidia's were flat and Intel's shipments decreased 1.4%, according to Jon Peddie Research. An increase over the first quarter is a very good showing because Q2 typically drops from Q1.
+
+AMD and Nvidia don't break out market segments, nor do they say what percentage comes from enterprise/HPC/supercomputing sales. The challenge for AMD, then, is to translate its gaming popularity into enterprise sales.
+
+### Competing in the high-performance computing space
+
+In high-performance computing (HPC), which includes artificial intelligence (AI), Nvidia clearly dominates. AMD has no answer for Nvidia's RTX 2070/2080 or the Tesla T4, but that hasn't stopped AMD from racking up the occasional win. The Oak Ridge National Lab plans to build an exascale supercomputer called Frontier in 2021 using AMD Epyc processors and Radeon GPUs.
+
+AMD CEO Lisa Su talked about it at the recent Hot Chips semiconductor conference, where she said Frontier would feature "highly optimized CPU, highly optimized GPU, highly optimized coherent interconnect between CPU and GPU, [and] working together with Cray on the node to node latency characteristics really enables us to put together a leadership system.”
+
+AMD has also scored a deal with Google to power its cloud-based Stadia game console, providing 10.7 teraflops, more than the Microsoft and Sony consoles combined. And AMD has had a deal with China's Baidu to provide GPU-based computing for two years.
+
+The problem, according to Peddie, isn't so much the hardware as it is the software. Nvidia has a special language called CUDA, first developed at Stanford by Ian Buck, who is now head of Nvidia's AI efforts. It allows developers to write apps that fully utilize the GPU with a familiar C++ syntax. Nvidia then went to hundreds of universities and set them up to teach CUDA to students.
+
+"The net result is universities around the world are cranking out thousands of grads who know CUDA, and AMD has no equivalent," said Peddie.
+
+The result is it's much harder to code for a Radeon than a Tesla/Volta. 
AMD supports the open-standard OpenCL library and the open-source project [HIP][2], which converts CUDA to portable C++ code.
+
+The OpenCL standard was developed by Apple but is now maintained by the [Khronos Group][3], and if there is one way for a standard to go nowhere, it's to put it in the hands of a standards group. Look what it did for OpenGL. It had the lead decades ago, then Microsoft came out with DirectX and obliterated OpenGL. The unfortunate fact is standards always fare better when there is a company behind them with something to gain.
+
+For AMD to gain ground in the data center and HPC/AI against Nvidia, it needs a competitor to CUDA. Up until two years ago, that simply wasn't possible because AMD was fighting for its life. But now, with hot new silicon, the time is right for the company to push into software and give Nvidia the same fits it is giving Intel.
+
+Join the Network World communities on [Facebook][4] and [LinkedIn][5] to comment on topics that are top of mind.
+
+--------------------------------------------------------------------------------
+
+via: https://www.networkworld.com/article/3438098/can-amd-convert-its-growing-gpu-presence-into-a-data-center-play.html
+
+作者:[Andy Patrizio][a]
+选题:[lujun9972][b]
+译者:[译者ID](https://github.com/译者ID)
+校对:[校对者ID](https://github.com/校对者ID)
+
+本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出

[a]: https://www.networkworld.com/author/Andy-Patrizio/
[b]: https://github.com/lujun9972
[1]: https://www.networkworld.com/article/3274654/ai-boosts-data-center-availability-efficiency.html
[2]: https://github.com/ROCm-Developer-Tools/HIP
[3]: https://www.khronos.org/opencl/
[4]: https://www.facebook.com/NetworkWorld/
[5]: https://www.linkedin.com/company/network-world

From e14022bc431ebfa4aa50655d77d0c6672399cde1 Mon Sep 17 00:00:00 2001
From: DarkSun
Date: Sat, 14 Sep 2019 01:17:19 +0800
Subject: [PATCH 047/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190909=20How=20?=
 =?UTF-8?q?to=20use=20Terminator=20on=20Linux=20to=20run=20multiple=20term?=
 =?UTF-8?q?inals=20in=20one=20window?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

sources/tech/20190909 How to use Terminator on Linux to run multiple terminals in one window.md
---
 ...to run multiple terminals in one window.md | 118 ++++++++++++++++++
 1 file changed, 118 insertions(+)
 create mode 100644 sources/tech/20190909 How to use Terminator on Linux to run multiple terminals in one window.md

diff --git a/sources/tech/20190909 How to use Terminator on Linux to run multiple terminals in one window.md b/sources/tech/20190909 How to use Terminator on Linux to run multiple terminals in one window.md
new file mode 100644
index 0000000000..6ee0820fdf
--- /dev/null
+++ b/sources/tech/20190909 How to use Terminator on Linux to run multiple terminals in one window.md
@@ -0,0 +1,118 @@
+[#]: collector: (lujun9972)
+[#]: translator: ( )
+[#]: reviewer: ( )
+[#]: publisher: ( )
+[#]: url: ( )
+[#]: subject: (How to use Terminator on Linux to run multiple terminals in one window)
+[#]: via: (https://www.networkworld.com/article/3436784/how-to-use-terminator-on-linux-to-run-multiple-terminals-in-one-window.html)
+[#]: author: (Sandra Henry-Stocker https://www.networkworld.com/author/Sandra-Henry_Stocker/)
+
+How to use Terminator on Linux to run multiple terminals in one window
+======
+Providing an option for multiple GNOME terminals within a single window frame, Terminator lets you flexibly align your workspace 
to suit your needs.
+Sandra Henry-Stocker
+
+If you’ve ever wished that you could line up multiple terminal windows and organize them in a single window frame, we may have some good news for you. The Linux **Terminator** can do this for you. No problemo!
+
+### Splitting windows
+
+Terminator will initially open as a single terminal window. Once you right-click within that window, however, it will bring up an options menu that gives you the flexibility to make changes. You can choose “**split horizontally**” or “**split vertically**” to split the window you are currently positioned in into two smaller windows. In fact, with these menu choices, complete with tiny illustrations of the resultant split (resembling **=** and **||**), you can split windows repeatedly if you like. Of course, if you split the overall window into more than six or nine sections, you might just find that they're too small to be used effectively.
+
+**[ Two-Minute Linux Tips: [Learn how to master a host of Linux commands in these 2-minute video tutorials][1] ]**
+
+Using ASCII art to illustrate the process of splitting windows, you might see something like this:
+
+```
++-------------------+     +-------------------+     +-------------------+
+|                   |     |                   |     |                   |
+|                   |     |                   |     |                   |
+|                   | ==> |-------------------| ==> |-------------------|
+|                   |     |                   |     |         |         |
+|                   |     |                   |     |         |         |
++-------------------+     +-------------------+     +-------------------+
+  Original terminal        Split horizontally        Split vertically
+```
+
+Another option for splitting windows is to use control sequences like **Ctrl+Shift+e** to split a window vertically and **Ctrl+Shift+o** (“o” as in “open”) to split the screen horizontally.
+
+Once Terminator has split into smaller windows for you, you can click in any window to use it and move from window to window as your work dictates.
+
+### Maximizing a window
+
+If you want to ignore all but one of your windows for a while and focus on just one, you can click in that window and select the "**Maximize**" option from the menu. That window will then grow to claim all of the space. Click again and select "**Restore all terminals**" to return to the multi-window display. **Ctrl+Shift+x** will toggle between the normal and maximized settings.
+
+The window size indicators (e.g., 80x15) on window labels display the number of characters per line and the number of lines that each window provides.
+
+### Closing windows
+
+To close any window, bring up the Terminator menu and select **Close**. Other windows will adjust themselves to take up the space until you close the last remaining window.
+
+### Saving your customized setup(s)
+
+Saving your customized Terminator setup as your default once you've split your overall window into multiple segments is quite easy. Select **Preferences** from the pop-up menu and then **Layouts** from the tab along the top of the window that opens. You should then see **New Layout** listed. Just click on the **Save** option at the bottom and **Close** on the bottom right. Terminator will save your settings in **~/.config/terminator/config** and will then use this file every time you run it.
+
+You can also enlarge your overall window by stretching it with your mouse. Again, if you want to retain the changes, select **Preferences** from the menu, **Layouts**, and then **Save** and **Close** again.
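+
+If you're curious about what actually gets saved, you can peek at the file itself. Below is a trimmed, illustrative sketch of the config file's nested layout format; the exact sections and values will vary with your Terminator version and how you've split and named your layouts, so treat it as an example rather than a reference:
+
+```
+$ cat ~/.config/terminator/config
+[global_config]
+[keybindings]
+[profiles]
+  [[default]]
+[layouts]
+  [[default]]
+    [[[window0]]]
+      type = Window
+    [[[child1]]]
+      type = Terminal
+      parent = window0
+[plugins]
+```
+
+Each terminal and split you create shows up as another nested entry under the layout, and it's this file that the script in the next section swaps in and out.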
+
+### Choosing between saved configurations
+
+If you like, you can set up multiple options for your Terminator window arrangements by maintaining a number of config files, renaming each afterwards (e.g., config-1, config-2) and then moving your choice into place as **~/.config/terminator/config** when you want to use that layout. Here's an example script for doing something like this. It lets you choose between three pre-configured window arrangements:
+
+```
+#!/bin/bash
+
+PS3='Terminator options: '
+options=("Split 1" "Split 2" "Split 3" "Quit")
+select opt in "${options[@]}"
+do
+    case $opt in
+        "Split 1")
+            config=config-1
+            break
+            ;;
+        "Split 2")
+            config=config-2
+            break
+            ;;
+        "Split 3")
+            config=config-3
+            break
+            ;;
+        *)
+            exit
+            ;;
+    esac
+done
+
+cd ~/.config/terminator
+cp config config-backup    # keep a backup of the config being replaced
+cp $config config          # move the selected layout into place
+cd
+terminator &
+```
+
+You could give the options more meaningful names than "config-1" if that helps.
+
+### Wrap-up
+
+Terminator is a good choice for setting up multiple windows to work on related tasks. If you've never used it, you'll probably need to install it first with a command such as "sudo apt install terminator" or "sudo yum install -y terminator".
+
+Hopefully, you will enjoy using Terminator. And, as another character of the same name might say, "I'll be back!"
+
+Join the Network World communities on [Facebook][2] and [LinkedIn][3] to comment on topics that are top of mind.
+
+--------------------------------------------------------------------------------
+
+via: https://www.networkworld.com/article/3436784/how-to-use-terminator-on-linux-to-run-multiple-terminals-in-one-window.html
+
+作者:[Sandra Henry-Stocker][a]
+选题:[lujun9972][b]
+译者:[译者ID](https://github.com/译者ID)
+校对:[校对者ID](https://github.com/校对者ID)
+
+本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出

[a]: https://www.networkworld.com/author/Sandra-Henry_Stocker/
[b]: https://github.com/lujun9972
[1]: https://www.youtube.com/playlist?list=PL7D2RMSmRO9J8OTpjFECi8DJiTQdd4hua
[2]: https://www.facebook.com/NetworkWorld/
[3]: https://www.linkedin.com/company/network-world

From 20d421a2f8642e19f3243b26feb527ad8ef52a5c Mon Sep 17 00:00:00 2001
From: "Xingyu.Wang"
Date: Sat, 14 Sep 2019 16:32:39 +0800
Subject: [PATCH 049/202] Rename sources/tech/20190914 GNOME 3.34 Released With New Features - Performance Improvements.md to sources/news/20190914 GNOME 3.34 Released With New Features - Performance Improvements.md

---
 ... 
3.34 Released With New Features - Performance Improvements.md | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename sources/{tech => news}/20190914 GNOME 3.34 Released With New Features - Performance Improvements.md (100%) diff --git a/sources/tech/20190914 GNOME 3.34 Released With New Features - Performance Improvements.md b/sources/news/20190914 GNOME 3.34 Released With New Features - Performance Improvements.md similarity index 100% rename from sources/tech/20190914 GNOME 3.34 Released With New Features - Performance Improvements.md rename to sources/news/20190914 GNOME 3.34 Released With New Features - Performance Improvements.md From 29851a4a2572122fdb1e0468fb010a8e3b033fdb Mon Sep 17 00:00:00 2001 From: Xingyu Wang Date: Sat, 14 Sep 2019 18:15:59 +0800 Subject: [PATCH 049/202] PRF @LazyWolfLin --- ...12 Why const Doesn-t Make C Code Faster.md | 98 ++++++++++--------- 1 file changed, 50 insertions(+), 48 deletions(-) diff --git a/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md b/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md index 7ecc3e7386..1151f829ed 100644 --- a/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md +++ b/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md @@ -1,20 +1,22 @@ [#]: collector: (lujun9972) [#]: translator: (LazyWolfLin) -[#]: reviewer: ( ) +[#]: reviewer: (wxy) [#]: publisher: ( ) [#]: url: ( ) [#]: subject: (Why const Doesn't Make C Code Faster) [#]: via: (https://theartofmachinery.com/2019/08/12/c_const_isnt_for_performance.html) [#]: author: (Simon Arneaud https://theartofmachinery.com) -为什么 `const` 无法让 `C` 代码跑得更快? +为什么 const 无法让 C 代码跑得更快? ====== -在几个月前的一篇文章里,我曾说过“[有个一个流行的传言,`const` 有助于编译器优化 `C` 和 `C++` 代码][1]”。我觉得我需要解释一下,尤其是曾经我自己也以为这是显然对的。我将会用一些理论并构造一些例子来论证,然后在一个真正的代码库,`Sqlite`,上做一些实验和基准测试。 +![](https://img.linux.net.cn/data/attachment/album/201909/14/181535lsrt9t93k1c1n0mt.jpg) + +在几个月前的一篇文章里,我曾说过“[有个一个流行的传言,`const` 有助于编译器优化 C 和 C++ 代码][1]”。我觉得我需要解释一下,尤其是曾经我自己也以为这是显然对的。我将会用一些理论并构造一些例子来论证,然后在一个真实的代码库 `Sqlite` 上做一些实验和基准测试。 ### 一个简单的测试 -让我们从一个最简单、最明显的例子开始,以前认为这是一个 `const` 让 `C` 代码跑得更快的例子。首先,假设我们有如下两个函数声明: +让我们从一个最简单、最明显的例子开始,以前认为这是一个 `const` 让 C 代码跑得更快的例子。首先,假设我们有如下两个函数声明: ``` void func(int *x); @@ -39,7 +41,7 @@ void constByArg(const int *x) } ``` -调用 `printf()` 时,CPU 会通过指针从 RAM 中取得 `*x` 的值。很显然,`constByArg()` 会稍微快一点,因为编译器知道 `*x` 是常量,因此不需要在调用 `constFunc()` 之后再次获取它的值。它仅是打印相同的东西。没问题吧?让我们来看下 `GCC` 在如下编译选项下生成的汇编代码: +调用 `printf()` 时,CPU 会通过指针从 RAM 中取得 `*x` 的值。很显然,`constByArg()` 会稍微快一点,因为编译器知道 `*x` 是常量,因此不需要在调用 `constFunc()` 之后再次获取它的值。它仅是打印相同的东西。没问题吧?让我们来看下 GCC 在如下编译选项下生成的汇编代码: ``` $ gcc -S -Wall -O3 test.c @@ -62,7 +64,7 @@ byArg: xorl %eax, %eax call __printf_chk@PLT movq %rbx, %rdi - call func@PLT # The only instruction that's different in constFoo + call func@PLT # constFoo 中唯一不同的指令 movl (%rbx), %edx leaq .LC0(%rip), %rsi xorl %eax, %eax @@ -73,16 +75,16 @@ byArg: .cfi_endproc ``` -函数 `byArg()` 和函数 `constByArg()` 生成的汇编代码中唯一的不同之处是 `constByArg()` 有一句汇编代码 `call constFunc@PLT`,这正是源码中的调用。关键字 `const` 本身并没有造成任何字面上的不同。 +函数 `byArg()` 和函数 `constByArg()` 生成的汇编代码中唯一的不同之处是 `constByArg()` 有一句汇编代码 `call constFunc@PLT`,这正是源代码中的调用。关键字 `const` 本身并没有造成任何字面上的不同。 -好了,这是 `GCC` 的结果。或许我们需要一个更聪明的编译器。`Clang` 会有更好的表现吗? +好了,这是 GCC 的结果。或许我们需要一个更聪明的编译器。Clang 会有更好的表现吗? 
``` $ clang -S -Wall -O3 -emit-llvm test.c $ view test.ll ``` -这是 `IR` 代码(`LLVM` 的中间语言)。它比汇编代码更加紧凑,所以我可以把两个函数都导出来,让你可以看清楚我所说的“除了调用外,没有任何字面上的不同”是什么意思: +这是 `IR` 代码(LCTT 译注:LLVM 的中间语言)。它比汇编代码更加紧凑,所以我可以把两个函数都导出来,让你可以看清楚我所说的“除了调用外,没有任何字面上的不同”是什么意思: ``` ; Function Attrs: nounwind uwtable @@ -121,14 +123,14 @@ void localVar() void constLocalVar() { - const int x = 42; // const on the local variable + const int x = 42; // 对本地变量使用 const printf("%d\n", x); constFunc(&x); printf("%d\n", x); } ``` -下面是 `localVar()` 的汇编代码,其中有两条指令在 `constLocalVar()` 中会被优化: +下面是 `localVar()` 的汇编代码,其中有两条指令在 `constLocalVar()` 中会被优化掉: ``` localVar: @@ -146,10 +148,10 @@ localVar: call __printf_chk@PLT leaq 4(%rsp), %rdi call constFunc@PLT - movl 4(%rsp), %edx # not in constLocalVar() + movl 4(%rsp), %edx # 在 constLocalVar() 中没有 xorl %eax, %eax movl $1, %edi - leaq .LC0(%rip), %rsi # not in constLocalVar() + leaq .LC0(%rip), %rsi # 在 constLocalVar() 中没有 call __printf_chk@PLT movq 8(%rsp), %rax xorq %fs:40, %rax @@ -164,7 +166,7 @@ localVar: .cfi_endproc ``` -`LLVM` 生成的 `IR` 代码中更明显。在 `constLocalVar()` 中,第二次调用 `printf()` 之前的 `load` 会被优化掉: +在 LLVM 生成的 `IR` 代码中更明显一点。在 `constLocalVar()` 中,第二次调用 `printf()` 之前的 `load` 会被优化掉: ``` ; Function Attrs: nounwind uwtable @@ -182,9 +184,9 @@ define dso_local void @localVar() local_unnamed_addr #0 { } ``` -好吧,现在,`constLocalVar()` 成功的优化了 `*x` 的重新读取,但是可能你已经注意到一些问题:`localVar()` 和 `constLocalVar()` 在函数体中做了同样的 `constFunc()` 调用。如果编译器能够推断出 `constFunc()` 没有修改 `constLocalVar()` 中的 `*x`,那为什么不能推断出完全一样的函数调用也没有修改 `localVar()` 中的 `*x`? +好吧,现在,`constLocalVar()` 成功的省略了对 `*x` 的重新读取,但是可能你已经注意到一些问题:`localVar()` 和 `constLocalVar()` 在函数体中做了同样的 `constFunc()` 调用。如果编译器能够推断出 `constFunc()` 没有修改 `constLocalVar()` 中的 `*x`,那为什么不能推断出完全一样的函数调用也没有修改 `localVar()` 中的 `*x`? -这个解释更贴近于为什么 `C` 语言的 `const` 不能作为优化手段的核心。`C` 语言的 `const` 有两个有效的含义:它可以表示这个变量是某个可能是常数也可能不是常数的数据的一个只读别名,或者它可以表示这变量真正的常量。如果你移除了一个指向常量的指针的 `const` 属性并写入数据,那结果将是一个未定义行为。另一方面,如果是一个指向非常量值的 `const` 指针,将就没问题。 +这个解释更贴近于为什么 C 语言的 `const` 不能作为优化手段的核心原因。C 语言的 `const` 有两个有效的含义:它可以表示这个变量是某个可能是常数也可能不是常数的数据的一个只读别名,或者它可以表示该变量是真正的常量。如果你移除了一个指向常量的指针的 `const` 属性并写入数据,那结果将是一个未定义行为。另一方面,如果是一个指向非常量值的 `const` 指针,将就没问题。 这份 `constFunc()` 的可能实现揭示了这意味着什么: @@ -207,13 +209,15 @@ void doubleIt(int *x) } ``` -`localVar()` 传递给 `constFunc()` 一个指向非 `const` 变量的 `const` 指针。因为这个变量并非常量,`constFunc()` 可以撒个谎并强行修改它而不触发而不触发未定义行为。所以,编译器不能断定变量在调用 `constFunc()` 后仍是同样的值。在 `constLocalVar()` 中的变量是真正的常量,因此,编译器可以断定它不会改变——因为在 `constFunc()` 去除变量的 `const` 属性并写入它*将*会是一个未定义行为。 +`localVar()` 传递给 `constFunc()` 一个指向非 `const` 变量的 `const` 指针。因为这个变量并非常量,`constFunc()` 可以撒个谎并强行修改它而不触发未定义行为。所以,编译器不能断定变量在调用 `constFunc()` 后仍是同样的值。在 `constLocalVar()` 中的变量是真正的常量,因此,编译器可以断定它不会改变 —— 因为在 `constFunc()` 去除变量的 `const` 属性并写入它*将*会是一个未定义行为。 -第一个例子中的函数 `byArg()` 和 `constByArg()` 是没有可能优化的,因为编译器没有任何方法能够知道 `*x` 是否真的是 `const` 常量。 +第一个例子中的函数 `byArg()` 和 `constByArg()` 是没有可能优化的,因为编译器没有任何方法能知道 `*x` 是否真的是 `const` 常量。 -但是为什么不一致呢?如果编译器能够推断出 `constLocalVar()` 中调用的 `constFunc()` 不会修改它的参数,那么肯定也能继续在其他 `constFunc()` 的调用上实施相同的优化,是吗?并不。编译器不能假设 `constLocalVar()` 根本没有运行。 如果不是这样(例如,它只是代码生成器或者宏的一些未使用的额外输出),`constFunc()` 就能偷偷地修改数据而不触发未定义行为。 +> 补充(和题外话):相当多的读者已经正确地指出,使用 `const int *x`,该指针本身不是限定的常量,只是该数据被加个了别名,而 `const int * const extra_const` 是一个“双向”限定为常量的指针。但是因为指针本身的常量与别名数据的常量无关,所以结果是相同的。仅在 `extra_const` 指向使用 `const` 定义的对象时,`*(int*const)extra_const = 0` 才是未定义行为。(实际上,`*(int*)extra_const = 0` 也不会更糟。)因为它们之间的区别可以一句话说明白,一个是完全的 `const` 指针,另外一个可能是也可能不是常量本身的指针,而是一个可能是也可能不是常量的对象的只读别名,我将继续不严谨地引用“常量指针”。(题外话结束) -你可能需要重复阅读上述说明和示例,但不要担心它听起来很荒谬,它确实是正确的。不幸的是,对 `const` 
变量进行写入是最糟糕的未定义行为:大多数情况下,编译器不知道它是否将会是未定义行为。所以,大多数情况下,编译器看见 `const` 时必须假设它未来可能会被移除掉,这意味着编译器不能使用它进行优化。这在实践中是正确的,因为真实的 `C` 代码会在“深思熟虑”后移除 `const`。 +但是为什么不一致呢?如果编译器能够推断出 `constLocalVar()` 中调用的 `constFunc()` 不会修改它的参数,那么肯定也能继续在其他 `constFunc()` 的调用上实施相同的优化,是吗?并不。编译器不能假设 `constLocalVar()` 根本没有运行。如果不是这样(例如,它只是代码生成器或者宏的一些未使用的额外输出),`constFunc()` 就能偷偷地修改数据而不触发未定义行为。 + +你可能需要重复阅读几次上述说明和示例,但不要担心,它听起来很荒谬,它确实是正确的。不幸的是,对 `const` 变量进行写入是最糟糕的未定义行为:大多数情况下,编译器无法知道它是否将会是未定义行为。所以,大多数情况下,编译器看见 `const` 时必须假设它未来可能会被移除掉,这意味着编译器不能使用它进行优化。这在实践中是正确的,因为真实的 C 代码会在“深思熟虑”后移除 `const`。 简而言之,很多事情都可以阻止编译器使用 `const` 进行优化,包括使用指针从另一内存空间接受数据,或者在堆空间上分配数据。更糟糕的是,在大部分编译器能够使用 `const` 进行优化的情况,它都不是必须的。例如,任何像样的编译器都能推断出下面代码中的 `x` 是一个常量,甚至都不需要 `const`: @@ -224,19 +228,19 @@ y += x; printf("%d %d\n", x, y); ``` -TL;DR,`const` 对优化而言几乎无用,因为: +总结,`const` 对优化而言几乎无用,因为: - 1. 除了特殊情况,编译器需要忽略它,因为其他代码可能合法地移除它 - 2. 在 #1 以外地大多数例外中,编译器无论如何都能推断出该变量是常量 +1. 除了特殊情况,编译器需要忽略它,因为其他代码可能合法地移除它 +2. 在 #1 以外的大多数例外中,编译器无论如何都能推断出该变量是常量 ### C++ -如果你在使用 `C++` 那么有另外一个方法让 `const` 能够影响到代码的生成。你可以用 `const` 和非 `const` 的参数重载同一个函数,而非 `const` 版本的代码可能可以优化(由程序员优化而不是编译器)掉某些拷贝或者其他事情。 +如果你在使用 C++ 那么有另外一个方法让 `const` 能够影响到代码的生成:函数重载。你可以用 `const` 和非 `const` 的参数重载同一个函数,而非 `const` 版本的代码可能可以被优化(由程序员优化而不是编译器),减少某些拷贝或者其他事情。 ``` void foo(int *p) { - // 需要坐更多的数据拷贝 + // 需要做更多的数据拷贝 } void foo(const int *p) @@ -253,24 +257,22 @@ int main() } ``` -一方面,我不认为这会在实际的 `C++` 代码中大量使用。另一方面,为了导致差异,程序员需要做出编译器无法做出的假设,因为它们不受语言保护。 +一方面,我不认为这会在实际的 C++ 代码中大量使用。另一方面,为了导致差异,程序员需要假设编译器无法做出,因为它们不受语言保护。 -### 用 `Sqlite3` 进行实验 +### 用 Sqlite3 进行实验 有了足够的理论和例子。那么 `const` 在一个真正的代码库中有多大的影响呢?我将会在代码库 `Sqlite`(版本:3.30.0)上做一个测试,因为: - * 它真正地使用了 `const` - * 它不是一个简单的代码库(超过 20 万行代码) - * 作为一个代码库,它包括了字符串处理、数学计算、日期处理等一系列内容 - * 它能够在绑定 CPU 下进行负载测试 - - +* 它真正地使用了 `const` +* 它不是一个简单的代码库(超过 20 万行代码) +* 作为一个数据库,它包括了字符串处理、数学计算、日期处理等一系列内容 +* 它能够在绑定 CPU 的情况下进行负载测试 此外,作者和贡献者们已经进行了多年的性能优化工作,因此我能确定他们没有错过任何有显著效果的优化。 #### 配置 -我做了两份[源码]][2]拷贝,并且正常编译其中一份。而对于另一份拷贝,我插入了这个预处理代码段,将 `const` 变成一个空操作: +我做了两份[源码][2]拷贝,并且正常编译其中一份。而对于另一份拷贝,我插入了这个特殊的预处理代码段,将 `const` 变成一个空操作: ``` #define const @@ -299,7 +301,7 @@ sqlite3_blob_read 7lea 5jmpq 4nopl #### 分析编译结果 -`const` 版本的 `libsqlite3.so` 的大小是 4,740,704 byte,大约比 4,736,712 byte 的非 `const` 版本大了 0.1% 。在全部 1374 个导出函数(不包括类似 PLT 里的底层辅助函数)中,一共有 13 个函数的识别码不一致。 +`const` 版本的 `libsqlite3.so` 的大小是 4,740,704 字节,大约比 4,736,712 字节的非 `const` 版本大了 0.1% 。在全部 1374 个导出函数(不包括类似 PLT 里的底层辅助函数)中,一共有 13 个函数的识别码不一致。 其中的一些改变是由于插入的预处理代码。举个例子,这里有一个发生了更改的函数(已经删去一些 `Sqlite` 特有的定义): @@ -330,7 +332,7 @@ static int64_t doubleToInt64(double r){ 删去 `const` 使得这些常量变成了 `static` 变量。我不明白为什么会有不了解 `const` 的人让这些变量加上 `static`。同时删去 `static` 和 `const` 会让 GCC 再次认为它们是常量,而我们将得到同样的编译输出。由于类似这样的局部的 `static const` 变量,使得 13 个函数中有 3 个函数产生假的变化,但我一个都不打算修复它们。 -`Sqlite` 使用了很多全局变量,而这正是大多数真正的 `const` 优化产生的地方。通常情况下,它们类似于将一个变量比较代替成一个常量比较,或者一个循环在部分展开的一步。([Radare toolkit][3] 可以很方便的找出这些优化措施。)一些变化则令人失望。`sqlite3ParseUri()` 有 487 指令,但 `const` 产生的唯一区别是进行了这个比较: +`Sqlite` 使用了很多全局变量,而这正是大多数真正的 `const` 优化产生的地方。通常情况下,它们类似于将一个变量比较代替成一个常量比较,或者一个循环在部分展开的一步。([Radare toolkit][3] 可以很方便的找出这些优化措施。)一些变化则令人失望。`sqlite3ParseUri()` 有 487 个指令,但 `const` 产生的唯一区别是进行了这个比较: ``` test %al, %al @@ -352,18 +354,18 @@ je `Sqlite` 自带了一个性能回归测试,因此我尝试每个版本的代码执行一百次,仍然使用默认的 `Sqlite` 编译设置。以秒为单位的测试结果如下: -| const | No const +| | const | 非 const ---|---|--- -Minimum | 10.658s | 10.803s -Median | 11.571s | 11.519s -Maximum | 11.832s | 11.658s -Mean | 11.531s | 11.492s +最小值 | 10.658s | 10.803s +中间值 | 11.571s | 11.519s +最大值 | 11.832s | 11.658s +平均值 | 11.531s | 11.492s -就我个人看来,我没有发现足够的证据说明这个差异值得关注。我是说,我从整个程序中删去 
`const`,所以如果它有明显的差别,那么我希望它是显而易见的。但也许你关心任何微小的差异,因为你正在做一些绝对性能非常重要的事。那让我们试一下统计分析。 +就我个人看来,我没有发现足够的证据来说明这个差异值得关注。我是说,我从整个程序中删去 `const`,所以如果它有明显的差别,那么我希望它是显而易见的。但也许你关心任何微小的差异,因为你正在做一些绝对性能非常重要的事。那让我们试一下统计分析。 -我喜欢使用类似 Mann-Whitney U 检验这样的东西。它类似于更著名的 T 检验,但对你在机器上计时时产生的复杂随机变量(由于不可预测的上下文切换,页错误等)更加鲁棒。以下是结果: +我喜欢使用类似 Mann-Whitney U 检验这样的东西。它类似于更著名的 T 检验,但对你在机器上计时时产生的复杂随机变量(由于不可预测的上下文切换、页错误等)更加健壮。以下是结果: -|| const | No const| +|| const | 非 const| ---|---|--- N | 100 | 100 Mean rank | 121.38 | 79.62 @@ -378,11 +380,11 @@ HL median difference | -0.056s U 检验已经发现统计意义上具有显著的性能差异。但是,令人惊讶的是,实际上是非 `const` 版本更快——大约 60ms,0.5%。似乎 `const` 启用的少量“优化”不值得额外代码的开销。这不像是 `const` 启用了任何类似于自动矢量化的重要的优化。当然,你的结果可能因为编译器配置、编译器版本或者代码库等等而有所不同,但是我觉得这已经说明了 `const` 是否能够有效地提高 `C` 的性能,我们现在已经看到答案了。 -### 那么,`const` 有什么用呢? +### 那么,const 有什么用呢? -尽管存在缺陷,`C/C++` 的 `const` 仍有助于类型安全。特别是,结合 `C++` 的移动语义和 `std::unique_pointer`,`const` 可以使指针所有权显式化。在超过十万行代码的 `C++` 旧代码库里,指针所有权模糊是一个大难题,我对此深有感触。 +尽管存在缺陷,C/C++ 的 `const` 仍有助于类型安全。特别是,结合 C++ 的移动语义和 `std::unique_pointer`,`const` 可以使指针所有权显式化。在超过十万行代码的 C++ 旧代码库里,指针所有权模糊是一个大难题,我对此深有感触。 -但是,我以前常常使用 `const` 来实现有意义的类型安全。我曾听说过基于性能上的原因,最好是尽可能多地使用 `const`。我曾听说过当性能很重要时,重构代码并添加更多的 `const` 非常重要,即使以降低代码可读性的方式。当时觉得这没问题,但后来我才知道这并不对。 +但是,我以前常常使用 `const` 来实现有意义的类型安全。我曾听说过基于性能上的原因,最好是尽可能多地使用 `const`。我曾听说过当性能很重要时,重构代码并添加更多的 `const` 非常重要,即使以降低代码可读性的方式。**当时觉得这没问题,但后来我才知道这并不对。** -------------------------------------------------------------------------------- @@ -391,7 +393,7 @@ via: https://theartofmachinery.com/2019/08/12/c_const_isnt_for_performance.html 作者:[Simon Arneaud][a] 选题:[lujun9972][b] 译者:[LazyWolfLin](https://github.com/LazyWolfLin) -校对:[校对者ID](https://github.com/校对者ID) +校对:[wxy](https://github.com/wxy) 本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 From ce94e8ed60e3ccc63570cbc6028d7404037e80ae Mon Sep 17 00:00:00 2001 From: Xingyu Wang Date: Sat, 14 Sep 2019 18:18:32 +0800 Subject: [PATCH 050/202] PUB @LazyWolfLin https://linux.cn/article-11339-1.html --- .../20190812 Why const Doesn-t Make C Code Faster.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) rename {translated/tech => published}/20190812 Why const Doesn-t Make C Code Faster.md (99%) diff --git a/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md b/published/20190812 Why const Doesn-t Make C Code Faster.md similarity index 99% rename from translated/tech/20190812 Why const Doesn-t Make C Code Faster.md rename to published/20190812 Why const Doesn-t Make C Code Faster.md index 1151f829ed..852b62f0c7 100644 --- a/translated/tech/20190812 Why const Doesn-t Make C Code Faster.md +++ b/published/20190812 Why const Doesn-t Make C Code Faster.md @@ -1,8 +1,8 @@ [#]: collector: (lujun9972) [#]: translator: (LazyWolfLin) [#]: reviewer: (wxy) -[#]: publisher: ( ) -[#]: url: ( ) +[#]: publisher: (wxy) +[#]: url: (https://linux.cn/article-11339-1.html) [#]: subject: (Why const Doesn't Make C Code Faster) [#]: via: (https://theartofmachinery.com/2019/08/12/c_const_isnt_for_performance.html) [#]: author: (Simon Arneaud https://theartofmachinery.com) From 53ba1a65bb9d692abc6b4f6d2b9dbeae3a927e24 Mon Sep 17 00:00:00 2001 From: "Xingyu.Wang" Date: Sat, 14 Sep 2019 18:19:49 +0800 Subject: [PATCH 051/202] Rename sources/tech/20190914 Manjaro Linux Graduates From A Hobby Project To A Professional Project.md to sources/news/20190914 Manjaro Linux Graduates From A Hobby Project To A Professional Project.md --- ...ux Graduates From A Hobby Project To A Professional Project.md | 0 1 file changed, 0 insertions(+), 
0 deletions(-) rename sources/{tech => news}/20190914 Manjaro Linux Graduates From A Hobby Project To A Professional Project.md (100%) diff --git a/sources/tech/20190914 Manjaro Linux Graduates From A Hobby Project To A Professional Project.md b/sources/news/20190914 Manjaro Linux Graduates From A Hobby Project To A Professional Project.md similarity index 100% rename from sources/tech/20190914 Manjaro Linux Graduates From A Hobby Project To A Professional Project.md rename to sources/news/20190914 Manjaro Linux Graduates From A Hobby Project To A Professional Project.md From aa6eef2351c969fcd3ce3825483497d05f2fbd3c Mon Sep 17 00:00:00 2001 From: MjSeven Date: Fri, 13 Sep 2019 15:19:18 +0800 Subject: [PATCH 052/202] =?UTF-8?q?=E7=BF=BB=E8=AF=91=E5=AE=8C=E6=88=90?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- ...0190409 Working with variables on Linux.md | 267 ------------------ ...0190409 Working with variables on Linux.md | 262 +++++++++++++++++ 2 files changed, 262 insertions(+), 267 deletions(-) delete mode 100644 sources/tech/20190409 Working with variables on Linux.md create mode 100644 translated/tech/20190409 Working with variables on Linux.md diff --git a/sources/tech/20190409 Working with variables on Linux.md b/sources/tech/20190409 Working with variables on Linux.md deleted file mode 100644 index a926fe67b4..0000000000 --- a/sources/tech/20190409 Working with variables on Linux.md +++ /dev/null @@ -1,267 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: (MjSeven) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Working with variables on Linux) -[#]: via: (https://www.networkworld.com/article/3387154/working-with-variables-on-linux.html#tk.rss_all) -[#]: author: (Sandra Henry-Stocker https://www.networkworld.com/author/Sandra-Henry_Stocker/) - -Working with variables on Linux -====== -Variables often look like $var, but they also look like $1, $*, $? and $$. Let's take a look at what all these $ values can tell you. -![Mike Lawrence \(CC BY 2.0\)][1] - -A lot of important values are stored on Linux systems in what we call “variables,” but there are actually several types of variables and some interesting commands that can help you work with them. In a previous post, we looked at [environment variables][2] and where they are defined. In this post, we're going to look at variables that are used on the command line and within scripts. - -### User variables - -While it's quite easy to set up a variable on the command line, there are a few interesting tricks. To set up a variable, all you need to do is something like this: - -``` -$ myvar=11 -$ myvar2="eleven" -``` - -To display the values, you simply do this: - -``` -$ echo $myvar -11 -$ echo $myvar2 -eleven -``` - -You can also work with your variables. For example, to increment a numeric variable, you could use any of these commands: - -``` -$ myvar=$((myvar+1)) -$ echo $myvar -12 -$ ((myvar=myvar+1)) -$ echo $myvar -13 -$ ((myvar+=1)) -$ echo $myvar -14 -$ ((myvar++)) -$ echo $myvar -15 -$ let "myvar=myvar+1" -$ echo $myvar -16 -$ let "myvar+=1" -$ echo $myvar -17 -$ let "myvar++" -$ echo $myvar -18 -``` - -With some of these, you can add more than 1 to a variable's value. For example: - -``` -$ myvar0=0 -$ ((myvar0++)) -$ echo $myvar0 -1 -$ ((myvar0+=10)) -$ echo $myvar0 -11 -``` - -With all these choices, you'll probably find at least one that is easy to remember and convenient to use. - -You can also _unset_ a variable — basically undefining it. 
- -``` -$ unset myvar -$ echo $myvar -``` - -Another interesting option is that you can set up a variable and make it **read-only**. In other words, once set to read-only, its value cannot be changed (at least not without some very tricky command line wizardry). That means you can't unset it either. - -``` -$ readonly myvar3=1 -$ echo $myvar3 -1 -$ ((myvar3++)) --bash: myvar3: readonly variable -$ unset myvar3 --bash: unset: myvar3: cannot unset: readonly variable -``` - -You can use any of those setting and incrementing options for assigning and manipulating variables within scripts, but there are also some very useful _internal variables_ for working within scripts. Note that you can't reassign their values or increment them. - -### Internal variables - -There are quite a few variables that can be used within scripts to evaluate arguments and display information about the script itself. - - * $1, $2, $3 etc. represent the first, second, third, etc. arguments to the script. - * $# represents the number of arguments. - * $* represents the string of arguments. - * $0 represents the name of the script itself. - * $? represents the return code of the previously run command (0=success). - * $$ shows the process ID for the script. - * $PPID shows the process ID for your shell (the parent process for the script). - - - -Some of these variables also work on the command line but show related information: - - * $0 shows the name of the shell you're using (e.g., -bash). - * $$ shows the process ID for your shell. - * $PPID shows the process ID for your shell's parent process (for me, this is sshd). - - - -If we throw all of these variables into a script just to see the results, we might do this: - -``` -#!/bin/bash - -echo $0 -echo $1 -echo $2 -echo $# -echo $* -echo $? -echo $$ -echo $PPID -``` - -When we call this script, we'll see something like this: - -``` -$ tryme one two three -/home/shs/bin/tryme <== script name -one <== first argument -two <== second argument -3 <== number of arguments -one two three <== all arguments -0 <== return code from previous echo command -10410 <== script's process ID -10109 <== parent process's ID -``` - -If we check the process ID of the shell once the script is done running, we can see that it matches the PPID displayed within the script: - -``` -$ echo $$ -10109 <== shell's process ID -``` - -Of course, we're more likely to use these variables in considerably more useful ways than simply displaying their values. Let's check out some ways we might do this. - -Checking to see if arguments have been provided: - -``` -if [ $# == 0 ]; then - echo "$0 filename" - exit 1 -fi -``` - -Checking to see if a particular process is running: - -``` -ps -ef | grep apache2 > /dev/null -if [ $? != 0 ]; then - echo Apache is not running - exit -fi -``` - -Verifying that a file exists before trying to access it: - -``` -if [ $# -lt 2 ]; then - echo "Usage: $0 lines filename" - exit 1 -fi - -if [ ! -f $2 ]; then - echo "Error: File $2 not found" - exit 2 -else - head -$1 $2 -fi -``` - -And in this little script, we check if the correct number of arguments have been provided, if the first argument is numeric, and if the second argument is an existing file. - -``` -#!/bin/bash - -if [ $# -lt 2 ]; then - echo "Usage: $0 lines filename" - exit 1 -fi - -if [[ $1 != [0-9]* ]]; then - echo "Error: $1 is not numeric" - exit 2 -fi - -if [ ! 
-f $2 ]; then - echo "Error: File $2 not found" - exit 3 -else - echo top of file - head -$1 $2 -fi -``` - -### Renaming variables - -When writing a complicated script, it's often useful to assign names to the script's arguments rather than continuing to refer to them as $1, $2, and so on. By the 35th line, someone reading your script might have forgotten what $2 represents. It will be a lot easier on that person if you assign an important parameter's value to $filename or $numlines. - -``` -#!/bin/bash - -if [ $# -lt 2 ]; then - echo "Usage: $0 lines filename" - exit 1 -else - numlines=$1 - filename=$2 -fi - -if [[ $numlines != [0-9]* ]]; then - echo "Error: $numlines is not numeric" - exit 2 -fi - -if [ ! -f $ filename]; then - echo "Error: File $filename not found" - exit 3 -else - echo top of file - head -$numlines $filename -fi -``` - -Of course, this example script does nothing more than run the head command to show the top X lines in a file, but it is meant to show how internal parameters can be used within scripts to help ensure the script runs well or fails with at least some clarity. - -**[ Watch Sandra Henry-Stocker's Two-Minute Linux Tips[to learn how to master a host of Linux commands][3] ]** - -Join the Network World communities on [Facebook][4] and [LinkedIn][5] to comment on topics that are top of mind. - --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3387154/working-with-variables-on-linux.html#tk.rss_all - -作者:[Sandra Henry-Stocker][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Sandra-Henry_Stocker/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2019/04/variable-key-keyboard-100793080-large.jpg -[2]: https://www.networkworld.com/article/3385516/how-to-manage-your-linux-environment.html -[3]: https://www.youtube.com/playlist?list=PL7D2RMSmRO9J8OTpjFECi8DJiTQdd4hua -[4]: https://www.facebook.com/NetworkWorld/ -[5]: https://www.linkedin.com/company/network-world diff --git a/translated/tech/20190409 Working with variables on Linux.md b/translated/tech/20190409 Working with variables on Linux.md new file mode 100644 index 0000000000..e0cac381f1 --- /dev/null +++ b/translated/tech/20190409 Working with variables on Linux.md @@ -0,0 +1,262 @@ +[#]: collector: (lujun9972) +[#]: translator: (MjSeven) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (Working with variables on Linux) +[#]: via: (https://www.networkworld.com/article/3387154/working-with-variables-on-linux.html#tk.rss_all) +[#]: author: (Sandra Henry-Stocker https://www.networkworld.com/author/Sandra-Henry_Stocker/) + +在 Linux 中使用变量 +====== +变量通常看起来像 $var,但它们也有 $1、$*、$? 
和 $$ 这种形式。让我们来看看所有这些 $ 值可以告诉你什么。 +![Mike Lawrence \(CC BY 2.0\)][1] + +我们称为“变量”的许多重要的值都存储在 Linux 系统中,但实际上有几种类型的变量和一些有趣的命令可以帮助你使用它们。在上一篇文章中,我们研究了[环境变量][2]以及它们在哪定义。在本文中,我们来看一看在命令行和脚本中使用的变量。 + +### 用户变量 + +虽然在命令行中设置变量非常容易,但是有一些有趣的技巧。要设置变量,你只需这样做: + +``` +$ myvar=11 +$ myvar2="eleven" +``` + +要显示这些值,只需这样做: + +``` +$ echo $myvar +11 +$ echo $myvar2 +eleven +``` + +你也可以使用这些变量。例如,要递增一个数字变量,使用以下任意一个命令: + +``` +$ myvar=$((myvar+1)) +$ echo $myvar +12 +$ ((myvar=myvar+1)) +$ echo $myvar +13 +$ ((myvar+=1)) +$ echo $myvar +14 +$ ((myvar++)) +$ echo $myvar +15 +$ let "myvar=myvar+1" +$ echo $myvar +16 +$ let "myvar+=1" +$ echo $myvar +17 +$ let "myvar++" +$ echo $myvar +18 +``` + +使用其中的一些,你可以增加一个变量的值。例如: + +``` +$ myvar0=0 +$ ((myvar0++)) +$ echo $myvar0 +1 +$ ((myvar0+=10)) +$ echo $myvar0 +11 +``` + +通过这些选项,你可能会发现至少有一个是容易记忆且使用方便的。 + +你也可以 _删除_ 一个变量 -- 这意味着没有定义它。 + +``` +$ unset myvar +$ echo $myvar +``` + +另一个有趣的选项是,你可以设置一个变量并将其设为**只读**。换句话说,变量一旦设置为只读,它的值就不能改变(除非一些非常复杂的命令行魔法才可以)。这意味着你也不能删除它。 + +``` +$ readonly myvar3=1 +$ echo $myvar3 +1 +$ ((myvar3++)) +-bash: myvar3: readonly variable +$ unset myvar3 +-bash: unset: myvar3: cannot unset: readonly variable +``` + +你可以使用这些设置和递增选项中的任何一个来赋值和操作脚本中的变量,但也有一些非常有用的 _内部变量_ 用于在脚本中工作。注意,你无法重新赋值或增加它们的值。 + +### 内部变量 + +在脚本中可以使用很多变量来计算参数并显示有关脚本本身的信息。 + + * $1、$2、$3 等表示脚本的第一个、第二个、第三个等参数。 + * $# 表示参数的数量。 + * $* 表示所有参数。 + * $0 表示脚本的名称。 + * $? 表示先前运行的命令的返回码(0 代表成功)。 + * $$ 显示脚本的进程 ID。 + * $PPID 显示 shell 的进程 ID(脚本的父进程)。 + +其中一些变量也适用于命令行,但显示相关信息: + + * $0 显示你正在使用的 shell 的名称(例如,-bash)。 + * $$ 显示 shell 的进程 ID。 + * $PPID 显示 shell 的父进程的进程 ID(对我来说,是 sshd)。 + +为了查看它们的结果,如果我们将所有这些变量都放入一个脚本中,比如: + +``` +#!/bin/bash + +echo $0 +echo $1 +echo $2 +echo $# +echo $* +echo $? +echo $$ +echo $PPID +``` + +当我们调用这个脚本时,我们会看到如下内容: +``` +$ tryme one two three +/home/shs/bin/tryme <== 脚本名称 +one <== 第一个参数 +two <== 第二个参数 +3 <== 参数的个数 +one two three <== 所有的参数 +0 <== 上一条 echo 命令的返回码 +10410 <== 脚本的进程 ID +10109 <== 父进程 ID +``` + +如果我们在脚本运行完毕后检查 shell 的进程 ID,我们可以看到它与脚本中显示的 PPID 相匹配: + +``` +$ echo $$ +10109 <== shell 的进程 ID +``` + +当然,比起简单地显示它们的值,我们更多的是在需要它们的时候来使用它们。我们来看一看它们可能的用处。 + +检查是否已提供参数: + +``` +if [ $# == 0 ]; then + echo "$0 filename" + exit 1 +fi +``` + +检查特定进程是否正在运行: + +``` +ps -ef | grep apache2 > /dev/null +if [ $? != 0 ]; then + echo Apache is not running + exit +fi +``` + +在尝试访问文件之前验证文件是否存在: + +``` +if [ $# -lt 2 ]; then + echo "Usage: $0 lines filename" + exit 1 +fi + +if [ ! -f $2 ]; then + echo "Error: File $2 not found" + exit 2 +else + head -$1 $2 +fi +``` + +在下面的小脚本中,我们检查是否提供了正确数量的参数、第一个参数是否为数字,以及第二个参数代表的文件是否存在。 + +``` +#!/bin/bash + +if [ $# -lt 2 ]; then + echo "Usage: $0 lines filename" + exit 1 +fi + +if [[ $1 != [0-9]* ]]; then + echo "Error: $1 is not numeric" + exit 2 +fi + +if [ ! -f $2 ]; then + echo "Error: File $2 not found" + exit 3 +else + echo top of file + head -$1 $2 +fi +``` + +### 重命名变量 + +在编写复杂的脚本时,为脚本的参数指定名称通常很有用,而不是继续将它们称为 $1, $2 等。等到第 35 行,阅读你脚本的人可能已经忘了 $2 表示什么。如果你将一个重要参数的值赋给 $filename 或 $numlines,那么他就不容易忘记。 + +``` +#!/bin/bash + +if [ $# -lt 2 ]; then + echo "Usage: $0 lines filename" + exit 1 +else + numlines=$1 + filename=$2 +fi + +if [[ $numlines != [0-9]* ]]; then + echo "Error: $numlines is not numeric" + exit 2 +fi + +if [ ! 
-f $filename ]; then
+    echo "Error: File $filename not found"
+    exit 3
+else
+    echo top of file
+    head -$numlines $filename
+fi
+```
+
+当然,这个示例脚本只是运行 head 命令来显示文件中的前 x 行,但它的目的是显示如何在脚本中使用内部参数来帮助确保脚本运行良好,或在失败时清晰地知道失败原因。
+
+**观看 Sandra Henry-Stocker 的两分钟 Linux 技巧:[学习如何掌握大量 Linux 命令][3]。**
+
+加入 [Facebook][4] 和 [Linkedln][5] 上的网络社区,评论最热的主题。
+
+--------------------------------------------------------------------------------
+
+via: https://www.networkworld.com/article/3387154/working-with-variables-on-linux.html#tk.rss_all
+
+作者:[Sandra Henry-Stocker][a]
+选题:[lujun9972][b]
+译者:[MjSeven](https://github.com/MjSeven)
+校对:[校对者ID](https://github.com/校对者ID)
+
+本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
+
+[a]: https://www.networkworld.com/author/Sandra-Henry_Stocker/
+[b]: https://github.com/lujun9972
+[1]: https://images.idgesg.net/images/article/2019/04/variable-key-keyboard-100793080-large.jpg
+[2]: https://www.networkworld.com/article/3385516/how-to-manage-your-linux-environment.html
+[3]: https://www.youtube.com/playlist?list=PL7D2RMSmRO9J8OTpjFECi8DJiTQdd4hua
+[4]: https://www.facebook.com/NetworkWorld/
+[5]: https://www.linkedin.com/company/network-world
From 6107065c2c3596c5a5cffb9e59295507af705dba Mon Sep 17 00:00:00 2001
From: Xingyu Wang
Date: Sat, 14 Sep 2019 19:07:38 +0800
Subject: [PATCH 053/202] PRF
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

@heguangzhi 我很失望,这篇几乎就是用谷歌翻译直接提交的,请勿如此。
---
 ...ow to Create and Use Swap File on Linux.md | 125 ++++++++----------
 1 file changed, 58 insertions(+), 67 deletions(-)

diff --git a/translated/tech/20190830 How to Create and Use Swap File on Linux.md b/translated/tech/20190830 How to Create and Use Swap File on Linux.md
index 6c5e1561be..60b621040a 100644
--- a/translated/tech/20190830 How to Create and Use Swap File on Linux.md
+++ b/translated/tech/20190830 How to Create and Use Swap File on Linux.md
@@ -1,7 +1,7 @@
 [#]: collector: (lujun9972)
-[#]: translator: (hello-wn)
-[#]: reviewer: ( )
-[#]: publisher: (heguangzhi)
+[#]: translator: (heguangzhi)
+[#]: reviewer: (wxy)
+[#]: publisher: ( )
 [#]: url: ( )
 [#]: subject: (How to Create and Use Swap File on Linux)
 [#]: via: (https://itsfoss.com/create-swap-file-linux/)
 [#]: author: (Abhishek Prakash https://itsfoss.com/author/abhishek/)
@@ -10,29 +10,29 @@
 如何在 Linux 上创建和使用交换文件
 ======
-本教程讨论了 Linux 中交换文件的概念,为什么使用它以及它相对于传统交换分区的优势。您将学习如何创建交换文件和调整其大小。
+本教程讨论了 Linux 中交换文件的概念,为什么使用它以及它相对于传统交换分区的优势。你将学习如何创建交换文件和调整其大小。

 ### 什么是 Linux 的交换文件?
-交换文件允许 Linux 将磁盘空间模拟为内存。当您的系统开始耗尽内存时,它会使用交换空间将内存的一些内容交换到磁盘空间上。这样释放了内存,为更重要的进程服务。当内存再次空闲时,它会从磁盘交换回数据。我建议[阅读这篇文章,了解更多关于交换在 Linux ][1]。 +交换文件允许 Linux 将磁盘空间模拟为内存。当你的系统开始耗尽内存时,它会使用交换空间将内存的一些内容交换到磁盘空间上。这样释放了内存,为更重要的进程服务。当内存再次空闲时,它会从磁盘交换回数据。我建议[阅读这篇文章,了解 Linux 上的交换空间的更多内容][1]。 -传统上,交换空间被用作磁盘上的一个独立分区。安装 Linux 时,只需创建一个单独的分区进行交换。但是这种趋势在最近几年发生了变化。 +传统上,交换空间是磁盘上的一个独立分区。安装 Linux 时,只需创建一个单独的分区进行交换。但是这种趋势在最近几年发生了变化。 -使用交换文件,您不再需要单独的分区。您在 root 下创建一个文件,并告诉您的系统将其用作交换空间就行了。 +使用交换文件,你不再需要单独的分区。你会根目录下创建一个文件,并告诉你的系统将其用作交换空间就行了。 -使用专用的交换分区,在许多情况下,调整交换空间的大小是一个噩梦,也是一项不可能完成的任务。但是有了交换文件,你可以随意调整它们的大小。 +使用专用的交换分区,在许多情况下,调整交换空间的大小是一个可怕而不可能的任务。但是有了交换文件,你可以随意调整它们的大小。 -最新版本的 Ubuntu 和其他一些 Linux 发行版已经开始 [默认使用交换文件][2]。即使您没有创建交换分区,Ubuntu 也会自己创建一个 1GB 左右的交换文件。 +最新版本的 Ubuntu 和其他一些 Linux 发行版已经开始 [默认使用交换文件][2]。甚至如果你没有创建交换分区,Ubuntu 也会自己创建一个 1GB 左右的交换文件。 让我们看看交换文件的更多信息。 -![][3] +![](https://img.linux.net.cn/data/attachment/album/201909/14/190637uggjgsjoogxg3vh0.jpg) ### 检查 Linux 的交换空间 -在您开始添加交换空间之前,最好检查一下您的系统中是否已经有了交换空间。 +在你开始添加交换空间之前,最好检查一下你的系统中是否已经有了交换空间。 -你可以用[ free 命令在Linux][4]检查它。就我而言,我的[戴尔XPS][5]有 14GB 的交换容量。 +你可以用[Linux 上的 free 命令][4]检查它。就我而言,我的[戴尔 XPS][5]有 14GB 的交换容量。 ``` free -h @@ -40,7 +40,8 @@ free -h Mem: 7.5G 4.1G 267M 971M 3.1G 2.2G Swap: 14G 0B 14G ``` -free 命令给出了交换空间的大小,但它并没有告诉你它是真正的交换分区还是交换文件。swapon 命令在这方面会更好。 + +`free` 命令给出了交换空间的大小,但它并没有告诉你它是真实的交换分区还是交换文件。`swapon` 命令在这方面会更好。 ``` swapon --show @@ -48,7 +49,7 @@ NAME TYPE SIZE USED PRIO /dev/nvme0n1p4 partition 14.9G 0B -2 ``` -如您所见,我有 14.9GB 的交换空间,它在一个单独的分区上。如果是交换文件,类型应该是文件而不是分区。 +如你所见,我有 14.9GB 的交换空间,它在一个单独的分区上。如果是交换文件,类型应该是 `file` 而不是 `partition`。 ``` swapon --show @@ -56,7 +57,7 @@ NAME TYPE SIZE USED PRIO /swapfile file 2G 0B -2 ``` -如果您的系统上没有交换空间,它应该显示如下内容: +如果你的系统上没有交换空间,它应该显示如下内容: ``` free -h @@ -65,59 +66,55 @@ Mem: 7.5G 4.1G 267M 971M 3.1G 2.2G Swap: 0B 0B 0B ``` -swapon 命令不会显示任何输出。 +而 `swapon` 命令不会显示任何输出。 ### 在 Linux 上创建交换文件 -如果您的系统没有交换空间,或者您认为交换空间不足,您可以在 Linux 上创建交换文件。您也可以创建多个交换文件。 - -[][6] - -建议阅读 Ubuntu 14.04 的修复缺失系统设置[快速提示] +如果你的系统没有交换空间,或者你认为交换空间不足,你可以在 Linux 上创建交换文件。你也可以创建多个交换文件。 让我们看看如何在 Linux 上创建交换文件。我在本教程中使用 Ubuntu 18.04,但它也应该适用于其他 Linux 发行版本。 -#### 步骤1:创建一个新的交换文件 +#### 步骤 1:创建一个新的交换文件 -首先,创建一个具有所需交换空间大小的文件。假设我想给我的系统增加 1GB 的交换空间。使用fallocate 命令创建大小为 1GB 的文件。 +首先,创建一个具有所需交换空间大小的文件。假设我想给我的系统增加 1GB 的交换空间。使用`fallocate` 命令创建大小为 1GB 的文件。 ``` sudo fallocate -l 1G /swapfile ``` -建议只允许 root 用户读写交换文件。当您尝试将此文件用于交换区域时,您甚至会看到类似“建议的不安全权限0644,0600”的警告。 +建议只允许 `root` 用户读写该交换文件。当你尝试将此文件用于交换区域时,你甚至会看到类似“不安全权限 0644,建议 0600”的警告。 ``` sudo chmod 600 /swapfile ``` -请注意,交换文件的名称可以是任意的。如果您需要多个交换空间,您可以给它任何合适的名称,如swap_file_1、swap_file_2等。它只是一个预定义大小的文件。 +请注意,交换文件的名称可以是任意的。如果你需要多个交换空间,你可以给它任何合适的名称,如 `swap_file_1`、`swap_file_2` 等。它们只是一个预定义大小的文件。 -#### 步骤2:将新文件标记为交换空间 +#### 步骤 2:将新文件标记为交换空间 -您需要告诉 Linux 系统该文件将被用作交换空间。你可以用 [mkswap][7] 工具做到这一点。 +你需要告诉 Linux 系统该文件将被用作交换空间。你可以用 [mkswap][7] 工具做到这一点。 ``` sudo mkswap /swapfile ``` -您应该会看到这样的输出: +你应该会看到这样的输出: ``` Setting up swapspace version 1, size = 1024 MiB (1073737728 bytes) no label, UUID=7e1faacb-ea93-4c49-a53d-fb40f3ce016a ``` -#### 步骤3:启用交换文件 +#### 步骤 3:启用交换文件 -现在,您的系统知道文件交换文件可以用作交换空间。但是还没有完成。您需要启用交换文件,以便系统可以开始使用该文件作为交换。 +现在,你的系统知道文件 `swapfile` 可以用作交换空间。但是还没有完成。你需要启用该交换文件,以便系统可以开始使用该文件作为交换。 ``` sudo swapon /swapfile ``` -现在,如果您检查交换空间,您应该会看到您的Linux系统识别并使用它作为交换区域: +现在,如果你检查交换空间,你应该会看到你的 Linux 系统会识别并使用它作为交换空间: ``` swapon --show @@ -125,58 +122,51 @@ NAME TYPE SIZE USED PRIO /swapfile file 1024M 0B -2 ``` -#### 第四步:让改变持久化 +#### 步骤 4:让改变持久化 -迄今为止您所做的一切都是暂时的。重新启动系统,所有更改都将消失。 +迄今为止你所做的一切都是暂时的。重新启动系统,所有更改都将消失。 -您可以通过将新创建的交换文件添加到 
/etc/fstab 文件来使更改持久化。 +你可以通过将新创建的交换文件添加到 `/etc/fstab` 文件来使更改持久化。 -对 /etc/fstab 文件进行任何更改之前,最好先进行备份。 +对 `/etc/fstab` 文件进行任何更改之前,最好先进行备份。 ``` sudo cp /etc/fstab /etc/fstab.back ``` -如何将以下行添加到 /etc/fstab 文件的末尾: +现在将以下行添加到 `/etc/fstab` 文件的末尾: ``` /swapfile none swap sw 0 0 ``` - -您可以使用[命令行文本编辑器][8]手动执行,或者只使用以下命令: +你可以使用[命令行文本编辑器][8]手动操作,或者使用以下命令: ``` echo '/swapfile none swap sw 0 0' | sudo tee -a /etc/fstab ``` -现在一切都准备好了。即使在重新启动您的 Linux 系统后,您的交换文件也会被使用。 +现在一切都准备好了。即使在重新启动你的 Linux 系统后,你的交换文件也会被使用。 -### 调整交换 +### 调整 swappiness 参数 -交换参数决定了交换空间的使用频率。交换值的范围从0到100。较高的值意味着交换空间将被更频繁地使用。 +`swappiness` 参数决定了交换空间的使用频率。`s`wappiness` 值的范围从 0 到 100。较高的值意味着交换空间将被更频繁地使用。 -Ubuntu 桌面的默认交 换度是 60,而服务器的默认交换度是 1。您可以使用以下命令检查swappiness: +Ubuntu 桌面的默认的 `swappiness` 是 60,而服务器的默认 `swappiness` 是 1。你可以使用以下命令检查 `swappiness`: ``` cat /proc/sys/vm/swappiness ``` -为什么服务器应该使用低交换率?因为交换比内存慢,为了获得更好的性能,应该尽可能多地使用内存。在服务器上,性能因素至关重要,因此交换性尽可能低。 +为什么服务器应该使用低的 `swappiness` 值?因为交换空间比内存慢,为了获得更好的性能,应该尽可能多地使用内存。在服务器上,性能因素至关重要,因此 `swappiness` 应该尽可能低。 -[][9] - -建议阅读如何在双引导区用另一个替换一个 Linux 发行版[保留主分区] - - -您可以使用以下系统命令动态更改变: +你可以使用以下系统命令动态更改 `swappiness`: ``` sudo sysctl vm.swappiness=25 ``` -这种改变只是暂时的。如果要使其永久化,可以编辑 /etc/sysctl.conf 文件,并在文件末尾添加swappiness 值: - +这种改变只是暂时的。如果要使其永久化,可以编辑 `/etc/sysctl.conf` 文件,并在文件末尾添加`swappiness` 值: ``` vm.swappiness=25 @@ -184,58 +174,59 @@ vm.swappiness=25 ### 在 Linux 上调整交换空间的大小 -在 Linux 上有几种方法可以调整交换空间的大小。但是在您看到这一点之前,您应该了解一些关于它的事情。 +在 Linux 上有几种方法可以调整交换空间的大小。但是在你看到这一点之前,你应该了解一些关于它的事情。 -当您要求系统停止将交换文件用于交换区域时,它会将所有数据(确切地说是页面)传输回内存。所以你应该有足够的空闲内存,然后再停止交换。 +当你要求系统停止将交换文件用于交换空间时,它会将所有数据(确切地说是内存页)传输回内存。所以你应该有足够的空闲内存,然后再停止交换。 -这就是为什么创建和启用另一个临时交换文件是一个好的做法原因。这样,当您交换原始交换区域时,您的系统将使用临时交换文件。现在您可以调整原始交换空间的大小。您可以手动删除临时交换文件或保持原样,下次启动时会自动删除。 +这就是为什么创建和启用另一个临时交换文件是一个好的做法的原因。这样,当你关闭原来的交换空间时,你的系统将使用临时交换文件。现在你可以调整原来的交换空间的大小。你可以手动删除临时交换文件或留在那里,下次启动时会自动删除(LCTT 译注:存疑?)。 -如果您有足够的可用内存或者创建了临时交换空间,那就使您的原始交换文件下线。 +如果你有足够的可用内存或者创建了临时交换空间,那就关闭你原来的交换文件。 ``` sudo swapoff /swapfile ``` -现在您可以使用 fallocate 命令来更改文件的大小。比方说,您将其大小更改为 2GB: +现在你可以使用 `fallocate` 命令来更改文件的大小。比方说,你将其大小更改为 2GB: ``` sudo fallocate -l 2G /swapfile ``` -现在再次将文件标记为交换空间: +现在再次将文件标记为交换空间: ``` sudo mkswap /swapfile ``` -并再次使交换文件上线: +并再次启用交换文件: ``` sudo swapon /swapfile ``` -您也可以选择同时拥有多个交换文件。 + +你也可以选择同时拥有多个交换文件。 ### 删除 Linux 中的交换文件 -您可能有不在 Linux 上使用交换文件的原因。如果您想删除它,该过程类似于您刚才看到的调整交换大小的过程。 +你可能有不在 Linux 上使用交换文件的原因。如果你想删除它,该过程类似于你刚才看到的调整交换大小的过程。 -首先,确保你有足够的空闲内存。现在使交换文件离线: +首先,确保你有足够的空闲内存。现在关闭交换文件: ``` sudo swapoff /swapfile ``` -下一步是从 /etc/fstab 文件中删除相应的条目。 +下一步是从 `/etc/fstab` 文件中删除相应的条目。 -最后,您可以删除文件来释放空间: +最后,你可以删除该文件来释放空间: ``` sudo rm /swapfile ``` -**你交换吗?** +### 你用了交换空间了吗? 
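+
+在回答之前,你可以把上文用到的几个检查命令放在一起,快速核对一下当前系统的交换配置。下面的输出只是一个示意(假设你按前文创建并启用了 1GB 的 /swapfile,具体数值因系统而异):
+
+```
+$ swapon --show
+NAME      TYPE SIZE  USED PRIO
+/swapfile file 1024M   0B   -2
+$ cat /proc/sys/vm/swappiness
+60
+```
+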
-我想您现在已经很好地理解了 Linux 中的交换文件概念。现在,您可以根据需要轻松创建交换文件或调整它们的大小。 +我想你现在已经很好地理解了 Linux 中的交换文件概念。现在,你可以根据需要轻松创建交换文件或调整它们的大小。 如果你对这个话题有什么要补充的或者有任何疑问,请在下面留下评论。 @@ -245,8 +236,8 @@ via: https://itsfoss.com/create-swap-file-linux/ 作者:[Abhishek Prakash][a] 选题:[lujun9972][b] -译者:[译者ID](https://github.com/heguangzhi) -校对:[校对者ID](https://github.com/校对者ID) +译者:[heguangzhi](https://github.com/heguangzhi) +校对:[wxy](https://github.com/wxy) 本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 From 3dcb86e680068a322360ce8550540cff52de3b84 Mon Sep 17 00:00:00 2001 From: Xingyu Wang Date: Sat, 14 Sep 2019 19:08:06 +0800 Subject: [PATCH 054/202] PUB @heguangzhi https://linux.cn/article-11341-1.html --- .../20190830 How to Create and Use Swap File on Linux.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) rename {translated/tech => published}/20190830 How to Create and Use Swap File on Linux.md (99%) diff --git a/translated/tech/20190830 How to Create and Use Swap File on Linux.md b/published/20190830 How to Create and Use Swap File on Linux.md similarity index 99% rename from translated/tech/20190830 How to Create and Use Swap File on Linux.md rename to published/20190830 How to Create and Use Swap File on Linux.md index 60b621040a..2213774e6e 100644 --- a/translated/tech/20190830 How to Create and Use Swap File on Linux.md +++ b/published/20190830 How to Create and Use Swap File on Linux.md @@ -1,8 +1,8 @@ [#]: collector: (lujun9972) [#]: translator: (heguangzhi) [#]: reviewer: (wxy) -[#]: publisher: ( ) -[#]: url: ( ) +[#]: publisher: (wxy) +[#]: url: (https://linux.cn/article-11341-1.html) [#]: subject: (How to Create and Use Swap File on Linux) [#]: via: (https://itsfoss.com/create-swap-file-linux/) [#]: author: (Abhishek Prakash https://itsfoss.com/author/abhishek/) From 3ea777e7cfa71569b76d60fcb366f0223f48a008 Mon Sep 17 00:00:00 2001 From: "Xingyu.Wang" Date: Sat, 14 Sep 2019 19:09:37 +0800 Subject: [PATCH 055/202] Rename sources/tech/20190913 Why the founder of Apache is all-in on blockchain.md to sources/talk/20190913 Why the founder of Apache is all-in on blockchain.md --- .../20190913 Why the founder of Apache is all-in on blockchain.md | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename sources/{tech => talk}/20190913 Why the founder of Apache is all-in on blockchain.md (100%) diff --git a/sources/tech/20190913 Why the founder of Apache is all-in on blockchain.md b/sources/talk/20190913 Why the founder of Apache is all-in on blockchain.md similarity index 100% rename from sources/tech/20190913 Why the founder of Apache is all-in on blockchain.md rename to sources/talk/20190913 Why the founder of Apache is all-in on blockchain.md From f67d77a0b5c644c5a04dcc4dd981ee9253b1ba5c Mon Sep 17 00:00:00 2001 From: Xingyu Wang Date: Sat, 14 Sep 2019 19:17:31 +0800 Subject: [PATCH 056/202] PRF --- published/20190830 How to Create and Use Swap File on Linux.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/published/20190830 How to Create and Use Swap File on Linux.md b/published/20190830 How to Create and Use Swap File on Linux.md index 2213774e6e..d8db4b5623 100644 --- a/published/20190830 How to Create and Use Swap File on Linux.md +++ b/published/20190830 How to Create and Use Swap File on Linux.md @@ -150,7 +150,7 @@ echo '/swapfile none swap sw 0 0' | sudo tee -a /etc/fstab ### 调整 swappiness 参数 -`swappiness` 参数决定了交换空间的使用频率。`s`wappiness` 值的范围从 0 到 100。较高的值意味着交换空间将被更频繁地使用。 +`swappiness` 参数决定了交换空间的使用频率。`swappiness` 值的范围从 0 到 
100。较高的值意味着交换空间将被更频繁地使用。 Ubuntu 桌面的默认的 `swappiness` 是 60,而服务器的默认 `swappiness` 是 1。你可以使用以下命令检查 `swappiness`: From 49d9b12543ccfbd353bcb0e0fafc913476564f38 Mon Sep 17 00:00:00 2001 From: "Xingyu.Wang" Date: Sat, 14 Sep 2019 19:31:40 +0800 Subject: [PATCH 057/202] Rename sources/tech/20190912 3 ways to handle transient faults for DevOps.md to sources/talk/20190912 3 ways to handle transient faults for DevOps.md --- .../20190912 3 ways to handle transient faults for DevOps.md | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename sources/{tech => talk}/20190912 3 ways to handle transient faults for DevOps.md (100%) diff --git a/sources/tech/20190912 3 ways to handle transient faults for DevOps.md b/sources/talk/20190912 3 ways to handle transient faults for DevOps.md similarity index 100% rename from sources/tech/20190912 3 ways to handle transient faults for DevOps.md rename to sources/talk/20190912 3 ways to handle transient faults for DevOps.md From 3164480dd589483bdea1e8ebc96241bbc5b485b9 Mon Sep 17 00:00:00 2001 From: "Xingyu.Wang" Date: Sat, 14 Sep 2019 19:36:15 +0800 Subject: [PATCH 058/202] Rename sources/tech/20190911 How Linux came to the mainframe.md to sources/talk/20190911 How Linux came to the mainframe.md --- .../{tech => talk}/20190911 How Linux came to the mainframe.md | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename sources/{tech => talk}/20190911 How Linux came to the mainframe.md (100%) diff --git a/sources/tech/20190911 How Linux came to the mainframe.md b/sources/talk/20190911 How Linux came to the mainframe.md similarity index 100% rename from sources/tech/20190911 How Linux came to the mainframe.md rename to sources/talk/20190911 How Linux came to the mainframe.md From 768845fd7cb0b9692e0269b7d321e0beca36d704 Mon Sep 17 00:00:00 2001 From: Xingyu Wang Date: Sat, 14 Sep 2019 22:47:12 +0800 Subject: [PATCH 059/202] APL --- sources/tech/20190403 Use Git as the backend for chat.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sources/tech/20190403 Use Git as the backend for chat.md b/sources/tech/20190403 Use Git as the backend for chat.md index e564bbc6e7..b270eb3bd8 100644 --- a/sources/tech/20190403 Use Git as the backend for chat.md +++ b/sources/tech/20190403 Use Git as the backend for chat.md @@ -1,5 +1,5 @@ [#]: collector: (lujun9972) -[#]: translator: ( ) +[#]: translator: (wxy) [#]: reviewer: ( ) [#]: publisher: ( ) [#]: url: ( ) From 2fa138efacd7062fc7932c1541186cdb2e1c2a7f Mon Sep 17 00:00:00 2001 From: MjSeven Date: Sat, 14 Sep 2019 22:52:49 +0800 Subject: [PATCH 060/202] Translating by MjSeven --- ...o fix common pitfalls with the Python ORM tool SQLAlchemy.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sources/tech/20190912 How to fix common pitfalls with the Python ORM tool SQLAlchemy.md b/sources/tech/20190912 How to fix common pitfalls with the Python ORM tool SQLAlchemy.md index b9bb3e51ec..c373e85502 100644 --- a/sources/tech/20190912 How to fix common pitfalls with the Python ORM tool SQLAlchemy.md +++ b/sources/tech/20190912 How to fix common pitfalls with the Python ORM tool SQLAlchemy.md @@ -1,5 +1,5 @@ [#]: collector: (lujun9972) -[#]: translator: ( ) +[#]: translator: (MjSeven ) [#]: reviewer: ( ) [#]: publisher: ( ) [#]: url: ( ) From 19c3ca9a1da6f1722a32414df1d994723d12acc1 Mon Sep 17 00:00:00 2001 From: Xingyu Wang Date: Sat, 14 Sep 2019 23:56:44 +0800 Subject: [PATCH 061/202] TSL --- ...0190403 Use Git as the backend for chat.md | 141 ----------------- ...0190403 Use Git as the backend for 
chat.md | 145 ++++++++++++++++++ 2 files changed, 145 insertions(+), 141 deletions(-) delete mode 100644 sources/tech/20190403 Use Git as the backend for chat.md create mode 100644 translated/tech/20190403 Use Git as the backend for chat.md diff --git a/sources/tech/20190403 Use Git as the backend for chat.md b/sources/tech/20190403 Use Git as the backend for chat.md deleted file mode 100644 index b270eb3bd8..0000000000 --- a/sources/tech/20190403 Use Git as the backend for chat.md +++ /dev/null @@ -1,141 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: (wxy) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Use Git as the backend for chat) -[#]: via: (https://opensource.com/article/19/4/git-based-chat) -[#]: author: (Seth Kenlon https://opensource.com/users/seth) - -Use Git as the backend for chat -====== -GIC is a prototype chat application that showcases a novel way to use Git. -![Team communication, chat][1] - -[Git][2] is one of those rare applications that has managed to encapsulate so much of modern computing into one program that it ends up serving as the computational engine for many other applications. While it's best-known for tracking source code changes in software development, it has many other uses that can make your life easier and more organized. In this series leading up to Git's 14th anniversary on April 7, we'll share seven little-known ways to use Git. Today, we'll look at GIC, a Git-based chat application - -### Meet GIC - -While the authors of Git probably expected frontends to be created for Git, they undoubtedly never expected Git would become the backend for, say, a chat client. Yet, that's exactly what developer Ephi Gabay did with his experimental proof-of-concept [GIC][3]: a chat client written in [Node.js][4] using Git as its backend database. - -GIC is by no means intended for production use. It's purely a programming exercise, but it's one that demonstrates the flexibility of open source technology. What's astonishing is that the client consists of just 300 lines of code, excluding the Node libraries and Git itself. And that's one of the best things about the chat client and about open source; the ability to build upon existing work. Seeing is believing, so you should give GIC a look for yourself. - -### Get set up - -GIC uses Git as its engine, so you need an empty Git repository to serve as its chatroom and logger. The repository can be hosted anywhere, as long as you and anyone who needs access to the chat service has access to it. For instance, you can set up a Git repository on a free Git hosting service like GitLab and grant chat users contributor access to the Git repository. (They must be able to make commits to the repository, because each chat message is a literal commit.) - -If you're hosting it yourself, create a centrally located bare repository. Each user in the chat must have an account on the server where the bare repository is located. You can create accounts specific to Git with Git hosting software like [Gitolite][5] or [Gitea][6], or you can give them individual user accounts on your server, possibly using **git-shell** to restrict their access to Git. - -Performance is best on a self-hosted instance. Whether you host your own or you use a hosting service, the Git repository you create must have an active branch, or GIC won't be able to make commits as users chat because there is no Git HEAD. The easiest way to ensure that a branch is initialized and active is to commit a README or license file upon creation. 
If you don't do that, you can create and commit one after the fact: - -``` -$ echo "chat logs" > README -$ git add README -$ git commit -m 'just creating a HEAD ref' -$ git push -u origin HEAD -``` - -### Install GIC - -Since GIC is based on Git and written in Node.js, you must first install Git, Node.js, and the Node package manager, npm (which should be bundled with Node). The command to install these differs depending on your Linux or BSD distribution, but here's an example command on Fedora: - -``` -$ sudo dnf install git nodejs -``` - -If you're not running Linux or BSD, follow the installation instructions on [git-scm.com][7] and [nodejs.org][8]. - -There's no install process, as such, for GIC. Each user (Alice and Bob, in this example) must clone the repository to their hard drive: - -``` -$ git cone https://github.com/ephigabay/GIC GIC -``` - -Change directory into the GIC directory and install the Node.js dependencies with **npm** : - -``` -$ cd GIC -$ npm install -``` - -Wait for the Node modules to download and install. - -### Configure GIC - -The only configuration GIC requires is the location of your Git chat repository. Edit the **config.js** file: - -``` -module.exports = { -gitRepo: '[seth@example.com][9]:/home/gitchat/chatdemo.git', -messageCheckInterval: 500, -branchesCheckInterval: 5000 -}; -``` - - -Test your connection to the Git repository before trying GIC, just to make sure your configuration is sane: - -``` -$ git clone --quiet seth@example.com:/home/gitchat/chatdemo.git > /dev/null -``` - -Assuming you receive no errors, you're ready to start chatting. - -### Chat with Git - -From within the GIC directory, start the chat client: - -``` -$ npm start -``` - -When the client first launches, it must clone the chat repository. Since it's nearly an empty repository, it won't take long. Type your message and press Enter to send a message. - -![GIC][10] - -A Git-based chat client. What will they think of next? - -As the greeting message says, a branch in Git serves as a chatroom or channel in GIC. There's no way to create a new branch from within the GIC UI, but if you create one in another terminal session or in a web UI, it shows up immediately in GIC. It wouldn't take much to patch some IRC-style commands into GIC. - -After chatting for a while, take a look at your Git repository. Since the chat happens in Git, the repository itself is also a chat log: - -``` -$ git log --pretty=format:"%p %cn %s" -4387984 Seth Kenlon Hey Chani, did you submit a talk for All Things Open this year? -36369bb Chani No I didn't get a chance. Did you? -[...] -``` - -### Exit GIC - -Not since Vim has there been an application as difficult to stop as GIC. You see, there is no way to stop GIC. It will continue to run until it is killed. When you're ready to stop GIC, open another terminal tab or window and issue this command: - -``` -$ kill `pgrep npm` -``` - -GIC is a novelty. It's a great example of how an open source ecosystem encourages and enables creativity and exploration and challenges us to look at applications from different angles. Try GIC out. Maybe it will give you ideas. At the very least, it's a great excuse to spend an afternoon with Git. 
- --------------------------------------------------------------------------------
-
-via: https://opensource.com/article/19/4/git-based-chat
-
-作者:[Seth Kenlon (Red Hat, Community Moderator)][a]
-选题:[lujun9972][b]
-译者:[译者ID](https://github.com/译者ID)
-校对:[校对者ID](https://github.com/校对者ID)
-
-本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
-
-[a]: https://opensource.com/users/seth
-[b]: https://github.com/lujun9972
-[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/talk_chat_team_mobile_desktop.png?itok=d7sRtKfQ (Team communication, chat)
-[2]: https://git-scm.com/
-[3]: https://github.com/ephigabay/GIC
-[4]: https://nodejs.org/en/
-[5]: http://gitolite.com
-[6]: http://gitea.io
-[7]: http://git-scm.com
-[8]: http://nodejs.org
-[9]: mailto:seth@example.com
-[10]: https://opensource.com/sites/default/files/uploads/gic.jpg (GIC)
diff --git a/translated/tech/20190403 Use Git as the backend for chat.md b/translated/tech/20190403 Use Git as the backend for chat.md
new file mode 100644
index 0000000000..4a045e762a
--- /dev/null
+++ b/translated/tech/20190403 Use Git as the backend for chat.md
@@ -0,0 +1,145 @@
+[#]: collector: (lujun9972)
+[#]: translator: (wxy)
+[#]: reviewer: ( )
+[#]: publisher: ( )
+[#]: url: ( )
+[#]: subject: (Use Git as the backend for chat)
+[#]: via: (https://opensource.com/article/19/4/git-based-chat)
+[#]: author: (Seth Kenlon https://opensource.com/users/seth)
+
+用 Git 作为聊天应用的后端
+======
+
+> GIC 是一个原型聊天应用程序,展示了一种使用 Git 的新方法。
+
+![Team communication, chat][1]
+
+[Git][2] 是一个少有的能将如此多的现代计算封装到一个程序之中的应用程序,它可以用作许多其他应用程序的计算引擎。虽然它以跟踪软件开发中的源代码更改而闻名,但它还有许多其他用途,可以让你的生活更轻松、更有条理。在这个 Git 系列中,我们将分享七种鲜为人知的使用 Git 的方法。
+
+今天我们来看看 GIC ,它是一个基于 Git 的聊天应用。
+
+### 初识 GIC
+
+虽然 Git 的作者可能期望会为 Git 创建前端,但毫无疑问他们从未预料到 Git 会成为某种后端,如聊天客户端的后端。然而,这正是开发人员 Ephi Gabay 用他的实验性的用于概念验证的 [GIC][3] 所做的事情:用 [Node.js][4] 编写的聊天客户端使用 Git 作为其后端数据库。
+
+GIC 并没有打算用于生产用途。这纯粹是一种编程练习,但它证明了开源技术的灵活性。令人惊讶的是,除了 Node 库和 Git 本身,该客户端只包含 300 行代码。这个聊天客户端和开源所反映出来的最好的地方之一是:建立在现有工作基础上的能力。眼见为实,你应该给自己亲自来了解一下 GIC。
+
+### 架设起来
+
+GIC 使用 Git 作为引擎,因此您需要一个空的 Git 存储库为其聊天室和记录器提供服务。存储库可以托管在任何地方,只要你和需要访问聊天服务的任何人可以访问该存储库。例如,你可以在 GitLab 等免费 Git 托管服务上设置 Git 存储库,并授予聊天用户对该 Git 存储库的贡献者访问权限。(他们必须能够提交到存储库,因为每个聊天消息都是一个文字提交。)
+
+如果你自己托管,请创建一个中心化的裸存储库。聊天中的每个用户必须在裸存储库所在的服务器上拥有一个帐户。你可以使用 Git 托管软件创建特定于 Git 的帐户,如 [Gitolite][5] 或 [Gitea][6],或者你可以在服务器上为他们提供个人用户帐户,可能使用 `git-shell` 来限制他们对 Git 的访问。
+
+自托管实例的性能最好。无论你是自己托管还是使用托管服务,你创建的 Git 存储库都必须具有活跃分支,否则 GIC 将无法在用户聊天时进行提交,因为没有 Git HEAD。确保分支初始化和活跃的最简单方法是在创建时提交 `README` 或许可证文件。如果你没有这样做,你可以在事后创建并提交一个:
+
+```
+$ echo "chat logs" > README
+$ git add README
+$ git commit -m 'just creating a HEAD ref'
+$ git push -u origin HEAD
+```
+
+### 安装 GIC
+
+由于 GIC 基于 Git 并使用 Node.js 编写,因此必须首先安装 Git、Node.js 和 Node 包管理器npm(应该与 Node 捆绑在一起安装)。安装它们的命令因 Linux 或 BSD 发行版而异,这是 Fedora 上的一个示例命令:
+
+```
+$ sudo dnf install git nodejs
+```
+
+如果你没有运行 Linux 或 BSD,请按照 [git-scm.com][7] 和 [nodejs.org][8] 上的安装说明进行操作。
+
+因此,GIC 没有安装过程。每个用户(在此示例中为 Alice 和 Bob)必须将存储库克隆到其硬盘驱动器:
+
+```
+$ git clone https://github.com/ephigabay/GIC GIC
+```
+
+将目录更改为 GIC 目录并使用 `npm` 安装 Node.js 依赖项:
+
+```
+$ cd GIC
+$ npm install
+```
+
+等待 Node 模块下载并安装。
+
+### 配置 GIC
+
+GIC 唯一需要的配置是 Git 聊天存储库的位置。编辑 `config.js` 文件:
+
+```
+module.exports = {
+    gitRepo: 'seth@example.com:/home/gitchat/chatdemo.git',
+    messageCheckInterval: 500,
+    branchesCheckInterval: 5000
+};
+```
+
+在尝试 GIC 之前测试你与 Git 存储库的连接,以确保你的配置是正确的:
+
+
+```
+$ git clone --quiet seth@example.com:/home/gitchat/chatdemo.git > 
/dev/null +``` + +假设你没有收到任何错误,就可以开始聊天了。 + +### 用 Git 聊天 + +在 GIC 目录中启动聊天客户端: + +``` +$ npm start +``` + +客户端首次启动时,必须克隆聊天存储库。由于它几乎是一个空的存储库,因此不会花费很长时间。输入你的消息,然后按回车键发送消息。 + +![GIC][10] + +*基于Git的聊天客户端。 他们接下来会怎么想?* + +正如问候消息所说,Git 中的分支在 GIC 中就是聊天室或频道。无法在 GIC UI 中创建新分支,但如果你在另一个终端会话或 Web UI 中创建一个分支,它将立即显示在 GIC 中。将一些 IRC 式的命令加到 GIC 中并不需要太多工作。 + +聊了一会儿之后,可以看看你的 Git 存储库。由于聊天发生在 Git 中,因此存储库本身也是聊天日志: + +``` +$ git log --pretty=format:"%p %cn %s" +4387984 Seth Kenlon Hey Chani, did you submit a talk for All Things Open this year? +36369bb Chani No I didn't get a chance. Did you? +[...] +``` + +### 退出 GIC + +GIC 并不像 Vim 那么难以退出。你看,没有办法停止 GIC。它会一直运行,直到它被杀死。当你准备停止 GIC 时,打开另一个终端选项卡或窗口并发出以下命令: + +``` +$ kill `pgrep npm` +``` + +GIC 是一个新奇的事物。这是一个很好的例子,说明开源生态系统如何鼓励和促进创造力和探索,并挑战我们从不同角度审视应用程序。尝试下 GIC,也许它会给你一些思路。至少,它可以让你与 Git 度过一个下午。 + +-------------------------------------------------------------------------------- + +via: https://opensource.com/article/19/4/git-based-chat + +作者:[Seth Kenlon (Red Hat, Community Moderator)][a] +选题:[lujun9972][b] +译者:[wxy](https://github.com/wxy) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensource.com/users/seth +[b]: https://github.com/lujun9972 +[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/talk_chat_team_mobile_desktop.png?itok=d7sRtKfQ (Team communication, chat) +[2]: https://git-scm.com/ +[3]: https://github.com/ephigabay/GIC +[4]: https://nodejs.org/en/ +[5]: http://gitolite.com +[6]: http://gitea.io +[7]: http://git-scm.com +[8]: http://nodejs.org +[9]: mailto:seth@example.com +[10]: https://opensource.com/sites/default/files/uploads/gic.jpg (GIC) From 7622b0fbbf52308d5e1f4f6b93fe424d2fee19cd Mon Sep 17 00:00:00 2001 From: DarkSun Date: Sun, 15 Sep 2019 00:54:33 +0800 Subject: [PATCH 062/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190913=20Dell?= =?UTF-8?q?=20EMC=20updates=20PowerMax=20storage=20systems?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/talk/20190913 Dell EMC updates PowerMax storage systems.md --- ...ll EMC updates PowerMax storage systems.md | 57 +++++++++++++++++++ 1 file changed, 57 insertions(+) create mode 100644 sources/talk/20190913 Dell EMC updates PowerMax storage systems.md diff --git a/sources/talk/20190913 Dell EMC updates PowerMax storage systems.md b/sources/talk/20190913 Dell EMC updates PowerMax storage systems.md new file mode 100644 index 0000000000..b2a2559a30 --- /dev/null +++ b/sources/talk/20190913 Dell EMC updates PowerMax storage systems.md @@ -0,0 +1,57 @@ +[#]: collector: (lujun9972) +[#]: translator: ( ) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (Dell EMC updates PowerMax storage systems) +[#]: via: (https://www.networkworld.com/article/3438325/dell-emc-updates-powermax-storage-systems.html) +[#]: author: (Andy Patrizio https://www.networkworld.com/author/Andy-Patrizio/) + +Dell EMC updates PowerMax storage systems +====== +Dell EMC's new PowerMax enterprise storage systems add support for Intel Optane drives and NVMe over Fabric. +Getty Images/Dell EMC + +Dell EMC has updated its PowerMax line of enterprise storage systems to offer Intel’s Optane persistent storage and NVMe-over-Fabric, both of which will give the PowerMax a big boost in performance. 
+
+Last year, Dell launched the PowerMax line with high-performance storage, specifically targeting industries that need very low latency and high resiliency, such as banking, healthcare, and cloud service providers.
+
+The company claims the new PowerMax is the first-to-market with dual port Intel Optane SSDs and the use of storage-class memory (SCM) as persistent storage. The Optane is a new type of non-volatile storage that sits between SSDs and memory. It has the persistence of an SSD but almost the speed of DRAM. Optane storage also has a ridiculous price tag. For example, a 512 GB stick costs nearly $8,000.
+
+**[ Read also: [Mass data fragmentation requires a storage rethink][1] | Get regularly scheduled insights: [Sign up for Network World newsletters][2] ]**
+
+The other big change is support for NVMe-oF, which allows SSDs to talk directly to each other via Fibre Channel rather than making multiple hops through the network. PowerMax already supports NVMe SSDs, but this update adds end-to-end NVMe support.
+
+The coupling of NVMe and Intel Optane on dual port gives the new PowerMax systems up to 15 million IOPS, a 50% improvement over the previous generation released just one year ago, with up to 50% better response times and twice the bandwidth. Response time is under 100 microseconds.
+
+In addition, the new Dell EMC PowerMax systems are validated for Dell Technologies Cloud, an architecture designed to bridge multi-cloud deployments. Dell offers connections between private clouds and Amazon Web Services (AWS), Microsoft Azure, and Google Cloud.
+
+PowerMax comes with a built-in machine learning engine for predictive analytics and pattern recognition to automatically place data on the correct media type, SCM or Flash, based on its I/O profile. PowerMax analyzes and forecasts 40 million data sets in real time, driving 6 billion decisions per day.
+
+It also has several important software integrations. The first is VMware's vRealize Orchestrator (vRO) plug-in, which allows customers to develop end-to-end automation routines, including provisioning, data protection, and host operations.
+
+Second, it has pre-built Red Hat Ansible modules to allow customers to create Playbooks for storage provisioning, snapshots, and data management workflows for consistent and automated operations. These modules are available on GitHub now.
+
+Finally, there is a container storage interface (CSI) plugin that provisions and manages storage for workloads running on Kubernetes. The CSI plugin, available now on GitHub, extends PowerMax's performance and data services to a growing number of applications built on a micro-services-based architecture.
+
+The new PowerMax systems and PowerBricks will be available Monday, Sept. 16.
+
+Join the Network World communities on [Facebook][3] and [LinkedIn][4] to comment on topics that are top of mind.
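+
+As a concrete illustration of the CSI integration mentioned above, the sketch below shows the general shape of a Kubernetes storage class backed by an external CSI driver. Treat it as a hedged example only: the class name and provisioner string are assumptions for illustration, and the real values ship with the plugin in Dell EMC's GitHub repository.
+
+```
+$ kubectl apply -f - <<EOF
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: powermax-example                # hypothetical class name
+provisioner: csi-powermax.dellemc.com   # assumed driver name; verify it against the GitHub repo
+EOF
+```
+
+Workloads would then request PowerMax-backed volumes by referencing that class from a PersistentVolumeClaim.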
+ +-------------------------------------------------------------------------------- + +via: https://www.networkworld.com/article/3438325/dell-emc-updates-powermax-storage-systems.html + +作者:[Andy Patrizio][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://www.networkworld.com/author/Andy-Patrizio/ +[b]: https://github.com/lujun9972 +[1]: https://www.networkworld.com/article/3323580/mass-data-fragmentation-requires-a-storage-rethink.html +[2]: https://www.networkworld.com/newsletters/signup.html +[3]: https://www.facebook.com/NetworkWorld/ +[4]: https://www.linkedin.com/company/network-world From 42c356fd7383be9fbbf825628018f242dba041f6 Mon Sep 17 00:00:00 2001 From: "Xingyu.Wang" Date: Sun, 15 Sep 2019 09:45:46 +0800 Subject: [PATCH 063/202] Rename sources/talk/20190913 Dell EMC updates PowerMax storage systems.md to sources/news/20190913 Dell EMC updates PowerMax storage systems.md --- .../20190913 Dell EMC updates PowerMax storage systems.md | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename sources/{talk => news}/20190913 Dell EMC updates PowerMax storage systems.md (100%) diff --git a/sources/talk/20190913 Dell EMC updates PowerMax storage systems.md b/sources/news/20190913 Dell EMC updates PowerMax storage systems.md similarity index 100% rename from sources/talk/20190913 Dell EMC updates PowerMax storage systems.md rename to sources/news/20190913 Dell EMC updates PowerMax storage systems.md From 447f237b0317cb160adb7ac9dd0f7423ccbaa563 Mon Sep 17 00:00:00 2001 From: Xingyu Wang Date: Sun, 15 Sep 2019 10:09:43 +0800 Subject: [PATCH 064/202] PRF @wxy --- ...0190403 Use Git as the backend for chat.md | 31 +++++++++---------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git a/translated/tech/20190403 Use Git as the backend for chat.md b/translated/tech/20190403 Use Git as the backend for chat.md index 4a045e762a..b12cb2757d 100644 --- a/translated/tech/20190403 Use Git as the backend for chat.md +++ b/translated/tech/20190403 Use Git as the backend for chat.md @@ -1,6 +1,6 @@ [#]: collector: (lujun9972) [#]: translator: (wxy) -[#]: reviewer: ( ) +[#]: reviewer: (wxy) [#]: publisher: ( ) [#]: url: ( ) [#]: subject: (Use Git as the backend for chat) @@ -10,27 +10,27 @@ 用 Git 作为聊天应用的后端 ====== -> GIC 是一个原型聊天应用程序,展示了一种使用 Git 的新方法。 +> GIC 是一个聊天应用程序的原型,展示了一种使用 Git 的新方法。 -![Team communication, chat][1] +![](https://img.linux.net.cn/data/attachment/album/201909/15/100905euzi3l5xgslsgx7i.png) [Git][2] 是一个少有的能将如此多的现代计算封装到一个程序之中的应用程序,它可以用作许多其他应用程序的计算引擎。虽然它以跟踪软件开发中的源代码更改而闻名,但它还有许多其他用途,可以让你的生活更轻松、更有条理。在这个 Git 系列中,我们将分享七种鲜为人知的使用 Git 的方法。 -今天我们来看看 GIC ,它是一个基于 Git 的聊天应用。 +今天我们来看看 GIC,它是一个基于 Git 的聊天应用。 ### 初识 GIC -虽然 Git 的作者可能期望会为 Git 创建前端,但毫无疑问他们从未预料到 Git 会成为某种后端,如聊天客户端的后端。然而,这正是开发人员 Ephi Gabay 用他的实验性的用于概念验证的 [GIC][3] 所做的事情:用 [Node.js][4] 编写的聊天客户端使用 Git 作为其后端数据库。 +虽然 Git 的作者们可能期望会为 Git 创建前端,但毫无疑问他们从未预料到 Git 会成为某种后端,如聊天客户端的后端。然而,这正是开发人员 Ephi Gabay 用他的实验性的概念验证应用 [GIC][3] 所做的事情:用 [Node.js][4] 编写的聊天客户端,使用 Git 作为其后端数据库。 -GIC 并没有打算用于生产用途。这纯粹是一种编程练习,但它证明了开源技术的灵活性。令人惊讶的是,除了 Node 库和 Git 本身,该客户端只包含 300 行代码。这个聊天客户端和开源所反映出来的最好的地方之一是:建立在现有工作基础上的能力。眼见为实,你应该给自己亲自来了解一下 GIC。 +GIC 并没有打算用于生产用途。这纯粹是一种编程练习,但它证明了开源技术的灵活性。令人惊讶的是,除了 Node 库和 Git 本身,该客户端只包含 300 行代码。这是这个聊天客户端和开源所反映出来的最好的地方之一:建立在现有工作基础上的能力。眼见为实,你应该自己亲自来了解一下 GIC。 ### 架设起来 -GIC 使用 Git 作为引擎,因此您需要一个空的 Git 存储库为其聊天室和记录器提供服务。存储库可以托管在任何地方,只要你和需要访问聊天服务的任何人可以访问该存储库。例如,你可以在 GitLab 等免费 Git 托管服务上设置 Git 
存储库,并授予聊天用户对该 Git 存储库的贡献者访问权限。(他们必须能够提交到存储库,因为每个聊天消息都是一个文字提交。) +GIC 使用 Git 作为引擎,因此你需要一个空的 Git 存储库为聊天室和记录器提供服务。存储库可以托管在任何地方,只要你和需要访问聊天服务的人可以访问该存储库就行。例如,你可以在 GitLab 等免费 Git 托管服务上设置 Git 存储库,并授予聊天用户对该 Git 存储库的贡献者访问权限。(他们必须能够提交到存储库,因为每个聊天消息都是一个文本的提交。) -如果你自己托管,请创建一个中心化的裸存储库。聊天中的每个用户必须在裸存储库所在的服务器上拥有一个帐户。你可以使用 Git 托管软件创建特定于 Git 的帐户,如 [Gitolite][5] 或 [Gitea][6],或者你可以在服务器上为他们提供个人用户帐户,可能使用 `git-shell` 来限制他们对 Git 的访问。 +如果你自己托管,请创建一个中心化的裸存储库。聊天中的每个用户必须在裸存储库所在的服务器上拥有一个帐户。你可以使用如 [Gitolite][5] 或 [Gitea][6] 这样的 Git 托管软件创建特定于 Git 的帐户,或者你可以在服务器上为他们提供个人用户帐户,可以使用 `git-shell` 来限制他们只能访问 Git。 -自托管实例的性能最好。无论你是自己托管还是使用托管服务,你创建的 Git 存储库都必须具有活跃分支,否则 GIC 将无法在用户聊天时进行提交,因为没有 Git HEAD。确保分支初始化和活跃的最简单方法是在创建时提交 `README` 或许可证文件。如果你没有这样做,你可以在事后创建并提交一个: +自托管实例的性能最好。无论你是自己托管还是使用托管服务,你创建的 Git 存储库都必须具有一个活跃分支,否则 GIC 将无法在用户聊天时进行提交,因为没有 Git HEAD。确保分支初始化和活跃的最简单方法是在创建存储库时提交 `README` 或许可证文件。如果你没有这样做,你可以在事后创建并提交一个: ``` $ echo "chat logs" > README @@ -41,7 +41,7 @@ $ git push -u origin HEAD ### 安装 GIC -由于 GIC 基于 Git 并使用 Node.js 编写,因此必须首先安装 Git、Node.js 和 Node 包管理器npm(应该与 Node 捆绑在一起安装)。安装它们的命令因 Linux 或 BSD 发行版而异,这是 Fedora 上的一个示例命令: +由于 GIC 基于 Git 并使用 Node.js 编写,因此必须首先安装 Git、Node.js 和 Node 包管理器npm(它应该与 Node 捆绑在一起)。安装它们的命令因 Linux 或 BSD 发行版而异,这是 Fedora 上的一个示例命令: ``` $ sudo dnf install git nodejs @@ -78,7 +78,6 @@ module.exports = { 在尝试 GIC 之前测试你与 Git 存储库的连接,以确保你的配置是正确的: - ``` $ git clone --quiet seth@example.com:/home/gitchat/chatdemo.git > /dev/null ``` @@ -97,9 +96,9 @@ $ npm start ![GIC][10] -*基于Git的聊天客户端。 他们接下来会怎么想?* +*基于 Git 的聊天客户端。 他们接下来会怎么想?* -正如问候消息所说,Git 中的分支在 GIC 中就是聊天室或频道。无法在 GIC UI 中创建新分支,但如果你在另一个终端会话或 Web UI 中创建一个分支,它将立即显示在 GIC 中。将一些 IRC 式的命令加到 GIC 中并不需要太多工作。 +正如问候消息所说,Git 中的分支在 GIC 中就是聊天室或频道。无法在 GIC 的 UI 中创建新分支,但如果你在另一个终端会话或 Web UI 中创建一个分支,它将立即显示在 GIC 中。将一些 IRC 式的命令加到 GIC 中并不需要太多工作。 聊了一会儿之后,可以看看你的 Git 存储库。由于聊天发生在 Git 中,因此存储库本身也是聊天日志: @@ -112,7 +111,7 @@ $ git log --pretty=format:"%p %cn %s" ### 退出 GIC -GIC 并不像 Vim 那么难以退出。你看,没有办法停止 GIC。它会一直运行,直到它被杀死。当你准备停止 GIC 时,打开另一个终端选项卡或窗口并发出以下命令: +Vim 以来,还没有一个应用程序像 GIC 那么难以退出。你看,没有办法停止 GIC。它会一直运行,直到它被杀死。当你准备停止 GIC 时,打开另一个终端选项卡或窗口并发出以下命令: ``` $ kill `pgrep npm` @@ -124,10 +123,10 @@ GIC 是一个新奇的事物。这是一个很好的例子,说明开源生态 via: https://opensource.com/article/19/4/git-based-chat -作者:[Seth Kenlon (Red Hat, Community Moderator)][a] +作者:[Seth Kenlon][a] 选题:[lujun9972][b] 译者:[wxy](https://github.com/wxy) -校对:[校对者ID](https://github.com/校对者ID) +校对:[wxy](https://github.com/wxy) 本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 From c28144f88178059a138af83a84c27ad7ace86ddd Mon Sep 17 00:00:00 2001 From: Xingyu Wang Date: Sun, 15 Sep 2019 10:12:43 +0800 Subject: [PATCH 065/202] PUB @wxy https://linux.cn/article-11342-1.html --- .../20190403 Use Git as the backend for chat.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) rename {translated/tech => published}/20190403 Use Git as the backend for chat.md (98%) diff --git a/translated/tech/20190403 Use Git as the backend for chat.md b/published/20190403 Use Git as the backend for chat.md similarity index 98% rename from translated/tech/20190403 Use Git as the backend for chat.md rename to published/20190403 Use Git as the backend for chat.md index b12cb2757d..d41011a013 100644 --- a/translated/tech/20190403 Use Git as the backend for chat.md +++ b/published/20190403 Use Git as the backend for chat.md @@ -1,8 +1,8 @@ [#]: collector: (lujun9972) [#]: translator: (wxy) [#]: reviewer: (wxy) -[#]: publisher: ( ) -[#]: url: ( ) +[#]: publisher: (wxy) +[#]: url: (https://linux.cn/article-11342-1.html) [#]: subject: (Use 
Git as the backend for chat) [#]: via: (https://opensource.com/article/19/4/git-based-chat) [#]: author: (Seth Kenlon https://opensource.com/users/seth) From 65bd165a744277e0aa31c7dffb94122c5cf091c1 Mon Sep 17 00:00:00 2001 From: Xingyu Wang Date: Sun, 15 Sep 2019 10:52:04 +0800 Subject: [PATCH 066/202] PRF @MjSeven --- ...0190409 Working with variables on Linux.md | 67 +++++++++---------- 1 file changed, 33 insertions(+), 34 deletions(-) diff --git a/translated/tech/20190409 Working with variables on Linux.md b/translated/tech/20190409 Working with variables on Linux.md index e0cac381f1..d4a1373c7c 100644 --- a/translated/tech/20190409 Working with variables on Linux.md +++ b/translated/tech/20190409 Working with variables on Linux.md @@ -1,6 +1,6 @@ [#]: collector: (lujun9972) [#]: translator: (MjSeven) -[#]: reviewer: ( ) +[#]: reviewer: (wxy) [#]: publisher: ( ) [#]: url: ( ) [#]: subject: (Working with variables on Linux) @@ -9,10 +9,12 @@ 在 Linux 中使用变量 ====== -变量通常看起来像 $var,但它们也有 $1、$*、$? 和 $$ 这种形式。让我们来看看所有这些 $ 值可以告诉你什么。 -![Mike Lawrence \(CC BY 2.0\)][1] -我们称为“变量”的许多重要的值都存储在 Linux 系统中,但实际上有几种类型的变量和一些有趣的命令可以帮助你使用它们。在上一篇文章中,我们研究了[环境变量][2]以及它们在哪定义。在本文中,我们来看一看在命令行和脚本中使用的变量。 +> 变量通常看起来像 `$var` 这样,但它们也有 `$1`、`$*`、`$?` 和 `$$` 这种形式。让我们来看看所有这些 `$` 值可以告诉你什么。 + +![](https://img.linux.net.cn/data/attachment/album/201909/15/105140faf2jzyybubu1d0c.jpg) + +有许多重要的值都存储在 Linux 系统中,我们称为“变量”,但实际上变量有几种类型,并且一些有趣的命令可以帮助你使用它们。在上一篇文章中,我们研究了[环境变量][2]以及它们定义在何处。在本文中,我们来看一看在命令行和脚本中使用的变量。 ### 用户变量 @@ -70,9 +72,9 @@ $ echo $myvar0 11 ``` -通过这些选项,你可能会发现至少有一个是容易记忆且使用方便的。 +通过这些选项,你可能会发现它们是容易记忆、使用方便的。 -你也可以 _删除_ 一个变量 -- 这意味着没有定义它。 +你也可以*删除*一个变量 -- 这意味着没有定义它。 ``` $ unset myvar @@ -91,25 +93,25 @@ $ unset myvar3 -bash: unset: myvar3: cannot unset: readonly variable ``` -你可以使用这些设置和递增选项中的任何一个来赋值和操作脚本中的变量,但也有一些非常有用的 _内部变量_ 用于在脚本中工作。注意,你无法重新赋值或增加它们的值。 +你可以使用这些设置和递增选项中来赋值和操作脚本中的变量,但也有一些非常有用的*内部变量*可以用于在脚本中。注意,你无法重新赋值或增加它们的值。 ### 内部变量 在脚本中可以使用很多变量来计算参数并显示有关脚本本身的信息。 - * $1、$2、$3 等表示脚本的第一个、第二个、第三个等参数。 - * $# 表示参数的数量。 - * $* 表示所有参数。 - * $0 表示脚本的名称。 - * $? 
表示先前运行的命令的返回码(0 代表成功)。 - * $$ 显示脚本的进程 ID。 - * $PPID 显示 shell 的进程 ID(脚本的父进程)。 +* `$1`、`$2`、`$3` 等表示脚本的第一个、第二个、第三个等参数。 +* `$#` 表示参数的数量。 +* `$*` 表示所有参数。 +* `$0` 表示脚本的名称。 +* `$?` 表示先前运行的命令的返回码(0 代表成功)。 +* `$$` 显示脚本的进程 ID。 +* `$PPID` 显示 shell 的进程 ID(脚本的父进程)。 其中一些变量也适用于命令行,但显示相关信息: - * $0 显示你正在使用的 shell 的名称(例如,-bash)。 - * $$ 显示 shell 的进程 ID。 - * $PPID 显示 shell 的父进程的进程 ID(对我来说,是 sshd)。 +* `$0` 显示你正在使用的 shell 的名称(例如,-bash)。 +* `$$` 显示 shell 的进程 ID。 +* `$PPID` 显示 shell 的父进程的进程 ID(对我来说,是 sshd)。 为了查看它们的结果,如果我们将所有这些变量都放入一个脚本中,比如: @@ -127,26 +129,27 @@ echo $PPID ``` 当我们调用这个脚本时,我们会看到如下内容: + ``` $ tryme one two three /home/shs/bin/tryme <== 脚本名称 -one <== 第一个参数 -two <== 第二个参数 -3 <== 参数的个数 +one <== 第一个参数 +two <== 第二个参数 +3 <== 参数的个数 one two three <== 所有的参数 -0 <== 上一条 echo 命令的返回码 -10410 <== 脚本的进程 ID -10109 <== 父进程 ID +0 <== 上一条 echo 命令的返回码 +10410 <== 脚本的进程 ID +10109 <== 父进程 ID ``` 如果我们在脚本运行完毕后检查 shell 的进程 ID,我们可以看到它与脚本中显示的 PPID 相匹配: ``` $ echo $$ -10109 <== shell 的进程 ID +10109 <== shell 的进程 ID ``` -当然,比起简单地显示它们的值,我们更多的是在需要它们的时候来使用它们。我们来看一看它们可能的用处。 +当然,比起简单地显示它们的值,更有用的方式是使用它们。我们来看一看它们可能的用处。 检查是否已提供参数: @@ -209,7 +212,7 @@ fi ### 重命名变量 -在编写复杂的脚本时,为脚本的参数指定名称通常很有用,而不是继续将它们称为 $1, $2 等。等到第 35 行,阅读你脚本的人可能已经忘了 $2 表示什么。如果你将一个重要参数的值赋给 $filename 或 $numlines,那么他就不容易忘记。 +在编写复杂的脚本时,为脚本的参数指定名称通常很有用,而不是继续将它们称为 `$1`、`$2` 等。等到第 35 行,阅读你脚本的人可能已经忘了 `$2` 表示什么。如果你将一个重要参数的值赋给 `$filename` 或 `$numlines`,那么他就不容易忘记。 ``` #!/bin/bash @@ -236,27 +239,23 @@ else fi ``` -当然,这个示例脚本只是运行 head 命令来显示文件中的前 x 行,但它的目的是显示如何在脚本中使用内部参数来帮助确保脚本运行良好,或在失败时清晰地知道失败原因。 - -**观看 Sandra Henry-Stocker 的两分钟 Linux 技巧:[学习如何掌握大量 Linux 命令][3]。** - -加入 [Facebook][4] 和 [Linkedln][5] 上的网络社区,评论最热的主题。 +当然,这个示例脚本只是运行 `head` 命令来显示文件中的前 x 行,但它的目的是显示如何在脚本中使用内部参数来帮助确保脚本运行良好,或在失败时清晰地知道失败原因。 -------------------------------------------------------------------------------- -via: https://www.networkworld.com/article/3387154/working-with-variables-on-linux.html#tk.rss_all +via: https://www.networkworld.com/article/3387154/working-with-variables-on-linux.html 作者:[Sandra Henry-Stocker][a] 选题:[lujun9972][b] 译者:[MjSeven](https://github.com/MjSeven) -校对:[校对者ID](https://github.com/校对者ID) +校对:[wxy](https://github.com/wxy) 本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 [a]: https://www.networkworld.com/author/Sandra-Henry_Stocker/ [b]: https://github.com/lujun9972 [1]: https://images.idgesg.net/images/article/2019/04/variable-key-keyboard-100793080-large.jpg -[2]: https://www.networkworld.com/article/3385516/how-to-manage-your-linux-environment.html +[2]: https://linux.cn/article-10916-1.html [3]: https://www.youtube.com/playlist?list=PL7D2RMSmRO9J8OTpjFECi8DJiTQdd4hua [4]: https://www.facebook.com/NetworkWorld/ [5]: https://www.linkedin.com/company/network-world From fbc514bdfa1cdf42ff42de857ecddfe07859238b Mon Sep 17 00:00:00 2001 From: Xingyu Wang Date: Sun, 15 Sep 2019 10:52:30 +0800 Subject: [PATCH 067/202] PUB @MjSeven https://linux.cn/article-11344-1.html --- .../20190409 Working with variables on Linux.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) rename {translated/tech => published}/20190409 Working with variables on Linux.md (98%) diff --git a/translated/tech/20190409 Working with variables on Linux.md b/published/20190409 Working with variables on Linux.md similarity index 98% rename from translated/tech/20190409 Working with variables on Linux.md rename to published/20190409 Working with variables on Linux.md index d4a1373c7c..f51bf503fa 100644 --- a/translated/tech/20190409 Working with variables on Linux.md +++ 
b/published/20190409 Working with variables on Linux.md @@ -1,8 +1,8 @@ [#]: collector: (lujun9972) [#]: translator: (MjSeven) [#]: reviewer: (wxy) -[#]: publisher: ( ) -[#]: url: ( ) +[#]: publisher: (wxy) +[#]: url: (https://linux.cn/article-11344-1.html) [#]: subject: (Working with variables on Linux) [#]: via: (https://www.networkworld.com/article/3387154/working-with-variables-on-linux.html#tk.rss_all) [#]: author: (Sandra Henry-Stocker https://www.networkworld.com/author/Sandra-Henry_Stocker/) From 33ab1e0baf296e63b6284b66d94943beb538bdb2 Mon Sep 17 00:00:00 2001 From: Xingyu Wang Date: Sun, 15 Sep 2019 11:04:44 +0800 Subject: [PATCH 068/202] APL --- ....34 Released With New Features - Performance Improvements.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sources/news/20190914 GNOME 3.34 Released With New Features - Performance Improvements.md b/sources/news/20190914 GNOME 3.34 Released With New Features - Performance Improvements.md index 898f3763ca..19bc789602 100644 --- a/sources/news/20190914 GNOME 3.34 Released With New Features - Performance Improvements.md +++ b/sources/news/20190914 GNOME 3.34 Released With New Features - Performance Improvements.md @@ -1,5 +1,5 @@ [#]: collector: (lujun9972) -[#]: translator: ( ) +[#]: translator: (wxy) [#]: reviewer: ( ) [#]: publisher: ( ) [#]: url: ( ) From 6b902bedace87c165fc3bca3de77df4c77cee9a1 Mon Sep 17 00:00:00 2001 From: Xingyu Wang Date: Sun, 15 Sep 2019 11:29:20 +0800 Subject: [PATCH 069/202] TSL --- ...New Features - Performance Improvements.md | 89 ------------------- ...New Features - Performance Improvements.md | 87 ++++++++++++++++++ 2 files changed, 87 insertions(+), 89 deletions(-) delete mode 100644 sources/news/20190914 GNOME 3.34 Released With New Features - Performance Improvements.md create mode 100644 translated/news/20190914 GNOME 3.34 Released With New Features - Performance Improvements.md diff --git a/sources/news/20190914 GNOME 3.34 Released With New Features - Performance Improvements.md b/sources/news/20190914 GNOME 3.34 Released With New Features - Performance Improvements.md deleted file mode 100644 index 19bc789602..0000000000 --- a/sources/news/20190914 GNOME 3.34 Released With New Features - Performance Improvements.md +++ /dev/null @@ -1,89 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: (wxy) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (GNOME 3.34 Released With New Features & Performance Improvements) -[#]: via: (https://itsfoss.com/gnome-3-34-release/) -[#]: author: (Ankush Das https://itsfoss.com/author/ankush/) - -GNOME 3.34 Released With New Features & Performance Improvements -====== - -The latest version of GNOME dubbed “Thessaloniki” is here. It is an impressive upgrade over [GNOME 3.32][1] considering 6 months of work there. - -With this release, there’s a lot of new features and significant performance improvements. In addition to the new features, the level of customization has also improved. - -Here’s what’s new: - -### GNOME 3.34 Key Improvements - -You may watch this video to have a look at what’s new in GNOME 3.34: - -#### Drag and drop app icons into a folder - -The new shell theme lets you drag and drop the icons in the app drawer to re-arrange them or compile them into a folder. You may have already used a feature like this in your Android or iOS smartphone. 
- -![You can now drag and drop icons into a folder][2] - -#### Improved Calendar Manager - -The improved calendar manager integrates easily with 3rd party services and gives you the ability to manage your schedule right from your Linux system – without utilizing another app separately. - -![GNOME Calendar Improvements][3] - -#### Background selection settings - -It’s now easier to select a custom background for the main screen and lock screen as it displays all the available backgrounds in the same screen. Saves you at least one mouse click. - -![It’s easier to select backgrounds now][4] - -#### Re-arranging search options - -The search options/results can be re-arranged manually. So, you can decide what comes first when you head to search something. - -#### Responsive design for ‘Settings’ app - -The settings menu UI is now responsive – so that you can easily access all the options no matter what type (or size) of device you’re on. This is surely going to help GNOME on [Linux smartphones like Librem 5][5]. - -In addition to all these, the [official announcement][6] also notes useful additions for developers (additions to system profiler and virtualization improvements): - -> For developers, GNOME 3.34 includes more data sources in Sysprof, making performance profiling an application even easier. Multiple improvements to Builder include an integrated D-Bus inspector. - -![Improved Sysprof tool in GNOME 3.34][7] - -### How to get GNOME 3.34? - -Even though the new release is live – it hasn’t yet reached the official repositories of your Linux distros. So, we recommend to wait it out and upgrade it when it’s available as update packages. In either case, you can explore the [source code][8] – if you want to build it. - -[][9] - -Suggested read  Fedora 26 Is Released! Check Out The New Features - -Well, that’s about it. If you’re curious, you may check out the [full release notes][10] for technical details. - -What do you think about the new GNOME 3.34? 
-
-![You can now drag and drop icons into a folder][2]
-
-#### Improved Calendar Manager
-
-The improved calendar manager integrates easily with 3rd party services and gives you the ability to manage your schedule right from your Linux system – without utilizing another app separately.
-
-![GNOME Calendar Improvements][3]
-
-#### Background selection settings
-
-It’s now easier to select a custom background for the main screen and lock screen as it displays all the available backgrounds in the same screen. Saves you at least one mouse click.
-
-![It’s easier to select backgrounds now][4]
-
-#### Re-arranging search options
-
-The search options/results can be re-arranged manually. So, you can decide what comes first when you head to search something.
-
-#### Responsive design for ‘Settings’ app
-
-The settings menu UI is now responsive – so that you can easily access all the options no matter what type (or size) of device you’re on. This is surely going to help GNOME on [Linux smartphones like Librem 5][5].
-
-In addition to all these, the [official announcement][6] also notes useful additions for developers (additions to system profiler and virtualization improvements):
-
-> For developers, GNOME 3.34 includes more data sources in Sysprof, making performance profiling an application even easier. Multiple improvements to Builder include an integrated D-Bus inspector.
-
-![Improved Sysprof tool in GNOME 3.34][7]
-
-### How to get GNOME 3.34?
-
-Even though the new release is live – it hasn’t yet reached the official repositories of your Linux distros. So, we recommend waiting it out and upgrading when it’s available as update packages. In either case, you can explore the [source code][8] – if you want to build it.
-
-Well, that’s about it. If you’re curious, you may check out the [full release notes][10] for technical details.
-
-What do you think about the new GNOME 3.34?
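If you are not sure which GNOME release your machine is on right now, you can ask GNOME Shell directly from a terminal. A minimal sketch; the exact output wording can vary slightly between distributions:

```
# Print the installed GNOME Shell version.
# Output like "GNOME Shell 3.34.0" means the new release has landed.
gnome-shell --version
```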
+ +-------------------------------------------------------------------------------- + +via: https://itsfoss.com/gnome-3-34-release/ + +作者:[Ankush Das][a] +选题:[lujun9972][b] +译者:[wxy](https://github.com/wxy) +校对:[wxy](https://github.com/wxy) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://itsfoss.com/author/ankush/ +[b]: https://github.com/lujun9972 +[1]: https://www.gnome.org/news/2019/03/gnome-3-32-released/ +[2]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2019/09/icon-grid-drag-gnome.png?ssl=1 +[3]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2019/09/gnome-calendar-improvements.jpg?ssl=1 +[4]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2019/09/background-panel-GNOME.png?resize=800%2C555&ssl=1 +[5]: https://itsfoss.com/librem-linux-phone/ +[6]: https://www.gnome.org/press/2019/09/gnome-3-34-released/ +[7]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2019/09/sysprof-gnome.jpg?resize=800%2C493&ssl=1 +[8]: https://download.gnome.org/ +[9]: https://itsfoss.com/fedora-26-release/ +[10]: https://help.gnome.org/misc/release-notes/3.34/ From 131ea82ac2099eb437a02af238467798fc1eaad3 Mon Sep 17 00:00:00 2001 From: Xingyu Wang Date: Sun, 15 Sep 2019 11:36:10 +0800 Subject: [PATCH 070/202] PUB @wxy https://linux.cn/article-11345-1.html --- ...leased With New Features - Performance Improvements.md | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) rename {translated/news => published}/20190914 GNOME 3.34 Released With New Features - Performance Improvements.md (94%) diff --git a/translated/news/20190914 GNOME 3.34 Released With New Features - Performance Improvements.md b/published/20190914 GNOME 3.34 Released With New Features - Performance Improvements.md similarity index 94% rename from translated/news/20190914 GNOME 3.34 Released With New Features - Performance Improvements.md rename to published/20190914 GNOME 3.34 Released With New Features - Performance Improvements.md index 4065e34c4d..69911195d2 100644 --- a/translated/news/20190914 GNOME 3.34 Released With New Features - Performance Improvements.md +++ b/published/20190914 GNOME 3.34 Released With New Features - Performance Improvements.md @@ -1,8 +1,8 @@ [#]: collector: (lujun9972) [#]: translator: (wxy) [#]: reviewer: (wxy) -[#]: publisher: ( ) -[#]: url: ( ) +[#]: publisher: (wxy) +[#]: url: (https://linux.cn/article-11345-1.html) [#]: subject: (GNOME 3.34 Released With New Features & Performance Improvements) [#]: via: (https://itsfoss.com/gnome-3-34-release/) [#]: author: (Ankush Das https://itsfoss.com/author/ankush/) @@ -10,6 +10,8 @@ GNOME 3.34 发布 ====== +![](https://img.linux.net.cn/data/attachment/album/201909/15/113154i3bcp9p3md3mc3bk.jpg) + 最新版本的 GNOME 代号为“塞萨洛尼基Thessaloniki”。考虑到这个版本经过了 6 个月的开发,这应该是对 [GNOME 3.32][1] 的一次令人印象深刻的升级。 在此版本中,有许多新功能和显著的性能改进。除了新功能外,可定制的程度也得到了提升。 @@ -20,7 +22,7 @@ GNOME 3.34 发布 你可以观看此视频,了解 GNOME 3.34 中的新功能: -- [视频](https://youtu.be/qAjPRr5SGoY) +- [视频](https://img.linux.net.cn/static/video/_-qAjPRr5SGoY.mp4) #### 拖放图标到文件夹 From 2302cac7ebbad5dc11330a2017e927c2f3db412b Mon Sep 17 00:00:00 2001 From: Xingyu Wang Date: Sun, 15 Sep 2019 12:23:40 +0800 Subject: [PATCH 071/202] PRF --- published/20190403 Use Git as the backend for chat.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/published/20190403 Use Git as the backend for chat.md b/published/20190403 Use Git as the backend for chat.md index d41011a013..100750937b 100644 --- a/published/20190403 Use Git as the backend for chat.md +++ 
b/published/20190403 Use Git as the backend for chat.md @@ -52,7 +52,7 @@ $ sudo dnf install git nodejs 因此,GIC 没有安装过程。每个用户(在此示例中为 Alice 和 Bob)必须将存储库克隆到其硬盘驱动器: ``` -$ git cone https://github.com/ephigabay/GIC GIC +$ git clone https://github.com/ephigabay/GIC GIC ``` 将目录更改为 GIC 目录并使用 `npm` 安装 Node.js 依赖项: From 6313be3a033be52aea1e1299eeb735cb838e2388 Mon Sep 17 00:00:00 2001 From: LuMing <784315443@qq.com> Date: Sun, 15 Sep 2019 12:27:20 +0800 Subject: [PATCH 072/202] translating --- ...0190824 How to compile a Linux kernel in the 21st century.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sources/tech/20190824 How to compile a Linux kernel in the 21st century.md b/sources/tech/20190824 How to compile a Linux kernel in the 21st century.md index 0740c0b3a0..5821826706 100644 --- a/sources/tech/20190824 How to compile a Linux kernel in the 21st century.md +++ b/sources/tech/20190824 How to compile a Linux kernel in the 21st century.md @@ -1,5 +1,5 @@ [#]: collector: (lujun9972) -[#]: translator: ( ) +[#]: translator: (luming) [#]: reviewer: ( ) [#]: publisher: ( ) [#]: url: ( ) From 70e194815419aa611ec0378d500b0ad2422b76d5 Mon Sep 17 00:00:00 2001 From: hopefully2333 <787016457@qq.com> Date: Sun, 15 Sep 2019 12:39:24 +0800 Subject: [PATCH 073/202] translating by hopefully2333 translating by hopefully2333 --- sources/tech/20190911 4 open source cloud security tools.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sources/tech/20190911 4 open source cloud security tools.md b/sources/tech/20190911 4 open source cloud security tools.md index 5d14a725df..5a9e6d9d83 100644 --- a/sources/tech/20190911 4 open source cloud security tools.md +++ b/sources/tech/20190911 4 open source cloud security tools.md @@ -1,5 +1,5 @@ [#]: collector: (lujun9972) -[#]: translator: ( ) +[#]: translator: (hopefully2333) [#]: reviewer: ( ) [#]: publisher: ( ) [#]: url: ( ) From 60aeda8d9b695e1058c84e1258108acb762ae4b3 Mon Sep 17 00:00:00 2001 From: Xingyu Wang Date: Sun, 15 Sep 2019 14:18:34 +0800 Subject: [PATCH 074/202] =?UTF-8?q?=E6=B8=85=E9=99=A4=E6=96=87=E7=AB=A0?= =?UTF-8?q?=EF=BC=8C=E5=9B=9E=E6=94=B6=E6=96=87=E7=AB=A0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit @anonymone @acyanbird @name1e5s @WangYueScream --- ...se automation, and more industry trends.md | 85 --- ...rebras Systems launches massive AI chip.md | 57 -- ... secure, develop integrated cloud world.md | 96 --- ...ke car racing, and more industry trends.md | 72 -- ...tNICs to eliminate network load on CPUs.md | 74 --- ...Foundation-s Coding Education Challenge.md | 65 -- ...boards could allow for remote hijacking.md | 72 -- .../talk/20120911 Doug Bolden, Dunnet (IF).md | 52 -- ...iences and the Development of GNU Emacs.md | 111 ---- ...-s Review Of Dell XPS 13 Ubuntu Edition.md | 199 ------ ...n Source Components Ease Learning Curve.md | 70 -- ... GPLv3 license for its 11th anniversary.md | 65 -- .../20190429 Cisco goes all in on WiFi 6.md | 87 --- ...PE-s CEO lays out his technology vision.md | 162 ----- ...ng, adds hybrid, private-cloud services.md | 77 --- ...HPE to buy Cray, offer HPC as a service.md | 68 -- ...ns in Vendor Lock-in- Google and Huawei.md | 2 +- ...Beegoist - Richard Kenneth Eng - Medium.md | 623 ------------------ .../20190528 A Quick Look at Elvish Shell.md | 1 - ... 
What you need to know to be a sysadmin.md | 2 +- 20 files changed, 2 insertions(+), 2038 deletions(-) delete mode 100644 sources/news/20190820 Serverless on Kubernetes, diverse automation, and more industry trends.md delete mode 100644 sources/news/20190822 Semiconductor startup Cerebras Systems launches massive AI chip.md delete mode 100644 sources/news/20190823 VMware spends -4.8B to grab Pivotal, Carbon Black to secure, develop integrated cloud world.md delete mode 100644 sources/news/20190826 Implementing edge computing, DevOps like car racing, and more industry trends.md delete mode 100644 sources/news/20190826 Mellanox introduces SmartNICs to eliminate network load on CPUs.md delete mode 100644 sources/news/20190831 Endless Grants -500,000 Fund To GNOME Foundation-s Coding Education Challenge.md delete mode 100644 sources/news/20190905 Exploit found in Supermicro motherboards could allow for remote hijacking.md delete mode 100644 sources/talk/20120911 Doug Bolden, Dunnet (IF).md delete mode 100644 sources/talk/20140412 My Lisp Experiences and the Development of GNU Emacs.md delete mode 100644 sources/talk/20170320 An Ubuntu User-s Review Of Dell XPS 13 Ubuntu Edition.md delete mode 100644 sources/talk/20171129 Inside AGL Familiar Open Source Components Ease Learning Curve.md delete mode 100644 sources/talk/20180629 Reflecting on the GPLv3 license for its 11th anniversary.md delete mode 100644 sources/talk/20190429 Cisco goes all in on WiFi 6.md delete mode 100644 sources/talk/20190513 HPE-s CEO lays out his technology vision.md delete mode 100644 sources/talk/20190515 IBM overhauls mainframe-software pricing, adds hybrid, private-cloud services.md delete mode 100644 sources/talk/20190517 HPE to buy Cray, offer HPC as a service.md delete mode 100644 sources/tech/20140929 A Word from The Beegoist - Richard Kenneth Eng - Medium.md diff --git a/sources/news/20190820 Serverless on Kubernetes, diverse automation, and more industry trends.md b/sources/news/20190820 Serverless on Kubernetes, diverse automation, and more industry trends.md deleted file mode 100644 index 881ac5410a..0000000000 --- a/sources/news/20190820 Serverless on Kubernetes, diverse automation, and more industry trends.md +++ /dev/null @@ -1,85 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Serverless on Kubernetes, diverse automation, and more industry trends) -[#]: via: (https://opensource.com/article/19/8/serverless-kubernetes-and-more) -[#]: author: (Tim Hildred https://opensource.com/users/thildred) - -Serverless on Kubernetes, diverse automation, and more industry trends -====== -A weekly look at open source community and industry trends. -![Person standing in front of a giant computer screen with numbers, data][1] - -As part of my role as a senior product marketing manager at an enterprise software company with an open source development model, I publish a regular update about open source community, market, and industry trends for product marketers, managers, and other influencers. Here are five of my and their favorite articles from that update. - -## [10 tips for creating robust serverless components][2] - -> There are some repeated patterns that we have seen after creating 20+ serverless components. We recommend that you browse through the [available component repos on GitHub][3] and check which one is close to what you’re building. Just open up the repo and check the code and see how everything fits together. 
-
-> All component code is open source, and we are striving to keep it clean, simple and easy to follow. After you look around you’ll be able to understand how our core API works, how we interact with external APIs, and how we are reusing other components.
-
-**The impact**: Serverless Inc is striving to take probably the most hyped architecture early on in the hype cycle and make it usable and practical today. For serverless to truly go mainstream, producing something useful has to be as easy for a developer as "Hello world!," and these components are a step in that direction.
-
-## [Kubernetes workloads in the serverless era: Architecture, platforms, and trends][4]
-
-> There are many fascinating elements of the Kubernetes architecture: the containers providing common packaging, runtime and resource isolation model within its foundation; the simple control loop mechanism that monitors the actual state of components and reconciles this with the desired state; the custom resource definitions. But the true enabler for extending Kubernetes to support diverse workloads is the concept of the pod.
->
-> A pod provides two sets of guarantees. The deployment guarantee ensures that the containers of a pod are always placed on the same node. This behavior has some useful properties such as allowing containers to communicate synchronously or asynchronously over localhost, over inter-process communication ([IPC][5]), or using the local file system.
-
-**The impact**: If developer adoption of serverless architectures is largely driven by how easily they can be productive working that way, business adoption will be driven by the ability to place this trend in the operational and business context. IT decision-makers need to see a holistic picture of how serverless adds value alongside their existing investments, and operators and architects need to envision how they'll keep it all up and running.
-
-## [How developers can survive the Last Mile with CodeReady Workspaces][6]
-
-> Inside each cloud provider, a host of tools can address CI/CD, testing, monitoring, backing up and recovery problems. Outside of those providers, the cloud native community has been hard at work cranking out new tooling from [Prometheus][7], [Knative][8], [Envoy][9] and [Fluentd][10], to [Kubernetes][11] itself and the expanding ecosystem of Kubernetes Operators.
->
-> Within all of those projects, cloud-based services and desktop utilities is one major gap, however: the last mile of software development is the IDE. And despite the wealth of development projects inside the community and Cloud Native Computing Foundation, it is indeed the Eclipse Foundation, as mentioned above, that has taken on this problem with a focus on the new cloud development landscape.
-
-**The impact**: Increasingly complex development workflows and deployment patterns call for increasingly intelligent IDEs. While I'm sure it is possible to push a button and redeploy your microservices to a Kubernetes cluster from emacs (or vi, relax), Eclipse Che (and CodeReady Workspaces) are being built from the ground up with these types of cloud-native workflows in mind.
-
-## [Automate security in increasingly complex hybrid environments][12]
-
-> According to the [Information Security Forum][13]’s [Global Security Threat Outlook for 2019][14], one of the biggest IT trends to watch this year is the increasing sophistication of cybercrime and ransomware.
And even as the volume of ransomware attacks is dropping, cybercriminals are finding new, more potent ways to be disruptive. An [article in TechRepublic][15] points to cryptojacking malware, which enables someone to hijack another's hardware without permission to mine cryptocurrency, as a growing threat for enterprise networks. -> -> To more effectively mitigate these risks, organizations could invest in automation as a component of their security plans. That’s because it takes time to investigate and resolve issues, in addition to applying controlled remediations across bare metal, virtualized systems, and cloud environments -- both private and public -- all while documenting changes.  - -**The impact**: This one is really about our ability to trust that the network service providers that we rely upon to keep our phones and smart TVs full of stutter-free streaming HD content have what they need to protect the infrastructure that makes it all possible. I for one am rooting for you! - -## [AnsibleFest 2019 session catalog][16] - -> 85 Ansible automation sessions over 3 days in Atlanta, Georgia - -**The impact**: What struck me is the range of things that can be automated with Ansible. Windows? Check. Multicloud? Check. Security? Check. The real question after those three days are over will be: Is there anything in IT that can't be automated with Ansible? Seriously, I'm asking, let me know. - -_I hope you enjoyed this list of what stood out to me from last week and come back next Monday for more open source community, market, and industry trends._ - --------------------------------------------------------------------------------- - -via: https://opensource.com/article/19/8/serverless-kubernetes-and-more - -作者:[Tim Hildred][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://opensource.com/users/thildred -[b]: https://github.com/lujun9972 -[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/data_metrics_analytics_desktop_laptop.png?itok=9QXd7AUr (Person standing in front of a giant computer screen with numbers, data) -[2]: https://serverless.com/blog/10-tips-creating-robust-serverless-components/ -[3]: https://github.com/serverless-components/ -[4]: https://www.infoq.com/articles/kubernetes-workloads-serverless-era/ -[5]: https://opensource.com/article/19/4/interprocess-communication-linux-networking -[6]: https://thenewstack.io/how-developers-can-survive-the-last-mile-with-codeready-workspaces/ -[7]: https://prometheus.io/ -[8]: https://knative.dev/ -[9]: https://www.envoyproxy.io/ -[10]: https://www.fluentd.org/ -[11]: https://kubernetes.io/ -[12]: https://www.redhat.com/en/blog/automate-security-increasingly-complex-hybrid-environments -[13]: https://www.securityforum.org/ -[14]: https://www.prnewswire.com/news-releases/information-security-forum-forecasts-2019-global-security-threat-outlook-300757408.html -[15]: https://www.techrepublic.com/article/top-4-security-threats-businesses-should-expect-in-2019/ -[16]: https://agenda.fest.ansible.com/sessions diff --git a/sources/news/20190822 Semiconductor startup Cerebras Systems launches massive AI chip.md b/sources/news/20190822 Semiconductor startup Cerebras Systems launches massive AI chip.md deleted file mode 100644 index 91685c8501..0000000000 --- a/sources/news/20190822 Semiconductor startup Cerebras Systems launches massive AI chip.md +++ 
/dev/null @@ -1,57 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Semiconductor startup Cerebras Systems launches massive AI chip) -[#]: via: (https://www.networkworld.com/article/3433617/semiconductor-startup-cerebras-systems-launches-massive-ai-chip.html) -[#]: author: (Andy Patrizio https://www.networkworld.com/author/Andy-Patrizio/) - -Semiconductor startup Cerebras Systems launches massive AI chip -====== - -![Cerebras][1] - -There are a host of different AI-related solutions for the data center, ranging from add-in cards to dedicated servers, like the Nvidia DGX-2. But a startup called Cerebras Systems has its own server offering that relies on a single massive processor rather than a slew of small ones working in parallel. - -Cerebras has taken the wraps off its Wafer Scale Engine (WSE), an AI chip that measures 8.46x8.46 inches, making it almost the size of an iPad and more than 50 times larger than a CPU or GPU. A typical CPU or GPU is about the size of a postage stamp. - -[Now see how AI can boost data-center availability and efficiency.][2] - -Cerebras won’t sell the chips to ODMs due to the challenges of building and cooling such a massive chip. Instead, it will come as part of a complete server to be installed in data centers, which it says will start shipping in October. - -The logic behind the design is that AI requires huge amounts of data just to run a test and current technology, even GPUs, are not fast or powerful enough. So Cerebras supersized the chip. - -The numbers are just incredible. The company’s WSE chip has 1.2 trillion transistors, 400,000 computing cores and 18 gigabytes of memory. A typical PC processor has about 2 billion transistors, four to six cores and a few megabytes of cache memory. Even a high-end GPU has 21 billion transistors and a few thousand cores. - -The 400,000 cores on the WSE are connected via the Swarm communication fabric in a 2D mesh with 100 Pb/s of bandwidth. The WSE has 18 GB of on-chip memory, all accessible within a single clock cycle, and provides 9 PB/s memory bandwidth. This is 3000x more capacity and 10,000x greater bandwidth than the best Nvidia has to offer. More to the point it eliminates the need to move data in and out of memory to and from the CPU. - -“A vast array of programmable cores provides cluster-scale compute on a single chip. High-speed memory close to each core ensures that cores are always occupied doing calculations. And by connecting everything on-die, communication is many thousands of times faster than what is possible with off-chip technologies like InfiniBand,” the company said in a [blog post][3] announcing the processor. - -The cores are called Sparse Linear Algebra Cores, or SLA. They are optimized for the sparse linear algebra that is fundamental to neural network calculation. These cores are designed specifically for AI work. They are small and fast, contain no caches, and have eliminated other features and overheads that are needed in general purpose cores but play no useful role in a deep learning processor. - -The chip is the brainchild of Andrew Feldman, who created the SeaMicro high density Atom-based server a decade ago as an alternative to overpowered Xeons for doing simple tasks like file and print or serving LAMP stacks. Feldman is a character, one of the more interesting people [I’ve interviewed][4]. He definitely thinks outside the box. 
-
-Feldman sold SeaMicro to AMD for $334 million in 2012, which turned out to be a colossal waste of money on AMD’s part, as the product shortly disappeared from the market. Since then he’s raised $100 million in VC money.
-
-Join the Network World communities on [Facebook][5] and [LinkedIn][6] to comment on topics that are top of mind.
-
---------------------------------------------------------------------------------
-
-via: https://www.networkworld.com/article/3433617/semiconductor-startup-cerebras-systems-launches-massive-ai-chip.html
-
-作者:[Andy Patrizio][a]
-选题:[lujun9972][b]
-译者:[译者ID](https://github.com/译者ID)
-校对:[校对者ID](https://github.com/校对者ID)
-
-本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
-
-[a]: https://www.networkworld.com/author/Andy-Patrizio/
-[b]: https://github.com/lujun9972
-[1]: https://images.idgesg.net/images/article/2019/08/cerebras-wafer-scale-engine-100809084-large.jpg
-[2]: https://www.networkworld.com/article/3274654/ai-boosts-data-center-availability-efficiency.html
-[3]: https://www.cerebras.net/hello-world/
-[4]: https://www.serverwatch.com/news/article.php/3887471/SeaMicro-Launches-an-AtomPowered-Cloud-Computing-Server.htm
-[5]: https://www.facebook.com/NetworkWorld/
-[6]: https://www.linkedin.com/company/network-world
diff --git a/sources/news/20190823 VMware spends -4.8B to grab Pivotal, Carbon Black to secure, develop integrated cloud world.md b/sources/news/20190823 VMware spends -4.8B to grab Pivotal, Carbon Black to secure, develop integrated cloud world.md
deleted file mode 100644
index be11927309..0000000000
--- a/sources/news/20190823 VMware spends -4.8B to grab Pivotal, Carbon Black to secure, develop integrated cloud world.md
+++ /dev/null
@@ -1,96 +0,0 @@
-[#]: collector: (lujun9972)
-[#]: translator: ( )
-[#]: reviewer: ( )
-[#]: publisher: ( )
-[#]: url: ( )
-[#]: subject: (VMware spends $4.8B to grab Pivotal, Carbon Black to secure, develop integrated cloud world)
-[#]: via: (https://www.networkworld.com/article/3433916/vmware-spends-48b-to-grab-pivotal-carbon-black-to-secure-develop-integrated-cloud-world.html)
-[#]: author: (Michael Cooney https://www.networkworld.com/author/Michael-Cooney/)
-
-VMware spends $4.8B to grab Pivotal, Carbon Black to secure, develop integrated cloud world
-======
-VMware will spend $2.7 billion on cloud-application developer Pivotal and $2.1 billion for security vendor Carbon Black - details at next week's VMworld user conference
-![Bigstock][1]
-
-All things cloud are major topics of conversation at the VMworld user conference next week, ratcheted up a notch by VMware's $4.8 billion plans to acquire cloud development firm Pivotal and security provider Carbon Black.
-
-VMware said during its quarterly financial call this week it would spend about $2.7 billion on Pivotal and its Cloud Foundry hybrid cloud development technology, and about $2.1 billion for the security technology of Carbon Black, which includes its Predictive Security Cloud and other endpoint-security software. Both amounts represent the [enterprise value][2] of the deals; the actual purchase prices will vary, experts said.
-
-**[ Check out [What is hybrid cloud computing][3] and learn [what you need to know about multi-cloud][4]. | Get regularly scheduled insights by [signing up for Network World newsletters][5]. ]**
-
-VMware has deep relationships with both companies. Carbon Black technology is part of [VMware’s AppDefense][6] endpoint security.
Pivotal has a deeper relationship in that VMware and Dell, VMware’s parent company, [spun out Pivotal][7] in 2013.
-
-“These acquisitions address two critical technology priorities of all businesses today – building modern, enterprise-grade applications and protecting enterprise workloads and clients. With these actions we meaningfully accelerate our subscription and SaaS offerings and expand our ability to enable our customers’ digital transformation,” said VMware CEO Pat Gelsinger, on the call.
-
-With regards to the Pivotal acquisition Gelsinger said the time was right to own the whole compute stack. “We will now be uniquely positioned to help customers build, run and manage their cloud environment, and customers can go one place to get all of this technology,” Gelsinger said. “We embed the technology in our core VMware platform, and we will explain more about that at VMworld next week.”
-
-On the Carbon Black buy Gelsinger said he expects the technology to be integrated across VMware’s product families such as NSX networking software and vSphere, VMware's flagship virtualization platform.
-
-“Security is broken and fundamentally customers want a different answer in the security space. We think this move will be an opportunity for major disruption.”
-
-**[ [Prepare to become a Certified Information Security Systems Professional with this comprehensive online course from PluralSight. Now offering a 10-day free trial!][8] ]**
-
-Patrick Morley, president and CEO of Carbon Black, [wrote of the deal][9]: “VMware has a vision to create a modern security platform for any app, running on any cloud, delivered to any device – essentially, to build security into the fabric of the compute stack. Carbon Black’s cloud-native platform, our ability to see and stop attackers by leveraging the power of our rich data and behavioral analytics, and our deep cybersecurity expertise are all truly differentiating.”
-
-Both transactions are expected to close in the second half of VMware’s fiscal year, which ends Jan. 31.
-
-VMware has been on a massive buying spree this year that has included:
-
- * Avi Networks for multi-cloud application delivery services.
- * Bitfusion for hardware virtualization.
- * Uhana, a company that is employing deep learning and real-time AI in carrier networks and applications, to automate network operations and optimize application experience.
- * Veriflow, for network verification, assurance, and troubleshooting.
- * Heptio for its Kubernetes technology.
-
-
-
-Kubernetes integration will be a big topic at VMworld, Gelsinger hinted. “You will hear very specific announcements about how Heptio will be used. [And] we will be announcing major expansions of our Kubernetes and modern apps portfolio and help Pivotal complete that strategy. Together with Heptio and Pivotal, VMware will offer a comprehensive Kubernetes-based portfolio to build, run and manage modern applications on any cloud,” Gelsinger said.
-
-“VMware has increased its Kubernetes-related investments over the past year with the acquisition of Heptio to become a top-three contributor to Kubernetes, and at VMworld we will describe a major R&D effort to evolve VMware vSphere into a native Kubernetes platform for VMs and containers.”
-
-Other updates about where VMware vSphere and NSX-T are headed will also be hot topics.
-
-Introduced in 2017, NSX-T Data Center software is targeted at organizations looking to support multivendor cloud-native applications, [bare-metal][10] workloads, [hypervisor][11] environments and the growing hybrid and multi-cloud worlds. In February the [company anointed NSX-T][12] its go-to platform for future software-defined cloud developments.
-
-VMware is battling Cisco's Application Centric Infrastructure, Juniper's Contrail system and other platforms from vendors including Pluribus, Arista and Big Switch. How NSX-T evolves will be key to how well VMware competes.
-
-The most recent news around vSphere was that new features of its Hybrid Cloud Extension application-mobility software enable non-vSphere as well as increased on-premises application workloads to migrate to a variety of specific cloud services. Introduced in 2017, [VMware HCX][13] lets vSphere customers tie on-premises systems and applications to cloud services.
-
-The HCX announcement was part of VMware’s continued evolution into cloud technologies. In July the company teamed with [Google][14] to natively support VMware workloads in its Google Cloud service, giving customers more options for deploying enterprise applications.
-
-Further news about that relationship is likely at VMworld as well.
-
-VMware also has a hybrid cloud partnership with [Microsoft’s Azure cloud service][15]. That package, called Azure VMware Solutions, is built on VMware Cloud Foundation, which is a package of vSphere with NSX network-virtualization and VSAN software-defined storage-area-network platform. The company is expected to update developments with that platform as well.
-
-Join the Network World communities on [Facebook][16] and [LinkedIn][17] to comment on topics that are top of mind.
- --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3433916/vmware-spends-48b-to-grab-pivotal-carbon-black-to-secure-develop-integrated-cloud-world.html - -作者:[Michael Cooney][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Michael-Cooney/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2019/08/hybridcloud-100808516-large.jpg -[2]: http://valuationacademy.com/what-is-the-enterprise-value-ev/ -[3]: https://www.networkworld.com/article/3233132/cloud-computing/what-is-hybrid-cloud-computing.html -[4]: https://www.networkworld.com/article/3252775/hybrid-cloud/multicloud-mania-what-to-know.html -[5]: https://www.networkworld.com/newsletters/signup.html -[6]: https://www.networkworld.com/article/3359242/vmware-firewall-takes-aim-at-defending-apps-in-data-center-cloud.html -[7]: https://www.networkworld.com/article/2225739/what-is-pivotal--emc-and-vmware-want-it-to-be-your-platform-for-building-big-data-apps.html -[8]: https://pluralsight.pxf.io/c/321564/424552/7490?u=https%3A%2F%2Fwww.pluralsight.com%2Fpaths%2Fcertified-information-systems-security-professional-cisspr -[9]: https://www.carbonblack.com/2019/08/22/the-next-chapter-in-our-story-vmware-carbon-black/ -[10]: https://www.networkworld.com/article/3261113/why-a-bare-metal-cloud-provider-might-be-just-what-you-need.html?nsdr=true -[11]: https://www.networkworld.com/article/3243262/what-is-a-hypervisor.html?nsdr=true -[12]: https://www.networkworld.com/article/3346017/vmware-preps-milestone-nsx-release-for-enterprise-cloud-push.html -[13]: https://docs.vmware.com/en/VMware-HCX/services/rn/VMware-HCX-Release-Notes.html -[14]: https://www.networkworld.com/article/3428497/google-cloud-to-offer-vmware-data-center-tools-natively.html -[15]: https://www.networkworld.com/article/3113394/vmware-cloud-foundation-integrates-virtual-compute-network-and-storage-systems.html -[16]: https://www.facebook.com/NetworkWorld/ -[17]: https://www.linkedin.com/company/network-world diff --git a/sources/news/20190826 Implementing edge computing, DevOps like car racing, and more industry trends.md b/sources/news/20190826 Implementing edge computing, DevOps like car racing, and more industry trends.md deleted file mode 100644 index b048ecbdab..0000000000 --- a/sources/news/20190826 Implementing edge computing, DevOps like car racing, and more industry trends.md +++ /dev/null @@ -1,72 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Implementing edge computing, DevOps like car racing, and more industry trends) -[#]: via: (https://opensource.com/article/19/8/implementing-edge-more-industry-trends) -[#]: author: (Tim Hildred https://opensource.com/users/thildred) - -Implementing edge computing, DevOps like car racing, and more industry trends -====== -A weekly look at open source community and industry trends. -![Person standing in front of a giant computer screen with numbers, data][1] - -As part of my role as a senior product marketing manager at an enterprise software company with an open source development model, I publish a regular update about open source community, market, and industry trends for product marketers, managers, and other influencers. 
Here are five of my and their favorite articles from that update. - -## [How to implement edge computing][2] - -> "When you have hundreds or thousands of locations, it's a challenge to manage all of that compute as you continue to scale it out at the edge," said Coufal. "For organizations heavily involved with IoT, there are cases where these enterprises can find themselves with millions of different endpoints to manage. This is where you need to automate as much as you can operationally so there is less need for humans to manage the day-to-day activities." - -**The impact:** We may think that there is a lot of stuff hooked up to the internet already, but edge connected Internet of Things (IoT) devices are already proving we ain't seen nothing yet. A heuristic that breaks the potential billions of endpoints into three categories (at least in a business context) helps us think about what this IoT might actually do for us, and who should be responsible for what. - -## [Can a composable hypervisor re-imagine virtualization?][3] - -> Van de Ven explained that in talking with customers he has seen five areas emerge as needing re-imagining in order to support evolving virtualization plans. These include a platform that is lightweight; one that is fast; something that can support high density workloads; that has quick start up; and one that is secure. However, the degrees of those needs remains in flux. -> -> Van de Ven explained that a [composable][4] hypervisor was one way to deal with these varying needs, pointing to Intel’s work with the [recently launched][5] rust-vmm hypervisor. -> -> That [open source project][6] provides a set of common hypervisor components developed by contributing vendors that can provide a more secure, higher performance container technology designed for [cloud native][7] environments. - -**The impact**: The container boom has been perhaps unprecedented in both the rapidness of its onset and the breadth of its impact. You'd be forgiven for thinking that all the innovation has moved on from virtualization; not so! For one thing, most of those containers are running in virtual machines, and there are still places where virtual machines outshine containers (particularly where security is concerned). Thankfully there are projects pushing the state of hypervisors and virtualization forward. - -## [How DevOps is like auto racing][8] - -> To achieve their goals, race teams don’t think from start to finish; they flip the table to look at the race from the end goal to the beginning. They set a goal, a stretch goal, and then work backward from that goal to determine how to get there. Work is delegated to team members to push toward the objectives that will get the team to the desired outcome. - -**The impact**: Sometimes the best way to understand the impact of an idea is to re-imagine the stakes. Here we recontextualize the moving and configuration of bits as the direction of explosive power and get a better understanding of why process, roles, and responsibilities are important contributors to success. - -## [CNCF archives the rkt project][9] - -> All open source projects are subject to a lifecycle and can become less active for a number of reasons. In rkt’s case, despite its initial popularity following its creation in December 2014, and contribution to CNCF in March 2017, end user adoption has severely declined. 
The CNCF is also [home][10] to other container runtime projects: [containerd][11] and [CRI-O][12], and while the rkt project played an important part in the early days of cloud native adoption, in recent times user adoption has trended away from rkt towards these other projects. Furthermore, [project activity][13] and the number of contributors has also steadily declined over time, along with unpatched CVEs. - -**The impact**: Betamax and laser discs pushed cassettes and DVDs to be better, and so it is with rkt. The project showed there is more than one way to run containers at a time when it looked like there was only one way to run containers. rkt galvanized a push towards standard interfaces in the container space, and for that, we are eternally grateful. - -_I hope you enjoyed this list of what stood out to me from last week and come back next Monday for more open source community, market, and industry trends._ - --------------------------------------------------------------------------------- - -via: https://opensource.com/article/19/8/implementing-edge-more-industry-trends - -作者:[Tim Hildred][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://opensource.com/users/thildred -[b]: https://github.com/lujun9972 -[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/data_metrics_analytics_desktop_laptop.png?itok=9QXd7AUr (Person standing in front of a giant computer screen with numbers, data) -[2]: https://www.techrepublic.com/article/how-to-implement-edge-computing/ -[3]: https://www.sdxcentral.com/articles/news/can-a-composable-hypervisor-re-imagine-virtualization/2019/08/ -[4]: https://www.sdxcentral.com/data-center/composable/definitions/what-is-composable-infrastructure-definition/ (What is Composable Infrastructure? Definition) -[5]: https://www.sdxcentral.com/articles/news/intel-pushes-open-source-hypervisor-with-cloud-giants/2019/05/ -[6]: https://github.com/rust-vmm -[7]: https://www.sdxcentral.com/cloud-native/ (Cloud Native) -[8]: https://developers.redhat.com/blog/2019/08/22/how-devops-is-like-auto-racing/ -[9]: https://www.cncf.io/blog/2019/08/16/cncf-archives-the-rkt-project/ -[10]: https://landscape.cncf.io/category=container-runtime&format=card-mode -[11]: https://containerd.io/ -[12]: https://cri-o.io/ -[13]: https://rkt.devstats.cncf.io diff --git a/sources/news/20190826 Mellanox introduces SmartNICs to eliminate network load on CPUs.md b/sources/news/20190826 Mellanox introduces SmartNICs to eliminate network load on CPUs.md deleted file mode 100644 index 52fdb3baf9..0000000000 --- a/sources/news/20190826 Mellanox introduces SmartNICs to eliminate network load on CPUs.md +++ /dev/null @@ -1,74 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Mellanox introduces SmartNICs to eliminate network load on CPUs) -[#]: via: (https://www.networkworld.com/article/3433924/mellanox-introduces-smartnics-to-eliminate-network-load-on-cpus.html) -[#]: author: (Andy Patrizio https://www.networkworld.com/author/Andy-Patrizio/) - -Mellanox introduces SmartNICs to eliminate network load on CPUs -====== -Mellanox unveiled two processors designed to offload network workloads from the CPU -- ConnectX-6 Dx and BlueField-2 – freeing the CPU to do its processing job. 
![Natali Mis / Getty Images][1]
-
-If you were wondering what prompted Nvidia to [shell out nearly $7 billion for Mellanox Technologies][2], here’s your answer: The networking hardware provider has introduced a pair of processors for offloading network workloads from the CPU.
-
-ConnectX-6 Dx and BlueField-2 are cloud SmartNICs and I/O Processing Unit (IPU) solutions, respectively, designed to take the work of network processing off the CPU, freeing it to do its processing job.
-
-**[ Learn more about SDN: Find out [where SDN is going][3] and learn the [difference between SDN and NFV][4]. | Get regularly scheduled insights: [Sign up for Network World newsletters][5]. ]**
-
-The company promises up to 200Gbit/sec throughput with ConnectX and BlueField. It said the market for 25Gbit and faster Ethernet was 31% of the total market last year and will grow to 61% next year. With the internet of things (IoT) and artificial intelligence (AI), a lot of data needs to be moved around and Ethernet needs to get a lot faster.
-
-“The whole vision of [software-defined networking] and NVMe-over-Fabric was a nice vision, but as soon as people tried it in the data center, performance ground to a halt because CPUs couldn’t handle all that data,” said Kevin Deierling, vice president of marketing for Mellanox. “As you do more complex networking, the CPUs are being asked to do all that work on top of running the apps and the hypervisor. It puts a big burden on CPUs if you don’t unload that workload.”
-
-CPUs are getting larger, with AMD introducing a 64-core Epyc processor and Intel introducing a 56-core Xeon. But keeping those giant CPUs fed is a real challenge. You can’t use a 100Gbit link because the CPU has to look at all that traffic and it gets overwhelmed, argues Deierling.
-
-“Suddenly 100-200Gbits becomes possible because a CPU doesn’t have to look at every packet and decide which core needs it,” he said.
-
-The amount of CPU load depends on workload. A telco can have a situation where it’s as much as 70% packet processing. At a minimum workload, 30% of it would be packet processing.
-
-“Our goal is to bring that to 0% packet processing so the CPU can do what it does best, which is process apps,” he said. BlueField-2 can process up to 215 million packets per second, Deierling added.
-
-### ConnectX-6 Dx and BlueField-2 also provide security features
-
-The two are also focused on offering secure, high-speed interconnects inside the firewall. With standard network security, you have a firewall but minimal security inside the network. So once a hacker breaches your firewall, he often has free rein inside the network.
-
-With ConnectX-6 Dx and BlueField-2, the latter of which contains a ConnectX-6 Dx processor on the NIC, your internal network communications are also protected, so even if someone breaches your firewall, they can’t get at your data.
-
-ConnectX-6 Dx SmartNICs provide up to two ports of 25, 50 or 100Gb/s, or a single port of 200Gb/s, Ethernet connectivity powered by 50Gb/s PAM4 SerDes technology and PCIe 4.0 host connectivity. The ConnectX-6 Dx innovative hardware offload engines include IPsec and TLS inline data-in-motion crypto, advanced network virtualization, RDMA over Converged Ethernet (RoCE), and NVMe over Fabrics (NVMe-oF) storage accelerations.
-
-The BlueField-2 IPU integrates a ConnectX-6 Dx, plus an ARM processor for a single System-on-Chip (SoC), supporting both Ethernet and InfiniBand connectivity up to 200Gb/sec.
BlueField-2-based SmartNICs act as a co-processor that puts a computer in front of the computer to transform bare-metal and virtualized environments using advanced software-defined networking, NVMe SNAP storage disaggregation, and enhanced security capabilities.
-
-Both ConnectX-6 Dx and BlueField-2 are due in the fourth quarter.
-
-### Partnering with Nvidia
-
-Mellanox is in the process of being acquired by Nvidia, but the two companies are hardly waiting for government approval. At VMworld, Mellanox announced that its Remote Direct Memory Access (RDMA) networking solutions for VMware vSphere will enable virtualized machine learning with better GPU utilization and efficiency.
-
-Benchmarks found Nvidia’s virtualized GPUs see a two-fold increase in efficiency when using VMware’s paravirtualized RDMA (PVRDMA) technology compared with traditional networking protocols. And that was when connecting Nvidia T4 GPUs with Mellanox’s ConnectX-5 100 GbE SmartNICs, the older generation that is supplanted by today’s announcement.
-
-The PVRDMA Ethernet solution enables VM-to-VM communication over RDMA, which boosts data communication performance in virtualized environments while achieving significantly higher efficiency compared with legacy TCP/IP transports. This translates into optimized server and GPU utilization, reduced machine learning training time, and improved scalability.
-
-Join the Network World communities on [Facebook][6] and [LinkedIn][7] to comment on topics that are top of mind.
-
---------------------------------------------------------------------------------
-
-via: https://www.networkworld.com/article/3433924/mellanox-introduces-smartnics-to-eliminate-network-load-on-cpus.html
-
-作者:[Andy Patrizio][a]
-选题:[lujun9972][b]
-译者:[译者ID](https://github.com/译者ID)
-校对:[校对者ID](https://github.com/校对者ID)
-
-本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
-
-[a]: https://www.networkworld.com/author/Andy-Patrizio/
-[b]: https://github.com/lujun9972
-[1]: https://images.idgesg.net/images/article/2019/08/cso_identity_access_management_abstract_network_connections_circuits_reflected_in_eye_by_natali_mis_gettyimages-654791312_2400x1600-100808178-large.jpg
-[2]: https://www.networkworld.com/article/3356444/nvidia-grabs-mellanox-out-from-under-intels-nose.html
-[3]: https://www.networkworld.com/article/3209131/lan-wan/what-sdn-is-and-where-its-going.html
-[4]: https://www.networkworld.com/article/3206709/lan-wan/what-s-the-difference-between-sdn-and-nfv.html
-[5]: https://www.networkworld.com/newsletters/signup.html
-[6]: https://www.facebook.com/NetworkWorld/
-[7]: https://www.linkedin.com/company/network-world
diff --git a/sources/news/20190831 Endless Grants -500,000 Fund To GNOME Foundation-s Coding Education Challenge.md b/sources/news/20190831 Endless Grants -500,000 Fund To GNOME Foundation-s Coding Education Challenge.md
deleted file mode 100644
index 1ab956abc6..0000000000
--- a/sources/news/20190831 Endless Grants -500,000 Fund To GNOME Foundation-s Coding Education Challenge.md
+++ /dev/null
@@ -1,65 +0,0 @@
-[#]: collector: (lujun9972)
-[#]: translator: ( )
-[#]: reviewer: ( )
-[#]: publisher: ( )
-[#]: url: ( )
-[#]: subject: (Endless Grants $500,000 Fund To GNOME Foundation’s Coding Education Challenge)
-[#]: via: (https://itsfoss.com/endless-gnome-coding-education-challenge/)
-[#]: author: (Ankush Das https://itsfoss.com/author/ankush/)
-
-Endless Grants $500,000 Fund To GNOME Foundation’s Coding Education Challenge
-======
-
-The [GNOME
Foundation][1] recently announced the “**Coding Education Challenge**”, which is a three-stage competition to offer educators and students the opportunity to share their innovative ideas (projects) to teach coding with free and open-source software.
-
-For the funding (that covers the reward), [Endless][2] has issued a $500,000 (half a million) grant to support the competition and attract more educators/students from across the world. Yes, that is a whole lot of money to be awarded to the team (or individual) that wins the competition.
-
-In case you didn’t know about **Endless**, here’s a background for you – _they work on increasing digital access to children and help them to make the most out of it while also educating them about it_. Among other projects, they have [Endless OS Linux distribution][3]. They also have [inexpensive mini PCs running Linux][4] to help their educational projects.
-
-In the [press release][5], **Neil McGovern**, Executive Director, GNOME Foundation mentioned:
-
-> “We’re very grateful that Endless has come forward to provide more opportunities for individuals to learn about free and open-source ”
-
-He also added:
-
-> “We’re excited to see what can be achieved when we empower the creativity and imagination of our global community. We hope to make powerful partnerships between students and educators to explore the possibilities of our rich and diverse software ecosystem. Reaching the next generation of developers is crucial to ensuring that free software continues for many years in the future.”
-
-**Matt Dalio**, founder of Endless, also shared his thoughts about this competition:
-
-> “We fully believe in GNOME’s mission of making technology available and providing the tools of digital agency to all. What’s so unique about the GNOME Project is that it delivers a fully-working personal computer system, which is a powerful real-world vehicle to teach kids to code. There are so many potential ways for this competition to build flourishing ecosystems that empower the next generation to create, learn and build.”
-
-Beyond the announcement of the competition and the grant, we do not have more details. However, anyone can submit a proposal for the competition (an individual or a team). It has also been decided that there will be 20 winners in the first round, who will be rewarded **$6500** each for their ideas.
-
-For the second stage of the competition, the winners will be asked to provide a working prototype, from which 5 winners will be filtered to get **$25,000** each as the prize money.
-
-The final stage will involve making an end-product, where only two winners will be selected. The runner-up shall get **$25,000** and the winner walks away with **$100,000**.
-
-_**Wrapping Up**_
-
-I’ll be watching out for more details on the ‘Coding Education Challenge’ from the GNOME Foundation, and we shall update this article as more details on the competition emerge.
-
-While the grant makes it look like a great initiative by the GNOME Foundation, what do you think about it? Feel free to share your thoughts in the comments below.
-
--------------------------------------------------------------------------------
-
-via: https://itsfoss.com/endless-gnome-coding-education-challenge/
-
-作者:[Ankush Das][a]
-选题:[lujun9972][b]
-译者:[译者ID](https://github.com/译者ID)
-校对:[校对者ID](https://github.com/校对者ID)
-
-本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
-
-[a]: https://itsfoss.com/author/ankush/
-[b]: https://github.com/lujun9972
-[1]: https://www.gnome.org/
-[2]: https://endlessnetwork.com/
-[3]: https://endlessos.com/home/
-[4]: https://endlessos.com/computers/
-[5]: https://www.gnome.org/news/2019/08/gnome-foundation-launches-coding-education-challenge/
-[6]: https://itsfoss.com/stationx-manjaro-linux/
diff --git a/sources/news/20190905 Exploit found in Supermicro motherboards could allow for remote hijacking.md b/sources/news/20190905 Exploit found in Supermicro motherboards could allow for remote hijacking.md
deleted file mode 100644
index 6d2b48755b..0000000000
--- a/sources/news/20190905 Exploit found in Supermicro motherboards could allow for remote hijacking.md
+++ /dev/null
@@ -1,72 +0,0 @@
-[#]: collector: (lujun9972)
-[#]: translator: ( )
-[#]: reviewer: ( )
-[#]: publisher: ( )
-[#]: url: ( )
-[#]: subject: (Exploit found in Supermicro motherboards could allow for remote hijacking)
-[#]: via: (https://www.networkworld.com/article/3435123/exploit-found-in-supermicro-motherboards-could-allow-for-remote-hijacking.html)
-[#]: author: (Andy Patrizio https://www.networkworld.com/author/Andy-Patrizio/)
-
-Exploit found in Supermicro motherboards could allow for remote hijacking
-======
-The vulnerability impacts three models of Supermicro motherboards. Fortunately, a fix is already available.
-
-A security group discovered a vulnerability in three models of Supermicro motherboards that could allow an attacker to remotely commandeer the server. Fortunately, a fix is already available.
-
-Eclypsium, which specializes in firmware security, announced in its blog that it had found a set of flaws in the baseboard management controller (BMC) for three different models of Supermicro server boards: the X9, X10, and X11.
-
-**[ Also see: [What to consider when deploying a next-generation firewall][1] | Get regularly scheduled insights: [Sign up for Network World newsletters][2] ]**
-
-BMCs are designed to permit administrators remote access to the computer so they can do maintenance and other updates, such as firmware and operating system patches. It’s meant to be a secure port into the computer while at the same time walled off from the rest of the server.
-
-Normally BMCs are locked down within the network in order to prevent this kind of malicious access in the first place. In some cases, BMCs are left open to the internet so they can be accessed from a web browser, and those interfaces are not terribly secure. That’s what Eclypsium found.
-
-For its BMC management console, Supermicro uses an app called virtual media application. This application allows admins to remotely mount images from USB devices and CD or DVD-ROM drives.
-
-When accessed remotely, the virtual media service allows for plaintext authentication, sends most of the traffic unencrypted, uses a weak encryption algorithm for the rest, and is susceptible to an authentication bypass, [according to Eclypsium][3].
-
-Eclypsium was more diplomatic than I, so I’ll say it: Supermicro was sloppy.
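Administrators who want a first read on their own exposure can simply test whether the BMC’s virtual media service, which listens on TCP port 623, is reachable from networks it shouldn’t be. A minimal sketch using standard tools, with bmc.example.com standing in as a placeholder for your own BMC’s address:

```
# Quick exposure check: is the BMC's virtual media service (TCP 623) reachable?
# Replace bmc.example.com with the address of your own BMC.
nmap -p 623 bmc.example.com

# The same reachability test with netcat (-z: scan without sending data, -v: verbose).
nc -zv bmc.example.com 623
```

A BMC that answers on this port from outside a locked-down management network is a candidate for exactly the kind of attack described below.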
These issues allow an attacker to easily gain access to a server, whether by capturing a legitimate user’s authentication packet, by using default credentials, or, in some cases, without any credentials at all.

"This means attackers can attack the server in the same way as if they had physical access to a USB port, such as loading a new operating system image or using a keyboard and mouse to modify the server, implant malware, or even disable the device entirely," Eclypsium wrote in its blog post.

All told, the team found four different flaws within the virtual media service of the BMC's web control interface.

### How an attacker could exploit the Supermicro flaws

According to Eclypsium, the easiest way to attack the virtual media flaws is to find a server with the default login or brute force an easily guessed login (root or admin). In other cases, the flaws would have to be targeted.

Normally, access to the virtual media service is handled by a small Java application served on the BMC’s web interface. This application then connects to the virtual media service listening on TCP port 623 on the BMC. A scan by Eclypsium on port 623 turned up 47,339 exposed BMCs around the world.

Eclypsium did the right thing: it contacted Supermicro and waited for the vendor to release [an update to fix the vulnerabilities][5] before going public. Supermicro thanked Eclypsium for not only bringing this issue to its attention but also helping validate the fixes.

Eclypsium is on quite the roll. In July it disclosed BMC [vulnerabilities in motherboards from Lenovo, Gigabyte][6] and other vendors, and last month it [disclosed flaws in 40 device drivers][7] from 20 vendors that could be exploited to deploy malware.
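If you administer Supermicro servers yourself, checking for the same exposure Eclypsium scanned for is straightforward. Here is a minimal sketch using stock nmap; the addresses are placeholders for hosts or ranges you are responsible for, and you should never scan networks you don't own:

```
$ # Probe one suspected BMC for the virtual media service (TCP 623)
$ nmap -p 623 --open 192.0.2.10

$ # Sweep an entire management subnet for exposed BMCs
$ nmap -p 623 --open 192.0.2.0/24
```

Any host reported open here deserves a look at Supermicro's update linked above and, ideally, should be moved off the public internet onto an isolated management network.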
--------------------------------------------------------------------------------

via: https://www.networkworld.com/article/3435123/exploit-found-in-supermicro-motherboards-could-allow-for-remote-hijacking.html

作者:[Andy Patrizio][a]
选题:[lujun9972][b]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)

本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出

[a]: https://www.networkworld.com/author/Andy-Patrizio/
[b]: https://github.com/lujun9972
[3]: https://eclypsium.com/2019/09/03/usbanywhere-bmc-vulnerability-opens-servers-to-remote-attack/
[5]: https://www.supermicro.com/support/security_BMC_virtual_media.cfm
[6]: https://eclypsium.com/2019/07/16/vulnerable-firmware-in-the-supply-chain-of-enterprise-servers/
[7]: https://eclypsium.com/2019/08/10/screwed-drivers-signed-sealed-delivered/

diff --git a/sources/talk/20120911 Doug Bolden, Dunnet (IF).md b/sources/talk/20120911 Doug Bolden, Dunnet (IF).md
deleted file mode 100644
index c856dc5be0..0000000000
--- a/sources/talk/20120911 Doug Bolden, Dunnet (IF).md
+++ /dev/null
@@ -1,52 +0,0 @@
[#]: collector: (lujun9972)
[#]: translator: ( )
[#]: reviewer: ( )
[#]: publisher: ( )
[#]: url: ( )
[#]: subject: (Doug Bolden, Dunnet (IF))
[#]: via: (http://www.wyrmis.com/games/if/dunnet.html)
[#]: author: (W Doug Bolden http://www.wyrmis.com)

Doug Bolden, Dunnet (IF)
======

### Dunnet (IF)

#### Review

When I became a semi-serious hobbyist of IF last year, I mostly focused on Infocom, Adventures Unlimited, other Scott Adams-based games, and freeware titles. I went on to buy some from Malinche. I picked up _1893_ and _Futureboy_ and (most recently) _Treasures of a Slave Kingdom_. I downloaded a lot of free games from various sites. With all of my research and playing, I never once read anything that talked about a game being bundled with Emacs.

Partially, this is because I am a Vim guy. But I used to use Emacs. Kind of a lot. For probably my first couple of years with Linux. About as long as I have been a diehard Vim fan, now. I just never explored, it seems.

I booted up Emacs tonight, and my fonts were hosed. Still do not know exactly why. I surfed some menus to find out what was going wrong and came across a menu option called "Adventure" under Games, which I assumed (I know, I know) meant the Crowther and Woods 1977 variety. When I clicked it tonight, thinking that it had been a few months since I chased a bird around with a cage in a mine so I could fight off giant snakes or something, I was greeted with text involving ends of roads and shovels, and trees that, if shaken, kill me with a coconut. This was not the game I thought it was.

I dug around (or, in purely technical terms, typed "help") and got directed to [this website][1]. Well, here was an IF I had never touched before. Brand spanking new to me. I had planned to play out some _ToaSK_ tonight, but figured that could wait. Besides, I was not quite in the mood for the jocular fun of S. John Ross's commercial IF outing.
I needed something a little more direct, and this apparently was it.

Most of the game plays out just like the _Colossal Cave Adventure_ cousins of the oldschool (generally commercial) IF days. There are items you pick up. Each does a single task (well, there could be one exception to this, I guess). You collect treasures. Winning is a combination of getting to the end and turning in the treasures. The game slightly tweaks the formula by allowing multiple drop-off points for the treasures. Since there is a weight limit, though, you usually have to drop them off at a particular time to avoid getting stuck. At several points, your "item cache" is flushed, so to speak, meaning you have to go back and replay earlier portions to find out how to bring things forward. Items can also be damaged in ways that stop you from proceeding. Replaying is pretty much unavoidable, unless you guess outcomes just right.

It also inherits many problems from the era it came from. There is a twisty maze. I'm not sure how big it is. I just cheated and looked up a walkthrough for the maze portion. I plan on going back and replaying up to the maze bit and mapping it out, though. I was just mentally and physically beat when I played and knew that I was going to have to call it quits on the game for the night or cheat through the maze. I'm glad I cheated, because there are some interesting things after the maze.

It also has the same sort of stilted syntax and variable levels of description that the original _Adventure_ had. Looking at one item might give you "there is nothing special about that" while looking at another might give you a sentence of flavor text. Several things mentioned in the background do not exist to the parser, while some do. Part of the gameplay is experimenting to find out which. This includes, in some cases, a tendency for room descriptions to be written from the perspective of your first visit. I know that the Classroom found towards the end of the game does not mention the South exit, either. There are possibly other times this occurred that I didn't notice.

Its final issue, again a product of the era it was designed in, is random death syndrome. This is not too common, but there are a few places where things with no apparent fatal outcome lead to one anyhow. In some ways, this "fatal outcome" is just the game reaching an unwinnable state. For an example of the former, type "shake trees" in the first room. For an example of the latter, send the lamp, the key, or the shovel through the ftp without switching ftp modes first. At least with the former, there is a sense of exploration in finding out new ways to die. In IF, creative deaths are a form of victory in their own right.

_Dunnet_ has a couple of differences from most IF. The first difference is minor. There are little odd descriptions throughout the game ("This room is red", "The towel has a picture of Snoopy on it", "There is a cliff here") that do not seem to have an immediate effect on the game. Sure, you can jump over the cliff (and die, obviously), but it still comes off as a bright spot in the standard description matrix. Towards the end, you will be forced to bring back these details. It makes a neat little diversion of looking around and exploring things. Most of the details are cute and/or add to the surreality of the game overall.
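Before getting to the game's other big quirk, a quick practical note for anyone who wants to try it: _Dunnet_ still ships with GNU Emacs, and as far as I know it can be started either from inside the editor or, in true teletype spirit, in batch mode at a shell prompt:

```
$ # Inside Emacs itself: M-x dunnet
$ # Or play at the terminal, with no editor chrome around the game:
$ emacs -batch -l dunnet
```

In batch mode the game simply prints to the terminal and reads your commands line by line, which feels closer to the old mainframe games it imitates.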
The other big difference, and the one that greatly increased both my annoyance with and my enjoyment of the game, revolves around the two or three computer-oriented scenes in the game. You have to type commands into two different computers throughout. One is a VAX and the other is, um, something like a PC (I forget). In both cases, there are clues to be found by knowing your way around the interface. This is a game for computer folk, so most who play it will have a sense of how to type "ls" or "dir" depending on the OS. But not all will. Beating the game requires a general sense of computer literacy. You must know what transfer types ftp supports. You must know how to determine what type a file is. You must know how to read a text file at a DOS-style prompt. You must know something about protocols and etiquette for logging into ftp servers. All this sort of thing. If you do, or are willing to learn (I looked up some of the stuff online), then you can get past this portion with no problem. But this can be like the maze to some people, requiring several replays to get things right.

The end result is a quirky but fun game that I wish I had known about before, because now I have the feeling that my computer is hiding other secrets from me. Glad to have played. Will likely play again to see how many ways I can die.

--------------------------------------------------------------------------------

via: http://www.wyrmis.com/games/if/dunnet.html

作者:[W Doug Bolden][a]
选题:[lujun9972][b]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)

本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出

[a]: http://www.wyrmis.com
[b]: https://github.com/lujun9972
[1]: http://www.driver-aces.com/ronnie.html

diff --git a/sources/talk/20140412 My Lisp Experiences and the Development of GNU Emacs.md b/sources/talk/20140412 My Lisp Experiences and the Development of GNU Emacs.md
deleted file mode 100644
index 7be913c3bf..0000000000
--- a/sources/talk/20140412 My Lisp Experiences and the Development of GNU Emacs.md
+++ /dev/null
@@ -1,111 +0,0 @@
My Lisp Experiences and the Development of GNU Emacs
======

> (Transcript of Richard Stallman's Speech, 28 Oct 2002, at the International Lisp Conference).

Since none of my usual speeches have anything to do with Lisp, none of them were appropriate for today. So I'm going to have to wing it. Since I've done enough things in my career connected with Lisp I should be able to say something interesting.

My first experience with Lisp was when I read the Lisp 1.5 manual in high school. That's when I had my mind blown by the idea that there could be a computer language like that. The first time I had a chance to do anything with Lisp was when I was a freshman at Harvard and I wrote a Lisp interpreter for the PDP-11. It was a very small machine — it had something like 8k of memory — and I managed to write the interpreter in a thousand instructions. This gave me some room for a little bit of data. That was before I got to see what real software was like, that did real system jobs.

I began doing work on a real Lisp implementation with JonL White once I started working at MIT. I got hired at the Artificial Intelligence Lab not by JonL, but by Russ Noftsker, which was most ironic considering what was to come — he must have really regretted that day.
- -During the 1970s, before my life became politicized by horrible events, I was just going along making one extension after another for various programs, and most of them did not have anything to do with Lisp. But, along the way, I wrote a text editor, Emacs. The interesting idea about Emacs was that it had a programming language, and the user's editing commands would be written in that interpreted programming language, so that you could load new commands into your editor while you were editing. You could edit the programs you were using and then go on editing with them. So, we had a system that was useful for things other than programming, and yet you could program it while you were using it. I don't know if it was the first one of those, but it certainly was the first editor like that. - -This spirit of building up gigantic, complicated programs to use in your own editing, and then exchanging them with other people, fueled the spirit of free-wheeling cooperation that we had at the AI Lab then. The idea was that you could give a copy of any program you had to someone who wanted a copy of it. We shared programs to whomever wanted to use them, they were human knowledge. So even though there was no organized political thought relating the way we shared software to the design of Emacs, I'm convinced that there was a connection between them, an unconscious connection perhaps. I think that it's the nature of the way we lived at the AI Lab that led to Emacs and made it what it was. - -The original Emacs did not have Lisp in it. The lower level language, the non-interpreted language — was PDP-10 Assembler. The interpreter we wrote in that actually wasn't written for Emacs, it was written for TECO. It was our text editor, and was an extremely ugly programming language, as ugly as could possibly be. The reason was that it wasn't designed to be a programming language, it was designed to be an editor and command language. There were commands like ‘5l’, meaning ‘move five lines’, or ‘i’ and then a string and then an ESC to insert that string. You would type a string that was a series of commands, which was called a command string. You would end it with ESC ESC, and it would get executed. - -Well, people wanted to extend this language with programming facilities, so they added some. For instance, one of the first was a looping construct, which was < >. You would put those around things and it would loop. There were other cryptic commands that could be used to conditionally exit the loop. To make Emacs, we (1) added facilities to have subroutines with names. Before that, it was sort of like Basic, and the subroutines could only have single letters as their names. That was hard to program big programs with, so we added code so they could have longer names. Actually, there were some rather sophisticated facilities; I think that Lisp got its unwind-protect facility from TECO. - -We started putting in rather sophisticated facilities, all with the ugliest syntax you could ever think of, and it worked — people were able to write large programs in it anyway. The obvious lesson was that a language like TECO, which wasn't designed to be a programming language, was the wrong way to go. The language that you build your extensions on shouldn't be thought of as a programming language in afterthought; it should be designed as a programming language. In fact, we discovered that the best programming language for that purpose was Lisp. - -It was Bernie Greenberg, who discovered that it was (2). 
He wrote a version of Emacs in Multics MacLisp, and he wrote his commands in MacLisp in a straightforward fashion. The editor itself was written entirely in Lisp. Multics Emacs proved to be a great success — programming new editing commands was so convenient that even the secretaries in his office started learning how to use it. They used a manual someone had written which showed how to extend Emacs, but didn't say it was a programming. So the secretaries, who believed they couldn't do programming, weren't scared off. They read the manual, discovered they could do useful things and they learned to program. - -So Bernie saw that an application — a program that does something useful for you — which has Lisp inside it and which you could extend by rewriting the Lisp programs, is actually a very good way for people to learn programming. It gives them a chance to write small programs that are useful for them, which in most arenas you can't possibly do. They can get encouragement for their own practical use — at the stage where it's the hardest — where they don't believe they can program, until they get to the point where they are programmers. - -At that point, people began to wonder how they could get something like this on a platform where they didn't have full service Lisp implementation. Multics MacLisp had a compiler as well as an interpreter — it was a full-fledged Lisp system — but people wanted to implement something like that on other systems where they had not already written a Lisp compiler. Well, if you didn't have the Lisp compiler you couldn't write the whole editor in Lisp — it would be too slow, especially redisplay, if it had to run interpreted Lisp. So we developed a hybrid technique. The idea was to write a Lisp interpreter and the lower level parts of the editor together, so that parts of the editor were built-in Lisp facilities. Those would be whatever parts we felt we had to optimize. This was a technique that we had already consciously practiced in the original Emacs, because there were certain fairly high level features which we re-implemented in machine language, making them into TECO primitives. For instance, there was a TECO primitive to fill a paragraph (actually, to do most of the work of filling a paragraph, because some of the less time-consuming parts of the job would be done at the higher level by a TECO program). You could do the whole job by writing a TECO program, but that was too slow, so we optimized it by putting part of it in machine language. We used the same idea here (in the hybrid technique), that most of the editor would be written in Lisp, but certain parts of it that had to run particularly fast would be written at a lower level. - -Therefore, when I wrote my second implementation of Emacs, I followed the same kind of design. The low level language was not machine language anymore, it was C. C was a good, efficient language for portable programs to run in a Unix-like operating system. There was a Lisp interpreter, but I implemented facilities for special purpose editing jobs directly in C — manipulating editor buffers, inserting leading text, reading and writing files, redisplaying the buffer on the screen, managing editor windows. - -Now, this was not the first Emacs that was written in C and ran on Unix. The first was written by James Gosling, and was referred to as GosMacs. A strange thing happened with him. In the beginning, he seemed to be influenced by the same spirit of sharing and cooperation of the original Emacs. 
I first released the original Emacs to people at MIT. Someone wanted to port it to run on Twenex — it originally only ran on the Incompatible Timesharing System we used at MIT. They ported it to Twenex, which meant that there were a few hundred installations around the world that could potentially use it. We started distributing it to them, with the rule that “you had to send back all of your improvements” so we could all benefit. No one ever tried to enforce that, but as far as I know people did cooperate. - -Gosling did, at first, seem to participate in this spirit. He wrote in a manual that he called the program Emacs hoping that others in the community would improve it until it was worthy of that name. That's the right approach to take towards a community — to ask them to join in and make the program better. But after that he seemed to change the spirit, and sold it to a company. - -At that time I was working on the GNU system (a free software Unix-like operating system that many people erroneously call “Linux”). There was no free software Emacs editor that ran on Unix. I did, however, have a friend who had participated in developing Gosling's Emacs. Gosling had given him, by email, permission to distribute his own version. He proposed to me that I use that version. Then I discovered that Gosling's Emacs did not have a real Lisp. It had a programming language that was known as ‘mocklisp’, which looks syntactically like Lisp, but didn't have the data structures of Lisp. So programs were not data, and vital elements of Lisp were missing. Its data structures were strings, numbers and a few other specialized things. - -I concluded I couldn't use it and had to replace it all, the first step of which was to write an actual Lisp interpreter. I gradually adapted every part of the editor based on real Lisp data structures, rather than ad hoc data structures, making the data structures of the internals of the editor exposable and manipulable by the user's Lisp programs. - -The one exception was redisplay. For a long time, redisplay was sort of an alternate world. The editor would enter the world of redisplay and things would go on with very special data structures that were not safe for garbage collection, not safe for interruption, and you couldn't run any Lisp programs during that. We've changed that since — it's now possible to run Lisp code during redisplay. It's quite a convenient thing. - -This second Emacs program was ‘free software’ in the modern sense of the term — it was part of an explicit political campaign to make software free. The essence of this campaign was that everybody should be free to do the things we did in the old days at MIT, working together on software and working with whomever wanted to work with us. That is the basis for the free software movement — the experience I had, the life that I've lived at the MIT AI lab — to be working on human knowledge, and not be standing in the way of anybody's further using and further disseminating human knowledge. - -At the time, you could make a computer that was about the same price range as other computers that weren't meant for Lisp, except that it would run Lisp much faster than they would, and with full type checking in every operation as well. Ordinary computers typically forced you to choose between execution speed and good typechecking. So yes, you could have a Lisp compiler and run your programs fast, but when they tried to take `car` of a number, it got nonsensical results and eventually crashed at some point. 
- -The Lisp machine was able to execute instructions about as fast as those other machines, but each instruction — a car instruction would do data typechecking — so when you tried to get the car of a number in a compiled program, it would give you an immediate error. We built the machine and had a Lisp operating system for it. It was written almost entirely in Lisp, the only exceptions being parts written in the microcode. People became interested in manufacturing them, which meant they should start a company. - -There were two different ideas about what this company should be like. Greenblatt wanted to start what he called a “hacker” company. This meant it would be a company run by hackers and would operate in a way conducive to hackers. Another goal was to maintain the AI Lab culture (3). Unfortunately, Greenblatt didn't have any business experience, so other people in the Lisp machine group said they doubted whether he could succeed. They thought that his plan to avoid outside investment wouldn't work. - -Why did he want to avoid outside investment? Because when a company has outside investors, they take control and they don't let you have any scruples. And eventually, if you have any scruples, they also replace you as the manager. - -So Greenblatt had the idea that he would find a customer who would pay in advance to buy the parts. They would build machines and deliver them; with profits from those parts, they would then be able to buy parts for a few more machines, sell those and then buy parts for a larger number of machines, and so on. The other people in the group thought that this couldn't possibly work. - -Greenblatt then recruited Russell Noftsker, the man who had hired me, who had subsequently left the AI Lab and created a successful company. Russell was believed to have an aptitude for business. He demonstrated this aptitude for business by saying to the other people in the group, “Let's ditch Greenblatt, forget his ideas, and we'll make another company.” Stabbing in the back, clearly a real businessman. Those people decided they would form a company called Symbolics. They would get outside investment, not have scruples, and do everything possible to win. - -But Greenblatt didn't give up. He and the few people loyal to him decided to start Lisp Machines Inc. anyway and go ahead with their plans. And what do you know, they succeeded! They got the first customer and were paid in advance. They built machines and sold them, and built more machines and more machines. They actually succeeded even though they didn't have the help of most of the people in the group. Symbolics also got off to a successful start, so you had two competing Lisp machine companies. When Symbolics saw that LMI was not going to fall flat on its face, they started looking for ways to destroy it. - -Thus, the abandonment of our lab was followed by “war” in our lab. The abandonment happened when Symbolics hired away all the hackers, except me and the few who worked at LMI part-time. Then they invoked a rule and eliminated people who worked part-time for MIT, so they had to leave entirely, which left only me. The AI lab was now helpless. And MIT had made a very foolish arrangement with these two companies. It was a three-way contract where both companies licensed the use of Lisp machine system sources. These companies were required to let MIT use their changes. But it didn't say in the contract that MIT was entitled to put them into the MIT Lisp machine systems that both companies had licensed. 
Nobody had envisioned that the AI lab's hacker group would be wiped out, but it was. - -So Symbolics came up with a plan (4). They said to the lab, “We will continue making our changes to the system available for you to use, but you can't put it into the MIT Lisp machine system. Instead, we'll give you access to Symbolics' Lisp machine system, and you can run it, but that's all you can do.” - -This, in effect, meant that they demanded that we had to choose a side, and use either the MIT version of the system or the Symbolics version. Whichever choice we made determined which system our improvements went to. If we worked on and improved the Symbolics version, we would be supporting Symbolics alone. If we used and improved the MIT version of the system, we would be doing work available to both companies, but Symbolics saw that we would be supporting LMI because we would be helping them continue to exist. So we were not allowed to be neutral anymore. - -Up until that point, I hadn't taken the side of either company, although it made me miserable to see what had happened to our community and the software. But now, Symbolics had forced the issue. So, in an effort to help keep Lisp Machines Inc. going (5) — I began duplicating all of the improvements Symbolics had made to the Lisp machine system. I wrote the equivalent improvements again myself (i.e., the code was my own). - -After a while (6), I came to the conclusion that it would be best if I didn't even look at their code. When they made a beta announcement that gave the release notes, I would see what the features were and then implement them. By the time they had a real release, I did too. - -In this way, for two years, I prevented them from wiping out Lisp Machines Incorporated, and the two companies went on. But, I didn't want to spend years and years punishing someone, just thwarting an evil deed. I figured they had been punished pretty thoroughly because they were stuck with competition that was not leaving or going to disappear (7). Meanwhile, it was time to start building a new community to replace the one that their actions and others had wiped out. - -The Lisp community in the 70s was not limited to the MIT AI Lab, and the hackers were not all at MIT. The war that Symbolics started was what wiped out MIT, but there were other events going on then. There were people giving up on cooperation, and together this wiped out the community and there wasn't much left. - -Once I stopped punishing Symbolics, I had to figure out what to do next. I had to make a free operating system, that was clear — the only way that people could work together and share was with a free operating system. - -At first, I thought of making a Lisp-based system, but I realized that wouldn't be a good idea technically. To have something like the Lisp machine system, you needed special purpose microcode. That's what made it possible to run programs as fast as other computers would run their programs and still get the benefit of typechecking. Without that, you would be reduced to something like the Lisp compilers for other machines. The programs would be faster, but unstable. Now that's okay if you're running one program on a timesharing system — if one program crashes, that's not a disaster, that's something your program occasionally does. But that didn't make it good for writing the operating system in, so I rejected the idea of making a system like the Lisp machine. 
- -I decided instead to make a Unix-like operating system that would have Lisp implementations to run as user programs. The kernel wouldn't be written in Lisp, but we'd have Lisp. So the development of that operating system, the GNU operating system, is what led me to write the GNU Emacs. In doing this, I aimed to make the absolute minimal possible Lisp implementation. The size of the programs was a tremendous concern. - -There were people in those days, in 1985, who had one-megabyte machines without virtual memory. They wanted to be able to use GNU Emacs. This meant I had to keep the program as small as possible. - -For instance, at the time the only looping construct was ‘while’, which was extremely simple. There was no way to break out of the ‘while’ statement, you just had to do a catch and a throw, or test a variable that ran the loop. That shows how far I was pushing to keep things small. We didn't have ‘caar’ and ‘cadr’ and so on; “squeeze out everything possible” was the spirit of GNU Emacs, the spirit of Emacs Lisp, from the beginning. - -Obviously, machines are bigger now, and we don't do it that way any more. We put in ‘caar’ and ‘cadr’ and so on, and we might put in another looping construct one of these days. We're willing to extend it some now, but we don't want to extend it to the level of common Lisp. I implemented Common Lisp once on the Lisp machine, and I'm not all that happy with it. One thing I don't like terribly much is keyword arguments (8). They don't seem quite Lispy to me; I'll do it sometimes but I minimize the times when I do that. - -That was not the end of the GNU projects involved with Lisp. Later on around 1995, we were looking into starting a graphical desktop project. It was clear that for the programs on the desktop, we wanted a programming language to write a lot of it in to make it easily extensible, like the editor. The question was what it should be. - -At the time, TCL was being pushed heavily for this purpose. I had a very low opinion of TCL, basically because it wasn't Lisp. It looks a tiny bit like Lisp, but semantically it isn't, and it's not as clean. Then someone showed me an ad where Sun was trying to hire somebody to work on TCL to make it the “de-facto standard extension language” of the world. And I thought, “We've got to stop that from happening.” So we started to make Scheme the standard extensibility language for GNU. Not Common Lisp, because it was too large. The idea was that we would have a Scheme interpreter designed to be linked into applications in the same way TCL was linked into applications. We would then recommend that as the preferred extensibility package for all GNU programs. - -There's an interesting benefit you can get from using such a powerful language as a version of Lisp as your primary extensibility language. You can implement other languages by translating them into your primary language. If your primary language is TCL, you can't very easily implement Lisp by translating it into TCL. But if your primary language is Lisp, it's not that hard to implement other things by translating them. Our idea was that if each extensible application supported Scheme, you could write an implementation of TCL or Python or Perl in Scheme that translates that program into Scheme. Then you could load that into any application and customize it in your favorite language and it would work with other customizations as well. - -As long as the extensibility languages are weak, the users have to use only the language you provided them. 
Which means that people who love any given language have to compete for the choice of the developers of applications — saying “Please, application developer, put my language into your application, not his language.” Then the users get no choices at all — whichever application they're using comes with one language and they're stuck with [that language]. But when you have a powerful language that can implement others by translating into it, then you give the user a choice of language and we don't have to have a language war anymore. That's what we're hoping ‘Guile’, our scheme interpreter, will do. We had a person working last summer finishing up a translator from Python to Scheme. I don't know if it's entirely finished yet, but for anyone interested in this project, please get in touch. So that's the plan we have for the future. - -I haven't been speaking about free software, but let me briefly tell you a little bit about what that means. Free software does not refer to price; it doesn't mean that you get it for free. (You may have paid for a copy, or gotten a copy gratis.) It means that you have freedom as a user. The crucial thing is that you are free to run the program, free to study what it does, free to change it to suit your needs, free to redistribute the copies of others and free to publish improved, extended versions. This is what free software means. If you are using a non-free program, you have lost crucial freedom, so don't ever do that. - -The purpose of the GNU project is to make it easier for people to reject freedom-trampling, user-dominating, non-free software by providing free software to replace it. For those who don't have the moral courage to reject the non-free software, when that means some practical inconvenience, what we try to do is give a free alternative so that you can move to freedom with less of a mess and less of a sacrifice in practical terms. The less sacrifice the better. We want to make it easier for you to live in freedom, to cooperate. - -This is a matter of the freedom to cooperate. We're used to thinking of freedom and cooperation with society as if they are opposites. But here they're on the same side. With free software you are free to cooperate with other people as well as free to help yourself. With non-free software, somebody is dominating you and keeping people divided. You're not allowed to share with them, you're not free to cooperate or help society, anymore than you're free to help yourself. Divided and helpless is the state of users using non-free software. - -We've produced a tremendous range of free software. We've done what people said we could never do; we have two operating systems of free software. We have many applications and we obviously have a lot farther to go. So we need your help. I would like to ask you to volunteer for the GNU project; help us develop free software for more jobs. Take a look at [http://www.gnu.org/help][1] to find suggestions for how to help. If you want to order things, there's a link to that from the home page. If you want to read about philosophical issues, look in /philosophy. If you're looking for free software to use, look in /directory, which lists about 1900 packages now (which is a fraction of all the free software out there). Please write more and contribute to us. My book of essays, “Free Software and Free Society”, is on sale and can be purchased at [www.gnu.org][2]. Happy hacking! 
--------------------------------------------------------------------------------

via: https://www.gnu.org/gnu/rms-lisp.html

作者:[Richard Stallman][a]
选题:[lujun9972](https://github.com/lujun9972)
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)

本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出

[a]:https://www.gnu.org
[1]:https://www.gnu.org/help/
[2]:http://www.gnu.org/

diff --git a/sources/talk/20170320 An Ubuntu User-s Review Of Dell XPS 13 Ubuntu Edition.md b/sources/talk/20170320 An Ubuntu User-s Review Of Dell XPS 13 Ubuntu Edition.md
deleted file mode 100644
index 5e00b887d2..0000000000
--- a/sources/talk/20170320 An Ubuntu User-s Review Of Dell XPS 13 Ubuntu Edition.md
+++ /dev/null
@@ -1,199 +0,0 @@
[#]: collector: (lujun9972)
[#]: translator: (anonymone )
[#]: reviewer: ( )
[#]: publisher: ( )
[#]: url: ( )
[#]: subject: (An Ubuntu User’s Review Of Dell XPS 13 Ubuntu Edition)
[#]: via: (https://itsfoss.com/dell-xps-13-ubuntu-review)
[#]: author: (Abhishek Prakash https://itsfoss.com/author/abhishek/)

An Ubuntu User’s Review Of Dell XPS 13 Ubuntu Edition
======

_**Brief: Sharing my impressions of and experience with the Dell XPS 13 Kaby Lake Ubuntu edition after using it for over three months.**_

During the Black Friday sale last year, I bit the bullet and ordered myself a [Dell XPS 13][1] with the new [Intel Kaby Lake processor][2]. It got delivered in the second week of December, and if you [follow It’s FOSS on Facebook][3], you might have seen the [live unboxing][4].

Though I was tempted to review the Dell XPS 13 Ubuntu edition right away, I knew it wouldn’t be fair. A brand new system will, of course, feel good and work smoothly.

But that’s not the real experience. The real experience of any system comes after weeks, if not months, of use. That’s the reason I held myself back and waited three months to review the Dell XPS 13 Kaby Lake Ubuntu edition.

### Dell XPS 13 Ubuntu Edition Review

Before we see what’s hot and what’s not in the latest version of the Dell XPS 13, I should tell you that I was using an Acer R13 ultrabook before this, so I may compare the new Dell system with the older Acer one.

![Dell XPS 13 Ubuntu Edition System Settings][5]

The Dell XPS 13 comes in several versions based on the processor. The one I am reviewing is the Dell XPS 13 MLK (9360). It has an i5-7200U 7th-generation processor. Since I hardly used the touch screen on the Acer Aspire R13, I chose to go with the non-touch version of the XPS. This decision also saved me a couple of hundred euros.

It has 8 GB of LPDDR3 1866MHz RAM and a 256 GB PCIe SSD. Graphics is Intel HD. On the connectivity side, it’s got Killer 1535 Wi-Fi 802.11ac 2×2 and Bluetooth 4.1. The screen is an InfinityEdge Full HD (1920 x 1080) panel.

Now that you know what kind of hardware we’ve got here, let’s see what works and what sucks.

#### Look and feel

![Dell XPS 13 Kaby Lake Ubuntu Edition][6]

At 13.3″, the Dell XPS 13 looks even smaller than a regular 13.3″ laptop, thanks to its nearly non-existent bezel, the specialty of the InfinityEdge display. It is light as a feather, weighing just under 1.23 kg.

The outer surface is metallic, not very shiny, but with a decent aluminum look. On the interior, the palm rest is made of carbon fiber, which is very comfortable to rest your hands on.
Unlike the MacBook Air, which uses metallic palm rests, the carbon fiber ones are friendlier, especially in winter.

It is almost a centimeter and a half thick at its thickest part (around the hinges). This, too, adds to the elegance of the XPS 13.

Overall, the Dell XPS 13 has a compact and elegant body.

#### Keyboard and touchpad

The keyboard and touchpad mix well with the carbon fiber interior. The keys are smooth, with springs in the back (perhaps), and give a rich feel while typing. All of the important keys are present and are not tiny in size, something you might be worried about considering the overall tiny size of the XPS 13.

Oh, and the keyboard has backlight support, which adds to the rich feel of this expensive laptop.

While the keyboard is a great experience, the same cannot be said about the touchpad. In fact, the touchpad is the weakest part, and it mars the otherwise good experience of the XPS 13.

The touchpad has a cheap feel because it makes an irritating sound when tapping on the right side, as if it’s hollow underneath. This is [something that was noticed in earlier versions of the XPS 13][7] but hasn’t been given enough consideration to get fixed. It is something you do not expect from a product at this price.

Also, touchpad scrolling on websites is hideous. It is also not suitable for pixel-level work, because making small, precise movements with it is difficult.

#### Ports

The Dell XPS 13 has two USB 3.0 ports, one of them with PowerShare. If you did not know, [USB 3.0 PowerShare][8] ports allow you to charge external devices even when your system is turned off.

![Dell XPS 13 Kaby Lake Ubuntu edition ports][9]

It also has a [Thunderbolt][10] port (which doubles up as a [USB Type-C port][11]). It doesn’t have an HDMI port, an Ethernet port, or a VGA port. However, all three can be added via the Thunderbolt port and external adapters (sold separately).

![Dell XPS 13 Kaby Lake Ubuntu edition ports][12]

It also has an SD card reader and a headphone jack. In addition to all these, there is an [anti-theft slot][13] (a common security requirement in enterprises).

#### Display

The model I have packs a 1920x1080px panel. It’s full HD, and the display quality is on par. It displays high-definition pictures and 1080p video files perfectly.

I cannot compare it with the [QHD model][14], as I never used it. But considering that there is not much 4K content for now, a full HD display should be sufficient for the next few years.

#### Sound

Compared to the Acer R13, the XPS 13 has better audio quality. Even the maximum volume is louder than that of the Acer R13. The dual speakers give a nice stereo effect.

#### Webcam

Now comes the weirdest part of this Dell XPS 13 review. We have all become accustomed to seeing the webcam in the top-middle position on a laptop. But that is not the case here.

The XPS 13 puts the webcam in the bottom-left corner of the laptop. This is done to keep the bezel as thin as possible. But it creates a problem.

![Image captured with the laptop screen at 90 degrees][15]

When you video chat with someone, it is natural to look straight ahead. With a top-middle webcam, your face is in direct line with the camera. But with the webcam in the bottom-left position, it looks like one of those weird accidental selfies you take with the front camera of your smartphone. Heck, people on the other side might see inside your nostrils.
#### Battery

Battery life is the strongest point of the Dell XPS 13. While Dell claims an astounding 21-hour battery life, in my experience it smoothly gives 8-10 hours. That is with watching movies, browsing the internet, and other regular stuff.

There is one strange thing that I noticed, though. It charges pretty quickly up to 90%, but the charging slows down afterward. And it almost never goes beyond 98%.

The battery indicator turns red when the battery status falls below 30%, and it starts displaying notifications if the battery goes below 10%. There is a small light indicator under the touchpad that turns yellow when the battery is low and white when the charger is plugged in.

#### Overheating

I have previously written about ways to [reduce laptop overheating in Linux][16]. Thankfully, so far, I haven’t needed to employ those tricks.

The Dell XPS 13 remains surprisingly cool when you are using it on battery, even over long sessions. The bottom does get a little warm when you use it while charging.

Overall, the XPS 13 manages heat very well.

#### The Ubuntu experience with Dell XPS 13

So far, we have seen fairly generic things about the Dell XPS 13. Let’s talk about how good a Linux laptop it is.

Until now, I used to manually [install Linux on Windows laptops][17]. This is the first Linux laptop I ever bought. I would also like to mention the awesome first-boot animation of Dell’s Ubuntu laptop. Here’s a YouTube video of the same:

One thing I would like to mention here is that Dell never showcases its Ubuntu laptops on its website. You’ll have to search the site for “Ubuntu” to find the Ubuntu editions. Also, the Ubuntu edition is just 50 euros cheaper than its Windows counterpart, whereas I was expecting it to be at least 100 euros less.

Despite being preloaded with Ubuntu, the super key still comes with the Windows logo on it. It’s trivial, but I would have loved to see the Ubuntu logo on it.

Now, talking about the Ubuntu experience, the first thing I noticed was that there were no hardware issues. Even the function and media keys work perfectly in Ubuntu, which is a pleasant surprise.

Dell has also added its own repository to the software sources to provide some Dell-specific tools. You can see the footprints of Dell across the entire system.

You might be interested to see how Dell partitioned the 256 GB of disk space. Let me show that to you (and if you want to check the layout on your own machine, see the commands at the end of this section).

![Default disk partition by Dell][18]

As you can see, there is 524 MB reserved for [EFI][19]. Then there is 3.2 GB, presumably for the factory restore image.

Dell is using a 17 GB swap partition, which is more than double the RAM size. It seems Dell didn’t put enough thought here, because this is simply a waste of disk space, in my opinion. I would have used no [more than 11 GB of swap][20] here.

As I mentioned before, Dell adds a “restore to factory settings” option in the Grub menu. This is a nice little feature to have.

One thing which I don’t like about the XPS 13 Ubuntu edition is the long boot time. It takes a full 23 seconds to reach the login screen after pressing the power button. I would expect it to be faster considering that it uses a PCIe SSD.

If it interests you, the XPS 13 had the Chromium and Google Chrome browsers installed by default instead of Firefox.

As far as my experience goes, I am fairly impressed with the Dell XPS 13 Ubuntu edition. It gives a smooth Ubuntu experience. The laptop seems to be a part of Ubuntu.
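As promised, here is how you can inspect the same partition and swap layout on your own machine. These are stock Linux commands, nothing Dell-specific, so treat this as a generic sketch; the output will obviously differ from unit to unit:

```
$ # List disks and partitions with sizes, filesystems and mount points
$ lsblk -o NAME,SIZE,FSTYPE,MOUNTPOINT

$ # Show active swap devices and how much swap is actually in use
$ swapon --show
$ free -h
```

With 8 GB of RAM and regular desktop use, `free -h` will typically show that 17 GB swap partition sitting almost entirely idle, which is exactly why I call it wasted space.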
Though it is an expensive laptop, I would say it is definitely worth the money.

To summarize, let’s look at the good, the bad, and the ugly of the Dell XPS 13 Ubuntu edition.

#### The Good

  * Ultralight
  * Compact
  * Keyboard
  * Carbon fiber palm rest
  * Full hardware support for Ubuntu
  * Factory restore option for Ubuntu
  * Nice display and sound quality
  * Good battery life

#### The Bad

  * Poor touchpad
  * A little pricey
  * Long boot time for an SSD-powered laptop
  * Windows key still present :P

#### The Ugly

  * Weird webcam placement

How did you like this **Dell XPS 13 Ubuntu edition review** from an Ubuntu user’s point of view? Do you find it good enough to spend over a thousand bucks on? Do share your views in the comments below.

--------------------------------------------------------------------------------

via: https://itsfoss.com/dell-xps-13-ubuntu-review

作者:[Abhishek Prakash][a]
选题:[lujun9972][b]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)

本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出

[a]: https://itsfoss.com/author/abhishek/
[b]: https://github.com/lujun9972
[1]: https://amzn.to/2ImVkCV
[2]: http://www.techradar.com/news/computing-components/processors/kaby-lake-intel-core-processor-7th-gen-cpu-news-rumors-and-release-date-1325782
[3]: https://www.facebook.com/itsfoss/
[4]: https://www.facebook.com/itsfoss/videos/810293905778045/
[5]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2017/02/Dell-XPS-13-Ubuntu-Edition-spec.jpg?resize=540%2C337&ssl=1
[6]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2017/03/Dell-XPS-13-Ubuntu-review.jpeg?resize=800%2C600&ssl=1
[7]: https://www.youtube.com/watch?v=Yt5SkI0c3lM
[8]: http://www.dell.com/support/article/fr/fr/frbsdt1/SLN155147/usb-powershare-feature?lang=EN
[9]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2017/03/Dell-Ubuntu-XPS-13-Kaby-Lake-ports-1.jpg?resize=800%2C435&ssl=1
[10]: https://en.wikipedia.org/wiki/Thunderbolt_(interface)
[11]: https://en.wikipedia.org/wiki/USB-C
[12]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2017/03/Dell-Ubuntu-XPS-13-Kaby-Lake-ports-2.jpg?resize=800%2C325&ssl=1
[13]: http://accessories.euro.dell.com/sna/productdetail.aspx?c=ie&l=en&s=dhs&cs=iedhs1&sku=461-10169
[14]: https://recombu.com/mobile/article/quad-hd-vs-qhd-vs-4k-ultra-hd-what-does-it-all-mean_M20472.html
[15]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2017/03/Dell-XPS-13-webcam-issue.jpg?resize=800%2C450&ssl=1
[16]: https://itsfoss.com/reduce-overheating-laptops-linux/
[17]: https://itsfoss.com/install-ubuntu-1404-dual-boot-mode-windows-8-81-uefi/
[18]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2017/03/Dell-XPS-13-Ubuntu-Edition-disk-partition.jpeg?resize=800%2C448&ssl=1
[19]: https://en.wikipedia.org/wiki/EFI_system_partition
[20]: https://itsfoss.com/swap-size/

diff --git a/sources/talk/20171129 Inside AGL Familiar Open Source Components Ease Learning Curve.md b/sources/talk/20171129 Inside AGL Familiar Open Source Components Ease Learning Curve.md
deleted file mode 100644
index 9eee39888a..0000000000
--- a/sources/talk/20171129 Inside AGL Familiar Open Source Components Ease Learning Curve.md
+++ /dev/null
@@ -1,70 +0,0 @@
Inside AGL: Familiar Open Source Components Ease Learning Curve
============================================================

![Matt
Porter](https://www.linux.com/sites/lcom/files/styles/rendered_file/public/porter-elce-agl.png?itok=E-5xG98S "Matt Porter")
Konsulko’s Matt Porter (pictured) and Scott Murray ran through the major components of AGL’s Unified Code Base at Embedded Linux Conference Europe. [The Linux Foundation][1]

Among the sessions at the recent [Embedded Linux Conference Europe (ELCE)][5] — 57 of which are [available on YouTube][2] — are several reports on the Linux Foundation’s [Automotive Grade Linux project][6]. These include [an overview from AGL Community Manager Walt Miner][3] showing how AGL’s Unified Code Base (UCB) Linux distribution is expanding from in-vehicle infotainment (IVI) to ADAS. There was even a presentation on using AGL to build a remote-controlled robot (see links below).

Here we look at the “State of AGL: Plumbing and Services” talk from Konsulko Group’s CTO Matt Porter and senior staff software engineer Scott Murray. Porter and Murray ran through the components of the current [UCB 4.0 “Daring Dab”][7] and detailed major upstream components and API bindings, many of which will appear in the Electric Eel release due in Jan. 2018.

Despite the automotive focus of the AGL stack, most of the components are already familiar to Linux developers. “It looks a lot like a desktop distro,” Porter told the ELCE attendees in Prague. “All these familiar friends.”

Some of those friends include the underlying Yocto Project “Poky” with its OpenEmbedded foundation, which is topped with layers like oe-core, meta-openembedded, and meta-networking. Other components are based on familiar open source software like systemd (application control), Wayland and Weston (graphics), BlueZ (Bluetooth), oFono (telephony), PulseAudio and ALSA (audio), gpsd (location), ConnMan (Internet), and wpa-supplicant (WiFi), among others.

UCB’s application framework is controlled through a WebSocket interface to the API bindings, thereby enabling apps to talk to each other. There’s also a new W3C widget for an alternative application packaging scheme, as well as support for SmartDeviceLink, a technology developed at Ford that automatically syncs up IVI systems with mobile phones.

AGL UCB’s Wayland/Weston graphics layer is augmented with an “IVI shell” that works with the layer manager. “One of the unique requirements of automotive is the ability to separate aspects of the application in the layers,” said Porter. “For example, in a navigation app, the graphics rendering for the map may be completely different than the engine used for the UI decorations. One engine layers to a surface in Wayland to expose the map while the decorations and controls are handled by another layer.”

For audio, ALSA and PulseAudio are joined by GENIVI AudioManager, which works together with PulseAudio. “We use AudioManager for policy driven audio routing,” explained Porter. “It allows you to write a very complex XML-based policy using a rules engine with audio routing.”

UCB leans primarily on the well-known [Smack Project][8] for security, and also incorporates Tizen’s [Cynara][9] safe policy-checker service. A Cynara-enabled D-Bus daemon is used to control Cynara security policies.

Porter and Murray went on to explain AGL’s API binding mechanism, which according to Murray “abstracts the UI from its back-end logic so you can replace it with your own custom UI.” You can re-use application logic with different UI implementations, such as moving from the default Qt to HTML5 or a native toolkit.
Application binding requests and responses use JSON via HTTP or WebSocket. Binding calls can be made from applications or from other bindings, thereby enabling “stacking” of bindings. - -Porter and Murray concluded with a detailed description of each binding. These include upstream bindings currently in various stages of development. The first is a Master binding that manages the application lifecycle, including tasks such as install, uninstall, start, and terminate. Other upstream bindings include the WiFi binding and the BlueZ-based Bluetooth binding, which in the future will be upgraded with Bluetooth [PBAP][10] (Phone Book Access Profile). PBAP can connect with contacts databases on your phone, and links to the Telephony binding to replicate caller ID. - -The oFono-based Telephony binding also makes calls to the Bluetooth binding for Bluetooth Hands-Free-Profile (HFP) support. In the future, Telephony binding will add support for sent dial tones, call waiting, call forwarding, and voice modem support. - -Support for AM/FM radio is not well developed in the Linux world, so for its Radio binding, AGL started by supporting [RTL-SDR][11] code for low-end radio dongles. Future plans call for supporting specific automotive tuner devices. - -The MediaPlayer binding is in very early development, and is currently limited to GStreamer based audio playback and control. Future plans call for adding playlist controls, as well as one of the most actively sought features among manufacturers: video playback support. - -Location bindings include the [gpsd][12] based GPS binding, as well as GeoClue and GeoFence. GeoClue, which is built around the [GeoClue][13] D-Bus geolocation service, “overlaps a little with GPS, which uses the same location data,” says Porter. GeoClue also gathers location data from WiFi AP databases, 3G/4G tower info, and the GeoIP database — sources that are useful “if you’re inside or don’t have a good fix,” he added. - -GeoFence depends on the GPS binding, as well. It lets you establish a bounding box, and then track ingress and egress events. GeoFence also tracks “dwell” status, which is determined by arriving at home and staying for 10 minutes. “It then triggers some behavior based on a timeout,” said Porter. Future plans call for a customizable dwell transition time. - -While most of these Upstream bindings are well established, there are also Work in Progress (WIP) bindings that are still in the early stages, including CAN, HomeScreen, and WindowManager bindings. Farther out, there are plans to add speech recognition and text-to-speech bindings, as well as a WWAN modem binding. - -In conclusion, Porter noted: “Like any open source project, we desperately need more developers.” The Automotive Grade Linux project may seem peripheral to some developers, but it offers a nice mix of familiarity — grounded in many widely used open source projects -- along with the excitement of expanding into a new and potentially game changing computing form factor: your automobile. AGL has also demonstrated success — you can now [check out AGL in action in the 2018 Toyota Camry][14], followed in the coming month by most Toyota and Lexus vehicles sold in North America. 
-
-Watch the complete video below:
-
-[Video][15]
-
---------------------------------------------------------------------------------
-
-via: https://www.linux.com/blog/event/elce/2017/11/inside-agl-familiar-open-source-components-ease-learning-curve
-
-作者:[ERIC BROWN][a]
-译者:[译者ID](https://github.com/译者ID)
-校对:[校对者ID](https://github.com/校对者ID)
-
-本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
-
-[a]:https://www.linux.com/users/ericstephenbrown
-[1]:https://www.linux.com/licenses/category/linux-foundation
-[2]:https://www.youtube.com/playlist?list=PLbzoR-pLrL6pISWAq-1cXP4_UZAyRtesk
-[3]:https://www.youtube.com/watch?v=kfwEmjSjAzM&index=14&list=PLbzoR-pLrL6pISWAq-1cXP4_UZAyRtesk
-[4]:https://www.linux.com/files/images/porter-elce-aglpng
-[5]:http://events.linuxfoundation.org/events/embedded-linux-conference-europe
-[6]:https://www.automotivelinux.org/
-[7]:https://www.linux.com/blog/2017/8/automotive-grade-linux-moves-ucb-40-launches-virtualization-workgroup
-[8]:http://schaufler-ca.com/
-[9]:https://wiki.tizen.org/Security:Cynara
-[10]:https://wiki.maemo.org/Bluetooth_PBAP
-[11]:https://www.rtl-sdr.com/about-rtl-sdr/
-[12]:http://www.catb.org/gpsd/
-[13]:https://www.freedesktop.org/wiki/Software/GeoClue/
-[14]:https://www.linux.com/blog/event/automotive-linux-summit/2017/6/linux-rolls-out-toyota-and-lexus-vehicles
-[15]:https://youtu.be/RgI-g5h1t8I
diff --git a/sources/talk/20180629 Reflecting on the GPLv3 license for its 11th anniversary.md b/sources/talk/20180629 Reflecting on the GPLv3 license for its 11th anniversary.md
deleted file mode 100644
index af352aefe1..0000000000
--- a/sources/talk/20180629 Reflecting on the GPLv3 license for its 11th anniversary.md
+++ /dev/null
@@ -1,65 +0,0 @@
-Reflecting on the GPLv3 license for its 11th anniversary
-======
-
-![](https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/LAW_vaguepatent_520x292.png?itok=_zuxUwyt)
-
-Last year, I missed the opportunity to write about the 10th anniversary of [GPLv3][1], the third version of the GNU General Public License. GPLv3 was officially released by the Free Software Foundation (FSF) on June 29, 2007—better known in technology history as the date Apple launched the iPhone. Now, one year later, I feel some retrospection on GPLv3 is due. For me, much of what is interesting about GPLv3 goes back somewhat further than 11 years, to the public drafting process in which I was an active participant.
-
-In 2005, following nearly a decade of enthusiastic self-immersion in free software, yet having had little open source legal experience to speak of, I was hired by Eben Moglen to join the Software Freedom Law Center as counsel. SFLC was then outside counsel to the FSF, and my role was conceived as focusing on the incipient public phase of the GPLv3 drafting process. This opportunity rescued me from a previous career turn that I had found rather dissatisfying. Free and open source software (FOSS) legal matters would come to be my new specialty, one that I found fascinating, gratifying, and intellectually rewarding. My work at SFLC, and particularly the trial by fire that was my work on GPLv3, served as my on-the-job training.
-
-GPLv3 must be understood as the product of an earlier era of FOSS, the contours of which may be difficult for some to imagine today.
By the beginning of the public drafting process in 2006, Linux and open source were no longer practically synonymous, as they might have been for casual observers several years earlier, but the connection was much closer than it is now. - -Reflecting the profound impact that Linux was already having on the technology industry, everyone assumed GPL version 2 was the dominant open source licensing model. We were seeing the final shakeout of a Cambrian explosion of open source (and pseudo-open source) business models. A frothy business-fueled hype surrounded open source (for me most memorably typified by the Open Source Business Conference) that bears little resemblance to the present-day embrace of open source development by the software engineering profession. Microsoft, with its expanding patent portfolio and its competitive opposition to Linux, was commonly seen in the FOSS community as an existential threat, and the [SCO litigation][2] had created a cloud of legal risk around Linux and the GPL that had not quite dissipated. - -That environment necessarily made the drafting of GPLv3 a high-stakes affair, unprecedented in free software history. Lawyers at major technology companies and top law firms scrambled for influence over the license, convinced that GPLv3 was bound to take over and thoroughly reshape open source and all its massive associated business investment. - -A similar mindset existed within the technical community; it can be detected in the fears expressed in the final paragraph of the Linux kernel developers' momentous September 2006 [denunciation][3] of GPLv3. Those of us close to the FSF knew a little better, but I think we assumed the new license would be either an overwhelming success or a resounding failure—where "success" meant something approximating an upgrade of the existing GPLv2 project ecosystem to GPLv3, though perhaps without the kernel. The actual outcome was something in the middle. - -I have no confidence in attempts to measure open source license adoption, which have in recent years typically been used to demonstrate a loss of competitive advantage for copyleft licensing. My own experience, which is admittedly distorted by proximity to Linux and my work at Red Hat, suggests that GPLv3 has enjoyed moderate popularity as a license choice for projects launched since 2007, though most GPLv2 projects that existed before 2007, along with their post-2007 offshoots, remained on the old license. (GPLv3's sibling licenses LGPLv3 and AGPLv3 never gained comparable popularity.) Most of the existing GPLv2 projects (with a few notable exceptions like the kernel and Busybox) were licensed as "GPLv2 or any later version." The technical community decided early on that "GPLv2 or later" was a politically neutral license choice that embraced both GPLv2 and GPLv3; this goes some way to explain why adoption of GPLv3 was somewhat gradual and limited, especially within the Linux community. - -During the GPLv3 drafting process, some expressed concerns about a "balkanized" Linux ecosystem, whether because of the overhead of users having to understand two different, strong copyleft licenses or because of GPLv2/GPLv3 incompatibility. These fears turned out to be entirely unfounded. Within mainstream server and workstation Linux stacks, the two licenses have peacefully coexisted for a decade now. This is partly because such stacks are made up of separate units of strong copyleft scope (see my discussion of [related issues in the container setting][4]). 
As for incompatibility inside units of strong copyleft scope, here, too, the prevalence of "GPLv2 or later" was seen by the technical community as neatly resolving the theoretical problem, despite the fact that nominal license upgrading of GPLv2-or-later to GPLv3 hardly ever occurred. - -I have alluded to the handwringing that some of us FOSS license geeks have brought to the topic of supposed copyleft decline. GPLv3 has taken its share of abuse from critics as far back as the beginning of the public drafting process, and some, predictably, have drawn a link between GPLv3 in particular and GPL or copyleft disfavor in general. - -I have viewed it somewhat differently: Largely because of its complexity and baroqueness, GPLv3 was a lost opportunity to create a strong copyleft license that would appeal very broadly to modern individual software authors and corporate licensors. I believe individual developers today tend to prefer short, simple, easy to understand, minimalist licenses, the most obvious example of which is the [MIT License][5]. - -Some corporate decisionmakers around open source license selection may naturally share that view, while others may associate some parts of GPLv3, such as the patent provisions or the anti-lockdown requirements, as too risky or incompatible with their business models. The great irony is that the characteristics of GPLv3 that fail to attract these groups are there in part because of conscious attempts to make the license appeal to these same sorts of interests. - -How did GPLv3 come to be so baroque? As I have said, GPLv3 was the product of an earlier time, in which FOSS licenses were viewed as the primary instruments of project governance. (Today, we tend to associate governance with other kinds of legal or quasi-legal tools, such as structuring of nonprofit organizations, rules around project decision making, codes of conduct, and contributor agreements.) - -GPLv3, in its drafting, was the high point of an optimistic view of FOSS licenses as ambitious means of private regulation. This was already true of GPLv2, but GPLv3 took things further by addressing in detail a number of new policy problems—software patents, anti-circumvention laws, device lockdown. That was bound to make the license longer and more complex than GPLv2, as the FSF and SFLC noted apologetically in the first GPLv3 [rationale document][6]. - -But a number of other factors at play in the drafting of GPLv3 unintentionally caused the complexity of the license to grow. Lawyers representing vendors' and commercial users' interests provided useful suggestions for improvements from a legal and commercial perspective, but these often took the form of making simply worded provisions more verbose, arguably without net increases in clarity. Responses to feedback from the technical community, typically identifying loopholes in license provisions, had a similar effect. - -The GPLv3 drafters also famously got entangled in a short-term political crisis—the controversial [Microsoft/Novell deal][7] of 2006—resulting in the permanent addition of new and unusual conditions in the patent section of the license, which arguably served little purpose after 2007 other than to make license compliance harder for conscientious patent-holding vendors. Of course, some of the complexity in GPLv3 was simply the product of well-intended attempts to make compliance easier, especially for community project developers, or to codify FSF interpretive practice. 
Finally, one can take issue with the style of language used in GPLv3, much of which had a quality of playful parody or mockery of conventional software license legalese; a simpler, straightforward form of phrasing would in many cases have been an improvement. - -The complexity of GPLv3 and the movement towards preferring brevity and simplicity in license drafting and unambitious license policy objectives meant that the substantive text of GPLv3 would have little direct influence on later FOSS legal drafting. But, as I noted with surprise and [delight][8] back in 2012, MPL 2.0 adapted two parts of GPLv3: the 30-day cure and 60-day repose language from the GPLv3 termination provision, and the assurance that downstream upgrading to a later license version adds no new obligations on upstream licensors. - -The GPLv3 cure language has come to have a major impact, particularly over the past year. Following the Software Freedom Conservancy's promulgation, with the FSF's support, of the [Principles of Community-Oriented GPL Enforcement][9], which calls for extending GPLv3 cure opportunities to GPLv2 violations, the Linux Foundation Technical Advisory Board published a [statement][10], endorsed by over a hundred Linux kernel developers, which incorporates verbatim the cure language of GPLv3. This in turn was followed by a Red Hat-led series of [corporate commitments][11] to extend the GPLv3 cure provisions to GPLv2 and LGPLv2.x noncompliance, a campaign to get individual open source developers to extend the same commitment, and an announcement by Red Hat that henceforth GPLv2 and LGPLv2.x projects it leads will use the commitment language directly in project repositories. I discussed these developments in a recent [blog post][12]. - -One lasting contribution of GPLv3 concerns changed expectations for how revisions of widely-used FOSS licenses are done. It is no longer acceptable for such licenses to be revised entirely in private, without opportunity for comment from the community and without efforts to consult key stakeholders. The drafting of MPL 2.0 and, more recently, EPL 2.0 reflects this new norm. 
- --------------------------------------------------------------------------------- - -via: https://opensource.com/article/18/6/gplv3-anniversary - -作者:[Richard Fontana][a] -选题:[lujun9972](https://github.com/lujun9972) -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]:https://opensource.com/users/fontana -[1]:https://www.gnu.org/licenses/gpl-3.0.en.html -[2]:https://en.wikipedia.org/wiki/SCO%E2%80%93Linux_disputes -[3]:https://lwn.net/Articles/200422/ -[4]:https://opensource.com/article/18/1/containers-gpl-and-copyleft -[5]:https://opensource.org/licenses/MIT -[6]:http://gplv3.fsf.org/gpl-rationale-2006-01-16.html -[7]:https://en.wikipedia.org/wiki/Novell#Agreement_with_Microsoft -[8]:https://opensource.com/law/12/1/the-new-mpl -[9]:https://sfconservancy.org/copyleft-compliance/principles.html -[10]:https://www.kernel.org/doc/html/v4.16/process/kernel-enforcement-statement.html -[11]:https://www.redhat.com/en/about/press-releases/technology-industry-leaders-join-forces-increase-predictability-open-source-licensing -[12]:https://www.redhat.com/en/blog/gpl-cooperation-commitment-and-red-hat-projects?source=author&term=26851 diff --git a/sources/talk/20190429 Cisco goes all in on WiFi 6.md b/sources/talk/20190429 Cisco goes all in on WiFi 6.md deleted file mode 100644 index decd25500a..0000000000 --- a/sources/talk/20190429 Cisco goes all in on WiFi 6.md +++ /dev/null @@ -1,87 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Cisco goes all in on WiFi 6) -[#]: via: (https://www.networkworld.com/article/3391919/cisco-goes-all-in-on-wifi-6.html#tk.rss_all) -[#]: author: (Michael Cooney https://www.networkworld.com/author/Michael-Cooney/) - -Cisco goes all in on WiFi 6 -====== -Cisco rolls out Catalyst and Meraki WiFi 6-based access points, Catalyst 9000 switch -![undefined / Getty Images][1] - -Cisco has taken the wraps off a family of WiFi 6 access points, roaming technology and developer-community support all to make wireless a solid enterprise equal with the wired world. - -“Best-effort’ wireless for enterprise customers doesn’t cut it any more. There’s been a change in customer expectations that there will be an uninterrupted unplugged experience,” said Scott Harrell, senior vice president and general manager of enterprise networking at Cisco. **“ **It is now a wireless-first world.** ”** - -**More about 802.11ax (Wi-Fi 6)** - - * [Why 802.11ax is the next big thing in wireless][2] - * [FAQ: 802.11ax Wi-Fi][3] - * [Wi-Fi 6 (802.11ax) is coming to a router near you][4] - * [Wi-Fi 6 with OFDMA opens a world of new wireless possibilities][5] - * [802.11ax preview: Access points and routers that support Wi-Fi 6 are on tap][6] - - - -Bringing a wireless-first enterprise world together is one of the drivers behind a new family of WiFi 6-based access points (AP) for Cisco’s Catalyst and Meraki portfolios. WiFi 6 (802.11ax) is designed for high-density public or private environments. But it also will be beneficial in internet of things (IoT) deployments, and in offices that use bandwidth-hogging applications like videoconferencing. - -The Cisco Catalyst 9100 family and Meraki [MR 45/55][7] WiFi-6 access points are built on Cisco silicon and communicate via pre-802.1ax protocols. 
-The silicon in these access points now acts as a rich sensor, providing IT with insights about what is going on in the wireless network in real time, and that enables faster reactions to problems and security concerns, Harrell said.
-
-Aside from WiFi 6, the boxes include support for visibility and communications with Zigbee, BLE and Thread protocols. The Catalyst APs support uplink speeds of 2.5 Gbps, in addition to 100 Mbps and 1 Gbps. All speeds are supported on Category 5e cabling for an industry first, as well as 10GBASE-T (IEEE 802.3bz) cabling, Cisco said.
-
-Wireless traffic aggregates onto wired networks, so the wired network must also evolve. Technology like multi-gigabit Ethernet must be driven into the access layer, which in turn drives higher bandwidth needs at the aggregation and core layers, [Harrell said][8].
-
-Handling this influx of wireless traffic was part of the reason Cisco also upgraded its iconic Catalyst 6000 with the [Catalyst 9600 this week][9]. The 9600 brings with it support for Cat 6000 features such as MPLS, virtual switching and IPv6, while adding or bolstering support for wireless networks as well as intent-based networking (IBN) and security segmentation. The 9600 helps fill out the company’s revamped lineup, which includes the 9200 family of access switches, the 9500 aggregation switch and the 9800 wireless controller.
-
-“WiFi doesn’t exist in a vacuum – how it connects to the enterprise and the data center or the Internet is key and in Cisco’s case that key is now the 9600 which has been built to handle the increased traffic,” said Lee Doyle, principal analyst with Doyle Research.
-
-The new 9600 ties in with the recently [released Catalyst 9800][10], which features 40Gbps to 100Gbps performance, depending on the model; hot-patching to simplify updates and eliminate update-related downtime; Encrypted Traffic Analytics (ETA); policy-based micro- and macro-segmentation; and Trustworthy solutions to detect malware on wired or wireless connected devices, Cisco said.
-
-All Catalyst 9000 family members support other Cisco products such as [DNA Center][11], which controls automation capabilities, assurance setting, fabric provisioning and policy-based segmentation for enterprise wired and wireless networks.
-
-The new APs are pre-standard, but other vendors including Aruba, NetGear and others are also selling pre-standard 802.11ax devices. Cisco getting into the market solidifies the validity of this strategy, said Brandon Butler, a senior research analyst with IDC.
-
-Many experts [expect the standard][12] to be ratified late this year.
-
-“We expect to see volume shipments of WiFi 6 products by early next year and it being the de facto WiFi standard by 2022,” Butler said.
-
-On top of the APs and 9600 switch, Cisco extended its software development community – [DevNet][13] – to offer WiFi 6 learning labs, sandboxes and developer resources.
-
-The Cisco Catalyst and Meraki access platforms are open and programmable all the way down to the chipset level, allowing applications to take advantage of network programmability, Cisco said.
-
-Cisco also said it had added more vendors – now including Apple, Samsung, Boingo, Presidio and Intel – for its ongoing [OpenRoaming][14] project. OpenRoaming, which is in beta, promises to let users move seamlessly between wireless networks and LTE without interruption.
- --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3391919/cisco-goes-all-in-on-wifi-6.html#tk.rss_all - -作者:[Michael Cooney][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Michael-Cooney/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2019/04/cisco_catalyst_wifi_coffee-cup_coffee-beans_-100794990-large.jpg -[2]: https://www.networkworld.com/article/3215907/mobile-wireless/why-80211ax-is-the-next-big-thing-in-wi-fi.html -[3]: https://%20https//www.networkworld.com/article/3048196/mobile-wireless/faq-802-11ax-wi-fi.html -[4]: https://www.networkworld.com/article/3311921/mobile-wireless/wi-fi-6-is-coming-to-a-router-near-you.html -[5]: https://www.networkworld.com/article/3332018/wi-fi/wi-fi-6-with-ofdma-opens-a-world-of-new-wireless-possibilities.html -[6]: https://www.networkworld.com/article/3309439/mobile-wireless/80211ax-preview-access-points-and-routers-that-support-the-wi-fi-6-protocol-on-tap.html -[7]: https://meraki.cisco.com/lib/pdf/meraki_datasheet_MR55.pdf -[8]: https://blogs.cisco.com/news/unplugged-and-uninterrupted -[9]: https://www.networkworld.com/article/3391580/venerable-cisco-catalyst-6000-switches-ousted-by-new-catalyst-9600.html -[10]: https://www.networkworld.com/article/3321000/cisco-links-wireless-wired-worlds-with-new-catalyst-9000-switches.html -[11]: https://www.networkworld.com/article/3280988/cisco/cisco-opens-dna-center-network-control-and-management-software-to-the-devops-masses.html -[12]: https://www.networkworld.com/article/3336263/is-jumping-ahead-to-wi-fi-6-the-right-move.html -[13]: https://developer.cisco.com/wireless/?utm_campaign=colaunch-wireless19&utm_source=pressrelease&utm_medium=ciscopress-wireless-main -[14]: https://www.cisco.com/c/en/us/solutions/enterprise-networks/802-11ax-solution/openroaming.html -[15]: https://www.facebook.com/NetworkWorld/ -[16]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190513 HPE-s CEO lays out his technology vision.md b/sources/talk/20190513 HPE-s CEO lays out his technology vision.md deleted file mode 100644 index c9a8de9c8a..0000000000 --- a/sources/talk/20190513 HPE-s CEO lays out his technology vision.md +++ /dev/null @@ -1,162 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (HPE’s CEO lays out his technology vision) -[#]: via: (https://www.networkworld.com/article/3394879/hpe-s-ceo-lays-out-his-technology-vision.html) -[#]: author: (Eric Knorr ) - -HPE’s CEO lays out his technology vision -====== -In an exclusive interview, HPE CEO Antonio Neri unpacks his portfolio of technology initiatives, from edge computing to tomorrow’s memory-driven architecture. -![HPE][1] - -Like Microsoft's Satya Nadella, HPE CEO Antonio Neri is a technologist with a long history of leading initiatives in his company. Meg Whitman, his former boss at HPE, showed her appreciation of Neri’s acumen by promoting him to HPE Executive Vice President in 2015 – and gave him the green light to acquire [Aruba][2], [SimpliVity][3], [Nimble Storage][4], and [Plexxi][5], all of which added key items to HPE’s portfolio. - -Neri succeeded Whitman as CEO just 16 months ago. 
-In a recent interview with Network World, Neri’s engineering background was on full display as he explained HPE’s technology roadmap. First and foremost, he sees a huge opportunity in [edge computing][6], into which HPE is investing $4 billion over four years to further develop edge “connectivity, security, and obviously cloud and analytics.”
-
-**More about edge networking**
-
- * [How edge networking and IoT will reshape data centers][7]
 * [Edge computing best practices][8]
 * [How edge computing can help secure the IoT][9]
-
-Although his company abandoned its public cloud efforts in 2015, Neri is also bullish on the self-service “cloud experience,” which he asserts HPE is already implementing on-prem today in a software-defined, consumption-driven model. More fundamentally, he believes we are on the brink of a memory-driven computing revolution, where storage and memory become one and, depending on the use case, various compute engines are brought to bear on zettabytes of data.
-
-This interview, conducted by Network World Editor-in-Chief Eric Knorr and edited for length and clarity, digs into Neri’s technology vision. [A companion interview on CIO][10] centers on Neri’s views of innovation, management, and company culture.
-
-**Eric Knorr:** Your biggest and highest profile investment so far has been in edge computing. My understanding of edge computing is that we’re really talking about mini-data centers, defined by IDC as less than 100 square feet in size. What’s the need for a $4 billion investment in that?
-
-**Antonio Neri:** It’s twofold. We focus first on connectivity. Think about Aruba as a platform company, a cloud-enabled company. Now we offer branch solutions and edge data center solutions that include [wireless][11], LAN, [WAN][12] connectivity and soon [5G][13]. We give you a control plane so that that connectivity experience can be seen consistently the same way. All the policy management, the provisioning and the security aspects of it.
-
-**Knorr:** Is 5G a big focus?
-
-**Neri:** It’s a big focus for us. What customers are telling us is that it’s hard to get 5G inside the building. How do you hand off between 5G and Wi-Fi and give them the same experience? Because the problem is that we have LAN, wireless, and WAN already fully integrated into the control plane, but 5G sits over here. If you are an enterprise, you have to manage these two pipes independently.
-
-With the new spectrum, though, they are kind of commingling anyway. [Customers ask] why don’t you give me [a unified] experience on top of that, with all this policy management and cloud-enablement, so I can provision the right connectivity for the right use case? A sensor can use a lower radio access or [Bluetooth][15] or other type of connectivity because you don’t need persistent connectivity and you don’t have the power to do it.
-
-In some cases, you just put a SIM on it, and you have 5G, but in another one it’s just wireless connectivity. Wi-Fi connectivity is significantly lower cost than 5G. The use cases will dictate what type of connectivity you need, but the reality is they all want one experience. And we can do that because we have a great platform and a great partnership with MSPs, telcos, and providers.
-
-**Knorr:** So it sounds like much of your investment is going into that integration.
- -**Neri:** The other part is how we provide the ability to provision the right cloud computing at the edge for the right use cases. Think about, for example, a manufacturing floor. We can converge the OT and IT worlds through a converged infrastructure aspect that digitizes the analog process into a digital process. We bring the cloud compute in there, which is fully virtualized and containerized, we integrate Wi-Fi connectivity or LAN connectivity, and we eliminate all these analog processes that are multi-failure touchpoints because you have multiple things that have to come together. - -That’s a great example of a cloud at the edge. And maybe that small cloud is connected to a big cloud which could be in the large data center, which the customer owns – or it can be one of the largest public cloud providers. - -**Knorr:** It’s difficult to talk about the software-defined data center and private cloud without talking about [VMware][16]. Where do your software-defined solutions leave off and where does VMware begin? - -**Neri:** Where we stop is everything below the hypervisor, including the software-defined storage and things like SimpliVity. That has been the advantage we’ve had with [HPE OneView][17], so we can provision and manage the infrastructure-life-cycle and software-defined aspects at the infrastructure level. And let’s not forget security, because we’ve integrated [silicon root of trust][18] into our systems, which is a good advantage for us in the government space. - -Then above that we continue to develop capabilities. Customers want choice. That’s why [the partnership with Nutanix][19] was important. We offer an alternative to vSphere and vCloud Foundation with Nutanix Prism and Acropolis. - -**Knorr:** VMware has become the default for the private cloud, though. - -**Neri:** Obviously, VMware owns 60 percent of the on-prem virtualized environment, but more and more, containers are becoming the way to go in a cloud-native approach. For us, we own the full container stack, because we base our solution on Kubernetes. We deploy that. That’s why the partnership with Nutanix is important. With Nutanix, we offer KVM and the Prism stack and then we’re fully integrated with HPE OneView for the rest of the infrastructure. - -**Knorr:** You also offer GKE [Google [Kubernetes][20] Engine] on-prem. - -**Neri:** Correct. We’re working with Google on the next version of that. - -**Knorr:** How long do you think it will be before you start seeing Kubernetes and containers on bare metal? - -**Neri:** It’s an interesting question. Many customers tell us it’s like going back to the future. It’s like we’re paying this tax on the virtualization layer. - -**Knorr:** Exactly. - -**Neri:** I can go bare metal and containers and be way more efficient. It is a little bit back to the future. But it’s a different future. - -**Knorr:** And it makes the promise of [hybrid cloud][21] a little more real. I know HPE has been very bullish on hybrid. - -**Neri:** We have been the one to say the world would be hybrid. - -**Knorr:** But today, how hybrid is hybrid really? I mean, you have workloads in the public cloud, you have workloads in a [private cloud][22]. Can you really rope it all together into hybrid? - -**Neri:** I think you have to have portability eventually. - -**Knorr:** Eventually. It’s not really true now, though. - -**Neri:** No, not true now. If you look at it from the software brokering perspective that makes hybrid very small. 
We know this eventually has to be all connected, but it’s not there yet. More and more of these workloads have to go back and forth. - -If you ask me what the CIO role of the future will look like, it would be a service provider. I wake up in the morning, have a screen that says – oh, you know what? Today it’s cheaper to run that app here. I just slice it there and then it just moves. Whatever attributes on the data I want to manage and so forth – oh, today I have capacity here and by the way, why are you not using it? Slide it back here. That’s the hybrid world. - -Many people, when they started with the cloud, thought, “I’ll just virtualize everything,” but that’s not the cloud. You’re [virtualizing][23], but you have to make it self-service. Obviously, cloud-native applications have developed that are different today. That’s why containers are definitely a much more efficient way, and that’s why I agree that the bare-metal piece of this is coming back. - -**Knorr:** Do you worry about public cloud incursions into the [data center][24]? - -**Neri:** It’s happening. Of course I’m worried. But what at least gives me comfort is twofold. One is that the customer wants choice. They don’t want to be locked in. Service is important. It’s one thing to say: Here’s the system. The other is: Who’s going to maintain it for me? Who is going to run it for me? And even though you have all the automation tools in the world, somebody has to watch this thing. Our job is to bring the public-cloud experience on prem, so that the customer has that choice. - -**Knorr:** Part of that is economics. - -**Neri:** When you look at economics it’s no longer just the cost of compute anymore. What we see more and more is the cost of the data bandwidth back and forth. That’s why the first question a customer asks is: Where should I put my data? And that dictates a lot of things, because today the data transfer bill is way higher than the cost of renting a VM. - -The other thing is that when you go on the public cloud you can spin up a VM, but the problem is if you don’t shut it off, the bill keeps going. We brought, in the context of [composability][25], the ability to shut it off automatically. That’s why composability is important, because we can run, first of all, multi-workloads in the same infrastructure – whether it’s bare metal, virtualized or containerized. It’s called composable because the software layers of intelligence compose the right solutions from compute, storage, fabric and memory to that workload. When it doesn’t need it, it gives it back. - -**Knorr:** Is there any opportunity left at the hardware level to innovate? - -**Neri:** That’s why we think about memory-driven computing. Today we have a very CPU-centric approach. This is a limiting factor, and the reality is, if you believe data is the core of the architecture going forward, then the CPU can’t be the core of the architecture anymore. - -You have a bunch of inefficiency by moving data back and forth across the system, which also creates energy waste and so forth. What we are doing is basically rearchitecting this for once in 70 years. We take memory and storage and collapse the two into one, so this becomes one central pool, which is nonvolatile and becomes the core. And then we bring the right computing capability to the data. - -In an AI use case, you don’t move the data. You bring accelerators or GPUs to the data. For general purpose, you may use an X86, and maybe in video transcoding, you use an ARM-based architecture. 
-The magic is this: You can do this on zettabytes of data and the benefit is there is no waste, very little power to keep it alive, and it’s persistent.
-
-We call this the Generation Z fabric, which is based on a data fabric and silicon photonics. Now we go from copper, which is generating a lot of waste and a lot of heat and energy, to silicon photonics. So we not only scale this to zettabytes, we can do massive amounts of computation by bringing the right compute at the speed that’s needed to the data – and we solve a cost and scale problem too, because copper today costs a significant amount of money, and gold-plated connectors are hundreds of dollars.
-
-We’re going to actually implement this capability in silicon photonics in our current architectures by the end of the year. In Synergy, for example, which is a composable blade system, at the back of the rack you can swap from Ethernet to silicon photonics. It was designed that way. We already prototyped this in a simple 2U chassis with 160 TB of memory and 2000 cores. We were able to process a billion-record database with 55 million combinations of algorithms in less than a minute.
-
-**Knorr:** So you’re not just focusing on the edge, but the core, too.
-
-**Neri:** As you go down from the cloud to the edge, that architecture actually scales to the smallest things. You can do it on a massive scale or you can do it on a small scale. We will deploy these technologies in our systems architectures now. The whole ecosystem still has to be developed, because we also need an ISV ecosystem that can code applications in this new world, or you’re not taking advantage of it. Also, the current Linux kernel can only handle so much memory, so you have to rewrite the kernel. We are working with two universities to do that.
-
-The hardware will continue to evolve and develop, but there still is a lot of innovation that has to happen. What’s holding us back, honestly, is the software.
-
-**Knorr:** And that’s where a lot of your investment is going?
-
-**Neri:** Correct. Exactly right. Systems software, not application software. It’s the system software that makes this infrastructure solution-oriented, workload-optimized, autonomous and efficient.
- --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3394879/hpe-s-ceo-lays-out-his-technology-vision.html - -作者:[Eric Knorr][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2019/05/antonio-neri_hpe_new-100796112-large.jpg -[2]: https://www.networkworld.com/article/2891130/aruba-networks-is-different-than-hps-failed-wireless-acquisitions.html -[3]: https://www.networkworld.com/article/3158784/hpe-buying-simplivity-for-650-million-to-boost-hyperconvergence.html -[4]: https://www.networkworld.com/article/3177376/hpe-to-pay-1-billion-for-nimble-storage-after-cutting-emc-ties.html -[5]: https://www.networkworld.com/article/3273113/hpe-snaps-up-hyperconverged-network-hcn-vendor-plexxi.html -[6]: https://www.networkworld.com/article/3224893/what-is-edge-computing-and-how-it-s-changing-the-network.html -[7]: https://www.networkworld.com/article/3291790/data-center/how-edge-networking-and-iot-will-reshape-data-centers.html -[8]: https://www.networkworld.com/article/3331978/lan-wan/edge-computing-best-practices.html -[9]: https://www.networkworld.com/article/3331905/internet-of-things/how-edge-computing-can-help-secure-the-iot.html -[10]: https://www.cio.com/article/3394598/hpe-ceo-antonio-neri-rearchitects-for-the-future.html -[11]: https://www.networkworld.com/article/3238664/80211-wi-fi-standards-and-speeds-explained.html -[12]: https://www.networkworld.com/article/3248989/what-is-a-wide-area-network-a-definition-examples-and-where-wans-are-headed.html -[13]: https://www.networkworld.com/article/3203489/what-is-5g-how-is-it-better-than-4g.html -[14]: https://pluralsight.pxf.io/c/321564/424552/7490?u=https%3A%2F%2Fwww.pluralsight.com%2Fpaths%2Fapple-certified-technical-trainer-10-11 -[15]: https://www.networkworld.com/article/3235124/internet-of-things-definitions-a-handy-guide-to-essential-iot-terms.html -[16]: https://www.networkworld.com/article/3340259/vmware-s-transformation-takes-hold.html -[17]: https://www.networkworld.com/article/2174203/hp-expands-oneview-into-vmware-environs.html -[18]: https://www.networkworld.com/article/3199826/hpe-highlights-innovation-in-software-defined-it-security-at-discover.html -[19]: https://www.networkworld.com/article/3388297/hpe-and-nutanix-partner-for-hyperconverged-private-cloud-systems.html -[20]: https://www.infoworld.com/article/3268073/what-is-kubernetes-container-orchestration-explained.html -[21]: https://www.networkworld.com/article/3268448/what-is-hybrid-cloud-really-and-whats-the-best-strategy.html -[22]: https://www.networkworld.com/article/2159885/cloud-computing-gartner-5-things-a-private-cloud-is-not.html -[23]: https://www.networkworld.com/article/3285906/whats-the-future-of-server-virtualization.html -[24]: https://www.networkworld.com/article/3223692/what-is-a-data-centerhow-its-changed-and-what-you-need-to-know.html -[25]: https://www.networkworld.com/article/3266106/what-is-composable-infrastructure.html -[26]: https://www.facebook.com/NetworkWorld/ -[27]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190515 IBM overhauls mainframe-software pricing, adds hybrid, private-cloud services.md b/sources/talk/20190515 IBM overhauls mainframe-software pricing, adds hybrid, private-cloud services.md deleted file 
mode 100644
index b69109641d..0000000000
--- a/sources/talk/20190515 IBM overhauls mainframe-software pricing, adds hybrid, private-cloud services.md
+++ /dev/null
@@ -1,77 +0,0 @@
-[#]: collector: (lujun9972)
-[#]: translator: ( )
-[#]: reviewer: ( )
-[#]: publisher: ( )
-[#]: url: ( )
-[#]: subject: (IBM overhauls mainframe-software pricing, adds hybrid, private-cloud services)
-[#]: via: (https://www.networkworld.com/article/3395776/ibm-overhauls-mainframe-software-pricing-adds-hybrid-private-cloud-services.html)
-[#]: author: (Michael Cooney https://www.networkworld.com/author/Michael-Cooney/)
-
-IBM overhauls mainframe-software pricing, adds hybrid, private-cloud services
-======
-IBM brings cloud consumption model to the mainframe, adds Docker container extensions
-![Thinkstock][1]
-
-IBM continues to adopt new tools and practices for its mainframe customers to keep the Big Iron relevant in a cloud world.
-
-First of all, the company switched up its 20-year mainframe software pricing scheme to make it more palatable to hybrid and multicloud users who might be thinking of moving workloads off the mainframe and into the cloud.
-
-Specifically, IBM rolled out Tailored Fit Pricing for the IBM Z mainframe, which offers two consumption-based pricing models that can help customers cope with ever-changing workload – and hence software – costs.
-
-Tailored Fit Pricing removes the need for complex and restrictive capping, which typically weakens responsiveness and can impact service level availability, IBM said. IBM’s standard monthly mainframe licensing model calculates costs as a “rolling four-hour average” (R4HA), which determines cost based on a customer’s peak usage during the month. Customers would many times cap usage to keep costs down, experts said.
-
-Systems can now be configured to support optimal response times and service level agreements, rather than artificially slowing down workloads to manage software licensing costs, IBM stated.
-
-Predicting demand for IT services can be a major challenge, and in the era of hybrid and multicloud, everything is connected and workload patterns constantly change, wrote IBM’s Ross Mauri, General Manager, IBM Z, in a [blog][5] about the new pricing and services. “In this environment, managing demand for IT services can be a major challenge. As more customers shift to an enterprise IT model that incorporates on-premises, private cloud and public cloud, we’ve developed a simple cloud pricing model to drive the transformation forward.”
-
-[Tailored Fit Pricing][6] for IBM Z comes in two flavors, the Enterprise Consumption Solution and the Enterprise Capacity Solution.
-
-IBM said the Enterprise Consumption model is a tailored usage-based pricing model where customers pay only for what they use, removing the need for complex and restrictive capping.
-
-The Enterprise Capacity model lets customers mix and match workloads to help maximize use of the full capacity of the platform. Charges are referenced to the overall size of the physical environment and are calculated based on the estimated mix of workloads running, while providing the flexibility to vary actual usage across workloads, IBM said.
-
-The software pricing changes should be a welcome benefit to customers, experts said.
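-
-To make the R4HA mechanics concrete, here is a small sketch of the computation as just described: average utilization over a sliding four-hour window, with the month’s bill keyed to the peak of that average. The MSU figures, the hourly sampling, and the function itself are illustrative assumptions, not IBM’s billing code.
-
-```
-package main
-
-import "fmt"
-
-// peakR4HA returns the highest rolling four-hour average in a series of
-// hourly MSU utilization samples -- the figure the traditional monthly
-// licensing model bills against. Hourly sampling is a simplification.
-func peakR4HA(hourlyMSU []float64) float64 {
-    const window = 4
-    peak := 0.0
-    for i := 0; i+window <= len(hourlyMSU); i++ {
-        sum := 0.0
-        for _, v := range hourlyMSU[i : i+window] {
-            sum += v
-        }
-        if avg := sum / window; avg > peak {
-            peak = avg
-        }
-    }
-    return peak
-}
-
-func main() {
-    // A quiet day with one four-hour batch spike: the spike, not the
-    // average load, is what sets the monthly peak.
-    day := make([]float64, 24)
-    for i := range day {
-        day[i] = 100 // baseline MSUs (illustrative)
-    }
-    for i := 9; i < 13; i++ {
-        day[i] = 900 // spike
-    }
-    fmt.Printf("peak R4HA: %.0f MSUs\n", peakR4HA(day))
-}
-```
-
-Capping, in these terms, means clamping the samples so the peak stays low, which is exactly the practice the new pricing models are meant to remove.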
-
-“By making access to Z mainframes more flexible and ‘cloud-like,’ IBM is making it less likely that customers will consider shifting Z workloads to other systems and environments. As cloud providers become increasingly able to support mission critical applications, that’s a big deal,” wrote Charles King, president and principal analyst for Pund-IT, in a [blog][7] about the IBM changes.
-
-“A notable point about both models is that discounted growth pricing is offered on all workloads – whether they be 40-year old Assembler programs or 4-day old JavaScript apps. This is in contrast to previous models which primarily rewarded only brand-new applications with growth pricing. By thinking outside the Big Iron box, the company has substantially eased the pain for its largest clients’ biggest mainframe-related headaches,” King wrote.
-
-IBM’s Tailored Fit Pricing supports an increasing number of enterprises that want to continue to grow and build new services on top of this mission-critical platform, wrote [John McKenny][8], vice president of strategy for ZSolutions Optimization at BMC Software. “In not yet released results from the 2019 BMC State of the Mainframe Survey, 62% of the survey respondents reported that they are planning to expand MIPS/MSU consumption and are growing their mainframe workloads. For customers with no current plans for growth, the affordability and cost-competitiveness of the new pricing model will re-ignite interest in also using this platform as an integral part of their hybrid cloud strategies.”
-
-In addition to the pricing, IBM announced some new services that bring the mainframe closer to cloud workloads.
-
-First, IBM rolled out z/OS Container Extensions (zCX), which makes it possible to run Linux on Z applications that are packaged as Docker Container images on z/OS. Application developers can develop and data centers can operate popular open source packages, Linux applications, IBM software, and third-party software together with z/OS applications and data, IBM said. zCX will let customers use the latest open source tools, popular NoSQL databases, analytics frameworks, application servers, and so on within the z/OS environment.
-
-“With z/OS Container Extensions, customers will be able to access the most recent development tools and processes available in Linux on the Z ecosystem, giving developers the flexibility to build new, cloud-native containerized apps and deploy them on z/OS without requiring Linux or a Linux partition,” IBM’s Mauri stated.
-
-Big Blue also rolled out z/OS Cloud Broker, which will let customers access and deploy z/OS resources and services on [IBM Cloud Private][9]. [IBM Cloud Private][10] is the company’s Kubernetes-based Platform as a Service (PaaS) environment for developing and managing containerized applications. IBM said z/OS Cloud Broker is designed to help cloud application developers more easily provision and deprovision apps in z/OS environments.
-
--------------------------------------------------------------------------------
-
-via: https://www.networkworld.com/article/3395776/ibm-overhauls-mainframe-software-pricing-adds-hybrid-private-cloud-services.html
-
-作者:[Michael Cooney][a]
-选题:[lujun9972][b]
-译者:[译者ID](https://github.com/译者ID)
-校对:[校对者ID](https://github.com/校对者ID)
-
-本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出

-[a]: https://www.networkworld.com/author/Michael-Cooney/
-[b]: https://github.com/lujun9972
-[1]: https://images.techhive.com/images/article/2015/08/thinkstockphotos-520137237-100610459-large.jpg
-[2]: https://www.networkworld.com/article/3233132/cloud-computing/what-is-hybrid-cloud-computing.html
-[3]: https://www.networkworld.com/article/3252775/hybrid-cloud/multicloud-mania-what-to-know.html
-[4]: https://www.networkworld.com/newsletters/signup.html
-[5]: https://www.ibm.com/blogs/systems/ibm-z-defines-the-future-of-hybrid-cloud/
-[6]: https://www-01.ibm.com/common/ssi/cgi-bin/ssialias?infotype=AN&subtype=CA&htmlfid=897/ENUS219-014&appname=USN
-[7]: https://www.pund-it.com/blog/ibm-reinvents-the-z-mainframe-again/
-[8]: https://www.bmc.com/blogs/bmc-supports-ibm-tailored-fit-pricing-ibm-z/
-[9]: https://www.ibm.com/marketplace/cloud-private-on-z-and-linuxone
-[10]: https://www.networkworld.com/article/3340043/ibm-marries-on-premises-private-and-public-cloud-data.html
-[11]: https://www.facebook.com/NetworkWorld/
-[12]: https://www.linkedin.com/company/network-world
diff --git a/sources/talk/20190517 HPE to buy Cray, offer HPC as a service.md b/sources/talk/20190517 HPE to buy Cray, offer HPC as a service.md
deleted file mode 100644
index a1dafef683..0000000000
--- a/sources/talk/20190517 HPE to buy Cray, offer HPC as a service.md
+++ /dev/null
@@ -1,68 +0,0 @@
-[#]: collector: (lujun9972)
-[#]: translator: ( )
-[#]: reviewer: ( )
-[#]: publisher: ( )
-[#]: url: ( )
-[#]: subject: (HPE to buy Cray, offer HPC as a service)
-[#]: via: (https://www.networkworld.com/article/3396220/hpe-to-buy-cray-offer-hpc-as-a-service.html)
-[#]: author: (Tim Greene https://www.networkworld.com/author/Tim-Greene/)
-
-HPE to buy Cray, offer HPC as a service
-======
-High-performance computing offerings from HPE plus Cray could enable things like AI, ML, high-speed financial trading, and creation of digital twins for entire enterprise networks.
-![Cray Inc.][1]
-
-HPE has agreed to buy supercomputer-maker Cray for $1.3 billion, a deal that the companies say will bring their corporate customers high-performance computing as a service to help with analytics needed for artificial intelligence and machine learning, but also products supporting high-performance storage, compute and software.
-
-In addition to bringing HPC capabilities that can blend with and expand HPE’s current products, Cray brings with it customers in government and academia that might be interested in HPE’s existing portfolio as well.
-
-The companies say they expect to close the cash deal by the end of next April.
-
-The HPC-as-a-service would be offered through [HPE GreenLake][3], the company’s public-, private-, hybrid-cloud service. Such a service could address periodic enterprise need for fast computing that might otherwise be too expensive, says Tim Zimmerman, an analyst with Gartner.
-Businesses could use the service, for example, to create [digital twins][4] of their entire networks and use them to test new code to see how it will impact the network before deploying it live, Zimmerman says.
-
-Cray has HPC technology that HPE Labs might be exploring on its own, but that can be brought to market in a much quicker timeframe.
-
-HPE says that overall, buying Cray gives it technologies needed for massively data-intensive workloads such as AI and ML that are used for engineering services, transaction-based trading by financial firms, pharmaceutical research and academic studies into weather and genomes, for instance, Zimmerman says.
-
-As HPE puts it, Cray supercomputing platforms “have the ability to handle massive data sets, converged modelling, simulation, AI and analytics workloads.”
-
-Cray is working on [what it says will be the world’s fastest supercomputer][5] when it’s finished in 2021, cranking out 1.5 exaflops. The current fastest supercomputer runs at 143.5 petaflops. [Click [here][6] to see the current top 10 fastest supercomputers.]
-
-In general, HPE says it hopes to create a comprehensive line of products to support HPC infrastructure including “compute, high-performance storage, system interconnects, software and services.”
-
-Together, the talent in the two companies and their combined technologies should be able to increase innovation, HPE says.
-
-Earlier this month, HPE’s CEO Antonio Neri said in [an interview with _Network World_][7] that the company will be investing $4 billion over four years in a range of technology to boost “connectivity, security, and obviously cloud and analytics.” In laying out the company’s roadmap, he made no specific mention of HPC.
-
-HPE net revenues last fiscal year were $30.9 billion. Cray’s total revenue was $456 million, with a gross profit of $130 million.
-
-The acquisition will pay $35 per share for Cray stock.
- --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3396220/hpe-to-buy-cray-offer-hpc-as-a-service.html - -作者:[Tim Greene][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Tim-Greene/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2018/06/the_cray_xc30_piz_daint_system_at_the_swiss_national_supercomputing_centre_via_cray_inc_3x2_978x652-100762113-large.jpg -[2]: https://www.networkworld.com/article/3275385/who-s-developing-quantum-computers.html -[3]: https://www.networkworld.com/article/3280996/hpe-adds-greenlake-hybrid-cloud-to-enterprise-service-offerings.html -[4]: https://www.networkworld.com/article/3280225/what-is-digital-twin-technology-and-why-it-matters.html -[5]: https://www.networkworld.com/article/3373539/doe-plans-worlds-fastest-supercomputer.html -[6]: https://www.networkworld.com/article/3236875/embargo-10-of-the-worlds-fastest-supercomputers.html -[7]: https://www.networkworld.com/article/3394879/hpe-s-ceo-lays-out-his-technology-vision.html -[8]: https://www.facebook.com/NetworkWorld/ -[9]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190705 Lessons in Vendor Lock-in- Google and Huawei.md b/sources/talk/20190705 Lessons in Vendor Lock-in- Google and Huawei.md index 445075b49f..fe92b389a9 100644 --- a/sources/talk/20190705 Lessons in Vendor Lock-in- Google and Huawei.md +++ b/sources/talk/20190705 Lessons in Vendor Lock-in- Google and Huawei.md @@ -1,5 +1,5 @@ [#]: collector: "lujun9972" -[#]: translator: "acyanbird " +[#]: translator: " " [#]: reviewer: " " [#]: publisher: " " [#]: url: " " diff --git a/sources/tech/20140929 A Word from The Beegoist - Richard Kenneth Eng - Medium.md b/sources/tech/20140929 A Word from The Beegoist - Richard Kenneth Eng - Medium.md deleted file mode 100644 index 707c2942a7..0000000000 --- a/sources/tech/20140929 A Word from The Beegoist - Richard Kenneth Eng - Medium.md +++ /dev/null @@ -1,623 +0,0 @@ -A Word from The Beegoist – Richard Kenneth Eng – Medium -====== -I like the [Go programming language][22]. I sought to use Go to write web applications. To this end, I examined two of the “full stack” web frameworks available to Go developers (aka “Gophers”): [Beego][23] and [Revel][24]. - -The reason I looked for full stack was because of my prior experience with [web2py][25], a Python-based framework with extraordinary capability that was also [deliciously easy to get started and be highly productive in][26]. (I also cut my teeth on Smalltalk-based [Seaside][27], which has the same qualities.) In my opinion, full stack is the only way to go because developers should not waste time and effort on the minutiae of tool configuration and setup. The focus should be almost entirely on writing your application. - -Between Beego and Revel, I chose the former. It seemed to be more mature and better documented. It also had a built-in [ORM][28]. - -To be sure, Beego isn’t as easy and productive as web2py, but I believe in Go, so it is worth the effort to give Beego my best shot. To get started with Beego, I needed a project, a useful exercise that covered all the bases, such as database management, CSS styling, email capability, form validation, etc., and also provided a useful end product. 
-The project I selected was a user account management component for web applications. All of my previous applications required user registration/login, and Beego did not appear to have anything like that available.
-
-Now that I’ve completed the project, I believe it would be an excellent foundation for a Beego tutorial. I do not pretend that the code is optimal, nor do I pretend that it is bug-free, but if there are any bugs, it would be a good exercise for a novice to resolve them.
-
-The inspiration for this tutorial arose from my failure to find good, thorough tutorials when I first started learning Beego. There is one two-part tutorial that is often mentioned, but I found Part 2 sorely lacking. Throwing source code at you for you to figure out on your own is no way to teach. Thus, I wanted to offer my take on a tutorial. Only history will determine whether it was successful.
-
-So, without further ado, let’s begin. The word is “Go!”
-
-### Basic Assumptions
-
-You have some familiarity with the Go language. I highly recommend you follow this [Go tutorial][1].
-
-You’ve installed [Go][2] and [Beego][3] on your computer. There are plenty of good online resources to help you here (for [example][4]). It’s really quite easy.
-
-You have basic knowledge of CSS, HTML, and databases. You have at least one database package installed on your computer, such as [MySQL][5] (Community Edition) or [SQLite][6]. I have SQLite because it’s much easier to use.
-
-You have some experience writing software; basic skills are assumed. If you studied computer programming in school, then you’re off to a good start.
-
-You will be using your favourite programming editor in conjunction with the command line. I use [LiteIDE][7] (on the Mac), but I can suggest alternatives such as [TextMate][8] for the Mac, [Notepad++][9] for Windows, and [vim][10] for Linux.
-
-These basic assumptions define the target audience for the tutorial. If you’re a programming veteran, though, you’ll breeze through it and hopefully gain much useful knowledge as well.
-
-### Creating the Project
-
-First, we must create a Beego project. We’ll call it ‘[ACME][11]’. From the command line, change directory (cd) to $GOPATH/src and enter:
-```
-$ bee new acme
-
-```
-
-The following directory structure will be created:
-```
-acme
-....conf
-....controllers
-....models
-....routers
-....static
-........css
-........img
-........js
-....tests
-....views
-
-```
-
-Note that Beego is an MVC framework (Model/View/Controller), which means that your application will be separated into three general sections. Model refers to the internal database structure of your application. View is all about how your application looks on the computer screen; in our case, this includes HTML and CSS code. And Controller is where you have your business logic and user interactions.
-
-You can immediately compile and run your application by changing directory (cd acme) and typing:
-```
-$ bee run
-
-```
-
-In your browser, go to http://localhost:8080 to see the running application. It doesn’t do anything fancy right now; it simply greets you. But upon this foundation, we shall raise an impressive edifice.
-
-### The Source Code
-
-To follow along, you may [download the source code][12] for this tutorial. Cd to $GOPATH/src and unzip the file. [When you download the source, the filename that GitHub uses is ‘acme-master’. You must change it to ‘acme’.]
-
-### Program Design
-
-The user account management component provides the following functionality:
-### Program Design
-
-The user account management component provides the following functionality:
-
-  1. User registration (account creation)
-  2. Account verification (via email)
-  3. Login (create a session)
-  4. Logout (delete the session)
-  5. User profile (can change name, email, or password)
-  6. Remove user account
-
-The essence of a web application is the mapping of URLs (webpages) to the server functions that will process the HTTP requests. This mapping is what generates the workflow in the application. In Beego, the mapping is defined within the 'router'. Here's the code for our router (look at router.go in the 'routers' directory):
-```
-beego.Router("/home", &controllers.MainController{})
-beego.Router("/user/login/:back", &controllers.MainController{}, "get,post:Login")
-beego.Router("/user/logout", &controllers.MainController{}, "get:Logout")
-beego.Router("/user/register", &controllers.MainController{}, "get,post:Register")
-beego.Router("/user/profile", &controllers.MainController{}, "get,post:Profile")
-beego.Router("/user/verify/:uuid({[0-9A-F]{8}-[0-9A-F]{4}-4[0-9A-F]{3}-[89AB][0-9A-F]{3}-[0-9A-F]{12}})", &controllers.MainController{}, "get:Verify")
-beego.Router("/user/remove", &controllers.MainController{}, "get,post:Remove")
-beego.Router("/notice", &controllers.MainController{}, "get:Notice")
-```
-
-For example, in the line for 'login', "get,post:Login" says that both the GET and POST operations are handled by the 'Login' function. The ':back' is a request parameter; in this case, it tells us what page to return to after successful login.
-
-In the line for 'verify', the ':uuid' is a request parameter that must match the [regular expression][13] for a Version 4 UUID. The GET operation is handled by the 'Verify' function.
-
-More on this when we talk about controllers.
-
-Note that I've added '/home' to the first line in the router (it was originally '/'). This makes it convenient to go to the home page, which we often do in our application.
-
-### Model
-
-The database model for a user account is represented by the following struct:
-```
-package models
-
-import (
-	"github.com/astaxie/beego/orm"
-	"time"
-)
-
-type AuthUser struct {
-	Id       int
-	First    string
-	Last     string
-	Email    string `orm:"unique"`
-	Password string
-	Reg_key  string
-	Reg_date time.Time `orm:"auto_now_add;type(datetime)"`
-}
-
-func init() {
-	orm.RegisterModel(new(AuthUser))
-}
-```
-
-Place this in models.go in the 'models' directory. Ignore the init() for the time being.
-
-'Id' is the primary key, which is auto-incremented in the database. We also have 'First' and 'Last' names. 'Password' contains the hexadecimal representation of the [PBKDF2 hash][14] of the plaintext password.
-
-'Reg_key' contains the [UUID][15] string that is used for account verification (via email). 'Reg_date' is the timestamp indicating the time of registration.
-
-The funny-looking string literals associated with both 'Email' and 'Reg_date' are used to tell the database the special requirements of these fields. 'Email' must be a unique key. 'Reg_date' will be automatically assigned the date and time of database insertion.
-
-By the way, don't be scared of the PBKDF2 and UUID references. PBKDF2 is simply a way to securely store a user's password in the database. A UUID is a unique identifier that can be used to ensure the identity of the user for verification purposes.
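-To see how this model gets used, here is a hedged sketch of building and saving one record. The literal field values are placeholders, 'hexHash' stands for the hex-encoded PBKDF2 hash described above, and the ORM calls are covered properly later in the tutorial:
-```
-u := models.AuthUser{
-	First:    "Richard",
-	Last:     "Eng",
-	Email:    "richard@example.com", // must be unique, per the orm tag
-	Password: hexHash,               // hex string of the PBKDF2 hash
-	Reg_key:  uuid.NewV4().String(), // Version 4 UUID used for email verification
-}
-o := orm.NewOrm()
-o.Using("default")
-err := o.Insert(&u) // Id and Reg_date are filled in automatically
-```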
-
-### View
-
-For our CSS template design, I've chosen the [Stardust][16] theme (pictured at the start of this article). We will use its index.html as a basis for the view layout.
-
-Place the appropriate files from the Stardust theme into the 'css' and 'img' directories of the 'static' directory. The link statement in the header of index.html must be amended to point at our static path:
-```
-<link href="/static/css/default.css" rel="stylesheet" type="text/css" media="screen" />
-```
-
-And all references to image gifs and jpegs in index.html and default.css must point to '/static/img/'.
-
-The view layout contains a header section, a footer section, a sidebar section, and the central section where most of the action will take place. We will be using Go's templating facility, which allows us to replace embedded codes, signified by '{{' and '}}', with actual HTML. Here's our basic-layout.tpl (.tpl for 'template'):
-```
-{{.Header}}
-{{.LayoutContent}}
-{{.Sidebar}}
-{{.Footer}}
-```
-
-Since every webpage in our application will need to adhere to this basic layout, we need a common method to set it up (look at default.go):
-```
-func (this *MainController) activeContent(view string) {
-	this.Layout = "basic-layout.tpl"
-	this.LayoutSections = make(map[string]string)
-	this.LayoutSections["Header"] = "header.tpl"
-	this.LayoutSections["Sidebar"] = "sidebar.tpl"
-	this.LayoutSections["Footer"] = "footer.tpl"
-	this.TplNames = view + ".tpl"
-
-	sess := this.GetSession("acme")
-	if sess != nil {
-		this.Data["InSession"] = 1 // for login bar in header.tpl
-		m := sess.(map[string]interface{})
-		this.Data["First"] = m["first"]
-	}
-}
-```
-
-The template parameters, such as '.Sidebar', correspond to the keys used in the LayoutSections map. '.LayoutContent' is a special, implicit template parameter. We'll get to the GetSession stuff further below.
-
-Of course, we need to create the various template files (such as footer.tpl) in the 'views' directory. From index.html, we can carve out the header section for header.tpl. In skeleton form (the full theme markup is lengthy):
-```
-<html>
-<head>
-<title>StarDust by Free Css Templates</title>
-<link href="/static/css/default.css" rel="stylesheet" type="text/css" media="screen" />
-</head>
-<body>
-<!-- ...theme header markup, plus the login-bar lines I added... -->
-```
-
-I leave it as an exercise for you to carve out the sections for sidebar.tpl and footer.tpl.
-
-Note the login-bar lines called out in the snippet above. I added them to facilitate a "login bar" at the top of every webpage. Once you've logged into the application, you will see the bar as so:
-
-![][17]
-
-This login bar works in conjunction with the GetSession code snippet we saw in activeContent(). The logic is, if the user is logged in (ie, there is a non-nil session), then we set the InSession parameter to a value (any value), which tells the templating engine to use the "Welcome" bar instead of "Login". We also extract the user's first name from the session so that we can present the friendly affectation "Welcome, Richard".
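-To make those added lines concrete, here is a rough sketch of what the login bar boils down to in template terms. The div markup and styling hooks are placeholder assumptions; only InSession, First, and the /user/... routes come from the tutorial itself:
-```
-{{if .InSession}}
-  <div id="loginbar">Welcome, {{.First}}! <a href="/user/logout">Logout</a></div>
-{{else}}
-  <div id="loginbar"><a href="/user/login/home">Login</a> | <a href="/user/register">Register</a></div>
-{{end}}
-```
-The {{if}} test works because activeContent() only sets InSession when GetSession() returns a non-nil session.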
-The home page, represented by index.tpl, uses the following snippet from index.html:
-```
-Welcome to StarDust
-// to save space, I won't enter the remainder
-// of the snippet
-```
-
-#### Special Note
-
-The template files for the user module reside in the 'user' directory within 'views', just to keep things tidy. So, for example, the call to activeContent() for login is:
-```
-this.activeContent("user/login")
-```
-
-### Controller
-
-A controller handles requests by handing them off to the appropriate function or 'method'. We only have one controller for our application, and it's defined in default.go. The default method Get() for handling a GET operation is associated with our home page:
-```
-func (this *MainController) Get() {
-	this.activeContent("index")
-
-	// This page requires login
-	sess := this.GetSession("acme")
-	if sess == nil {
-		this.Redirect("/user/login/home", 302)
-		return
-	}
-	m := sess.(map[string]interface{})
-	fmt.Println("username is", m["username"])
-	fmt.Println("logged in at", m["timestamp"])
-}
-```
-
-I've made login a requirement for accessing this page. Logging in means creating a session, which by default expires after 3600 seconds of inactivity. A session is typically maintained on the client side by a 'cookie'.
-
-In order to support sessions in the application, the 'SessionOn' flag must be set to true. There are two ways to do this:
-
-  1. Insert 'beego.SessionOn = true' in the main program, main.go.
-  2. Insert 'sessionon = true' in the configuration file, app.conf, which can be found in the 'conf' directory.
-
-I chose #1. (But note that I used the configuration file to set 'EnableAdmin' to true: 'enableadmin = true'. EnableAdmin allows you to use the Supervisor Module in Beego that keeps track of CPU, memory, Garbage Collector, threads, etc., via port 8088: http://localhost:8088.)
-
-#### The Main Program
-
-The main program is also where we initialize the database to be used with the ORM (Object Relational Mapping) component. ORM makes it more convenient to perform database activities within our application. The main program's init():
-```
-func init() {
-	orm.RegisterDriver("sqlite", orm.DR_Sqlite)
-	orm.RegisterDataBase("default", "sqlite3", "acme.db")
-	name := "default"
-	force := false
-	verbose := false
-	err := orm.RunSyncdb(name, force, verbose)
-	if err != nil {
-		fmt.Println(err)
-	}
-}
-```
-
-To use SQLite, we must import 'go-sqlite3', which can be installed with the command:
-```
-$ go get github.com/mattn/go-sqlite3
-```
-
-As you can see in the code snippet, the SQLite driver must be registered, and 'acme.db' is designated as our SQLite database.
-
-Recall in models.go, there was an init() function:
-```
-func init() {
-	orm.RegisterModel(new(AuthUser))
-}
-```
-
-The database model has to be registered so that the appropriate table can be generated. To ensure that this init() function is executed, you must import 'models' without actually using it within the main program, as follows:
-```
-import _ "acme/models"
-```
-
-RunSyncdb() is used to autogenerate the tables when you start the program. (This is very handy for creating the database tables without having to **manually** do it in the database command line utility.) If you set 'force' to true, it will drop any existing tables and recreate them.
-
-#### The User Module
-
-User.go contains all the methods for handling login, registration, profile, etc. There are several third-party packages we need to import; they provide support for email, PBKDF2, and UUID.
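-As a quick aside, here is a minimal, self-contained sketch of the hash-then-hex flow these packages support. It deliberately uses the stock golang.org/x/crypto/pbkdf2 package instead of the tutorial's vendored 'pk' package, so the iteration count, password literal, and overall shape are illustrative assumptions rather than the project's actual code:
-```
-package main
-
-import (
-	"crypto/rand"
-	"crypto/sha256"
-	"encoding/hex"
-	"fmt"
-
-	"golang.org/x/crypto/pbkdf2"
-)
-
-func main() {
-	// 16-byte salt and a 32-byte hash, matching the sizes used later in Login().
-	salt := make([]byte, 16)
-	if _, err := rand.Read(salt); err != nil {
-		panic(err)
-	}
-	hash := pbkdf2.Key([]byte("my-plaintext-password"), salt, 4096, 32, sha256.New)
-
-	// The hex string is what ends up in the AuthUser.Password column.
-	fmt.Println(hex.EncodeToString(hash))
-}
-```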
First, though, we must get them into our project…
-```
-$ go get github.com/alexcesaro/mail/gomail
-$ go get github.com/twinj/uuid
-```
-
-I originally got **github.com/gokyle/pbkdf2**, but this package was pulled from GitHub, so you can no longer get it. I've incorporated this package into my source under the 'utilities' folder, and the import is:
-```
-import pk "acme/utilities/pbkdf2"
-```
-
-The 'pk' is a convenient alias so that I don't have to type the rather unwieldy 'pbkdf2'.
-
-#### ORM
-
-It's pretty straightforward to use ORM. The basic pattern is to create an ORM object, specify the 'default' database, and select which ORM operation you want, eg,
-```
-o := orm.NewOrm()
-o.Using("default")
-err := o.Insert(&user) // or
-err := o.Read(&user, "Email") // or
-err := o.Update(&user) // or
-err := o.Delete(&user)
-```
-
-#### Flash
-
-By the way, Beego provides a way to present notifications on your webpage through the use of 'flash'. Basically, you create a 'flash' object, give it your notification message, store the flash in the controller, and then retrieve the message in the template file, eg,
-```
-flash := beego.NewFlash()
-flash.Error("You've goofed!") // or
-flash.Notice("Well done!")
-flash.Store(&this.Controller)
-```
-
-And in your template file, reference the Error flash with:
-```
-{{if .flash.error}}

  {{.flash.error}}
-  -{{end}} - -``` - -#### Form Validation - -Once the user posts a request (by pressing the Submit button, for example), our handler must extract and validate the form input. So, first, check that we have a POST operation: -``` -if this.Ctx.Input.Method() == "POST" { - -``` - -Let’s get a form element, say, email: -``` -email := this.GetString("email") - -``` - -The string “email” is the same as in the HTML form: -``` - - -``` - -To validate it, we create a validation object, specify the type of validation, and then check to see if there are any errors: -``` -valid := validation.Validation{} -valid.Email(email, "email") // must be a proper email address -if valid.HasErrors() { - for _, err := range valid.Errors { - -``` - -What you do with the errors is up to you. I like to present all of them at once to the user, so as I go through the range of valid.Errors, I add them to a map of errors that will eventually be used in the template file. Hence, the full snippet: -``` -if this.Ctx.Input.Method() == "POST" { - email := this.GetString("email") - password := this.GetString("password") - valid := validation.Validation{} - valid.Email(email, "email") - valid.Required(password, "password") - if valid.HasErrors() { - errormap := []string{} - for _, err := range valid.Errors { - errormap = append(errormap, "Validation failed on "+err.Key+": "+err.Message+"\n") - } - this.Data["Errors"] = errormap - return - } - -``` - -### The User Management Methods - -We’ve looked at the major pieces of the controller. Now, we get to the meat of the application, the user management methods: - - * Login() - * Logout() - * Register() - * Verify() - * Profile() - * Remove() - - - -Recall that we saw references to these functions in the router. The router associates each URL (and HTTP request) with the corresponding controller method. - -#### Login() - -Let’s look at the pseudocode for this method: -``` -if the HTTP request is "POST" then - Validate the form (extract the email address and password). - Read the password hash from the database, keying on email. - Compare the submitted password with the one on record. - Create a session for this user. -endif - -``` - -In order to compare passwords, we need to give pk.MatchPassword() a variable with members ‘Hash’ and ‘Salt’ that are **byte slices**. Hence, -``` -var x pk.PasswordHash - -``` -``` -x.Hash = make([]byte, 32) -x.Salt = make([]byte, 16) -// after x has the password from the database, then... - -``` -``` -if !pk.MatchPassword(password, &x) { - flash.Error("Bad password") - flash.Store(&this.Controller) - return -} - -``` - -Creating a session is trivial, but we want to store some useful information in the session, as well. So we make a map and store first name, email address, and the time of login: -``` -m := make(map[string]interface{}) -m["first"] = user.First -m["username"] = email -m["timestamp"] = time.Now() -this.SetSession("acme", m) -this.Redirect("/"+back, 302) // go to previous page after login - -``` - -Incidentally, the name “acme” passed to SetSession is completely arbitrary; you just need to reference the same name to get the same session. - -#### Logout() - -This one is trivially easy. We delete the session and redirect to the home page. - -#### Register() -``` -if the HTTP request is "POST" then - Validate the form. - Create the password hash for the submitted password. - Prepare new user record. - Convert the password hash to hexadecimal string. - Generate a UUID and insert the user into database. - Send a verification email. 
- Flash a message on the notification page. -endif - -``` - -To send a verification email to the user, we use **gomail** … -``` -link := "http://localhost:8080/user/verify/" + u // u is UUID -host := "smtp.gmail.com" -port := 587 -msg := gomail.NewMessage() -msg.SetAddressHeader("From", "acmecorp@gmail.com", "ACME Corporation") -msg.SetHeader("To", email) -msg.SetHeader("Subject", "Account Verification for ACME Corporation") -msg.SetBody("text/html", "To verify your account, please click on the link: "+link+"

<br><br>Best Regards,<br>
ACME Corporation") -m := gomail.NewMailer(host, "youraccount@gmail.com", "YourPassword", port) -if err := m.Send(msg); err != nil { - return false -} - -``` - -I chose Gmail as my email relay (you will need to open your own account). Note that Gmail ignores the “From” address (in our case, “[acmecorp@gmail.com][18]”) because Gmail does not permit you to alter the sender address in order to prevent phishing. - -#### Notice() - -This special router method is for displaying a flash message on a notification page. It’s not really a user module function; it’s general enough that you can use it in many other places. - -#### Profile() - -We’ve already discussed all the pieces in this function. The pseudocode is: -``` -Login required; check for a session. -Get user record from database, keyed on email (or username). -if the HTTP request is "POST" then - Validate the form. - if there is a new password then - Validate the new password. - Create the password hash for the new password. - Convert the password hash to hexadecimal string. - endif - Compare submitted current password with the one on record. - Update the user record. - - update the username stored in session -endif - -``` - -#### Verify() - -The verification email contains a link which, when clicked by the recipient, causes Verify() to process the UUID. Verify() attempts to read the user record, keyed on the UUID or registration key, and if it’s found, then the registration key is removed from the database. - -#### Remove() - -Remove() is pretty much like Login(), except that instead of creating a session, you delete the user record from the database. - -### Exercise - -I left out one user management method: What if the user has forgotten his password? We should provide a way to reset the password. I leave this as an exercise for you. All the pieces you need are in this tutorial. (Hint: You’ll need to do it in a way similar to Registration verification. You should add a new Reset_key to the AuthUser table. And make sure the user email address exists in the database before you send the Reset email!) - -[Okay, so I’ll give you the [exercise solution][19]. I’m not cruel.] - -### Wrapping Up - -Let’s review what we’ve learned. We covered the mapping of URLs to request handlers in the router. We showed how to incorporate a CSS template design into our views. We discussed the ORM package, and how it’s used to perform database operations. We examined a number of third-party utilities useful in writing our application. The end result is a component useful in many scenarios. - -This is a great deal of material in a tutorial, but I believe it’s the best way to get started in writing a practical application. - -[For further material, look at the [sequel][20] to this article, as well as the [final edition][21].] 
- --------------------------------------------------------------------------------- - -via: https://medium.com/@richardeng/a-word-from-the-beegoist-d562ff8589d7 - -作者:[Richard Kenneth Eng][a] -选题:[lujun9972](https://github.com/lujun9972) -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]:https://medium.com/@richardeng?source=post_header_lockup -[1]:http://tour.golang.org/ -[2]:http://golang.org/ -[3]:http://beego.me/ -[4]:https://medium.com/@richardeng/in-the-beginning-61c7e63a3ea6 -[5]:http://www.mysql.com/ -[6]:http://www.sqlite.org/ -[7]:https://code.google.com/p/liteide/ -[8]:http://macromates.com/ -[9]:http://notepad-plus-plus.org/ -[10]:https://medium.com/@richardeng/back-to-the-future-9db24d6bcee1 -[11]:http://en.wikipedia.org/wiki/Acme_Corporation -[12]:https://github.com/horrido/acme -[13]:http://en.wikipedia.org/wiki/Regular_expression -[14]:http://en.wikipedia.org/wiki/PBKDF2 -[15]:http://en.wikipedia.org/wiki/Universally_unique_identifier -[16]:http://www.freewebtemplates.com/download/free-website-template/stardust-141989295/ -[17]:https://cdn-images-1.medium.com/max/1600/1*1OpYy1ISYGUaBy0U_RJ75w.png -[18]:mailto:acmecorp@gmail.com -[19]:https://github.com/horrido/acme-exercise -[20]:https://medium.com/@richardeng/a-word-from-the-beegoist-ii-9561351698eb -[21]:https://medium.com/@richardeng/a-word-from-the-beegoist-iii-dbd6308b2594 -[22]: http://golang.org/ -[23]: http://beego.me/ -[24]: http://revel.github.io/ -[25]: http://www.web2py.com/ -[26]: https://medium.com/@richardeng/the-zen-of-web2py-ede59769d084 -[27]: http://www.seaside.st/ -[28]: http://en.wikipedia.org/wiki/Object-relational_mapping diff --git a/sources/tech/20190528 A Quick Look at Elvish Shell.md b/sources/tech/20190528 A Quick Look at Elvish Shell.md index 82927332a7..778965d442 100644 --- a/sources/tech/20190528 A Quick Look at Elvish Shell.md +++ b/sources/tech/20190528 A Quick Look at Elvish Shell.md @@ -1,4 +1,3 @@ -Translating by name1e5s [#]: collector: (lujun9972) [#]: translator: ( ) [#]: reviewer: ( ) diff --git a/sources/tech/20190718 What you need to know to be a sysadmin.md b/sources/tech/20190718 What you need to know to be a sysadmin.md index bd482f3ca4..55947b8456 100644 --- a/sources/tech/20190718 What you need to know to be a sysadmin.md +++ b/sources/tech/20190718 What you need to know to be a sysadmin.md @@ -1,5 +1,5 @@ [#]: collector: (lujun9972) -[#]: translator: (WangYueScream ) +[#]: translator: ( ) [#]: reviewer: ( ) [#]: publisher: ( ) [#]: url: ( ) From 59d02baeb9416390cbd013d978f5bcfb047fd12a Mon Sep 17 00:00:00 2001 From: Xingyu Wang Date: Sun, 15 Sep 2019 14:32:23 +0800 Subject: [PATCH 075/202] APL --- .../talk/20180904 How blockchain can complement open source.md | 1 + 1 file changed, 1 insertion(+) diff --git a/sources/talk/20180904 How blockchain can complement open source.md b/sources/talk/20180904 How blockchain can complement open source.md index 7712539f3f..7d7aa335b5 100644 --- a/sources/talk/20180904 How blockchain can complement open source.md +++ b/sources/talk/20180904 How blockchain can complement open source.md @@ -1,3 +1,4 @@ +wxy has applied How blockchain can complement open source ====== From 53c3681f08778d0fc65e51b20e79ee4b534ca5e5 Mon Sep 17 00:00:00 2001 From: heguangzhi <7731226@qq.com> Date: Sun, 15 Sep 2019 20:04:54 +0800 Subject: [PATCH 076/202] Update 20190828 Managing Ansible environments on MacOS with Conda.md --- ...90828 
Managing Ansible environments on MacOS with Conda.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sources/tech/20190828 Managing Ansible environments on MacOS with Conda.md b/sources/tech/20190828 Managing Ansible environments on MacOS with Conda.md index 7aa3a4181b..f46a301e44 100644 --- a/sources/tech/20190828 Managing Ansible environments on MacOS with Conda.md +++ b/sources/tech/20190828 Managing Ansible environments on MacOS with Conda.md @@ -1,5 +1,5 @@ [#]: collector: (lujun9972) -[#]: translator: ( ) +[#]: translator: (heguangzhi) [#]: reviewer: ( ) [#]: publisher: ( ) [#]: url: ( ) @@ -159,7 +159,7 @@ via: https://opensource.com/article/19/8/using-conda-ansible-administration-maco 作者:[James Farrell][a] 选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) +译者:[译者ID](https://github.com/heguangzhi) 校对:[校对者ID](https://github.com/校对者ID) 本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 From f8ea7a11ecfdbd96701b888e711b53910d60bd7c Mon Sep 17 00:00:00 2001 From: Xingyu Wang Date: Sun, 15 Sep 2019 20:08:11 +0800 Subject: [PATCH 077/202] TSL --- ...w blockchain can complement open source.md | 96 ------------------- ...w blockchain can complement open source.md | 87 +++++++++++++++++ 2 files changed, 87 insertions(+), 96 deletions(-) delete mode 100644 sources/talk/20180904 How blockchain can complement open source.md create mode 100644 translated/talk/20180904 How blockchain can complement open source.md diff --git a/sources/talk/20180904 How blockchain can complement open source.md b/sources/talk/20180904 How blockchain can complement open source.md deleted file mode 100644 index 7d7aa335b5..0000000000 --- a/sources/talk/20180904 How blockchain can complement open source.md +++ /dev/null @@ -1,96 +0,0 @@ -wxy has applied -How blockchain can complement open source -====== - -![](https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/block-quilt-chain.png?itok=mECoDbrc) - -[The Cathedral and The Bazaar][1] is a classic open source story, written 20 years ago by Eric Steven Raymond. In the story, Eric describes a new revolutionary software development model where complex software projects are built without (or with a very little) central management. This new model is open source. - -Eric's story compares two models: - - * The classic model (represented by the cathedral), in which software is crafted by a small group of individuals in a closed and controlled environment through slow and stable releases. - * And the new model (represented by the bazaar), in which software is crafted in an open environment where individuals can participate freely but still produce a stable and coherent system. - - - -Some of the reasons open source is so successful can be traced back to the founding principles Eric describes. Releasing early, releasing often, and accepting the fact that many heads are inevitably better than one allows open source projects to tap into the world’s pool of talent (and few companies can match that using the closed source model). - -Two decades after Eric's reflective analysis of the hacker community, we see open source becoming dominant. It is no longer a model only for scratching a developer’s personal itch, but instead, the place where innovation happens. Even the world's [largest][2] software companies are transitioning to this model in order to continue dominating. 
- -### A barter system - -If we look closely at how the open source model works in practice, we realize that it is a closed system, exclusive only to open source developers and techies. The only way to influence the direction of a project is by joining the open source community, understanding the written and the unwritten rules, learning how to contribute, the coding standards, etc., and doing it yourself. - -This is how the bazaar works, and it is where the barter system analogy comes from. A barter system is a method of exchanging services and goods in return for other services and goods. In the bazaar—where the software is built—that means in order to take something, you must also be a producer yourself and give something back in return. And that is by exchanging your time and knowledge for getting something done. A bazaar is a place where open source developers interact with other open source developers and produce open source software the open source way. - -The barter system is a great step forward and an evolution from the state of self-sufficiency where everybody must be a jack of all trades. The bazaar (open source model) using the barter system allows people with common interests and different skills to gather, collaborate, and create something that no individual can create on their own. The barter system is simple and lacks complex problems of the modern monetary systems, but it also has some limitations, such as: - - * Lack of divisibility: In the absence of a common medium of exchange, a large indivisible commodity/value cannot be exchanged for a smaller commodity/value. For example, if you want to do even a small change in an open source project, you may sometimes still need to go through a high entry barrier. - * Storing value: If a project is important to your company, you may want to have a large investment/commitment in it. But since it is a barter system among open source developers, the only way to have a strong say is by employing many open source committers, and that is not always possible. - * Transferring value: If you have invested in a project (trained employees, hired open source developers) and want to move focus to another project, it is not possible to transfer expertise, reputation, and influence quickly. - * Temporal decoupling: The barter system does not provide a good mechanism for deferred or advance commitments. In the open source world, that means a user cannot express commitment or interest in a project in a measurable way in advance, or continuously for future periods. - - - -Below, we will explore how to address these limitations using the back door to the bazaar. - -### A currency system - -People are hanging at the bazaar for different reasons: Some are there to learn, some are there to scratch a personal developer's itch, and some work for large software farms. Because the only way to have a say in the bazaar is to become part of the open source community and join the barter system, in order to gain credibility in the open source world, many large software companies employ these developers and pay them in monetary value. This represents the use of a currency system to influence the bazaar. Open source is no longer only for scratching the personal developer itch. It also accounts for a significant part of the overall software production worldwide, and there are many who want to have an influence. - -Open source sets the guiding principles through which developers interact and build a coherent system in a distributed way. 
It dictates how a project is governed, how software is built, and how the output distributed to users. It is an open consensus model for decentralized entities for building quality software together. But the open source model does not cover how open source is subsidized. Whether it is sponsored, directly or indirectly, through intrinsic or extrinsic motivators is irrelevant to the bazaar. - -![](https://opensource.com/sites/default/files/uploads/tokenomics_-_page_4.png) - -Currently, there is no equivalent of the decentralized open source development model for subsidization purposes. The majority of open source subsidization is centralized, where typically one company dominates a project by employing the majority of the open source developers of that project. And to be honest, this is currently the best-case scenario, as it guarantees that the developers will be paid for a long period and the project will continue to flourish. - -There are also exceptions for the project monopoly scenario: For example, some Cloud Native Computing Foundation projects are developed by a large number of competing companies. Also, the Apache Software Foundation aims for their projects not to be dominated by a single vendor by encouraging diverse contributors, but most of the popular projects, in reality, are still single-vendor projects. - -What we are missing is an open and decentralized model that works like the bazaar without a central coordination and ownership, where consumers (open source users) and producers (open source developers) interact with each other, driven by market forces and open source value. In order to complement open source, such a model must also be open and decentralized, and this is why I think the blockchain technology would [fit best here][3]. - -Most of the existing blockchain (and non-blockchain) platforms that aim to subsidize open source development are targeting primarily bug bounties, small and piecemeal tasks. A few also focus on funding new open source projects. But not many aim to provide mechanisms for sustaining continued development of open source projects—basically, a system that would emulate the behavior of an open source service provider company, or open core, open source-based SaaS product company: ensuring developers get continued and predictable incentives and guiding the project development based on the priorities of the incentivizers; i.e., the users. Such a model would address the limitations of the barter system listed above: - - * Allow divisibility: If you want something small fixed, you can pay a small amount rather than the full premium of becoming an open source developer for a project. - * Storing value: You can invest a large amount into a project and ensure both its continued development and that your voice is heard. - * Transferring value: At any point, you can stop investing in the project and move funds into other projects. - * Temporal decoupling: Allow regular recurring payments and subscriptions. - - - -There would be also other benefits, purely from the fact that such a blockchain-based system is transparent and decentralized: to quantify a project’s value/usefulness based on its users’ commitment, open roadmap commitment, decentralized decision making, etc. - -### Conclusion - -On the one hand, we see large companies hiring open source developers and acquiring open source startups and even foundational platforms (such as Microsoft buying GitHub). 
Many, if not most, long-running successful open source projects are centralized around a single vendor. The significance of open source and its centralization is a fact. - -On the other hand, the challenges around [sustaining open source][4] software are becoming more apparent, and many are investigating this space and its foundational issues more deeply. There are a few projects with high visibility and a large number of contributors, but there are also many other still-important projects that lack enough contributors and maintainers. - -There are [many efforts][3] trying to address the challenges of open source through blockchain. These projects should improve the transparency, decentralization, and subsidization and establish a direct link between open source users and developers. This space is still very young, but it is progressing quickly, and with time, the bazaar is going to have a cryptocurrency system. - -Given enough time and adequate technology, decentralization is happening at many levels: - - * The internet is a decentralized medium that has unlocked the world’s potential for sharing and acquiring knowledge. - * Open source is a decentralized collaboration model that has unlocked the world’s potential for innovation. - * Similarly, blockchain can complement open source and become the decentralized open source subsidization model. - - - -Follow me on [Twitter][5] for other posts in this space. - --------------------------------------------------------------------------------- - -via: https://opensource.com/article/18/9/barter-currency-system - -作者:[Bilgin lbryam][a] -选题:[lujun9972](https://github.com/lujun9972) -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://opensource.com/users/bibryam -[1]: http://catb.org/ -[2]: http://oss.cash/ -[3]: https://opensource.com/article/18/8/open-source-tokenomics -[4]: https://www.youtube.com/watch?v=VS6IpvTWwkQ -[5]: http://twitter.com/bibryam diff --git a/translated/talk/20180904 How blockchain can complement open source.md b/translated/talk/20180904 How blockchain can complement open source.md new file mode 100644 index 0000000000..d5c9691abf --- /dev/null +++ b/translated/talk/20180904 How blockchain can complement open source.md @@ -0,0 +1,87 @@ +区块链是如何补充开源的 +====== + +![](https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/block-quilt-chain.png?itok=mECoDbrc) + +[大教堂与集市][1]是 20 年前由埃里克·史蒂文·雷蒙德Eric Steven Raymond(ESR)撰写的经典开源故事。在这个故事中,ESR 描述了一种新的革命性软件开发模型,其中复杂的软件项目是在没有(或者很少的)集中管理的情况下构建的。这个新模型就是开源。 + +ESR 的故事比较了两种模式: + +* 经典模型(由“大教堂”代表),其中软件由一小群人在封闭和受控的环境中通过缓慢而稳定的发布版本制作而成。 +* 以及新模式(由“集市”代表),其中软件是在开放的环境中制作的,个人可以自由参与,但仍然可以产生一个稳定和连贯的系统。 +   +开源如此成功的一些原因可以追溯到 ESR 所描述的基础原则。尽早发布、经常发布,并接受许多头脑必然比一个更好的事实,会让开源项目进入全世界的人才库(很少有公司能够使用闭源模式与之匹敌)。 + +在 ESR 对黑客社区的反思分析 20 年后,我们看到开源成为占据主导地位的的模式。它不再仅仅是开发人员的个人癖好的模式,而是创新发生的地方。即使是全球[最大][2]软件公司也正在转向这种模式,以便继续占据主导地位。 + +### 易货系统 + +如果我们仔细研究开源模型在实践中的运作方式,我们就会发现它是一个封闭的系统,专属于开源开发人员和技术人员。影响项目方向的唯一方法是加入开源社区,了解成文和不成文的规则,学习如何贡献,编码标准等,并自己亲力完成。 + +这就是集市的运作方式,也是易货系统类比的来源。易货系统是一种交换服务和货物以换取其他服务和货物的方法。在市场中(即软件的构建)这意味着为了获取某些东西,你必须自己也是一个生产者并回馈一些东西——那就是通过交换你的时间和知识来完成任务。集市是开源开发人员与其他开源开发人员交互并以开源方式生成开源软件的地方。 + +易货系统向前迈出了一大步,从自给自足的状态演变而来,而在自给自足的状态下,每个人都必须成为所有行业的杰出人选。使用易货系统的集市(开源模式)允许具有共同兴趣和不同技能的人们收集、协作和创造个人无法自己创造的东西。易货系统简单,而不像现代货币系统那么复杂,但也有一些局限性,例如: + +* 缺乏可分性:在没有共同的交换媒介的情况下,不能将较大的不可分割的商品/价值换成较小的商品/价值。例如,如果你想在开源项目中进行一些小的更改,有时你可能仍需要经历一个高进入门槛。 +* 
存储价值:如果项目对贵公司很重要,你可能想要投入大量投资/承诺。但由于它是开源开发人员之间的易货系统,因此拥有强大发言权的唯一方法是雇佣许多开源贡献者,但这并非总是可行的。 +* 转移价值:如果你投资了一个项目(受过培训的员工、雇用开源开发人员)并希望将重点转移到另一个项目,却不可能快速转移(你在上一个项目中拥有的)专业知识、声誉和影响力。 +* 时间脱钩:易货系统没有为延期或提前承诺提供良好的机制。在开源世界中,这意味着用户无法提前或在未来期间以可衡量的方式表达对项目的承诺或兴趣。 +   +下面,我们将探讨如何使用集市的后门解决这些限制。 + +### 货币系统 + +人们因为不同的原因勾连在集市上:有些人在那里学习,有些是出于满足开发人员个人的喜好,有些人在大型软件工厂工作。因为在集市中拥有发言权的唯一方法是成为开源社区的一份子并加入这个易货系统,为了在开源世界获得信誉,许多大型软件公司雇用这些开发者并以货币方式支付薪酬。这代表使用货币系统来影响集市。开源不再只是为了满足开发人员个人的喜好。它也占据全球整体软件生产的重要部分,并且有许多人想要产生影响。 + +开源设置了开发人员交互的指导原则,并以分布式方式构建一致的系统。它决定了项目的治理方式、软件的构建方式以及其成果如何分配给用户。它是分散实体共同构建高质量软件的开放共识模型。但是开源模型并没有包括如何补贴开源。无论是直接还是间接地通过内在或外在动机的赞助,都与集市无关。 + +![](https://opensource.com/sites/default/files/uploads/tokenomics_-_page_4.png) + +目前,没有相当于以补贴为目的的去中心化式开源开发模型。大多数开源补贴都是集中式的,通常一家公司通过雇用该项目的主要开源开发人员来支配该项目。说实话,这是目前最好的情况,因为它保证了开发人员将长期获得报酬,项目也将继续蓬勃发展。 + +项目垄断情景也有例外情况:例如,一些云原生计算基金会(CNCF)项目是由大量的竞争公司开发的。此外,Apache 软件基金会(ASF)旨在通过鼓励不同的贡献者来使他们的项目不被单一供应商所主导,但实际上大多数受欢迎的项目仍然是单一供应商项目。 + +我们缺少的是一个开放的、去中心化的模式,就像一个没有集中协调和所有权的集市一样,消费者(开源用户)和生产者(开源开发者)在市场力量和开源价值的驱动下相互作用。为了补充开源,这样的模型也必须是开放和去中心化的,这就是为什么我认为区块链技术[最适合][3]的原因。 + +旨在补贴开源开发的大多数现有区块链(和非区块链)平台主要针对的是错误赏金、小型和零碎的任务。少数人还专注于资助新的开源项目。但并没有很多人的目标是提供维持开源项目持续开发的机制 —— 基本上,这个系统可以模仿开源服务提供商公司或开放核心、基于开源的 SaaS 产品公司的行为:确保开发人员继续进行可预测的激励措施,并根据激励者(即用户)的优先事项指导项目开发。这种模型将解决上面列出的易货系统的局限性: + +* 允许可分性:如果你想要一些小的修复,你可以支付少量费用,而不是成为项目的开源开发人员的全部费用。 +* 存储价值:你可以在项目中投入大量资金,并确保其持续发展和你的发言权。 +* 转移价值:在任何时候,你都可以停止投资项目并将资金转移到其他项目中。 +* 时间脱钩:允许定期定期付款和订阅。 + +还有其他好处,纯粹是因为这种基于区块链的系统是透明和去中心化的:根据用户的承诺、开放的路线图承诺、去中心化决策等来量化项目的价值/实用性。 + +### 总结 + +一方面,我们看到大公司雇用开源开发人员并收购开源初创公司甚至基础平台(例如微软收购 GitHub)。许多(甚至大多数)长期成功的开源项目集中在一个供应商周围。开源的重要性及其集中化是一个事实。 + +另一方面,围绕[持续开源][4]软件的挑战正变得越来越明显,许多人正在更深入地研究这个领域及其基础问题。有一些项目具有很高的知名度和大量的贡献者,但还有许多其他一样重要的项目缺乏足够的贡献者和维护者。 + +有[许多努力][3]试图通过区块链来解决开源的挑战。这些项目应提高透明度、去中心化和补贴,并在开源用户和开发人员之间建立直接联系。这个领域还很年轻,但是进展很快,随着时间的推移,集市将会有一个加密货币系统。 + +如果有足够的时间和足够的技术,去中心化就会发生在很多层面: + +* 互联网是一种去中心化的媒介,它释放了全球分享和获取知识的潜力。 +* 开源是一种去中心化的协作模式,它释放了全球的创新潜力。 +* 同样,区块链可以补充开源,成为去中心化的开源补贴模式。 + +请在[推特][5]上关注我在这个领域的其他帖子。 + +-------------------------------------------------------------------------------- + +via: https://opensource.com/article/18/9/barter-currency-system + +作者:[Bilgin lbryam][a] +选题:[lujun9972](https://github.com/lujun9972) +译者:[wxy](https://github.com/wxy) +校对:[wxy](https://github.com/wxy) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensource.com/users/bibryam +[1]: http://catb.org/ +[2]: http://oss.cash/ +[3]: https://opensource.com/article/18/8/open-source-tokenomics +[4]: https://www.youtube.com/watch?v=VS6IpvTWwkQ +[5]: http://twitter.com/bibryam From f42f5b98cc86f35c6746c070a02b27f4e28632a7 Mon Sep 17 00:00:00 2001 From: Xingyu Wang Date: Sun, 15 Sep 2019 20:35:33 +0800 Subject: [PATCH 078/202] APL --- ...-Party Cookies, Autoplay Videos - Cryptominers by Default.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sources/news/20190906 Great News- Firefox 69 Blocks Third-Party Cookies, Autoplay Videos - Cryptominers by Default.md b/sources/news/20190906 Great News- Firefox 69 Blocks Third-Party Cookies, Autoplay Videos - Cryptominers by Default.md index 1cb11a5e59..a02464a631 100644 --- a/sources/news/20190906 Great News- Firefox 69 Blocks Third-Party Cookies, Autoplay Videos - Cryptominers by Default.md +++ b/sources/news/20190906 Great News- Firefox 69 Blocks Third-Party Cookies, Autoplay Videos - Cryptominers by Default.md @@ -1,5 +1,5 @@ [#]: collector: (lujun9972) -[#]: translator: ( ) +[#]: translator: (wxy) [#]: reviewer: ( ) [#]: 
publisher: ( ) [#]: url: ( ) From 0e7a01307ecc6708435de90b791424b5ac06510d Mon Sep 17 00:00:00 2001 From: Xingyu Wang Date: Sun, 15 Sep 2019 21:27:11 +0800 Subject: [PATCH 079/202] TSL&PRF --- ...toplay Videos - Cryptominers by Default.md | 96 ------------------- ...toplay Videos - Cryptominers by Default.md | 96 +++++++++++++++++++ 2 files changed, 96 insertions(+), 96 deletions(-) delete mode 100644 sources/news/20190906 Great News- Firefox 69 Blocks Third-Party Cookies, Autoplay Videos - Cryptominers by Default.md create mode 100644 translated/news/20190906 Great News- Firefox 69 Blocks Third-Party Cookies, Autoplay Videos - Cryptominers by Default.md diff --git a/sources/news/20190906 Great News- Firefox 69 Blocks Third-Party Cookies, Autoplay Videos - Cryptominers by Default.md b/sources/news/20190906 Great News- Firefox 69 Blocks Third-Party Cookies, Autoplay Videos - Cryptominers by Default.md deleted file mode 100644 index a02464a631..0000000000 --- a/sources/news/20190906 Great News- Firefox 69 Blocks Third-Party Cookies, Autoplay Videos - Cryptominers by Default.md +++ /dev/null @@ -1,96 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: (wxy) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Great News! Firefox 69 Blocks Third-Party Cookies, Autoplay Videos & Cryptominers by Default) -[#]: via: (https://itsfoss.com/firefox-69/) -[#]: author: (Ankush Das https://itsfoss.com/author/ankush/) - -Great News! Firefox 69 Blocks Third-Party Cookies, Autoplay Videos & Cryptominers by Default -====== - -If you’re using [Mozilla Firefox][1] and haven’t updated yet to the latest version, you are missing a lot of new and important features. - -### Awesome new features in Firefox 69 release - -To start with, Mozilla Firefox 69 enforces stronger security and privacy options by default. Here are some of the major highlights of the new release. - -#### Firefox 69 blocks autoplay videos - -![][2] - -A lot of websites offer auto-play videos nowadays. No matter whether it is a pop-up video or a video embedded in an article set to autoplay, it is blocked by default (or you may be prompted about it). - -The [Block Autoplay][3] feature gives users to block any video playing automatically. - -#### No more third party tracking cookies - -By default, as part of the Enhanced Tracking Protection feature, it will now block third-party tracking cookies and crypto miners. This is a very useful change to enhance privacy protection while using Mozilla Firefox. - -There are two kind of cookies: first party and third party. The first party cookies are owned by the website itself. These are the ‘good cookies’ that improve your browsing experience by keeping you logged in, remembering your password or entry fields etc. The third party cookies are owned by domains other than the website you visit. Ad servers use these cookies to track you and serve you tracking ads on all the website you visit. Firefox 69 aims to block these. - -You will observe the shield icon in the address bar when it’s active. You may choose to disable it for specific websites. - -![Firefox Blocking Tracking][4] - -#### No more cryptomining off your CPU - -![][5] - -The lust for cryptocurrency has plagued the world. The cost of GPU has gone high because the professional cryptominers use them for mining cryptocurrency. - -People are using computers at work to secretly mine cryptocurrency. And when I say work, I don’t necessarily mean an IT company. 
Only this year, [people got caught mining cryptocurrency at a nuclear plant in Ukraine][6].
-
-That's not all. If you visit some websites, they run scripts and use your computer's CPU to mine cryptocurrency. This is called [cryptojacking][7] in IT terms.
-
-The good thing is that Firefox 69 will automatically block cryptominers. So websites should not be able to exploit your system resources for cryptojacking.
-
-#### Stronger Privacy with Firefox 69
-
-![][8]
-
-If you take it up a notch with a stricter setting, it will block fingerprinters as well. So, you won't have to worry about sharing your computer's configuration info via [fingerprinters][9] when you choose the strict privacy setting in Firefox 69.
-
-In the [official blog post about the release][10], Mozilla mentions that with this release, they expect to provide protection for 100% of our users by default.
-
-#### Performance Improvements
-
-Even though Linux hasn't been mentioned in the changelog – it mentions performance, UI, and battery life improvements for systems running on Windows 10/macOS. If you observe any performance improvements, do mention it in the comments.
-
-**Wrapping Up**
-
-In addition to all these, there are a lot of under-the-hood improvements as well. You can check out the details in the [release notes][11].
-
-Firefox 69 is an impressive update for users concerned about their privacy. Similar to our recommendation on some of the [secure email services][12] recently, we recommend you update your browser to get the best out of it. The new update is already available in most Linux distributions. You just have to update your system.
-
-If you are interested in browsers that block ads and tracking cookies, try the [open source Brave browser][13]. They are even giving you their own cryptocurrency for using their web browser. You can use it to reward your favorite publishers.
-
-What do you think about this release? Let us know your thoughts in the comments below.
- --------------------------------------------------------------------------------- - -via: https://itsfoss.com/firefox-69/ - -作者:[Ankush Das][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://itsfoss.com/author/ankush/ -[b]: https://github.com/lujun9972 -[1]: https://itsfoss.com/why-firefox/ -[2]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2019/09/auto-block-firefox.png?ssl=1 -[3]: https://support.mozilla.org/en-US/kb/block-autoplay -[4]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2019/09/firefox-blocking-tracking.png?ssl=1 -[5]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2019/09/firefox-shield.png?ssl=1 -[6]: https://thenextweb.com/hardfork/2019/08/22/ukrainian-nuclear-powerplant-mine-cryptocurrency-state-secrets/ -[7]: https://hackernoon.com/cryptojacking-in-2019-is-not-dead-its-evolving-984b97346d16 -[8]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2019/09/firefox-secure.jpg?ssl=1 -[9]: https://clearcode.cc/blog/device-fingerprinting/ -[10]: https://blog.mozilla.org/blog/2019/09/03/todays-firefox-blocks-third-party-tracking-cookies-and-cryptomining-by-default/ -[11]: https://www.mozilla.org/en-US/firefox/69.0/releasenotes/ -[12]: https://itsfoss.com/secure-private-email-services/ -[13]: https://itsfoss.com/brave-web-browser/ diff --git a/translated/news/20190906 Great News- Firefox 69 Blocks Third-Party Cookies, Autoplay Videos - Cryptominers by Default.md b/translated/news/20190906 Great News- Firefox 69 Blocks Third-Party Cookies, Autoplay Videos - Cryptominers by Default.md new file mode 100644 index 0000000000..5d2ae54a63 --- /dev/null +++ b/translated/news/20190906 Great News- Firefox 69 Blocks Third-Party Cookies, Autoplay Videos - Cryptominers by Default.md @@ -0,0 +1,96 @@ +[#]: collector: (lujun9972) +[#]: translator: (wxy) +[#]: reviewer: (wxy) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (Great News! 
Firefox 69 Blocks Third-Party Cookies, Autoplay Videos & Cryptominers by Default) +[#]: via: (https://itsfoss.com/firefox-69/) +[#]: author: (Ankush Das https://itsfoss.com/author/ankush/) + +Firefox 69 默认阻拦第三方 Cookie、自动播放的视频和加密矿工 +====== + +如果你使用的是 [Mozilla Firefox][1] 并且尚未更新到最新版本,那么你将错过许多新的重要功能。 + +### Firefox 69 版本中的一些新功能 + +首先,Mozilla Firefox 69 会默认强制执行更强大的安全和隐私选项。以下是新版本的一些主要亮点。 + +#### Firefox 69 阻拦视频自动播放 + +![][2] + +现在很多网站都提供了自动播放视频。无论是弹出视频还是嵌入在文章中设置为自动播放的视频,默认情况下,Firefox 69 都会阻止它(或者可能会提示你)。 + +这个[阻拦自动播放][3]功能可让用户自动阻止任何视频播放。 + +#### 禁止第三方跟踪 cookie + +默认情况下,作为增强型跟踪保护Enhanced Tracking Protection功能的一部分,它现在将阻止第三方跟踪 Cookie 和加密矿工。这是 Mozilla Firefox 的增强隐私保护功能的非常有用的改变。 + +Cookie 有两种:第一方的和第三方的。第一方 cookie 由网站本身拥有。这些是“好的 cookie”,可以让你保持登录、记住你的密码或输入字段等来改善浏览体验。第三方 cookie 由你访问的网站以外的域所有。广告服务器使用这些 Cookie 来跟踪你,并在你访问的所有网站上跟踪广告。Firefox 69 旨在阻止这些。 + +当它发挥作用时,你将在地址栏中看到盾牌图标。你可以选择为特定网站禁用它。 + +![Firefox Blocking Tracking][4] + +#### 禁止加密矿工消耗你的 CPU + +![][5] + +对加密货币的欲望一直困扰着这个世界。GPU 的价格已经高企,因为专业的加密矿工们使用它们来挖掘加密货币。 + +人们使用工作场所的计算机秘密挖掘加密货币。当我说工作场所时,我不一定是指 IT 公司。就在今年,[人们在乌克兰的一家核电站抓住了偷挖加密货币的活动][6]。 + +不仅如此。如果你访问某些网站,他们会运行脚本并使用你的计算机的 CPU 来挖掘加密货币。这在 IT 术语中被称为 [挖矿攻击][7]cryptojacking。 + +好消息是 Firefox 69 会自动阻止这些加密矿工脚本。因此,网站不再能利用你的系统资源进行挖矿攻击了。 + +#### Firefox 69 带来的更强隐私保护 + +![][8] + +如果你把隐私保护设置得更严格,那么它也会阻止指纹。因此,当你在 Firefox 69 中选择严格的隐私设置时,你不必担心通过[指纹][9]共享计算机的配置信息。 + +在[关于这次发布的官方博客文章][10]中,Mozilla 提到,在此版本中,他们希望默认情况下为 100% 的用户提供保护。 + +#### 性能改进 + +尽管在更新日志中没有提及 Linux,但它提到了在 Windows 10/mac OS 上运行性能、UI 和电池寿命有所改进。如果你发现任何性能改进,请在评论中提及。 + +### 总结 + +除了所有这些之外,还有很多底层的改进。你可以查看[发行说明][11]中的详细信息。 + +Firefox 69 对于关注其隐私的用户来说是一个令人印象深刻的更新。与我们最近对某些[安全电子邮件服务][12]的建议类似,我们建议你更新浏览器以充分受益。新版本已在大多数 Linux 发行版中提供,你只需要更新你的系统即可。 + +如果你对阻止广告和跟踪 Cookie 的浏览器感兴趣,请尝试[开源的 Brave 浏览器][13],他们甚至给你提供了加密货币以让你使用他们的浏览器,你可以使用这些加密货币来奖励你最喜爱的发布商。 + +你觉得这个版本怎么样?请在下面的评论中告诉我们你的想法。 + +-------------------------------------------------------------------------------- + +via: https://itsfoss.com/firefox-69/ + +作者:[Ankush Das][a] +选题:[lujun9972][b] +译者:[wxy](https://github.com/wxy) +校对:[wxy](https://github.com/wxy) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://itsfoss.com/author/ankush/ +[b]: https://github.com/lujun9972 +[1]: https://itsfoss.com/why-firefox/ +[2]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2019/09/auto-block-firefox.png?ssl=1 +[3]: https://support.mozilla.org/en-US/kb/block-autoplay +[4]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2019/09/firefox-blocking-tracking.png?ssl=1 +[5]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2019/09/firefox-shield.png?ssl=1 +[6]: https://thenextweb.com/hardfork/2019/08/22/ukrainian-nuclear-powerplant-mine-cryptocurrency-state-secrets/ +[7]: https://hackernoon.com/cryptojacking-in-2019-is-not-dead-its-evolving-984b97346d16 +[8]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2019/09/firefox-secure.jpg?ssl=1 +[9]: https://clearcode.cc/blog/device-fingerprinting/ +[10]: https://blog.mozilla.org/blog/2019/09/03/todays-firefox-blocks-third-party-tracking-cookies-and-cryptomining-by-default/ +[11]: https://www.mozilla.org/en-US/firefox/69.0/releasenotes/ +[12]: https://itsfoss.com/secure-private-email-services/ +[13]: https://itsfoss.com/brave-web-browser/ From 2c34a456ca0cf58b7f4af83a574b3d25b9b7dab9 Mon Sep 17 00:00:00 2001 From: Xingyu Wang Date: Sun, 15 Sep 2019 21:37:36 +0800 Subject: [PATCH 080/202] PUB --- ...arty Cookies, Autoplay Videos - Cryptominers by Default.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) rename 
{translated/news => published}/20190906 Great News- Firefox 69 Blocks Third-Party Cookies, Autoplay Videos - Cryptominers by Default.md (98%)

diff --git a/translated/news/20190906 Great News- Firefox 69 Blocks Third-Party Cookies, Autoplay Videos - Cryptominers by Default.md b/published/20190906 Great News- Firefox 69 Blocks Third-Party Cookies, Autoplay Videos - Cryptominers by Default.md
similarity index 98%
rename from translated/news/20190906 Great News- Firefox 69 Blocks Third-Party Cookies, Autoplay Videos - Cryptominers by Default.md
rename to published/20190906 Great News- Firefox 69 Blocks Third-Party Cookies, Autoplay Videos - Cryptominers by Default.md
index 5d2ae54a63..9118ebe1ef 100644
--- a/translated/news/20190906 Great News- Firefox 69 Blocks Third-Party Cookies, Autoplay Videos - Cryptominers by Default.md
+++ b/published/20190906 Great News- Firefox 69 Blocks Third-Party Cookies, Autoplay Videos - Cryptominers by Default.md
@@ -1,8 +1,8 @@
 [#]: collector: (lujun9972)
 [#]: translator: (wxy)
 [#]: reviewer: (wxy)
-[#]: publisher: ( )
-[#]: url: ( )
+[#]: publisher: (wxy)
+[#]: url: (https://linux.cn/article-11346-1.html)
 [#]: subject: (Great News! Firefox 69 Blocks Third-Party Cookies, Autoplay Videos & Cryptominers by Default)
 [#]: via: (https://itsfoss.com/firefox-69/)
 [#]: author: (Ankush Das https://itsfoss.com/author/ankush/)

From 3588d9807ac84f2775f5f70beaf97f3b0cee1561 Mon Sep 17 00:00:00 2001
From: DarkSun
Date: Mon, 16 Sep 2019 00:52:30 +0800
Subject: [PATCH 081/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190915=20How=20?=
 =?UTF-8?q?to=20Configure=20SFTP=20Server=20with=20Chroot=20in=20Debian=20?=
 =?UTF-8?q?10?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

sources/tech/20190915 How to Configure SFTP Server with Chroot in Debian 10.md
---
 ...re SFTP Server with Chroot in Debian 10.md | 197 ++++++++++++++++++
 1 file changed, 197 insertions(+)
 create mode 100644 sources/tech/20190915 How to Configure SFTP Server with Chroot in Debian 10.md

diff --git a/sources/tech/20190915 How to Configure SFTP Server with Chroot in Debian 10.md b/sources/tech/20190915 How to Configure SFTP Server with Chroot in Debian 10.md
new file mode 100644
index 0000000000..877845b87a
--- /dev/null
+++ b/sources/tech/20190915 How to Configure SFTP Server with Chroot in Debian 10.md
@@ -0,0 +1,197 @@
[#]: collector: (lujun9972)
[#]: translator: ( )
[#]: reviewer: ( )
[#]: publisher: ( )
[#]: url: ( )
[#]: subject: (How to Configure SFTP Server with Chroot in Debian 10)
[#]: via: (https://www.linuxtechi.com/configure-sftp-chroot-debian10/)
[#]: author: (Pradeep Kumar https://www.linuxtechi.com/author/pradeep/)

How to Configure SFTP Server with Chroot in Debian 10
======

**SFTP** stands for Secure File Transfer Protocol / SSH File Transfer Protocol; it is one of the most common methods used to transfer files securely over SSH between our local system and a remote server, and vice versa. The main advantage of SFTP is that we don't need to install any additional package except '**openssh-server**'; in most Linux distributions, the 'openssh-server' package is part of the default installation. Another benefit of SFTP is that we can allow a user to use SFTP only, not SSH.

[![Configure-sftp-debian10][1]][2]

Debian 10, code name 'Buster', was released recently. In this article we will demonstrate how to configure SFTP with a chroot 'jail'-like environment in a Debian 10 system.
Here, a chroot jail-like environment means that users cannot go beyond their respective home directories, i.e. they cannot change into directories outside their own home. Following are the lab details:

  * OS = Debian 10
  * IP Address = 192.168.56.151

Let's jump into the SFTP configuration steps.

### Step:1) Create a Group for sftp using groupadd command

Open the terminal and create a group with the name "**sftp_users**" using the below groupadd command,

```
root@linuxtechi:~# groupadd sftp_users
```

### Step:2) Add Users to Group 'sftp_users' and set permissions

In case you want to create a new user and add that user to the 'sftp_users' group, then run the following command,

**Syntax:** #  useradd -m -G sftp_users <user_name>

Let's suppose the user name is 'jonathan':

```
root@linuxtechi:~# useradd -m -G sftp_users jonathan
```

Set the password using the following chpasswd command,

```
root@linuxtechi:~# echo "jonathan:<password>" | chpasswd
```

In case you want to add existing users to the 'sftp_users' group, then run the beneath usermod command; let's suppose the already existing user name is 'chris':

```
root@linuxtechi:~# usermod -G sftp_users chris
```

Now set the required ownership on the users' home directories (the chroot directory must be owned by root):

```
root@linuxtechi:~# chown root /home/jonathan /home/chris/
```

Create an upload folder in both users' home directories and set the correct ownership,

```
root@linuxtechi:~# mkdir /home/jonathan/upload
root@linuxtechi:~# mkdir /home/chris/upload
root@linuxtechi:~# chown jonathan /home/jonathan/upload
root@linuxtechi:~# chown chris /home/chris/upload
```

**Note:** Users like Jonathan and Chris can upload files and directories to the upload folder from their local systems.

### Step:3) Edit sftp configuration file (/etc/ssh/sshd_config)

As we have already stated, sftp operations are done over ssh, so its configuration file is "**/etc/ssh/sshd_config**". Before making any changes, I would suggest first taking a backup; then edit this file and add the following content,

```
root@linuxtechi:~# cp /etc/ssh/sshd_config /etc/ssh/sshd_config-org
root@linuxtechi:~# vim /etc/ssh/sshd_config
………
#Subsystem sftp /usr/lib/openssh/sftp-server
Subsystem sftp internal-sftp

Match Group sftp_users
  X11Forwarding no
  AllowTcpForwarding no
  ChrootDirectory %h
  ForceCommand internal-sftp
…………
```

Save & exit the file.

To make the above changes take effect, restart the ssh service using the following systemctl command,

```
root@linuxtechi:~# systemctl restart sshd
```

In the above 'sshd_config' file we have commented out the line which starts with "Subsystem" and added the new entry "Subsystem sftp internal-sftp", plus new lines like:

"**Match Group sftp_users**" –> It means that if a user is a part of the 'sftp_users' group, then apply the rules mentioned below this entry.

"**ChrootDirectory %h**" –> It means users can only change directories within their respective home directories; they cannot go beyond them. In other words, users are not permitted to change into other directories; they get a jail-like environment within their own directories and can't access any other user's or the system's directories.

"**ForceCommand internal-sftp**" –> It means users are limited to the sftp command only.

### Step:4) Test and Verify sftp

Login to any other Linux system which is on the same network as your sftp server, and then try to ssh to the sftp server via the users that we have mapped in the 'sftp_users' group.
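As an extra safeguard (an addition to the article's steps, not one of them), you can ask sshd itself to validate the edited file before or after restarting, since a syntax error in sshd_config can lock you out. This uses only the standard OpenSSH test flag:

```
root@linuxtechi:~# sshd -t    # silent output and exit status 0 mean sshd_config parses cleanly
```

With the server-side configuration verified, here is the client-side test: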
+
+```
+[root@linuxtechi ~]# ssh root@linuxtechi
+root@linuxtechi's password:
+Write failed: Broken pipe
+[root@linuxtechi ~]# ssh root@linuxtechi
+root@linuxtechi's password:
+Write failed: Broken pipe
+[root@linuxtechi ~]#
+```
+
+The above confirms that users are not allowed to SSH in; now try SFTP using the following commands,
+
+```
+[root@linuxtechi ~]# sftp root@linuxtechi
+root@linuxtechi's password:
+Connected to 192.168.56.151.
+sftp> ls -l
+drwxr-xr-x 2 root 1001 4096 Sep 14 07:52 debian10-pkgs
+-rw-r--r-- 1 root 1001 155 Sep 14 07:52 devops-actions.txt
+drwxr-xr-x 2 1001 1002 4096 Sep 14 08:29 upload
+```
+
+Let’s try to download a file using the sftp ‘**get**‘ command,
+
+```
+sftp> get devops-actions.txt
+Fetching /devops-actions.txt to devops-actions.txt
+/devops-actions.txt 100% 155 0.2KB/s 00:00
+sftp>
+sftp> cd /etc
+Couldn't stat remote file: No such file or directory
+sftp> cd /root
+Couldn't stat remote file: No such file or directory
+sftp>
+```
+
+The above output confirms that we are able to download a file from our SFTP server to the local machine, and apart from this we have also verified that users cannot change directories.
+
+Let’s try to upload a file to the “**upload**” folder,
+
+```
+sftp> cd upload/
+sftp> put metricbeat-7.3.1-amd64.deb
+Uploading metricbeat-7.3.1-amd64.deb to /upload/metricbeat-7.3.1-amd64.deb
+metricbeat-7.3.1-amd64.deb 100% 38MB 38.4MB/s 00:01
+sftp> ls -l
+-rw-r--r-- 1 1001 1002 40275654 Sep 14 09:18 metricbeat-7.3.1-amd64.deb
+sftp>
+```
+
+This confirms that we have successfully uploaded a file from our local system to the SFTP server.
+
+Now test the SFTP server with the WinSCP tool: enter the SFTP server IP address along with the user’s credentials,
+
+[![Winscp-sftp-debian10][1]][3]
+
+Click on Login and then try to download and upload files,
+
+[![Download-file-winscp-debian10-sftp][1]][4]
+
+Now try to upload files into the upload folder,
+
+[![Upload-File-using-winscp-Debian10-sftp][1]][5]
+
+The above window confirms that uploading is working fine as well. That’s all from this article; if these steps help you configure an SFTP server with a chroot environment in Debian 10, then please do share your feedback and comments.
+
+--------------------------------------------------------------------------------
+
+via: https://www.linuxtechi.com/configure-sftp-chroot-debian10/
+
+作者:[Pradeep Kumar][a]
+选题:[lujun9972][b]
+译者:[译者ID](https://github.com/译者ID)
+校对:[校对者ID](https://github.com/校对者ID)
+
+本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
+
+[a]: https://www.linuxtechi.com/author/pradeep/
+[b]: https://github.com/lujun9972
+[1]: data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7
+[2]: https://www.linuxtechi.com/wp-content/uploads/2019/09/Configure-sftp-debian10.jpg
+[3]: https://www.linuxtechi.com/wp-content/uploads/2019/09/Winscp-sftp-debian10.jpg
+[4]: https://www.linuxtechi.com/wp-content/uploads/2019/09/Download-file-winscp-debian10-sftp.jpg
+[5]: https://www.linuxtechi.com/wp-content/uploads/2019/09/Upload-File-using-winscp-Debian10-sftp.jpg

From 916c452177d5d257118de1c8e5cadbcb3e5e4e40 Mon Sep 17 00:00:00 2001
From: geekpi
Date: Mon, 16 Sep 2019 08:55:05 +0800
Subject: [PATCH 082/202] translated

---
 ...
from SAR Reports Using the Bash Script.md | 49 ++++++++++---------
 1 file changed, 25 insertions(+), 24 deletions(-)
 rename {sources => translated}/tech/20190905 How to Get Average CPU and Memory Usage from SAR Reports Using the Bash Script.md (74%)

diff --git a/sources/tech/20190905 How to Get Average CPU and Memory Usage from SAR Reports Using the Bash Script.md b/translated/tech/20190905 How to Get Average CPU and Memory Usage from SAR Reports Using the Bash Script.md
similarity index 74%
rename from sources/tech/20190905 How to Get Average CPU and Memory Usage from SAR Reports Using the Bash Script.md
rename to translated/tech/20190905 How to Get Average CPU and Memory Usage from SAR Reports Using the Bash Script.md
index e844870d70..52bfcd19a8 100644
--- a/sources/tech/20190905 How to Get Average CPU and Memory Usage from SAR Reports Using the Bash Script.md
+++ b/translated/tech/20190905 How to Get Average CPU and Memory Usage from SAR Reports Using the Bash Script.md
@@ -7,36 +7,37 @@
 [#]: via: (https://www.2daygeek.com/linux-get-average-cpu-memory-utilization-from-sar-data-report/)
 [#]: author: (Magesh Maruthamuthu https://www.2daygeek.com/author/magesh/)
 
-How to Get Average CPU and Memory Usage from SAR Reports Using the Bash Script
+如何使用 Bash 脚本从 SAR 报告中获取 CPU 和内存的平均使用情况
 ======
 
-Most Linux administrator monitor system performance with **[SAR report][1]** because it collect performance data for a week.
+大多数 Linux 管理员使用 **[SAR 报告][1]**监控系统性能,因为它会收集一周的性能数据。
 
-But you can easily extend this to four weeks by making changes to the “/etc/sysconfig/sysstat” file.
-Also, this period can be extended beyond one month. If the value exceeds 28, the log files are placed in multiple directories, one for each month.
+但是,你可以通过更改 “/etc/sysconfig/sysstat” 文件轻松地将其延长到四周。
 
-To extend the coverage period to 28 days, make the following change to the “/etc/sysconfig/sysstat” file.
+同样,这段时间还可以延长至一个月以上。如果该值超过 28,那么日志文件将放在多个目录中,每月一个。
 
-Edit the sysstat file and change HISTORY=7 to HISTORY=28.
+要将覆盖期延长至 28 天,请对 “/etc/sysconfig/sysstat” 文件做以下更改。
 
-In this article we have added three bash scripts that will help you to easily view each data file averages in one place.
+编辑 sysstat 文件并将 HISTORY=7 更改为 HISTORY=28。
 
-We have added many useful shell scripts in the past. If you want to check out that collection, go to the link below.
+在本文中,我们添加了三个 bash 脚本,它们可以帮助你在一个地方轻松查看每个数据文件的平均值。
 
- * **[How to automate daily operations using shell script][2]**
+我们过去添加过许多有用的 shell 脚本。如果你想查看它们,请进入下面的链接。
+
+ * **[如何使用 shell 脚本自动化日常操作][2]**
 
 
 
-These scripts are simple and straightforward. For testing purposes, we have included only two performance metrics, namely CPU and memory.
+这些脚本简单明了。出于测试目的,我们仅包括两个性能指标,即 CPU 和内存。
 
-You can modify other performance metrics in the script to suit your needs.
+你可以修改脚本中的其他性能指标以满足你的需求。
 
-### Script-1: Bash Script to Get Average CPU Utilization from SAR Reports
+### 脚本 1:从 SAR 报告中获取平均 CPU 利用率的 Bash 脚本
 
-This bash script collects the CPU average from each data file and display it on one page.
+该 bash 脚本从每个数据文件中收集 CPU 平均值并将其显示在一个页面上。
 
-Since this is a month end, it shows 28 days data for August 2019.
+由于是月末,它显示了 2019 年 8 月的 28 天数据。
 
 ```
 # vi /opt/scripts/sar-cpu-avg.sh
@@ -62,7 +63,7 @@ done
 echo "+----------------------------------------------------------------------------------+"
 ```
 
-Once you run the script, you will get an output like the one below.
+运行脚本后,你将看到如下输出。
 
 ```
 # sh /opt/scripts/sar-cpu-avg.sh
@@ -88,11 +89,11 @@
+----------------------------------------------------------------------------------+ ``` -### Script-2: Bash Script to Get Average Memory Utilization from SAR Reports +### 脚本 2:从 SAR 报告中获取平均内存利用率的 Bash 脚本 -This bash script will collect memory averages from each data file and display it on one page. +该 bash 脚本从每个数据文件中收集内存平均值并将其显示在一个页面上。 -Since this is a month end, it shows 28 days data for August 2019. +由于是月末,它显示了 2019 年 8 月的 28 天数据。 ``` # vi /opt/scripts/sar-memory-avg.sh @@ -118,7 +119,7 @@ done echo "+-------------------------------------------------------------------------------------------------------------------+" ``` -Once you run the script, you will get an output like the one below. +运行脚本后,你将看到如下输出。 ``` # sh /opt/scripts/sar-memory-avg.sh @@ -144,11 +145,11 @@ Once you run the script, you will get an output like the one below. +-------------------------------------------------------------------------------------------------------------------+ ``` -### Script-3: Bash Script to Get Average CPU & Memory Utilization from SAR Reports +### 脚本 3:从 SAR 报告中获取 CPU 和内存平均利用率的 Bash 脚本 -This bash script collects the CPU & memory averages from each data file and displays them on a page. +该 bash 脚本从每个数据文件中收集 CPU 和内存平均值并将其显示在一个页面上。 -This bash script is slightly different compared to the above script. It shows the average of both (CPU & Memory) in one location, not the other data. +该脚本与上面相比稍微不同。它在同一位置同时显示两者(CPU 和内存)平均值,而不是其他数据。 ``` # vi /opt/scripts/sar-cpu-mem-avg.sh @@ -172,7 +173,7 @@ do done ``` -Once you run the script, you will get an output like the one below. +运行脚本后,你将看到如下输出。 ``` # sh /opt/scripts/sar-cpu-mem-avg.sh @@ -221,7 +222,7 @@ via: https://www.2daygeek.com/linux-get-average-cpu-memory-utilization-from-sar- 作者:[Magesh Maruthamuthu][a] 选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) +译者:[geekpi](https://github.com/geekpi) 校对:[校对者ID](https://github.com/校对者ID) 本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 From 63fb1eaedd53e3a256151f9b5a73e8862282ab6b Mon Sep 17 00:00:00 2001 From: geekpi Date: Mon, 16 Sep 2019 08:59:32 +0800 Subject: [PATCH 083/202] translating --- sources/news/20190909 Firefox 69 available in Fedora.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sources/news/20190909 Firefox 69 available in Fedora.md b/sources/news/20190909 Firefox 69 available in Fedora.md index 817d4f391e..256c9c9f5e 100644 --- a/sources/news/20190909 Firefox 69 available in Fedora.md +++ b/sources/news/20190909 Firefox 69 available in Fedora.md @@ -1,5 +1,5 @@ [#]: collector: (lujun9972) -[#]: translator: ( ) +[#]: translator: (geekpi) [#]: reviewer: ( ) [#]: publisher: ( ) [#]: url: ( ) From 080be01fc9a9864133448350406e3f1dc55dfb5d Mon Sep 17 00:00:00 2001 From: heguangzhi <7731226@qq.com> Date: Mon, 16 Sep 2019 10:56:48 +0800 Subject: [PATCH 084/202] tranlated 20190828 --- ...nsible environments on MacOS with Conda.md | 174 ----------------- ...nsible environments on MacOS with Conda.md | 179 ++++++++++++++++++ 2 files changed, 179 insertions(+), 174 deletions(-) delete mode 100644 sources/tech/20190828 Managing Ansible environments on MacOS with Conda.md create mode 100644 translated/tech/20190828 Managing Ansible environments on MacOS with Conda.md diff --git a/sources/tech/20190828 Managing Ansible environments on MacOS with Conda.md b/sources/tech/20190828 Managing Ansible environments on MacOS with Conda.md deleted file mode 100644 index f46a301e44..0000000000 --- a/sources/tech/20190828 Managing Ansible environments on 
MacOS with Conda.md +++ /dev/null @@ -1,174 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: (heguangzhi) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Managing Ansible environments on MacOS with Conda) -[#]: via: (https://opensource.com/article/19/8/using-conda-ansible-administration-macos) -[#]: author: (James Farrell https://opensource.com/users/jamesf) - -Managing Ansible environments on MacOS with Conda -====== -Conda corrals everything you need for Ansible into a virtual environment -and keeps it separate from your other projects. -![CICD with gears][1] - -If you are a Python developer using MacOS and involved with Ansible administration, you may want to use the Conda package manager to keep your Ansible work separate from your core OS and other local projects. - -Ansible is based on Python. Conda is not required to make Ansible work on MacOS, but it does make managing Python versions and package dependencies easier. This allows you to use an upgraded Python version on MacOS and keep Python package dependencies separate between your system, Ansible, and other programming projects. - -There are other ways to install Ansible on MacOS. You could use [Homebrew][2], but if you are into Python development (or Ansible development), you might find managing Ansible in a Python virtual environment reduces some confusion. I find this to be simpler; rather than trying to load a Python version and dependencies into the system or in **/usr/local**, Conda helps me corral everything I need for Ansible into a virtual environment and keep it all completely separate from other projects. - -This article focuses on using Conda to manage Ansible as a Python project to keep it clean and separated from other projects. Read on to learn how to install Conda, create a new virtual environment, install Ansible, and test it. - -### Prelude - -Recently, I wanted to learn [Ansible][3], so I needed to figure out the best way to install it. - -I am generally wary of installing things into my daily use workstation. I especially dislike applying manual updates to the vendor's default OS installation (a preference I developed from years of Unix system administration). I really wanted to use Python 3.7, but MacOS packages the older 2.7, and I was not going to install any global Python packages that might interfere with the core MacOS system. - -So, I started my Ansible work using a local Ubuntu 18.04 virtual machine. This provided a real level of safe isolation, but I soon found that managing it was tedious. I set out to see how to get a flexible but isolated Ansible system on native MacOS. - -Since Ansible is based on Python, Conda seemed to be the ideal solution. - -### Installing Conda - -Conda is an open source utility that provides convenient package- and environment-management features. It can help you manage multiple versions of Python, install package dependencies, perform upgrades, and maintain project isolation. If you are manually managing Python virtual environments, Conda will help streamline and manage your work. Surf on over to the [Conda documentation][4] for all the details. - -I chose the [Miniconda][5] Python 3.7 installation for my workstation because I wanted the latest Python version. Regardless of which version you select, you can always install new virtual environments with other versions of Python. - -To install Conda, download the PKG format file, do the usual double-click, and select the "Install for me only" option. 
The install took about 158MB of space on my system. - -After the installation, bring up a terminal to see what you have. You should see: - - * A new **miniconda3** directory in your **home** - * The shell prompt modified to prepend the word "(base)" - * **.bash_profile** updated with Conda-specific settings - - - -Now that the base is installed, you have your first Python virtual environment. Running the usual Python version check should prove this, and your PATH will point to the new location: - - -``` -(base) $ which python -/Users/jfarrell/miniconda3/bin/python -(base) $ python --version -Python 3.7.1 -``` - -Now that Conda is installed, the next step is to set up a virtual environment, then get Ansible installed and running. - -### Creating a virtual environment for Ansible - -I want to keep Ansible separate from my other Python projects, so I created a new virtual environment and switched over to it: - - -``` -(base) $ conda create --name ansible-env --clone base -(base) $ conda activate ansible-env -(ansible-env) $ conda env list -``` - -The first command clones the Conda base into a new virtual environment called **ansible-env**. The clone brings in the Python 3.7 version and a bunch of default Python modules that you can add to, remove, or upgrade as needed. - -The second command changes the shell context to this new **ansible-env** environment. It sets the proper paths for Python and the modules it contains. Notice that your shell prompt changes after the **conda activate ansible-env** command. - -The third command is not required; it lists what Python modules are installed with their version and other data. - -You can always switch out of a virtual environment and into another with Conda's **activate** command. This will bring you back to the base: **conda activate base**. - -### Installing Ansible - -There are various ways to install Ansible, but using Conda keeps the Ansible version and all desired dependencies packaged in one place. Conda provides the flexibility both to keep everything separated and to add in other new environments as needed (as I'll demonstrate later). - -To install a relatively recent version of Ansible, use: - - -``` -(base) $ conda activate ansible-env -(ansible-env) $ conda install -c conda-forge ansible -``` - -Since Ansible is not part of Conda's default channels, the **-c** is used to search and install from an alternate channel. Ansible is now installed into the **ansible-env** virtual environment and is ready to use. - -### Using Ansible - -Now that you have installed a Conda virtual environment, you're ready to use it. First, make sure the node you want to control has your workstation's SSH key installed to the right user account. 
- -Bring up a new shell and run some basic Ansible commands: - - -``` -(base) $ conda activate ansible-env -(ansible-env) $ ansible --version -ansible 2.8.1 -  config file = None -  configured module search path = ['/Users/jfarrell/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] -  ansible python module location = /Users/jfarrell/miniconda3/envs/ansibleTest/lib/python3.7/site-packages/ansible -  executable location = /Users/jfarrell/miniconda3/envs/ansibleTest/bin/ansible -  python version = 3.7.1 (default, Dec 14 2018, 13:28:58) [Clang 4.0.1 (tags/RELEASE_401/final)] -(ansible-env) $ ansible all -m ping -u ansible -192.168.99.200 | SUCCESS => { -    "ansible_facts": { -        "discovered_interpreter_python": "/usr/bin/python" -    }, -    "changed": false, -    "ping": "pong" -} -``` - -Now that Ansible is working, you can pull your playbooks out of source control and start using them from your MacOS workstation. - -### Cloning the new Ansible for Ansible development - -This part is purely optional; it's only needed if you want additional virtual environments to modify Ansible or to safely experiment with questionable Python modules. You can clone your main Ansible environment into a development copy with: - - -``` -(ansible-env) $ conda create --name ansible-dev --clone ansible-env -(ansible-env) $ conda activte ansible-dev -(ansible-dev) $ -``` - -### Gotchas to look out for - -Occasionally you may get into trouble with Conda. You can usually delete a bad environment with: - - -``` -$ conda activate base -$ conda remove --name ansible-dev --all -``` - -If you get errors that you cannot resolve, you can usually delete the environment directly by finding it in **~/miniconda3/envs** and removing the entire directory. If the base becomes corrupt, you can remove the entire **~/miniconda3** directory and reinstall it from the PKG file. Just be sure to preserve any desired environments you have in **~/miniconda3/envs**, or use the Conda tools to dump the environment configuration and recreate it later. - -The **sshpass** program is not included on MacOS. It is needed only if your Ansible work requires you to supply Ansible with an SSH login password. You can find the current [sshpass source][6] on SourceForge. - -Finally, the base Conda Python module list may lack some Python modules you need for your work. If you need to install one, the **conda install <package>** command is preferred, but **pip** can be used where needed, and Conda will recognize the install modules. - -### Conclusion - -Ansible is a powerful automation utility that's worth all the effort to learn. Conda is a simple and effective Python virtual environment management tool. - -Keeping software installs separated on your MacOS environment is a prudent approach to maintain stability and sanity with your daily work environment. Conda can be especially helpful to upgrade your Python version, separate Ansible from your other projects, and safely hack on Ansible. 
- --------------------------------------------------------------------------------- - -via: https://opensource.com/article/19/8/using-conda-ansible-administration-macos - -作者:[James Farrell][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/heguangzhi) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://opensource.com/users/jamesf -[b]: https://github.com/lujun9972 -[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/cicd_continuous_delivery_deployment_gears.png?itok=kVlhiEkc (CICD with gears) -[2]: https://brew.sh/ -[3]: https://docs.ansible.com/?extIdCarryOver=true&sc_cid=701f2000001OH6uAAG -[4]: https://conda.io/projects/conda/en/latest/index.html -[5]: https://docs.conda.io/en/latest/miniconda.html -[6]: https://sourceforge.net/projects/sshpass/ diff --git a/translated/tech/20190828 Managing Ansible environments on MacOS with Conda.md b/translated/tech/20190828 Managing Ansible environments on MacOS with Conda.md new file mode 100644 index 0000000000..3f62adac32 --- /dev/null +++ b/translated/tech/20190828 Managing Ansible environments on MacOS with Conda.md @@ -0,0 +1,179 @@ +[#]: collector: (lujun9972) +[#]: translator: (heguangzhi) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (Managing Ansible environments on MacOS with Conda) +[#]: via: (https://opensource.com/article/19/8/using-conda-ansible-administration-macos) +[#]: author: (James Farrell https://opensource.com/users/jamesf) + + +使用 Conda 管理 MacOS 上的 Ansible 环境 +===== + +Conda 将 Ansible 所需的一切都收集到虚拟环境中并将它与其他项目分开。 +![CICD with gears][1] + +如果您是一名使用 MacOS 并参与 Ansible 管理的 Python 开发人员,您可能希望使用 Conda 包管理器将 Ansible 的工作与核心操作系统和其他本地项目分开。 + +Ansible 基于 Python的。让 Ansible 在 MacOS 上工作 Conda 并不是必须要的,但是它确实让管理 Python 版本和包依赖变得更加容易。这允许您在 MacOS 上使用升级的 Python 版本,并在您的系统、Ansible 和其他编程项目之间保持 Python 包的依赖性是独立的。 + +还有其他方法在 MacOS 上安装 Ansible 。您可以使用[Homebrew][2],但是如果您对 Python 开发(或 Ansible 开发)感兴趣,您可能会发现在一个 Python 虚拟环境中管理 Ansible 可以减少一些混乱。我觉得这更简单;与其试图将 Pythn版本和依赖项加载到系统或在 **/usr/local** 目录中 ,Conda 还能帮助我将 Ansibl e所需的一切都收集到一个虚拟环境中,并将其与其他项目完全分开。 + +This article focuses on using Conda to manage Ansible as a Python project to keep it clean and separated from other projects. Read on to learn how to install Conda, create a new virtual environment, install Ansible, and test it. 
+ +本文着重于使用 Conda 作为 Python 项目来管理 Ansible ,以保持它的干净并与其他项目分开。请继续阅读,并了解如何安装 Conda、创建新的虚拟环境、安装 Ansible 并对其进行测试。 + +### 序幕 + +最近,我想学习[Ansible][3],所以我需要找到安装它的最佳方法。 + +我通常对在我的日常工作站上安装东西很谨慎。我尤其不喜欢对供应商的默认操作系统安装应用手动更新(这是我多年作为 Unix 系统管理的首选)。我真的很想使用 Python 3.7,但是 MacOS 包是旧的2.7,我不会安装任何可能干扰核心 MacOS 系统的全局Python包。 + +所以,我使用本地 Ubuntu 18.04 虚拟机上开始了我的 Ansible 工作。这提供了真正程度的安全隔离,但我很快发现管理它是非常乏味的。所以我着手研究如何在本机 MacOS 上获得一个灵活但独立的 Ansible 系统。 + +由于 Ansible 基于 Python,Conda 似乎是理想的解决方案。 + +### 安装Conda + +Conda 是一个开源软件,它提供方便的包和环境管理功能。它可以帮助您管理多个版本的 Python 、安装软件包依赖关系、执行升级和维护项目隔离。如果您手动管理 Python 虚拟环境,Conda 将有助于简化和管理您的工作。浏览[ Conda 文档][4]可以了解更多细节。 + +我选择了 [Miniconda][5] Python 3.7 安装在我的工作站中,因为我想要最新的 Pytho n版本。无论选择哪个版本,您都可以使用其他版本的 Python 安装新的虚拟环境。 + +要安装 Conda,请下载 PKG 格式的文件,进行通常的双击,并选择 “Install for me only” 选项。安装在我的系统上占用了大约158兆的空间。 + +安装完成后,调出一个终端来查看您有什么了。您应该看到: + + * 一个 **miniconda3** 目录在您的 **home** 目录中 + * shell 提示符被修改为 "(base)" + * **.bash_profile** 文件被 Conda-specific 设置内容更新 + +现在已经安装了基础,您就有了第一个 Python 虚拟环境。运行 Python 版本检查可以证明这一点,您的 PATH 将指向新的位置: + +``` +(base) $ which python +/Users/jfarrell/miniconda3/bin/python +(base) $ python --version +Python 3.7.1 +``` +现在安装了 Conda ,下一步是建立一个虚拟环境,然后安装 Ansible 并运行。 + +### 为 Ansible 创建虚拟环境 + + + +我想将 Ansible 与我的其他 Python 项目分开,所以我创建了一个新的虚拟环境并切换到它: + +``` +(base) $ conda create --name ansible-env --clone base +(base) $ conda activate ansible-env +(ansible-env) $ conda env list +``` + + +第一个命令将 Conda 库克隆到一个名为 **ansible-env** 的新虚拟环境中。克隆引入了 Python 3.7 版本和一系列默认的 Python 模块,您可以根据需要添加、删除或升级这些模块。 + +第二个命令将 shell 上下文更改为这个新的环境。它为 Python 及其包含的模块设置了正确的路径。请注意,在 **conda activate ansible-env** 命令后,您的 shell 提示符会发生变化。 + +第三个命令不是必须的;它列出了安装了哪些 Python 模块及其版本和其他数据。 + +您可以随时使用 Conda 的 **activate** 命令切换到另一个虚拟环境。这将带您回到基本的: **conda 基本的**。 + +### 安装 Ansible + +安装 Ansible 有多种方法,但是使用 Conda 可以将 Ansible 版本和所有需要的依赖项打包在一个地方。Conda 提供了灵活的,既可以将所有内容分开,又可以根据需要添加其他新环境(我将在后面演示)。 + +要安装 Ansible 的相对较新版本,请使用: + + +``` +(base) $ conda activate ansible-env +(ansible-env) $ conda install -c conda-forge ansible +``` + +由于 Ansible 不是 Conda 默认的一部分,因此**-c**用于从备用通道搜索和安装。Ansible 现已安装到**ansible-env**虚拟环境中,可以使用了。 + + +### 使用 Ansible + +Now that you have installed a Conda virtual environment, you're ready to use it. First, make sure the node you want to control has your workstation's SSH key installed to the right user account. 
+ +既然您已经安装了 Conda 虚拟环境,就可以使用它了。首先,确保要控制的节点已将工作站的 SSH 密钥安装到正确的用户帐户。 + +调出一个新的 shell 并运行一些基本的Ansible命令: + + +``` +(base) $ conda activate ansible-env +(ansible-env) $ ansible --version +ansible 2.8.1 +  config file = None +  configured module search path = ['/Users/jfarrell/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] +  ansible python module location = /Users/jfarrell/miniconda3/envs/ansibleTest/lib/python3.7/site-packages/ansible +  executable location = /Users/jfarrell/miniconda3/envs/ansibleTest/bin/ansible +  python version = 3.7.1 (default, Dec 14 2018, 13:28:58) [Clang 4.0.1 (tags/RELEASE_401/final)] +(ansible-env) $ ansible all -m ping -u ansible +192.168.99.200 | SUCCESS => { +    "ansible_facts": { +        "discovered_interpreter_python": "/usr/bin/python" +    }, +    "changed": false, +    "ping": "pong" +} +``` + +现在 Ansible 正在工作了,您可以在控制台中抽身,并从您的 MacOS 工作站中使用它们。 + +### 克隆新的Ansible进行Ansible开发 + +这部分完全是可选的;只有当您想要额外的虚拟环境来修改 Ansible 或者安全地使用有问题的 Python 模块时,才需要它。您可以通过以下方式将主 Ansible 环境克隆到开发副本中: + +``` +(ansible-env) $ conda create --name ansible-dev --clone ansible-env +(ansible-env) $ conda activte ansible-dev +(ansible-dev) $ +``` + +### 需要注意的问题 + +Occasionally you may get into trouble with Conda. You can usually delete a bad environment with: + +偶尔您可能遇到使用 Conda 的麻烦。您通常可以通过以下方式删除不良环境: + +``` +$ conda activate base +$ conda remove --name ansible-dev --all +``` +如果出现无法解决的错误,通常可以通过在 **~/miniconda3/envs** 中找到环境并删除整个目录来直接删除环境。如果基础损坏了,您可以删除整个 **~/miniconda3**,然后从 PKG 文件中重新安装。只要确保保留 **~/miniconda3/envs** ,或使用 Conda 工具导出环境配置并在以后重新创建即可。 + +MacOS 上不包括 **sshpass** 程序。只有当您的 Ansible工 作要求您向 Ansible 提供SSH登录密码时,才需要它。您可以在 SourceForge 上找到当前的[sshpass source][6]。 + +Finally, the base Conda Python module list may lack some Python modules you need for your work. If you need to install one, the **conda install <package>** command is preferred, but **pip** can be used where needed, and Conda will recognize the install modules. 
+ +最后,基础 Conda Python 模块列表可能缺少您工作所需的一些Python模块。如果您需要安装一个模块,**conda install <package>** 命令是首选的,但是 **pip** 可以在需要的地方使用,Conda会识别安装模块。 + +### 结论 + +Ansible 是一个强大的自动化工具,值得我们去学习。Conda是一个简单有效的 Python 虚拟环境管理工具。 + +在您的 MacOS 环境中保持软件安装分离是保持日常工作环境的稳定性和健全性的谨慎方法。Conda 尤其有助于升级您的Python 版本,将 Ansible 从其他项目中分离出来,并安全地使用 Ansible。 + +-------------------------------------------------------------------------------- + +via: https://opensource.com/article/19/8/using-conda-ansible-administration-macos + +作者:[James Farrell][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/heguangzhi) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensource.com/users/jamesf +[b]: https://github.com/lujun9972 +[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/cicd_continuous_delivery_deployment_gears.png?itok=kVlhiEkc (CICD with gears) +[2]: https://brew.sh/ +[3]: https://docs.ansible.com/?extIdCarryOver=true&sc_cid=701f2000001OH6uAAG +[4]: https://conda.io/projects/conda/en/latest/index.html +[5]: https://docs.conda.io/en/latest/miniconda.html +[6]: https://sourceforge.net/projects/sshpass/ From c69e66d9cc93bfd5157d7eec73efb4aca28dcda8 Mon Sep 17 00:00:00 2001 From: Xingyu Wang Date: Mon, 16 Sep 2019 11:15:51 +0800 Subject: [PATCH 085/202] PRF @wxy --- ...w blockchain can complement open source.md | 42 ++++++++++--------- 1 file changed, 22 insertions(+), 20 deletions(-) diff --git a/translated/talk/20180904 How blockchain can complement open source.md b/translated/talk/20180904 How blockchain can complement open source.md index d5c9691abf..5937af45de 100644 --- a/translated/talk/20180904 How blockchain can complement open source.md +++ b/translated/talk/20180904 How blockchain can complement open source.md @@ -1,51 +1,53 @@ -区块链是如何补充开源的 +区块链能如何补充开源 ====== +> 了解区块链如何成为去中心化的开源补贴模型。 + ![](https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/block-quilt-chain.png?itok=mECoDbrc) -[大教堂与集市][1]是 20 年前由埃里克·史蒂文·雷蒙德Eric Steven Raymond(ESR)撰写的经典开源故事。在这个故事中,ESR 描述了一种新的革命性软件开发模型,其中复杂的软件项目是在没有(或者很少的)集中管理的情况下构建的。这个新模型就是开源。 +《[大教堂与集市][1]The Cathedral and The Bazaar》是 20 年前由埃里克·史蒂文·雷蒙德Eric Steven Raymond(ESR)撰写的经典开源故事。在这个故事中,ESR 描述了一种新的革命性的软件开发模型,其中复杂的软件项目是在没有(或者很少的)集中管理的情况下构建的。这个新模型就是开源open source。 ESR 的故事比较了两种模式: -* 经典模型(由“大教堂”代表),其中软件由一小群人在封闭和受控的环境中通过缓慢而稳定的发布版本制作而成。 -* 以及新模式(由“集市”代表),其中软件是在开放的环境中制作的,个人可以自由参与,但仍然可以产生一个稳定和连贯的系统。 +* 经典模型(由“大教堂”所代表),其中软件由一小群人在封闭和受控的环境中通过缓慢而稳定的发布制作而成。 +* 以及新模式(由“集市”所代表),其中软件是在开放的环境中制作的,个人可以自由参与,但仍然可以产生一个稳定和连贯的系统。    -开源如此成功的一些原因可以追溯到 ESR 所描述的基础原则。尽早发布、经常发布,并接受许多头脑必然比一个更好的事实,会让开源项目进入全世界的人才库(很少有公司能够使用闭源模式与之匹敌)。 +开源如此成功的一些原因可以追溯到 ESR 所描述的创始原则。尽早发布、经常发布,并接受许多头脑必然比一个更好的事实,让开源项目进入全世界的人才库(很少有公司能够使用闭源模式与之匹敌)。 -在 ESR 对黑客社区的反思分析 20 年后,我们看到开源成为占据主导地位的的模式。它不再仅仅是开发人员的个人癖好的模式,而是创新发生的地方。即使是全球[最大][2]软件公司也正在转向这种模式,以便继续占据主导地位。 +在 ESR 对黑客社区的反思分析 20 年后,我们看到开源成为占据主导地位的的模式。它不再仅仅是为了满足开发人员的个人喜好,而是创新发生的地方。甚至是全球[最大][2]软件公司也正在转向这种模式,以便继续占据主导地位。 ### 易货系统 -如果我们仔细研究开源模型在实践中的运作方式,我们就会发现它是一个封闭的系统,专属于开源开发人员和技术人员。影响项目方向的唯一方法是加入开源社区,了解成文和不成文的规则,学习如何贡献,编码标准等,并自己亲力完成。 +如果我们仔细研究开源模型在实践中的运作方式,我们就会意识到它是一个封闭系统,只对开源开发者和技术人员开放。影响项目方向的唯一方法是加入开源社区,了解成文和不成文的规则,学习如何贡献、编码标准等,并自己亲力完成。 -这就是集市的运作方式,也是易货系统类比的来源。易货系统是一种交换服务和货物以换取其他服务和货物的方法。在市场中(即软件的构建)这意味着为了获取某些东西,你必须自己也是一个生产者并回馈一些东西——那就是通过交换你的时间和知识来完成任务。集市是开源开发人员与其他开源开发人员交互并以开源方式生成开源软件的地方。 +这就是集市的运作方式,也是这个易货系统类比的来源。易货系统是一种交换服务和货物以换取其他服务和货物的方法。在市场中(即软件的构建地)这意味着为了获取某些东西,你必须自己也是一个生产者并回馈一些东西——那就是通过交换你的时间和知识来完成任务。集市是开源开发者与其他开源开发者交互并以开源方式生成开源软件的地方。 
-易货系统向前迈出了一大步,从自给自足的状态演变而来,而在自给自足的状态下,每个人都必须成为所有行业的杰出人选。使用易货系统的集市(开源模式)允许具有共同兴趣和不同技能的人们收集、协作和创造个人无法自己创造的东西。易货系统简单,而不像现代货币系统那么复杂,但也有一些局限性,例如: +易货系统向前迈出了一大步,从自给自足的状态演变而来,而在自给自足的状态下,每个人都必须成为所有行业的杰出人选。使用易货系统的集市(开源模式)允许具有共同兴趣和不同技能的人们收集、协作和创造个人无法自行创造的东西。易货系统简单,没有现代货币系统那么复杂,但也有一些局限性,例如: -* 缺乏可分性:在没有共同的交换媒介的情况下,不能将较大的不可分割的商品/价值换成较小的商品/价值。例如,如果你想在开源项目中进行一些小的更改,有时你可能仍需要经历一个高进入门槛。 -* 存储价值:如果项目对贵公司很重要,你可能想要投入大量投资/承诺。但由于它是开源开发人员之间的易货系统,因此拥有强大发言权的唯一方法是雇佣许多开源贡献者,但这并非总是可行的。 -* 转移价值:如果你投资了一个项目(受过培训的员工、雇用开源开发人员)并希望将重点转移到另一个项目,却不可能快速转移(你在上一个项目中拥有的)专业知识、声誉和影响力。 +* 缺乏可分性:在没有共同的交换媒介的情况下,不能将较大的不可分割的商品/价值兑换成较小的商品/价值。例如,如果你想在开源项目中进行一些哪怕是小的更改,有时你可能仍需要经历一个高进入门槛。 +* 存储价值:如果一个项目对贵公司很重要,你可能需要投入大量投资/承诺。但由于它是开源开发者之间的易货系统,因此拥有强大发言权的唯一方法是雇佣许多开源贡献者,但这并非总是可行的。 +* 转移价值:如果你投资了一个项目(受过培训的员工、雇用开源开发者)并希望将重点转移到另一个项目,却不可能快速转移(你在上一个项目中拥有的)专业知识、声誉和影响力。 * 时间脱钩:易货系统没有为延期或提前承诺提供良好的机制。在开源世界中,这意味着用户无法提前或在未来期间以可衡量的方式表达对项目的承诺或兴趣。    下面,我们将探讨如何使用集市的后门解决这些限制。 ### 货币系统 -人们因为不同的原因勾连在集市上:有些人在那里学习,有些是出于满足开发人员个人的喜好,有些人在大型软件工厂工作。因为在集市中拥有发言权的唯一方法是成为开源社区的一份子并加入这个易货系统,为了在开源世界获得信誉,许多大型软件公司雇用这些开发者并以货币方式支付薪酬。这代表使用货币系统来影响集市。开源不再只是为了满足开发人员个人的喜好。它也占据全球整体软件生产的重要部分,并且有许多人想要产生影响。 +人们因为不同的原因勾连于集市上:有些人在那里学习,有些是出于满足开发者个人的喜好,有些人为大型软件工厂工作。因为在集市中拥有发言权的唯一方法是成为开源社区的一份子并加入这个易货系统,为了在开源世界获得信誉,许多大型软件公司雇用这些开发者并以货币方式支付薪酬。这代表可以使用货币系统来影响集市,开源不再只是为了满足开发者个人的喜好,它也占据全球整体软件生产的重要部分,并且有许多人想要施加影响。 -开源设置了开发人员交互的指导原则,并以分布式方式构建一致的系统。它决定了项目的治理方式、软件的构建方式以及其成果如何分配给用户。它是分散实体共同构建高质量软件的开放共识模型。但是开源模型并没有包括如何补贴开源。无论是直接还是间接地通过内在或外在动机的赞助,都与集市无关。 +开源设定了开发人员交互的指导原则,并以分布式方式构建一致的系统。它决定了项目的治理方式、软件的构建方式以及其成果如何分发给用户。它是分散的实体共同构建高质量软件的开放共识模型。但是开源模型并没有包括如何补贴开源的部分,无论是直接还是间接地,通过内在或外在动机的赞助,都与集市无关。 ![](https://opensource.com/sites/default/files/uploads/tokenomics_-_page_4.png) -目前,没有相当于以补贴为目的的去中心化式开源开发模型。大多数开源补贴都是集中式的,通常一家公司通过雇用该项目的主要开源开发人员来支配该项目。说实话,这是目前最好的情况,因为它保证了开发人员将长期获得报酬,项目也将继续蓬勃发展。 +目前,没有相当于以补贴为目的的去中心化式开源开发模型。大多数开源补贴都是集中式的,通常一家公司通过雇用该项目的主要开源开发者来主导该项目。说实话,这是目前最好的状况,因为它保证了开发人员将长期获得报酬,项目也将继续蓬勃发展。 -项目垄断情景也有例外情况:例如,一些云原生计算基金会(CNCF)项目是由大量的竞争公司开发的。此外,Apache 软件基金会(ASF)旨在通过鼓励不同的贡献者来使他们的项目不被单一供应商所主导,但实际上大多数受欢迎的项目仍然是单一供应商项目。 +项目垄断情景也有例外情况:例如,一些云原生计算基金会(CNCF)项目是由大量的竞争公司开发的。此外,Apache 软件基金会(ASF)旨在通过鼓励不同的贡献者来使他们管理的项目不被单一供应商所主导,但实际上大多数受欢迎的项目仍然是单一供应商项目。 我们缺少的是一个开放的、去中心化的模式,就像一个没有集中协调和所有权的集市一样,消费者(开源用户)和生产者(开源开发者)在市场力量和开源价值的驱动下相互作用。为了补充开源,这样的模型也必须是开放和去中心化的,这就是为什么我认为区块链技术[最适合][3]的原因。 -旨在补贴开源开发的大多数现有区块链(和非区块链)平台主要针对的是错误赏金、小型和零碎的任务。少数人还专注于资助新的开源项目。但并没有很多人的目标是提供维持开源项目持续开发的机制 —— 基本上,这个系统可以模仿开源服务提供商公司或开放核心、基于开源的 SaaS 产品公司的行为:确保开发人员继续进行可预测的激励措施,并根据激励者(即用户)的优先事项指导项目开发。这种模型将解决上面列出的易货系统的局限性: +旨在补贴开源开发的大多数现有区块链(和非区块链)平台主要针对的是漏洞赏金、小型和零碎的任务。少数人还专注于资助新的开源项目。但并没有多少平台旨在提供维持开源项目持续开发的机制 —— 基本上,这个系统可以模仿开源服务提供商公司或开放核心、基于开源的 SaaS 产品公司的行为:确保开发人员可以获得持续和可预测的激励,并根据激励者(即用户)的优先事项指导项目开发。这种模型将解决上面列出的易货系统的局限性: -* 允许可分性:如果你想要一些小的修复,你可以支付少量费用,而不是成为项目的开源开发人员的全部费用。 +* 允许可分性:如果你想要一些小的修复,你可以支付少量费用,而不是成为项目的开源开发者的全部费用。 * 存储价值:你可以在项目中投入大量资金,并确保其持续发展和你的发言权。 * 转移价值:在任何时候,你都可以停止投资项目并将资金转移到其他项目中。 * 时间脱钩:允许定期定期付款和订阅。 @@ -54,9 +56,9 @@ ESR 的故事比较了两种模式: ### 总结 -一方面,我们看到大公司雇用开源开发人员并收购开源初创公司甚至基础平台(例如微软收购 GitHub)。许多(甚至大多数)长期成功的开源项目集中在一个供应商周围。开源的重要性及其集中化是一个事实。 +一方面,我们看到大公司雇用开源开发者并收购开源初创公司甚至基础平台(例如微软收购 GitHub)。许多(甚至大多数)能够长期成功运行的开源项目都集中在单个供应商周围。开源的重要性及其集中化是一个事实。 -另一方面,围绕[持续开源][4]软件的挑战正变得越来越明显,许多人正在更深入地研究这个领域及其基础问题。有一些项目具有很高的知名度和大量的贡献者,但还有许多其他一样重要的项目缺乏足够的贡献者和维护者。 +另一方面,[维持开源软件][4]的挑战正变得越来越明显,许多人正在更深入地研究这个领域及其基本问题。有一些项目具有很高的知名度和大量的贡献者,但还有许多其他也重要的项目缺乏足够的贡献者和维护者。 有[许多努力][3]试图通过区块链来解决开源的挑战。这些项目应提高透明度、去中心化和补贴,并在开源用户和开发人员之间建立直接联系。这个领域还很年轻,但是进展很快,随着时间的推移,集市将会有一个加密货币系统。 From 5261369b36ecf2c945a2019f6108de871c6526bd Mon Sep 17 00:00:00 2001 From: Xingyu Wang Date: Mon, 16 Sep 2019 
11:16:43 +0800 Subject: [PATCH 086/202] PUB @wxy https://linux.cn/article-11348-1.html --- .../20180904 How blockchain can complement open source.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) rename {translated/talk => published}/20180904 How blockchain can complement open source.md (98%) diff --git a/translated/talk/20180904 How blockchain can complement open source.md b/published/20180904 How blockchain can complement open source.md similarity index 98% rename from translated/talk/20180904 How blockchain can complement open source.md rename to published/20180904 How blockchain can complement open source.md index 5937af45de..a8f2d15a3c 100644 --- a/translated/talk/20180904 How blockchain can complement open source.md +++ b/published/20180904 How blockchain can complement open source.md @@ -3,7 +3,7 @@ > 了解区块链如何成为去中心化的开源补贴模型。 -![](https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/block-quilt-chain.png?itok=mECoDbrc) +![](https://img.linux.net.cn/data/attachment/album/201909/16/111521od1yn9r1nr1eii9o.jpg) 《[大教堂与集市][1]The Cathedral and The Bazaar》是 20 年前由埃里克·史蒂文·雷蒙德Eric Steven Raymond(ESR)撰写的经典开源故事。在这个故事中,ESR 描述了一种新的革命性的软件开发模型,其中复杂的软件项目是在没有(或者很少的)集中管理的情况下构建的。这个新模型就是开源open source。 From e5373b564d96aac12e3d914f89bb086dce385bdd Mon Sep 17 00:00:00 2001 From: heguangzhi <7731226@qq.com> Date: Mon, 16 Sep 2019 11:22:56 +0800 Subject: [PATCH 087/202] Update 20190828 Managing Ansible environments on MacOS with Conda.md --- ...nsible environments on MacOS with Conda.md | 33 ++++++++----------- 1 file changed, 13 insertions(+), 20 deletions(-) diff --git a/translated/tech/20190828 Managing Ansible environments on MacOS with Conda.md b/translated/tech/20190828 Managing Ansible environments on MacOS with Conda.md index 3f62adac32..f5d72eaa4e 100644 --- a/translated/tech/20190828 Managing Ansible environments on MacOS with Conda.md +++ b/translated/tech/20190828 Managing Ansible environments on MacOS with Conda.md @@ -11,16 +11,14 @@ 使用 Conda 管理 MacOS 上的 Ansible 环境 ===== -Conda 将 Ansible 所需的一切都收集到虚拟环境中并将它与其他项目分开。 +Conda 将 Ansible 所需的一切都收集到虚拟环境中并将其与其他项目分开。 ![CICD with gears][1] -如果您是一名使用 MacOS 并参与 Ansible 管理的 Python 开发人员,您可能希望使用 Conda 包管理器将 Ansible 的工作与核心操作系统和其他本地项目分开。 +如果您是一名使用 MacOS 并参与 Ansible 管理的 Python 开发人员,您可能希望使用 Conda 包管理器将 Ansible 的工作内容与核心操作系统和其他本地项目分开。 -Ansible 基于 Python的。让 Ansible 在 MacOS 上工作 Conda 并不是必须要的,但是它确实让管理 Python 版本和包依赖变得更加容易。这允许您在 MacOS 上使用升级的 Python 版本,并在您的系统、Ansible 和其他编程项目之间保持 Python 包的依赖性是独立的。 +Ansible 基于 Python的。让 Ansible 在 MacOS 上工作 Conda 并不是必须要的,但是它确实让您管理 Python 版本和包依赖变得更加容易。这允许您在 MacOS 上使用升级的 Python 版本,并在您的系统中、Ansible 和其他编程项目之间保持 Python 包的依赖性是相互独立的。 -还有其他方法在 MacOS 上安装 Ansible 。您可以使用[Homebrew][2],但是如果您对 Python 开发(或 Ansible 开发)感兴趣,您可能会发现在一个 Python 虚拟环境中管理 Ansible 可以减少一些混乱。我觉得这更简单;与其试图将 Pythn版本和依赖项加载到系统或在 **/usr/local** 目录中 ,Conda 还能帮助我将 Ansibl e所需的一切都收集到一个虚拟环境中,并将其与其他项目完全分开。 - -This article focuses on using Conda to manage Ansible as a Python project to keep it clean and separated from other projects. Read on to learn how to install Conda, create a new virtual environment, install Ansible, and test it. 
+在 MacOS 上安装 Ansible 还有其他方法。您可以使用[Homebrew][2],但是如果您对 Python 开发(或 Ansible 开发)感兴趣,您可能会发现在一个独立 Python 虚拟环境中管理 Ansible 可以减少一些混乱。我觉得这更简单;与其试图将 Python 版本和依赖项加载到系统或在 **/usr/local** 目录中 ,还不如使用 Conda 帮助我将 Ansible 所需的一切都收集到一个虚拟环境中,并将其与其他项目完全分开。 本文着重于使用 Conda 作为 Python 项目来管理 Ansible ,以保持它的干净并与其他项目分开。请继续阅读,并了解如何安装 Conda、创建新的虚拟环境、安装 Ansible 并对其进行测试。 @@ -28,17 +26,17 @@ This article focuses on using Conda to manage Ansible as a Python project to kee 最近,我想学习[Ansible][3],所以我需要找到安装它的最佳方法。 -我通常对在我的日常工作站上安装东西很谨慎。我尤其不喜欢对供应商的默认操作系统安装应用手动更新(这是我多年作为 Unix 系统管理的首选)。我真的很想使用 Python 3.7,但是 MacOS 包是旧的2.7,我不会安装任何可能干扰核心 MacOS 系统的全局Python包。 +我通常对在我的日常工作站上安装东西很谨慎。我尤其不喜欢对供应商的默认操作系统安装应用手动更新(这是我多年作为 Unix 系统管理的首选)。我真的很想使用 Python 3.7,但是 MacOS 包是旧的2.7,我不会安装任何可能干扰核心 MacOS 系统的全局 Python 包。 -所以,我使用本地 Ubuntu 18.04 虚拟机上开始了我的 Ansible 工作。这提供了真正程度的安全隔离,但我很快发现管理它是非常乏味的。所以我着手研究如何在本机 MacOS 上获得一个灵活但独立的 Ansible 系统。 +所以,我使用本地 Ubuntu 18.04 虚拟机上开始了我的 Ansible 工作。这提供了真正意义上的的安全隔离,但我很快发现管理它是非常乏味的。所以我着手研究如何在本机 MacOS 上获得一个灵活但独立的 Ansible 系统。 由于 Ansible 基于 Python,Conda 似乎是理想的解决方案。 -### 安装Conda +### 安装 Conda -Conda 是一个开源软件,它提供方便的包和环境管理功能。它可以帮助您管理多个版本的 Python 、安装软件包依赖关系、执行升级和维护项目隔离。如果您手动管理 Python 虚拟环境,Conda 将有助于简化和管理您的工作。浏览[ Conda 文档][4]可以了解更多细节。 +Conda 是一个开源软件,它提供方便的包和环境管理功能。它可以帮助您管理多个版本的 Python 、安装软件包依赖关系、执行升级和维护项目隔离。如果您手动管理 Python 虚拟环境,Conda 将有助于简化和管理您的工作。浏览 [Conda 文档][4]可以了解更多细节。 -我选择了 [Miniconda][5] Python 3.7 安装在我的工作站中,因为我想要最新的 Pytho n版本。无论选择哪个版本,您都可以使用其他版本的 Python 安装新的虚拟环境。 +我选择了 [Miniconda][5] Python 3.7 安装在我的工作站中,因为我想要最新的 Python 版本。无论选择哪个版本,您都可以使用其他版本的 Python 安装新的虚拟环境。 要安装 Conda,请下载 PKG 格式的文件,进行通常的双击,并选择 “Install for me only” 选项。安装在我的系统上占用了大约158兆的空间。 @@ -77,7 +75,7 @@ Python 3.7.1 第三个命令不是必须的;它列出了安装了哪些 Python 模块及其版本和其他数据。 -您可以随时使用 Conda 的 **activate** 命令切换到另一个虚拟环境。这将带您回到基本的: **conda 基本的**。 +您可以随时使用 Conda 的 **activate** 命令切换到另一个虚拟环境。这将带您回到基本的: **conda base**。 ### 安装 Ansible @@ -96,8 +94,6 @@ Python 3.7.1 ### 使用 Ansible -Now that you have installed a Conda virtual environment, you're ready to use it. First, make sure the node you want to control has your workstation's SSH key installed to the right user account. - 既然您已经安装了 Conda 虚拟环境,就可以使用它了。首先,确保要控制的节点已将工作站的 SSH 密钥安装到正确的用户帐户。 调出一个新的 shell 并运行一些基本的Ansible命令: @@ -124,7 +120,7 @@ ansible 2.8.1 现在 Ansible 正在工作了,您可以在控制台中抽身,并从您的 MacOS 工作站中使用它们。 -### 克隆新的Ansible进行Ansible开发 +### 克隆新的 Ansible 进行 Ansible 开发 这部分完全是可选的;只有当您想要额外的虚拟环境来修改 Ansible 或者安全地使用有问题的 Python 模块时,才需要它。您可以通过以下方式将主 Ansible 环境克隆到开发副本中: @@ -136,8 +132,6 @@ ansible 2.8.1 ### 需要注意的问题 -Occasionally you may get into trouble with Conda. You can usually delete a bad environment with: - 偶尔您可能遇到使用 Conda 的麻烦。您通常可以通过以下方式删除不良环境: ``` @@ -146,11 +140,10 @@ $ conda remove --name ansible-dev --all ``` 如果出现无法解决的错误,通常可以通过在 **~/miniconda3/envs** 中找到环境并删除整个目录来直接删除环境。如果基础损坏了,您可以删除整个 **~/miniconda3**,然后从 PKG 文件中重新安装。只要确保保留 **~/miniconda3/envs** ,或使用 Conda 工具导出环境配置并在以后重新创建即可。 -MacOS 上不包括 **sshpass** 程序。只有当您的 Ansible工 作要求您向 Ansible 提供SSH登录密码时,才需要它。您可以在 SourceForge 上找到当前的[sshpass source][6]。 +MacOS 上不包括 **sshpass** 程序。只有当您的 Ansible 工作要求您向 Ansible 提供SSH登录密码时,才需要它。您可以在 SourceForge 上找到当前的[sshpass source][6]。 -Finally, the base Conda Python module list may lack some Python modules you need for your work. If you need to install one, the **conda install <package>** command is preferred, but **pip** can be used where needed, and Conda will recognize the install modules. 
-最后,基础 Conda Python 模块列表可能缺少您工作所需的一些Python模块。如果您需要安装一个模块,**conda install <package>** 命令是首选的,但是 **pip** 可以在需要的地方使用,Conda会识别安装模块。 +最后,基础 Conda Python 模块列表可能缺少您工作所需的一些 Python 模块。如果您需要安装一个模块,**conda install <package>** 命令是首选的,但是 **pip** 可以在需要的地方使用,Conda会识别安装模块。 ### 结论 From f6665af171c314ac608f3d9aecceacd8cdc1107f Mon Sep 17 00:00:00 2001 From: heguangzhi <7731226@qq.com> Date: Mon, 16 Sep 2019 14:27:37 +0800 Subject: [PATCH 088/202] Update 20190823 The Linux kernel- Top 5 innovations.md --- sources/tech/20190823 The Linux kernel- Top 5 innovations.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sources/tech/20190823 The Linux kernel- Top 5 innovations.md b/sources/tech/20190823 The Linux kernel- Top 5 innovations.md index 95e35bc309..5e35982290 100644 --- a/sources/tech/20190823 The Linux kernel- Top 5 innovations.md +++ b/sources/tech/20190823 The Linux kernel- Top 5 innovations.md @@ -1,5 +1,5 @@ [#]: collector: (lujun9972) -[#]: translator: ( ) +[#]: translator: (heguangzhi) [#]: reviewer: ( ) [#]: publisher: ( ) [#]: url: ( ) @@ -81,7 +81,7 @@ via: https://opensource.com/article/19/8/linux-kernel-top-5-innovations 作者:[Seth Kenlon][a] 选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) +译者:[heguangzhi](https://github.com/heguangzhi) 校对:[校对者ID](https://github.com/校对者ID) 本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 From 992f761c2f1f01b687f84e54499b9cc8936b5afe Mon Sep 17 00:00:00 2001 From: Xingyu Wang Date: Mon, 16 Sep 2019 16:22:58 +0800 Subject: [PATCH 089/202] APL --- ... Graduates From A Hobby Project To A Professional Project.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sources/news/20190914 Manjaro Linux Graduates From A Hobby Project To A Professional Project.md b/sources/news/20190914 Manjaro Linux Graduates From A Hobby Project To A Professional Project.md index 1431b9f76f..4b4e4f84d4 100644 --- a/sources/news/20190914 Manjaro Linux Graduates From A Hobby Project To A Professional Project.md +++ b/sources/news/20190914 Manjaro Linux Graduates From A Hobby Project To A Professional Project.md @@ -1,5 +1,5 @@ [#]: collector: (lujun9972) -[#]: translator: ( ) +[#]: translator: (wxy) [#]: reviewer: ( ) [#]: publisher: ( ) [#]: url: ( ) From 9b0a3617e394c4484910c9fed7c109796683fe8a Mon Sep 17 00:00:00 2001 From: Xingyu Wang Date: Mon, 16 Sep 2019 17:00:25 +0800 Subject: [PATCH 090/202] =?UTF-8?q?=E6=B8=85=E9=99=A4=E4=BC=81=E4=B8=9A?= =?UTF-8?q?=E7=BA=A7=E5=86=85=E5=AE=B9?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 清除部分企业特定的文章,清除部分较少关注的技术方向 @lujun9972 --- sources/talk/20170908 Betting on the Web.md | 467 ------------------ ...d old versions of MS-DOS as open source.md | 74 --- ...rce its Reverse Engineering Tool GHIDRA.md | 89 ---- ...he Art of Unix Programming, reformatted.md | 54 -- ...- Ubuntu is NOT Replacing Apt with Snap.md | 76 --- .../20190331 Codecademy vs. The BBC Micro.md | 129 ----- ...tionally dangerous DNS hijacking attack.md | 130 ----- ...e attack adds new tools, morphs tactics.md | 97 ---- ...-speed connections from the edge to AWS.md | 69 --- ...with multicloud-to-branch access system.md | 89 ---- ...s to pick up in the second half of 2019.md | 56 --- ... adds AMP to SD-WAN for ISR-ASR routers.md | 74 --- ... Supermicro moves production from China.md | 58 --- ...ng Network Automation to the Enterprise.md | 56 --- ...ort, public safety with IoT deployments.md | 65 --- ... gear with Teridion-s cloud WAN service.md | 74 --- ... 
does not own the supercomputing market.md | 59 --- ...oft Office 365 e-mail phishing increase.md | 92 ---- ...ge computing platform for AI processing.md | 53 -- .../talk/20190601 HPE Synergy For Dummies.md | 77 --- ... AI-ML to boost intent-based networking.md | 87 ---- ...curity could help drive interest in SDN.md | 89 ---- ...ches a developer-community cert program.md | 66 --- ...oud-based security for SD-WAN resources.md | 95 ---- ...d VxBlock integration with new features.md | 72 --- ... with AI and machine learning abilities.md | 68 --- ...tries to hook its tentacles into SD-WAN.md | 71 --- ...pen-source zettabyte storage initiative.md | 60 --- ... in to simplify hybrid cloud deployment.md | 85 ---- ...security warnings on SD-WAN, DNA Center.md | 111 ----- ...ord Not to Use a Business-Driven SD-WAN.md | 78 --- ...based cloud services to enterprise edge.md | 94 ---- ...work connectivity and microsegmentation.md | 99 ---- ...ptical technology with -2.6B Acacia buy.md | 72 --- ...ioned- a costly, time-consuming project.md | 73 --- ...IBM-s acquisition be the end of Red Hat.md | 66 --- ...MPLS is hanging on in this SD-WAN world.md | 71 --- ... evolution of enterprise IoT technology.md | 102 ---- ... for Apple CarPlay could define the IoT.md | 73 --- ...offer VMware data-center tools natively.md | 69 --- ...ment with Microsoft Azure collaboration.md | 67 --- ...possible by exploiting flaws in Vxworks.md | 85 ---- ...uld be a game-changer for GPU computing.md | 58 --- ...op enterprise SD-WAN technology drivers.md | 96 ---- ...Hat-s to launch hybrid-cloud juggernaut.md | 68 --- .../talk/20190809 Goodbye, Linux Journal.md | 67 --- ...GA cards that can match GPU performance.md | 69 --- ...etter battle Cisco, Arista, HPE, others.md | 64 --- ...he need for natural language processing.md | 71 --- ...es Kubernetes to star enterprise status.md | 68 --- ...ncing, security intelligence, analytics.md | 83 ---- ...20190828 VMware touts hyperscale SD-WAN.md | 88 ---- ...ders Need to Get Aggressive with SD-WAN.md | 47 -- ...5 HPE-s vision for the intelligent edge.md | 88 ---- ...cessors crush four Intel Xeons in tests.md | 59 --- ...ng GPU presence into a data center play.md | 64 --- 56 files changed, 4681 deletions(-) delete mode 100644 sources/talk/20170908 Betting on the Web.md delete mode 100644 sources/talk/20181024 Why it matters that Microsoft released old versions of MS-DOS as open source.md delete mode 100644 sources/talk/20190108 NSA to Open Source its Reverse Engineering Tool GHIDRA.md delete mode 100644 sources/talk/20190115 The Art of Unix Programming, reformatted.md delete mode 100644 sources/talk/20190223 No- Ubuntu is NOT Replacing Apt with Snap.md delete mode 100644 sources/talk/20190331 Codecademy vs. 
The BBC Micro.md delete mode 100644 sources/talk/20190417 Cisco Talos details exceptionally dangerous DNS hijacking attack.md delete mode 100644 sources/talk/20190424 Cisco- DNSpionage attack adds new tools, morphs tactics.md delete mode 100644 sources/talk/20190501 Vapor IO provides direct, high-speed connections from the edge to AWS.md delete mode 100644 sources/talk/20190506 Cisco boosts SD-WAN with multicloud-to-branch access system.md delete mode 100644 sources/talk/20190507 Server shipments to pick up in the second half of 2019.md delete mode 100644 sources/talk/20190509 Cisco adds AMP to SD-WAN for ISR-ASR routers.md delete mode 100644 sources/talk/20190510 Supermicro moves production from China.md delete mode 100644 sources/talk/20190514 Brillio and Blue Planet Partner to Bring Network Automation to the Enterprise.md delete mode 100644 sources/talk/20190514 Las Vegas targets transport, public safety with IoT deployments.md delete mode 100644 sources/talk/20190523 Cisco ties its security-SD-WAN gear with Teridion-s cloud WAN service.md delete mode 100644 sources/talk/20190528 With Cray buy, HPE rules but does not own the supercomputing market.md delete mode 100644 sources/talk/20190529 Cisco security spotlights Microsoft Office 365 e-mail phishing increase.md delete mode 100644 sources/talk/20190529 Nvidia launches edge computing platform for AI processing.md delete mode 100644 sources/talk/20190601 HPE Synergy For Dummies.md delete mode 100644 sources/talk/20190605 Cisco will use AI-ML to boost intent-based networking.md delete mode 100644 sources/talk/20190606 Juniper- Security could help drive interest in SDN.md delete mode 100644 sources/talk/20190611 Cisco launches a developer-community cert program.md delete mode 100644 sources/talk/20190612 Cisco offers cloud-based security for SD-WAN resources.md delete mode 100644 sources/talk/20190612 Dell and Cisco extend VxBlock integration with new features.md delete mode 100644 sources/talk/20190613 Oracle updates Exadata at long last with AI and machine learning abilities.md delete mode 100644 sources/talk/20190614 Report- Mirai tries to hook its tentacles into SD-WAN.md delete mode 100644 sources/talk/20190614 Western Digital launches open-source zettabyte storage initiative.md delete mode 100644 sources/talk/20190619 Cisco connects with IBM in to simplify hybrid cloud deployment.md delete mode 100644 sources/talk/20190619 Cisco issues critical security warnings on SD-WAN, DNA Center.md delete mode 100644 sources/talk/20190625 You Can-t Afford Not to Use a Business-Driven SD-WAN.md delete mode 100644 sources/talk/20190626 Juniper-s Mist adds WiFi 6, AI-based cloud services to enterprise edge.md delete mode 100644 sources/talk/20190701 Tempered Networks simplifies secure network connectivity and microsegmentation.md delete mode 100644 sources/talk/20190709 Cisco goes deeper into photonic, optical technology with -2.6B Acacia buy.md delete mode 100644 sources/talk/20190710 The Titan supercomputer is being decommissioned- a costly, time-consuming project.md delete mode 100644 sources/talk/20190710 Will IBM-s acquisition be the end of Red Hat.md delete mode 100644 sources/talk/20190717 MPLS is hanging on in this SD-WAN world.md delete mode 100644 sources/talk/20190718 Smart cities offer window into the evolution of enterprise IoT technology.md delete mode 100644 sources/talk/20190724 How BMW-s new annual fee for Apple CarPlay could define the IoT.md delete mode 100644 sources/talk/20190730 Google Cloud to offer VMware data-center tools 
natively.md delete mode 100644 sources/talk/20190731 Cisco simplifies Kubernetes container deployment with Microsoft Azure collaboration.md delete mode 100644 sources/talk/20190731 Remote code execution is possible by exploiting flaws in Vxworks.md delete mode 100644 sources/talk/20190731 VMware-s Bitfusion acquisition could be a game-changer for GPU computing.md delete mode 100644 sources/talk/20190801 Cisco assesses the top enterprise SD-WAN technology drivers.md delete mode 100644 sources/talk/20190801 IBM fuses its software with Red Hat-s to launch hybrid-cloud juggernaut.md delete mode 100644 sources/talk/20190809 Goodbye, Linux Journal.md delete mode 100644 sources/talk/20190812 Xilinx launches new FPGA cards that can match GPU performance.md delete mode 100644 sources/talk/20190815 Extreme-s acquisitions have prepped it to better battle Cisco, Arista, HPE, others.md delete mode 100644 sources/talk/20190815 Nvidia rises to the need for natural language processing.md delete mode 100644 sources/talk/20190826 VMware plan elevates Kubernetes to star enterprise status.md delete mode 100644 sources/talk/20190827 VMware boosts load balancing, security intelligence, analytics.md delete mode 100644 sources/talk/20190828 VMware touts hyperscale SD-WAN.md delete mode 100644 sources/talk/20190903 IT Leaders Need to Get Aggressive with SD-WAN.md delete mode 100644 sources/talk/20190905 HPE-s vision for the intelligent edge.md delete mode 100644 sources/talk/20190906 Two AMD Epyc processors crush four Intel Xeons in tests.md delete mode 100644 sources/talk/20190911 Can AMD convert its growing GPU presence into a data center play.md diff --git a/sources/talk/20170908 Betting on the Web.md b/sources/talk/20170908 Betting on the Web.md deleted file mode 100644 index 84d70e164f..0000000000 --- a/sources/talk/20170908 Betting on the Web.md +++ /dev/null @@ -1,467 +0,0 @@ -[Betting on the Web][27] -============================================================ - -![](https://static.joreteg.com/large_background.jpg) - - _Note: I just spoke at [Coldfront 2017][12] about why I’m such a big proponent of the Web. What follows is essentially that talk as a blog post (I’ll add a link to the video once it is published)._ - - _Also: the Starbucks PWA mentioned in the talk has shipped! 🎉_ - -I’m  _not_  going to tell you what to do. Instead, I’m going to explain why I’ve chosen to bet my whole career on this crazy Web thing. "Betting" sounds a bit haphazard, it’s more calculated than that. It would probably be better described as "investing." - -Investing what? Our time and attention. - -Many of us only have maybe 6 or so  _really_  productive hours per day when we’re capable of being super focused and doing our absolute best work. So how we chose to invest that very limited time is kind of a big deal. Even though I really enjoy programming I rarely do it aimlessly just for the pure joy of it. Ultimately, I’m investing that productive time expecting to get  _some kind of return_  even if it’s just mastering something or solving a difficult problem. - - [### "So what, what’s your point?"][28] - -> > More than most of us realize we are  _constantly_  investing - -Sure, someone may be paying for our time directly but there’s more to it than just trading hours for money. In the long run, what we chose to invest our professional efforts into has other effects: - -**1\. Building Expertise:** We learn as we work and gain valuable experience in the technologies and platform we’re investing in. 
That expertise impacts our future earning potential and what types of products we’re capable of building. - -**2\. Building Equity:** Hopefully we’re generating equity and adding value to whatever product we’re building. - -**3\. Shaping tomorrow’s job market:** We’re building tomorrow’s legacy code today™. Today’s new hotness is tomorrow’s maintenance burden. In many cases the people that initially build a product or service are not the ones that ultimately maintain it. This means the technology choices we make when building a new product or service, determine whether or not there will be jobs later that require expertise in that particular platform/technology. So, those tech choices  _literally shape tomorrow’s job market!_ - -**4\. Body of knowledge:** As developers, we’re pretty good at sharing what we learn. We blog, we "Stack-Overflow", etc. These things all contribute to the corpus of knowledge available about that given platform which adds significant value by making it easier/faster for others to build things using these tools. - -**5\. Open Source:** We solve problems and share our work. When lots of developers do this it adds  _tremendous value_  to the technologies and platforms these tools are for. The sheer volume of work that we  _don’t have to do_  because we can use someone else’s library that already does it is mind-boggling. Millions and millions of hours of development work are available to us for free with a simple `npm install`. - -**6\. Building apps for users on that platform:** Last but not least, without apps there is no platform. By making more software available to end users, we’re contributing significant value to the platforms that run our apps. - -Looking at that list, the last four items are not about  _us_  at all. They represent other significant long-term impacts. - -> > We often have a broader impact than we realize - -We’re not just investing time into a job, we're also shaping the platform, community, and technologies we use. - -We’re going to come back to this, but hopefully, recognizing that greater impact can help us make better investments. - - [### With all investing comes  _risk_][29] - -We can’t talk about investing without talking about risk. So what are some of the potential risks? - - [### Are we building for the right platform?][30] - -Platform stability is indeed A Thing™. Just ask a Flash developer, Windows Phone developer, or Blackberry developer. Platforms  _can_  go away. - -If we look at those three platforms, what do they have in common? They’re  _closed_  platforms. What I mean is there’s a single controlling interest. When you build for them, you’re building for a specific operating system and coding against a particular implementation as opposed to coding against a set of  _open standards_ . You could argue, that at least to some degree, Flash died because of its "closed-ness". Regardless, one thing is clear from a risk mitigation perspective: open is better than closed. - -the Web is  _incredibly_  open. It would be quite difficult for any one entity to kill it off. - -Now, for Windows Phone/Blackberry it failed due to a lack of interested users... or was it lack of interested developers?? - -![](https://d33wubrfki0l68.cloudfront.net/9c118bc64747a753804bf88f16237bfe1c71905e/8d334/images/2/ballmer.jpg) - -Maybe if Ballmer ☝️ has just yelled "developers"  _one more time_  we’d all have Windows Phones in our pockets right now 😜. - -From a risk mitigation perspective, two things are clear with regard to platform stability: - -1. 
Having  _many users_  is better than having few users - -2. Having  _more developers_  building for the platform is better than having few developers - -> > There is no bigger more popular open platform than the Web - - [### Are we building the right software?][31] - -Many of us are building apps. Well, we used to build "applications" but that wasn’t nearly cool enough. So now we build "apps" instead 😎. - -What does "app" mean to a user? This is important because I think it’s changed a bit over the years. To a user, I would suggest it basically means: "a thing I put on my phone." - -But for our purposes I want to get a bit more specific. I’d propose that an app is really: - -1. An "ad hoc" user interface - -2. That is local(ish) to the device - -The term "ad hoc" is Latin and translates to **"for this"**. This actually matches pretty closely with what Apple’s marketing campaigns have been teaching the masses: - -> There’s an app **for that** -> -> – Apple - -The point is it helps you  _do_  something. The emphasis is on action. I happen to think this is largely the difference between a "site" and an "app". A news site for example has articles that are resources in and of themselves. Where a news app is software that runs on the device that helps you consume news articles. - -Another way to put it would be that site is more like a book, while an app is a tool. - - [### Should we be building apps at all?!][32] - -Remember when chatbots were supposed to take over the world? Or perhaps we’ll all be walking around with augmented reality glasses and that’s how we’ll interact with the world? - -I’ve heard it said that "the future app is  _no_  app" and virtual assistants will take over everything. - -![](https://d33wubrfki0l68.cloudfront.net/447b9cdc5e549f874d40fcccbdc6a4225d898677/b3dce/images/2/echo.png) - -I’ve had one of these sitting in my living room for a couple of years, but I find it all but useless. It’s just a nice bluetooth speaker that I can yell at to play me music. - -But I find it very interesting that: - -> > Even Alexa has an app! - -Why? Because there’s no screen! As it turns out these "ad hoc visual interfaces" are extremely efficient. - -Sure, I can yell out "Alexa, what’s the weather going to be like today" and I’ll hear a reply with high and low and whether it’s cloudy, rainy, or sunny. But in that same amount of time, I can pull my phone out tap the weather app and before Alexa can finish telling me those 3 pieces of data, I can visually scan the entire week’s worth of data, air quality, sunrise/sunset times, etc. It’s just  _so much more_  efficient as a mechanism for consuming this type of data. - -As a result of that natural efficiency, I believe that having a visual interface is going to continue to be useful for all sorts of things for a long time to come. - -That’s  _not_  to say virtual assistants aren’t useful! Google Assistant on my Pixel is quite useful in part because it can show me answers and can tolerate vagueness in a way that an app with a fixed set of buttons never could. - -But, as is so often the case with new useful tech, rarely does it complete replace everything that came before it, instead, it augments what we already have. - - [### If apps are so great why are we so "apped out"?][33] - -How do we explain that supposed efficiency when there’s data like this? 
- -* [65% of smartphone users download zero apps per month][13] - -* [More than 75% of people who download an app open it once and never come back][14] - -I think to answer that, we have to really look at what isn’t working well. - - [### What sucks about apps?][34] - -1. **Downloading them certainly sucks.** No one wants to open an app store, search for the app they’re trying to find, then wait to download the huge file. These days a 50MB app is pretty small: Facebook for iOS is 346MB and Twitter for iOS is 212MB. - -2. **Updating them sucks.** Every night when I plug in my phone, I download a whole slew of app updates that I, as a user, **could not possibly care less about**. In addition, many of these apps are things I installed  _once_ and will **never open again, ever!** I’d love to know the global stats on how much bandwidth has been wasted on app updates for apps that were never opened again. - -3. **Managing them sucks.** Sure, when I first got an iPhone ages ago and could first download apps, my home screen was impeccable. Then when we got folders!! Wow... what an amazing development! Now I could finally put all those pesky uninstallable Apple apps in a folder called "💩" and pretend they didn’t exist. But now, my home screen is a bit of a disaster. Sitting there dragging apps around is not my idea of a good time. So eventually things get all cluttered up again. - -The thing I’ve come to realize is this: - -> > We don’t care how they got there. We only care that they’re  _there_  when we need them. - -For example, I love to go mountain biking and I enjoy tracking my rides with an app called Strava. I get all geared up for my ride, get on my bike and then go, "Oh right, gotta start Strava." So I pull out my phone  _with my gloves on_  and go: "Ok Google, open Strava". - -I  _could not care less_  about where that app was or where it came from when I said that. - -I don’t care if it was already installed, I don’t care if it never existed on my home screen, or if it was generated out of thin air on the spot. - -> > Context is  _everything_ ! - -If I’m at a parking meter, I want the app  _for that_ . If I’m visiting Portland, I want their public transit app. - -But I certainly  _do not_  want it as soon as I’ve left. - -If I’m at a conference, I might want a conference app to see the schedule, post questions to speakers, or whatnot. But wow, talk about something that quickly becomes worthless as soon as that conference is over! - -As it turns out, the more "ad hoc" these things are, the better! The more  _disposable_  and  _re-inflatable_ , the better! - -Which also reminds me of something I feel like we often forget. We always assume people want our shiny apps, and we measure things like "engagement" and "time spent in the app" when really (there certainly are exceptions to this, such as apps that are essentially entertainment), often... - -> > People don’t want to use your app. They want  _to be done_  using your app. - - [### Enter PWAs][35] - -I’ve been contracting with Starbucks for the past 18 months. They’ve taken on the ambitious project of essentially re-building a lot of their web stuff in Node.js and React. One of the things I’ve helped them with (and pushed hard for) was to build a PWA (Progressive Web App) that could provide similar functionality to their native apps. Coincidentally, it was launched today: [https://preview.starbucks.com][18]!
- - - -This gives us a nice real-world example: - -* Starbucks iOS: 146MB - -* Starbucks PWA: ~600KB - -The point is there’s a  _tremendous_  size difference. - -It’s 0.4% of the size. To put it differently, I could download the PWA **243 times** in the same amount of time it would take to download the iOS app. Then, of course, the iOS app still has to install and boot up! - -Personally, I’d have loved it if the app ended up even smaller, and there are plans to shrink it further. But even still, they’re  _not even on the same planet_  in terms of file size! - -Market forces are  _strongly_  aligned with PWAs here: - -* Few app downloads - -* User acquisition is  _hard_ - -* User acquisition is  _expensive_ - -If the goal is to get people to sign up for the rewards program, that type of size difference could very well make the difference between someone being signed up and using the app experience (via the PWA) by the time they reach the front of the line at Starbucks, or not. - -User acquisition is hard enough already; the more time and barriers that can be removed from that process, the better. - - [### Quick PWA primer][36] - -As mentioned, PWA stands for "Progressive Web Apps" or, as I like to call them: "Web Apps" 😄 - -Personally I’ve been trying to build what a user would define as an "app" with web technology for  _years_ . But until PWAs came along, as hard as we tried, you couldn’t quite build a  _real app_  with just web tech. Honestly, I kinda hate myself for saying that, but in terms of something that a user would understand as an "app", I’m afraid that statement was probably true until very recently. - -So what’s a PWA? As one of its primary contributors put it: - -> It’s just a website that took all the right vitamins. -> -> – Alex Russell - -It involves a few specific technologies, namely: - -* Service Worker, which enables true reliability on the web. What I mean by that is that I can build an app that, as long as you loaded it while you were online, will from then on  _always_  open, even if you’re not. This puts it on equal footing with other apps. - -* HTTPS. PWAs must be served over encrypted connections. - -* Web App Manifest. A simple JSON file that describes your application: what icons to use if someone adds it to their home screen, what its name is, etc. - -There are plenty of other resources about PWAs on the web (and there’s a small sketch of these pieces a little further down). The point for my purposes is: - -> > It is now possible to build PWAs that are  _indistinguishable_  from their native counterparts - -They can be up and running in a fraction of the time, whether or not they were already "installed", and, unlike "apps", can be saved as an app on the device  _at the user’s discretion!_ - -Essentially they’re really great for creating "ad hoc" experiences that can be "cold started" on a whim nearly as fast as if they were already installed. - -I’ve said it before and I’ll say it again: - -> PWAs are the biggest thing to happen to the mobile web since the iPhone. -> -> – Um... that was me - - [### Let’s talk Internet of things][37] - -I happen to think that PWAs + IoT = ✨ MAGIC ✨, as several smart folks have pointed out. - -The one-app-per-device approach to smart devices probably isn’t particularly smart. - -It doesn’t scale well and it completely fails in terms of "ad hoc"-ness. Sure, if I have a Nest thermostat and Philips Hue lightbulbs, it’s reasonable to have two apps installed. But even that sucks as soon as I want someone else to be able to control them. If  _I just let you into my house_ , trust me...
I’m perfectly happy to let you flip a light switch; you’re in my house, after all. But for the vast majority of these things there’s no concept of "nearby apps", and it’s silly for my guest (or a house-sitter) to download an app they don’t actually want, just so I can let them control my lights. - -The whole "nearby apps" thing has so many uses: - -* thermostat - -* lights - -* locks - -* garage doors - -* parking meter - -* setting refrigerator temp - -* conference apps - -Today there are lots of new capabilities being added to the web to enable web apps to interact with physical devices in the real world. Things like WebUSB, WebBluetooth, WebNFC, and efforts like [Physical Web][19]. Even for things like Augmented (and Virtual) reality, the idea of the items we want to interact with having URLs makes so much sense, and I can’t imagine a better, more flexible use of those URLs than for them to point to a PWA that lets you interact with that device! - - [### Forward-looking statements...][38] - -I’ve been talking about all this in terms of investing. If you’ve ever read any company statement that discusses the future, you always see a line explaining that the things about to be discussed contain "forward-looking statements" that may or may not ultimately happen. - -So, here are  _my_  forward-looking statements. - - [### 1\. PWA-only startups][39] - -Given the cost (and challenge) of user acquisition and the quality of app you can build with PWAs these days, I feel like this is inevitable. If you’re trying to get something off the ground, it just isn’t very efficient to spin up  _three whole teams_  to build for iOS, Android, and the Web. - - [### 2\. PWAs listed in App Stores][40] - -So, there’s a problem with "web only", which is that for the better part of a decade we’ve been training users to look for apps in the app store for their given platform. So if you’re already a recognized brand, especially if you already have a native app that you’re trying to replace, it simply isn’t smart for you  _not to exist_  in the app stores. - -So, some of this isn’t all that "forward-looking": as it turns out, [Microsoft has already committed to listing PWAs in the Windows Store][20], more than once! - -**They haven’t even finished implementing Service Worker in Edge yet!** But they’re already committing hard to PWAs. In addition to the post linked above, one of their lead Developer Relations folks, Aaron Gustafson, just [wrote an article for A List Apart][21] telling everyone to build PWAs. - -But if you think about it from their perspective, of course they should do that! As I said earlier, they’ve struggled to attract developers to build for their mobile phones. In fact, they’ve at times  _paid_  companies to write apps for them simply to make sure apps exist so that users will be able to have apps they want when using a Windows Phone. Remember how I said developer time is a scarce resource and without apps, the platform is worthless? So  _of course_  they should add first-class support for PWAs. If you build a PWA, like a lot of folks are doing, then TADA!!! 🎉 You just made a Windows/Windows Phone app! - -I’m of the opinion that the writing is on the wall for Google to do the same thing. It’s pure speculation, but it certainly seems like they are taking steps that suggest they may be planning on listing PWAs too. Namely, the Chrome folks recently shipped a feature referred to as "WebAPKs" for Chrome stable on Android (yep, everyone).
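Before getting into why WebAPKs matter, it’s worth making the earlier primer concrete. Here’s a minimal sketch of the pieces a PWA adds to an ordinary site; all of the file names, manifest values, and the cache name are invented for illustration, so treat this as the shape of the thing rather than any particular app’s code:

```
// manifest.webmanifest: the "simple JSON file" from the primer, linked
// from the HTML with <link rel="manifest" href="/manifest.webmanifest">.
// It would contain something like (values invented):
//   {
//     "name": "Example PWA",
//     "short_name": "Example",
//     "start_url": "/",
//     "display": "standalone",
//     "icons": [{ "src": "/icon-192.png", "sizes": "192x192", "type": "image/png" }]
//   }

// sw.js: a bare-bones service worker. It pre-caches the app shell when
// the worker is installed, then answers requests cache-first with a
// network fallback, so the app opens even with no connection at all.
const CACHE = 'example-v1';

self.addEventListener('install', (event) => {
  event.waitUntil(
    caches.open(CACHE).then((cache) => cache.addAll(['/', '/app.js', '/app.css']))
  );
});

self.addEventListener('fetch', (event) => {
  event.respondWith(
    caches.match(event.request).then((hit) => hit || fetch(event.request))
  );
});

// Finally, in the page's own JavaScript, register the worker:
if ('serviceWorker' in navigator) {
  navigator.serviceWorker.register('/sw.js');
}
```

Serve all of that over HTTPS and, as far as the browser is concerned, you’ve got a PWA; everything beyond these few lines is just a normal web app.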
In the past I’ve [explained in more detail][22] why I think WebAPKs are a big deal. But a shorter version would be that before this change, sure, you could save a PWA to your home screen...  _But_ , in reality it was actually a glorified bookmark. That’s what changes with WebAPKs. Instead, when you add a PWA to your home screen, Chrome generates and "side loads" an actual `.apk` file on the fly. This allows that PWA to enjoy some privileges that were simply impossible until the operating system recognized it as "an app." For example: - -* You can now mute push notifications for a specific PWA without muting them for all of Chrome. - -* The PWA is listed in the "app tray" that shows all installed apps (previously it was just the home screen). - -* You can see power usage and permissions granted to the PWA, just like for any other app. - -* The app developer can now update the icon for the app by publishing an update to the app manifest. Before, there was no way to update the icon once it had been added. - -* And a slew of other similar benefits... - -If you’ve ever installed an Android app from a source other than the Play Store (or a carrier’s/OEM’s store), you know that you have to flip a switch in settings to allow installs from "untrusted sources". So, how then, you might ask, can they generate and install an actual `.apk` file for a PWA without requiring that you change that setting? As it turns out, the answer is quite simple: Use a trusted source! - -> > As it turns out, WebAPKs are managed through Google Play Services! - -I’m no rocket scientist, but based on their natural business alignment with the web, their promotion of PWAs, and the lengths they’ve gone to in granting PWAs equal status with native apps on the operating system, it only seems natural that they’d eventually  _list them in the store_ . - -Additionally, if Google did start listing PWAs in the Play Store, both they and Microsoft would be doing it,  _leaving Apple sticking out like a sore thumb and looking like the laggard_ . Essentially, app developers would be able to target a  _massive_  number of users on a range of platforms with a single well-built PWA. But, just like developers grew to despise IE for not keeping up with the times and forcing them to jump through extra hoops to support it, the same thing would happen here. Apple does  _not_  want to be the next IE, and I’ve already seen many prominent developers suggesting they already are. - -Which brings us to another forward-looking statement: - - [### 3\. PWAs on iOS][41] - -Just a few weeks ago the Safari folks announced that Service Worker is now [officially under development][23]. - - [### 4\. PWAs everywhere][42] - -I really think we’ll start seeing them everywhere: - -* Inside VR/AR/MR experiences - -* Inside chat bots (again, pulling up an ad-hoc interface is so much more efficient). - -* Inside Xbox?! - -As it turns out, if you look at Microsoft’s status page for Edge about Service Worker, you see this: - -![](https://d33wubrfki0l68.cloudfront.net/6e28110b29d042e6472c3512748ecb9f541dcb67/a2b7d/images/2/edge.png) - -I hinted at this already, but I also think PWAs pair very nicely with virtual assistants: being able to pull up a PWA on a whim, without requiring it to already be installed, would add tremendous power to the virtual assistant. Incidentally, this also becomes easier if there’s a known "registered" name of a PWA listed in an app store. - -Some other fun use cases: - -* Apparently the new digital menu displays in McDonald’s Restaurants (at least in the U.S.)
are actually a web app built with Polymer ([source][15]). I don’t know if there’s a Service Worker or not, but it would make sense for there to be. - -* Sports score boards!? I’m an [independent consultant][16], and someone approached me about potentially using a set of TVs and web apps to build a scorekeeping system at an arena. Point is, there are so many cool examples! - -The web really is the universal platform! - - [### For those who think PWAs are just a Google thing][43] - -First off, I’m pretty sure Microsoft, Opera, Firefox, and Samsung folks would want to punch you for that. It [simply isn’t true][24], and we’re increasingly seeing more compatibility efforts between browser vendors. - -For example, check out the [Web Platform Tests][25], which are essentially continuous integration for web features, run against new releases of major browsers. Some folks will recall that when Apple first claimed they implemented IndexedDB in Safari, the version they shipped was essentially unusable because it had major shortcomings and bugs. - -Now, with the WPTs, you can drill into these features (to quite some detail) and see whether a given browser passes or fails. No more claiming "we shipped!" but not actually shipping. - - [### What about feature "x" on platform "y" that we need?][44] - -It could well be that you have a need that isn’t yet covered by the web platform. In reality, that list is getting shorter and shorter. Also... HAVE YOU ASKED?! Despite what it may feel like, browser vendors eagerly want to know what you’re trying to do that you can’t. If there are missing features, be loud, be nice, but in my experience it’s worth making your desires known. - -Also, it doesn’t take much to wrap a web view and add hooks into the native OS that your JavaScript can call to do things that aren’t  _quite_  possible yet. - -But that also brings me to another point, in terms of investing, as the world’s greatest hockey player said: - -> Skate to where the puck is going, not where it has been. -> -> – Wayne Gretzky - -Based on what I’ve outlined thus far, it could be riskier to build an entire application for a whole other platform that you may ultimately not need than to at least exhaust your options and see what you can do with the Web first. - -So to line ’em up in terms of PWA support: - -* Chrome: yup - -* Firefox: yup - -* Opera: yup - -* Samsung Internet ([the 3rd largest browser, surprise!][17]): yup - -* Microsoft: huge public commitment - -* Safari: at least implementing Service Worker - - [### Ask them to add your feature!][45] - -Sure, it may not happen, and it may take a long time, but  _at least_  try. Remember, developers have a lot more influence over platforms than we typically realize. Make. your. voice. heard. - - [### Side note about React-Native/Expo][46] - -These projects are run by awesome people, and the tech is incredibly impressive. If you’re Facebook and you’re trying to consolidate your development efforts, they make sense, for the same basic reasons it made sense for Facebook to create its own [VM for running PHP][26]. They have realities to deal with at a scale that most of us will never have to deal with. Personally, I’m not Facebook. - -As a side note, I find it interesting that building native apps, and having as many people do that as possible, plays nicely into their advertising competition with Google. - -It just so happens that Google is well positioned to capitalize off of people using the Web.
Conversely, I’m fairly certain Facebook wouldn’t mind that ad revenue  _not_  going to Google. Facebook, seemingly, would much rather  _be_  your web than be part of the Web. - -Anyway, all that aside, for me it’s also about investing well. - -By building a native app you’re volunteering for a 30% app-store tax. Plus, like we covered earlier, odds are that no one wants to go download your app. Also, though it seems incredibly unlikely, I feel compelled to point out that in terms of "openness" Apple’s App Store is very clearly  _anything_  but that. Apple could decide one day that they really don’t like how it’s possible to essentially circumvent their normal update/review process when you use Expo. One day they could just decide to reject all React Native apps. I really don’t think they would, because of the uproar it would cause. I’m simply pointing out that it’s  _their_  platform and they would have  _every_  right to do so. - - [### So is it all about investing for your own gain?][47] - -So far, I’ve presented all this from kind of a cold, heartless investor perspective: getting the most for your time. - -But that’s not the whole story, is it? - -Life isn’t all about me. Life isn’t all about us. - -I want to invest in platforms that increase opportunities **for others**. Personally, I really hope the next friggin’ Mark Zuckerberg isn’t an Ivy League dude. Wouldn’t it be amazing if instead the next huge success was, I don’t know, perhaps a young woman in Nairobi or something? The thing is, if owning an iPhone is a prerequisite for building apps, it  _dramatically_  decreases the odds of something like that happening. I feel like the Web really is the closest thing we have to a level playing field. - -**I want to invest in and improve  _that_  platform!** - -This quote really struck me and has stayed with me when thinking about these things: - -> If you’re the kind of person who tends to succeed in what you start, -> -> changing what you start could be  _the most extraordinary thing_  you could do. -> -> – Anand Giridharadas - -Thanks for your valuable attention ❤️. I’ve presented the facts as I see them and I’ve done my best not to "should on you." - -Ultimately though, no matter how prepared we are or how much research we’ve done, investing is always a bit of a gamble. - -So I guess the only thing left to say is: - -> > I’m all in.
- --------------------------------------------------------------------------------- - -via: https://joreteg.com/blog/betting-on-the-web - -作者:[Joreteg][a] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]:https://joreteg.com/ -[1]:https://twitter.com/davidbrunelle -[2]:https://twitter.com/intent/tweet?in_reply_to=905931990444244995 -[3]:https://twitter.com/intent/retweet?tweet_id=905931990444244995 -[4]:https://twitter.com/intent/like?tweet_id=905931990444244995 -[5]:https://twitter.com/davidbrunelle/status/905931990444244995/photo/1 -[6]:https://twitter.com/davidbrunelle -[7]:https://twitter.com/Starbucks -[8]:https://t.co/tEUXM8BLgP -[9]:https://twitter.com/davidbrunelle/status/905931990444244995 -[10]:https://twitter.com/davidbrunelle/status/905931990444244995/photo/1 -[11]:https://support.twitter.com/articles/20175256 -[12]:https://2017.coldfront.co/ -[13]:https://qz.com/253618/most-smartphone-users-download-zero-apps-per-month/ -[14]:http://fortune.com/2016/05/19/app-economy/ -[15]:https://twitter.com/AJStacy06/status/857628546507968512 -[16]:http://consulting.joreteg.com/ -[17]:https://medium.com/samsung-internet-dev/think-you-know-the-top-web-browsers-458a0a070175 -[18]:https://preview.starbucks.com/ -[19]:https://google.github.io/physical-web/ -[20]:https://blogs.windows.com/msedgedev/2016/07/08/the-progress-of-web-apps/ -[21]:https://alistapart.com/article/yes-that-web-project-should-be-a-pwa -[22]:https://joreteg.com/blog/installing-web-apps-for-real -[23]:https://webkit.org/status/#specification-service-workers -[24]:https://jakearchibald.github.io/isserviceworkerready/ -[25]:http://wpt.fyi/ -[26]:http://hhvm.com/ -[27]:https://joreteg.com/blog/betting-on-the-web -[28]:https://joreteg.com/blog/betting-on-the-web#quotso-what-whats-your-pointquot -[29]:https://joreteg.com/blog/betting-on-the-web#with-all-investing-comes -[30]:https://joreteg.com/blog/betting-on-the-web#are-we-building-for-the-right-platform -[31]:https://joreteg.com/blog/betting-on-the-web#are-we-building-the-right-software -[32]:https://joreteg.com/blog/betting-on-the-web#should-we-be-building-apps-at-all -[33]:https://joreteg.com/blog/betting-on-the-web#if-apps-are-so-great-why-are-we-so-quotapped-outquot -[34]:https://joreteg.com/blog/betting-on-the-web#what-sucks-about-apps -[35]:https://joreteg.com/blog/betting-on-the-web#enter-pwas -[36]:https://joreteg.com/blog/betting-on-the-web#quick-pwa-primer -[37]:https://joreteg.com/blog/betting-on-the-web#lets-talk-internet-of-things -[38]:https://joreteg.com/blog/betting-on-the-web#forward-looking-statements -[39]:https://joreteg.com/blog/betting-on-the-web#1-pwa-only-startups -[40]:https://joreteg.com/blog/betting-on-the-web#2-pwas-listed-in-app-stores -[41]:https://joreteg.com/blog/betting-on-the-web#3-pwas-on-ios -[42]:https://joreteg.com/blog/betting-on-the-web#4-pwas-everywhere -[43]:https://joreteg.com/blog/betting-on-the-web#for-those-who-think-pwas-are-just-a-google-thing -[44]:https://joreteg.com/blog/betting-on-the-web#what-about-feature-quotxquot-on-platform-quotyquot-that-we-need -[45]:https://joreteg.com/blog/betting-on-the-web#ask-them-add-your-feature -[46]:https://joreteg.com/blog/betting-on-the-web#side-note-about-react-nativeexpo -[47]:https://joreteg.com/blog/betting-on-the-web#so-is-it-all-about-investing-for-your-own-gain diff --git a/sources/talk/20181024 Why it matters that Microsoft released old versions of MS-DOS as 
open source.md b/sources/talk/20181024 Why it matters that Microsoft released old versions of MS-DOS as open source.md deleted file mode 100644 index 3129e4b6f0..0000000000 --- a/sources/talk/20181024 Why it matters that Microsoft released old versions of MS-DOS as open source.md +++ /dev/null @@ -1,74 +0,0 @@ -Why it matters that Microsoft released old versions of MS-DOS as open source -====== - -Microsoft's release of MS-DOS 1.25 and 2.0 on GitHub adopts an open source license that's compatible with the GNU GPL. -![](https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/open_business_sign_store.jpg?itok=g4QibRqg) - -One open source software project I work on is the FreeDOS Project. It's a complete, free, DOS-compatible operating system that you can use to play classic DOS games, run legacy business software, or develop embedded systems. Any program that works on MS-DOS should also run on FreeDOS. - -So I took notice when Microsoft recently released the source code to MS-DOS 1.25 and 2.0 via a [GitHub repository][1]. This is a huge step for Microsoft, and I’d like to briefly explain why it is significant. - -### MS-DOS as open source software - -Some open source fans may recall that this is not the first time Microsoft has officially released the MS-DOS source code. On March 25, 2014, Microsoft posted the source code to MS-DOS 1.1 and 2.0 via the [Computer History Museum][2]. Unfortunately, this source code was released under a “look but do not touch” license that limited what you could do with it. According to the license from the 2014 source code release, users were barred from re-using it in other projects and could use it “[solely for non-commercial research, experimentation, and educational purposes.][3]” - -The museum license wasn’t friendly to open source software, and as a result, the MS-DOS source code was ignored. On the FreeDOS Project, we interpreted the “look but do not touch” license as a potential risk to FreeDOS, so we decided developers who had viewed the MS-DOS source code could not contribute to FreeDOS. - -But Microsoft’s recent MS-DOS source code release represents a significant change. This MS-DOS source code uses the MIT License (also called the Expat License). Quoting Microsoft’s [LICENSE.md][4] file on GitHub: - -> ## MS-DOS v1.25 and v2.0 Source Code -> -> Copyright © Microsoft Corporation. -> -> All rights reserved. -> -> MIT License. -> -> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the Software), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: -> -> The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. -> -> THE SOFTWARE IS PROVIDED AS IS, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- -If that text looks familiar to you, it is because that’s the same text as the MIT License recognized by the [Open Source Initiative][5]. It’s also the same as the Expat License recognized by the [Free Software Foundation][6]. - -The Free Software Foundation (via GNU) says the Expat License is compatible with the [GNU General Public License][7]. Specifically, GNU describes the Expat License as “a lax, permissive non-copyleft free software license, compatible with the GNU GPL. It is sometimes ambiguously referred to as the MIT License.” Also according to GNU, when they say a license is [compatible with the GNU GPL][8], “you can combine code released under the other license [MIT/Expat License] with code released under the GNU GPL in one larger program.” - -Microsoft’s use of the MIT/Expat License for the original MS-DOS source code is significant because the license makes it not only open source software but also free software. - -### What does it mean? - -This is great, but there’s a practical side to the source code release. You might think, “If Microsoft has released the MS-DOS source code under a license compatible with the GNU GPL, will that help FreeDOS?” - -Not really. Here's why: FreeDOS started from an original source code base, independent of MS-DOS. Certain functions and behaviors of MS-DOS were identified and documented in the comprehensive [Interrupt List by Ralf Brown][9], and we provided MS-DOS compatibility in FreeDOS by referencing the Interrupt List. But many significant fundamental technical differences remain between FreeDOS and MS-DOS. For example, FreeDOS uses a completely different memory structure and memory layout. You can’t simply forklift MS-DOS source code into FreeDOS and expect it to work. The code assumptions are quite different. - -There’s also the simple matter that these are very old versions of MS-DOS. For example, MS-DOS 2.0 was the first version to support directories and redirection. But these versions of MS-DOS did not yet include more advanced features, including networking, CD-ROM support, and ’386 support such as EMM386. These features have been standard in FreeDOS for a long time. - -So the MS-DOS source code release is interesting, but FreeDOS would not be able to reuse this code for any modern features anyway. FreeDOS has already surpassed these versions of MS-DOS in functionality and features. - -### Congratulations - -Still, it’s important to recognize the big step that Microsoft has taken in releasing these versions of MS-DOS as open source software. The new MS-DOS source code release on GitHub does away with the restrictive license from 2014 and adopts a recognized open source software license that is compatible with the GNU GPL. Congratulations to Microsoft for releasing MS-DOS 1.25 and 2.0 under an open source license!
- - -------------------------------------------------------------------------------- - -via: https://opensource.com/article/18/10/microsoft-open-source-old-versions-ms-dos - -作者:[Jim Hall][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://opensource.com/users/jim-hall -[b]: https://github.com/lujun9972 -[1]: https://github.com/Microsoft/MS-DOS -[2]: http://www.computerhistory.org/press/ms-source-code.html -[3]: http://www.computerhistory.org/atchm/microsoft-research-license-agreement-msdos-v1-1-v2-0/ -[4]: https://github.com/Microsoft/MS-DOS/blob/master/LICENSE.md -[5]: https://opensource.org/licenses/MIT -[6]: https://directory.fsf.org/wiki/License:Expat -[7]: https://www.gnu.org/licenses/license-list.en.html#Expat -[8]: https://www.gnu.org/licenses/gpl-faq.html#WhatDoesCompatMean -[9]: http://www.cs.cmu.edu/~ralf/files.html diff --git a/sources/talk/20190108 NSA to Open Source its Reverse Engineering Tool GHIDRA.md b/sources/talk/20190108 NSA to Open Source its Reverse Engineering Tool GHIDRA.md deleted file mode 100644 index 78922f9525..0000000000 --- a/sources/talk/20190108 NSA to Open Source its Reverse Engineering Tool GHIDRA.md +++ /dev/null @@ -1,89 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (NSA to Open Source its Reverse Engineering Tool GHIDRA) -[#]: via: (https://itsfoss.com/nsa-ghidra-open-source) -[#]: author: (Ankush Das https://itsfoss.com/author/ankush/) - -NSA to Open Source its Reverse Engineering Tool GHIDRA -====== - -GHIDRA, NSA’s reverse engineering tool, is getting ready for a free public release this March at the [RSA Conference 2019][1] to be held in San Francisco. - -The National Security Agency (NSA) did not officially announce this. However, senior NSA advisor Robert Joyce’s [session description][2] on the official RSA conference website revealed it before any official statement or announcement. - -Here’s what it mentioned: - -![][3] -Image Credits: [Twitter][4] - -In case the text in the image isn’t properly visible, let me quote the description here: - -> NSA has developed a software reverse engineering framework known as GHIDRA, which will be demonstrated for the first time at RSAC 2019. An interactive GUI capability enables reverse engineers to leverage an integrated set of features that run on a variety of platforms including Windows, Mac OS, and Linux and supports a variety of processor instruction sets. The GHIDRA platform includes all the features expected in high-end commercial tools, with new and expanded functionality NSA uniquely developed, and will be released for free public use at RSA. - -### What is GHIDRA? - -GHIDRA is a software reverse engineering framework developed by the [NSA][5] that has been in use by the agency for more than a decade. - -Basically, a software reverse engineering tool helps to dig up the source code of a proprietary program, which further gives you the ability to detect virus threats or potential bugs. You should read how [reverse engineering][6] works to know more. - -The tool is written in Java, and quite a few people have compared it to high-end commercial reverse engineering tools like [IDA][7]. - -A [Reddit thread][8] has a more detailed discussion, where you will find some ex-employees giving a good amount of detail ahead of the tool’s availability.
- -![NSA open source][9] - -### GHIDRA was a secret tool, so how do we know about it? - -The existence of the tool was uncovered in a series of leaks by [WikiLeaks][10] as part of the [CIA’s Vault 7 documents][11]. - -### Is it going to be open source? - -We do think that the reverse engineering tool to be released could be made open source. There is no official confirmation mentioning “open source”, but a lot of people do believe that the NSA is definitely targeting the open source community to help improve their tool while also reducing their effort to maintain it. - -This way, the tool can remain free, and the open source community can help improve GHIDRA as well. - -You can also check out the existing [Vault 7 document at WikiLeaks][12] to come up with your own prediction. - -### Is NSA doing a good job here? - -The reverse engineering tool is going to be available for Windows, Linux, and Mac OS for free. - -Of course, we care about the Linux platform here, and a free GHIDRA could be a very good option for people who do not want to pay for, or cannot afford, a thousand-dollar license for a reverse engineering tool with best-in-class features. - -### Wrapping Up - -If GHIDRA becomes open source and is available for free, it would definitely help a lot of researchers and students, and on the other side, competitors will be forced to adjust their pricing. - -What are your thoughts about it? Is it a good thing? What do you think about the tool going open source? Let us know what you think in the comments below. - -![][13] - --------------------------------------------------------------------------------- - -via: https://itsfoss.com/nsa-ghidra-open-source - -作者:[Ankush Das][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://itsfoss.com/author/ankush/ -[b]: https://github.com/lujun9972 -[1]: https://www.rsaconference.com/events/us19 -[2]: https://www.rsaconference.com/events/us19/agenda/sessions/16608-come-get-your-free-nsa-reverse-engineering-tool -[3]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2019/01/come-get-your-free-nsa.jpg?fit=800%2C337&ssl=1 -[4]: https://twitter.com/0xffff0800/status/1080909700701405184 -[5]: http://nsa.gov -[6]: https://en.wikipedia.org/wiki/Reverse_engineering -[7]: https://en.wikipedia.org/wiki/Interactive_Disassembler -[8]: https://www.reddit.com/r/ReverseEngineering/comments/ace2m3/come_get_your_free_nsa_reverse_engineering_tool/ -[9]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2019/01/nsa-open-source.jpeg?resize=800%2C450&ssl=1 -[10]: https://www.wikileaks.org/ -[11]: https://en.wikipedia.org/wiki/Vault_7 -[12]: https://wikileaks.org/ciav7p1/cms/page_9536070.html -[13]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2019/01/nsa-open-source.jpeg?fit=800%2C450&ssl=1 diff --git a/sources/talk/20190115 The Art of Unix Programming, reformatted.md b/sources/talk/20190115 The Art of Unix Programming, reformatted.md deleted file mode 100644 index 73ffb4c955..0000000000 --- a/sources/talk/20190115 The Art of Unix Programming, reformatted.md +++ /dev/null @@ -1,54 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (The Art of Unix Programming, reformatted) -[#]: via: (https://arp242.net/weblog/the-art-of-unix-programming.html) -[#]: author: (Martin Tournoij https://arp242.net/) - -The Art of Unix Programming, reformatted -====== - 
-tl;dr: I reformatted Eric S. Raymond’s The Art of Unix Programming for readability; [read it here][1]. - -I recently wanted to look up a quote for an article I was writing, and I was fairly sure I had read it in The Art of Unix Programming. Eric S. Raymond (esr) has [kindly published it online][2], but it’s difficult to search as it’s distributed over many different pages, and the formatting is not exactly conducive to readability. - -I `wget --mirror`’d it to my drive, and started out with a simple [script][3] to join everything to a single page, but eventually ended up rewriting a lot of the HTML from crappy 2003 DocBook-generated tag soup to more modern standards, and I slapped on some CSS to make it more readable. - -The results are fairly nice, and it should work well in any version of any browser (I haven’t tested Internet Explorer and Edge, lacking access to a Windows computer, but I’m reasonably confident it should work without issues; if not, see the bottom of this page on how to get in touch). - -The HTML could be simplified further (so rms can read it too), but dealing with 360k lines of ill-formatted HTML is not exactly my idea of fun, so this will have to do for now. - -The entire page is self-contained. You can save it to your laptop or mobile phone and read it on a plane or whatnot. - -Why spend so much effort on an IT book from 2003? I think a substantial part of the book still applies very much today, for all programmers (not just Unix programmers). For example, the [Basics of the Unix Philosophy][4] was good advice in 1972, is still good advice in 2019, and will continue to be good advice well into the future. - -Other parts have aged less gracefully; for example “since 2000, practice has been moving toward use of XML-DocBook as a documentation interchange format” doesn’t really represent the current state of things, and the [Data File Metaformats][5] section mentions XML and INI, but not JSON or YAML (as they weren’t invented until after the book was written). - -I find this adds, rather than detracts. It makes for an interesting window into the past. The downside is that the uninitiated will have a bit of a hard time distinguishing between the good and outdated parts; as a rule of thumb: if it talks about abstract concepts, it probably still applies today. If it talks about specific software, it may be outdated. - -I toyed with the idea of updating or annotating the text, but the license doesn’t allow derivative works, so that’s not going to happen. Perhaps I’ll email esr and ask nicely. Another project, for another weekend :-) - -You can mail me at [martin@arp242.net][6] or [create a GitHub issue][7] for feedback, questions, etc.
- - -------------------------------------------------------------------------------- - -via: https://arp242.net/weblog/the-art-of-unix-programming.html - -作者:[Martin Tournoij][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://arp242.net/ -[b]: https://github.com/lujun9972 -[1]: https://arp242.net/the-art-of-unix-programming/ -[2]: http://catb.org/~esr/writings/taoup/html/ -[3]: https://arp242.net/the-art-of-unix-programming/fix-taoup.py -[4]: https://arp242.net/the-art-of-unix-programming#ch01s06 -[5]: https://arp242.net/the-art-of-unix-programming/#ch05s02 -[6]: mailto:martin@arp242.net -[7]: https://github.com/Carpetsmoker/arp242.net/issues/new diff --git a/sources/talk/20190223 No- Ubuntu is NOT Replacing Apt with Snap.md b/sources/talk/20190223 No- Ubuntu is NOT Replacing Apt with Snap.md deleted file mode 100644 index bb7dd14943..0000000000 --- a/sources/talk/20190223 No- Ubuntu is NOT Replacing Apt with Snap.md +++ /dev/null @@ -1,76 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (No! Ubuntu is NOT Replacing Apt with Snap) -[#]: via: (https://itsfoss.com/ubuntu-snap-replaces-apt-blueprint/) -[#]: author: (Abhishek Prakash https://itsfoss.com/author/abhishek/) - -No! Ubuntu is NOT Replacing Apt with Snap -====== - -Stop believing the rumors that Ubuntu is planning to replace Apt with Snap in the [Ubuntu 19.04 release][1]. These are only rumors. - -![Snap replacing apt rumors][2] - -Don’t get what I am talking about? Let me give you some context. - -There is a ‘blueprint’ on Ubuntu’s Launchpad website, titled ‘Replace APT with snap as default package manager’. It talks about replacing Apt (the package manager at the heart of Debian) with Snap (a new packaging system by Ubuntu). - -> Thanks to Snap, the need for APT is disappearing, fast… why don’t we use snap at the system level? - -The post further says “Imagine, for example, being able to run “sudo snap install cosmic” to upgrade to the current release, “sudo snap install --beta disco” (in March) to upgrade to a beta release, or, for that matter, “sudo snap install --edge disco” to upgrade to a pre-beta release. It would make the whole process much easier, and updates could simply be delivered as updates to the corresponding snap, which could then just be pushed to the repositories and there it is. This way, instead of having a separate release updater, it would be possible to A, run all system updates completely and silently in the background to avoid nagging the user (a la Chrome OS), and B, offer release upgrades in the GNOME software store, Mac-style, as banners, so the user can install them easily. It would make the user experience both more consistent and even more user-friendly than it currently is.” - -It might sound good and promising, and if you take a look at [this link][3], even you might start believing the rumor. Why? Because at the bottom of the blueprint information, it lists Ubuntu founder Mark Shuttleworth as the approver. - -![Apt being replaced with Snap blueprint rumor][4]Mark Shuttleworth’s name adds to the confusion - -The rumor got fanned when the Switch to Linux YouTube channel covered it. You can watch the video from around 11:30.
- - - -When this ‘news’ was brought to my attention, I reached out to Alan Pope of Canonical and asked him if he or his colleagues at Canonical (Ubuntu’s parent company) could confirm it. - -Alan clarified that the so-called blueprint was not associated with the official Ubuntu team. It was created as a proposal by some community member not affiliated with Ubuntu. - -> That’s not anything official. Some random community person made it. Anyone can write a blueprint. -> -> Alan Pope, Canonical - -Alan further elaborated that anyone can create such blueprints and tag Mark Shuttleworth or other Ubuntu members in it. Just because Mark’s name was listed as the approver, it doesn’t mean he had approved the idea. - -Canonical has no such plans to replace Apt with Snap. It’s not as simple as the blueprint in question suggests. - -After talking with Alan, I decided to not write about this topic because I don’t want to fan baseless rumors and confuse people. - -Unfortunately, the ‘replace Apt with Snap’ blueprint is still being shared on various Ubuntu- and Linux-related groups and forums. Alan had to publicly dismiss these rumors in a series of tweets: - -> Seen this [#Ubuntu][5] blueprint being shared around the internet. It's not official, not a thing we're doing. Just because someone made a blueprint, doesn't make it fact. -> -> — Alan Pope 🇪🇺🇬🇧 (@popey) [February 23, 2019][6] - -I don’t want you, the It’s FOSS reader, to fall for such silly rumors, so I quickly penned this article. - -If you come across the ‘apt being replaced with snap’ discussion, you may tell people that it’s not true and provide them with this link as a reference. - - -------------------------------------------------------------------------------- - -via: https://itsfoss.com/ubuntu-snap-replaces-apt-blueprint/ - -作者:[Abhishek Prakash][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://itsfoss.com/author/abhishek/ -[b]: https://github.com/lujun9972 -[1]: https://itsfoss.com/ubuntu-19-04-release-features/ -[2]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2019/02/snap-replacing-apt.png?resize=800%2C450&ssl=1 -[3]: https://blueprints.launchpad.net/ubuntu/+spec/package-management-default-snap -[4]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2019/02/apt-snap-blueprint.jpg?ssl=1 -[5]: https://twitter.com/hashtag/Ubuntu?src=hash&ref_src=twsrc%5Etfw -[6]: https://twitter.com/popey/status/1099238146393468931?ref_src=twsrc%5Etfw diff --git a/sources/talk/20190331 Codecademy vs. The BBC Micro.md b/sources/talk/20190331 Codecademy vs. The BBC Micro.md deleted file mode 100644 index e4720315cc..0000000000 --- a/sources/talk/20190331 Codecademy vs. The BBC Micro.md +++ /dev/null @@ -1,129 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Codecademy vs. The BBC Micro) -[#]: via: (https://twobithistory.org/2019/03/31/bbc-micro.html) -[#]: author: (Two-Bit History https://twobithistory.org) - -Codecademy vs. The BBC Micro -====== - -In the late 1970s, the computer, which for decades had been a mysterious, hulking machine that only did the bidding of corporate overlords, suddenly became something the average person could buy and take home. An enthusiastic minority saw how great this was and rushed to get a computer of their own.
For many more people, the arrival of the microcomputer triggered helpless anxiety about the future. An ad from a magazine at the time promised that a home computer would “give your child an unfair advantage in school.” It showed a boy in a smart blazer and tie eagerly raising his hand to answer a question, while behind him his dim-witted classmates looked on sullenly. The ad and others like it implied that the world was changing quickly and, if you did not immediately learn how to use one of these intimidating new devices, you and your family would be left behind. - -In the UK, this anxiety metastasized into concern at the highest levels of government about the competitiveness of the nation. The 1970s had been, on the whole, an underwhelming decade for Great Britain. Both inflation and unemployment had been high. Meanwhile, a series of strikes put London through blackout after blackout. A government report from 1979 fretted that a failure to keep up with trends in computing technology would “add another factor to our poor industrial performance.”1 The country already seemed to be behind in the computing arena—all the great computer companies were American, while integrated circuits were being assembled in Japan and Taiwan. - -In an audacious move, the BBC, a public service broadcaster funded by the government, decided that it would solve Britain’s national competitiveness problems by helping Britons everywhere overcome their aversion to computers. It launched the _Computer Literacy Project_ , a multi-pronged educational effort that involved several TV series, a few books, a network of support groups, and a specially built microcomputer known as the BBC Micro. The project was so successful that, by 1983, an editor for BYTE Magazine wrote, “compared to the US, proportionally more of Britain’s population is interested in microcomputers.”2 The editor marveled that there were more people at the Fifth Personal Computer World Show in the UK than had been to that year’s West Coast Computer Faire. Over a sixth of Great Britain’s population watched an episode in the first series produced for the _Computer Literacy Project_ and 1.5 million BBC Micros were ultimately sold.3 - -[An archive][1] containing every TV series produced and all the materials published for the _Computer Literacy Project_ was put on the web last year. I’ve had a huge amount of fun watching the TV series and trying to imagine what it would have been like to learn about computing in the early 1980s. But what’s turned out to be more interesting is how computing was _taught_. Today, we still worry about technology leaving people behind. Wealthy tech entrepreneurs and governments spend lots of money trying to teach kids “to code.” We have websites like Codecademy that make use of new technologies to teach coding interactively. One would assume that this approach is more effective than a goofy ’80s TV series. But is it? - -### The Computer Literacy Project - -The microcomputer revolution began in 1975 with the release of [the Altair 8800][2]. Only two years later, the Apple II, TRS-80, and Commodore PET had all been released. Sales of the new computers exploded. In 1978, the BBC explored the dramatic societal changes these new machines were sure to bring in a documentary called “Now the Chips Are Down.” - -The documentary was alarming.
Within the first five minutes, the narrator explains that microelectronics will “totally revolutionize our way of life.” As eerie synthesizer music plays, and green pulses of electricity dance around a magnified microprocessor on screen, the narrator argues that the new chips are why “Japan is abandoning its ship building, and why our children will grow up without jobs to go to.” The documentary goes on to explore how robots are being used to automate car assembly and how the European watch industry has lost out to digital watch manufacturers in the United States. It castigates the British government for not doing more to prepare the country for a future of mass unemployment. - -The documentary was supposedly shown to the British Cabinet.4 Several government agencies, including the Department of Industry and the Manpower Services Commission, became interested in trying to raise awareness about computers among the British public. The Manpower Services Commission provided funds for a team from the BBC’s education division to travel to Japan, the United States, and other countries on a fact-finding trip. This research team produced a report that cataloged the ways in which microelectronics would indeed mean major changes for industrial manufacturing, labor relations, and office work. In late 1979, it was decided that the BBC should make a ten-part TV series that would help regular Britons “learn how to use and control computers and not feel dominated by them.”5 The project eventually became a multimedia endeavor similar to the _Adult Literacy Project_ , an earlier BBC undertaking involving both a TV series and supplemental courses that helped two million people improve their reading. - -The producers behind the _Computer Literacy Project_ were keen for the TV series to feature “hands-on” examples that viewers could try on their own if they had a microcomputer at home. These examples would have to be in BASIC, since that was the language (really the entire shell) used on almost all microcomputers. But the producers faced a thorny problem: Microcomputer manufacturers all had their own dialects of BASIC, so no matter which dialect they picked, they would inevitably alienate some large fraction of their audience. The only real solution was to create a new BASIC—BBC BASIC—and a microcomputer to go along with it. Members of the British public would be able to buy the new microcomputer and follow along without worrying about differences in software or hardware. - -The TV producers and presenters at the BBC were not capable of building a microcomputer on their own. So they put together a specification for the computer they had in mind and invited British microcomputer companies to propose a new machine that met the requirements. The specification called for a relatively powerful computer because the BBC producers felt that the machine should be able to run real, useful applications. Technical consultants for the _Computer Literacy Project_ also suggested that, if it had to be a BASIC dialect that was going to be taught to the entire nation, then it had better be a good one. (They may not have phrased it exactly that way, but I bet that’s what they were thinking.) BBC BASIC would make up for some of BASIC’s usual shortcomings by allowing for recursion and local variables.6 - -The BBC eventually decided that a Cambridge-based company called Acorn Computers would make the BBC Micro. In choosing Acorn, the BBC passed over a proposal from Clive Sinclair, who ran a company called Sinclair Research. 
Sinclair Research had brought mass-market microcomputing to the UK in 1980 with the Sinclair ZX80. Sinclair’s new computer, the ZX81, was cheap but not powerful enough for the BBC’s purposes. Acorn’s new prototype computer, known internally as the Proton, would be more expensive but more powerful and expandable. The BBC was impressed. The Proton was never marketed or sold as the Proton because it was instead released in December 1981 as the BBC Micro, also affectionately called “The Beeb.” You could get a 16k version for £235 and a 32k version for £335. - -In 1980, Acorn was an underdog in the British computing industry. But the BBC Micro helped establish the company’s legacy. Today, the world’s most popular microprocessor instruction set is the ARM architecture. “ARM” now stands for “Advanced RISC Machine,” but originally it stood for “Acorn RISC Machine.” ARM Holdings, the company behind the architecture, was spun out from Acorn in 1990. - -![Picture of the BBC Micro.][3] _A bad picture of a BBC Micro, taken by me at the Computer History Museum -in Mountain View, California._ - -### The Computer Programme - -A dozen different TV series were eventually produced as part of the _Computer Literacy Project_ , but the first of them was a ten-part series known as _The Computer Programme_. The series was broadcast over ten weeks at the beginning of 1982. A million people watched each week-night broadcast of the show; a quarter million watched the reruns on Sunday and Monday afternoon. - -The show was hosted by two presenters, Chris Serle and Ian McNaught-Davis. Serle plays the neophyte while McNaught-Davis, who had professional experience programming mainframe computers, plays the expert. This was an inspired setup. It made for [awkward transitions][4]—Serle often goes directly from a conversation with McNaught-Davis to a bit of walk-and-talk narration delivered to the camera, and you can’t help but wonder whether McNaught-Davis is still standing there out of frame or what. But it meant that Serle could voice the concerns that the audience would surely have. He can look intimidated by a screenful of BASIC and can ask questions like, “What do all these dollar signs mean?” At several points during the show, Serle and McNaught-Davis sit down in front of a computer and essentially pair program, with McNaught-Davis providing hints here and there while Serle tries to figure it out. It would have been much less relatable if the show had been presented by a single, all-knowing narrator. - -The show also made an effort to demonstrate the many practical applications of computing in the lives of regular people. By the early 1980s, the home computer had already begun to be associated with young boys and video games. The producers behind _The Computer Programme_ sought to avoid interviewing “impressively competent youngsters,” as that was likely “to increase the anxieties of older viewers,” a demographic that the show was trying to attract to computing.7 In the first episode of the series, Gill Nevill, the show’s “on location” reporter, interviews a woman that has bought a Commodore PET to help manage her sweet shop. The woman (her name is Phyllis) looks to be 60-something years old, yet she has no trouble using the computer to do her accounting and has even started using her PET to do computer work for other businesses, which sounds like the beginning of a promising freelance career. 
Phyllis says that she wouldn’t mind if the computer work grew to replace her sweet shop business since she enjoys the computer work more. This interview could instead have been an interview with a teenager about how he had modified _Breakout_ to be faster and more challenging. But that would have been encouraging to almost nobody. On the other hand, if Phyllis, of all people, can use a computer, then surely you can too. - -While the show features lots of BASIC programming, what it really wants to teach its audience is how computing works in general. The show explains these general principles with analogies. In the second episode, there is an extended discussion of the Jacquard loom, which accomplishes two things. First, it illustrates that computers are not based only on magical technology invented yesterday—some of the foundational principles of computing go back two hundred years and are about as simple as the idea that you can punch holes in card to control a weaving machine. Second, the interlacing of warp and weft threads is used to demonstrate how a binary choice (does the weft thread go above or below the warp thread?) is enough, when repeated over and over, to produce enormous variation. This segues, of course, into a discussion of how information can be stored using binary digits. - -Later in the show there is a section about a steam organ that plays music encoded in a long, segmented roll of punched card. This time the analogy is used to explain subroutines in BASIC. Serle and McNaught-Davis lay out the whole roll of punched card on the floor in the studio, then point out the segments where it looks like a refrain is being repeated. McNaught-Davis explains that a subroutine is what you would get if you cut out those repeated segments of card and somehow added an instruction to go back to the original segment that played the refrain for the first time. This is a brilliant explanation and probably one that stuck around in people’s minds for a long time afterward. - -I’ve picked out only a few examples, but I think in general the show excels at demystifying computers by explaining the principles that computers rely on to function. The show could instead have focused on teaching BASIC, but it did not. This, it turns out, was very much a conscious choice. In a retrospective written in 1983, John Radcliffe, the executive producer of the _Computer Literacy Project_ , wrote the following: - -> If computers were going to be as important as we believed, some genuine understanding of this new subject would be important for everyone, almost as important perhaps as the capacity to read and write. Early ideas, both here and in America, had concentrated on programming as the main route to computer literacy. However, as our thinking progressed, although we recognized the value of “hands-on” experience on personal micros, we began to place less emphasis on programming and more on wider understanding, on relating micros to larger machines, encouraging people to gain experience with a range of applications programs and high-level languages, and relating these to experience in the real world of industry and commerce…. Our belief was that once people had grasped these principles, at their simplest, they would be able to move further forward into the subject. - -Later, Radcliffe writes, in a similar vein: - -> There had been much debate about the main explanatory thrust of the series. 
One school of thought had argued that it was particularly important for the programmes to give advice on the practical details of learning to use a micro. But we had concluded that if the series was to have any sustained educational value, it had to be a way into the real world of computing, through an explanation of computing principles. This would need to be achieved by a combination of studio demonstration on micros, explanation of principles by analogy, and illustration on film of real-life examples of practical applications. Not only micros, but mini computers and mainframes would be shown. - -I love this, particularly the part about mini-computers and mainframes. The producers behind _The Computer Programme_ aimed to help Britons get situated: Where had computing been, and where was it going? What can computers do now, and what might they do in the future? Learning some BASIC was part of answering those questions, but knowing BASIC alone was not seen as enough to make someone computer literate. - -### Computer Literacy Today - -If you google “learn to code,” the first result you see is a link to Codecademy’s website. If there is a modern equivalent to the _Computer Literacy Project_ , something with the same reach and similar aims, then it is Codecademy. - -“Learn to code” is Codecademy’s tagline. I don’t think I’m the first person to point this out—in fact, I probably read this somewhere and I’m now ripping it off—but there’s something revealing about using the word “code” instead of “program.” It suggests that the important thing you are learning is how to decode the code, how to look at a screen’s worth of Python and not have your eyes glaze over. I can understand why to the average person this seems like the main hurdle to becoming a professional programmer. Professional programmers spend all day looking at computer monitors covered in gobbledygook, so, if I want to become a professional programmer, I better make sure I can decipher the gobbledygook. But dealing with syntax is not the most challenging part of being a programmer, and it quickly becomes almost irrelevant in the face of much bigger obstacles. Also, armed only with knowledge of a programming language’s syntax, you may be able to _read_ code but you won’t be able to _write_ code to solve a novel problem. - -I recently went through Codecademy’s “Code Foundations” course, which is the course that the site recommends you take if you are interested in programming (as opposed to web development or data science) and have never done any programming before. There are a few lessons in there about the history of computer science, but they are perfunctory and poorly researched. (Thank heavens for [this noble internet vigilante][5], who pointed out a particularly egregious error.) The main focus of the course is teaching you about the common structural elements of programming languages: variables, functions, control flow, loops. In other words, the course focuses on what you would need to know to start seeing patterns in the gobbledygook. - -To be fair to Codecademy, they offer other courses that look meatier. But even courses such as their “Computer Science Path” course focus almost exclusively on programming and concepts that can be represented in programs. One might argue that this is the whole point—Codecademy’s main feature is that it gives you little interactive programming lessons with automated feedback. 
There also just isn’t enough room to cover more because there is only so much you can stuff into somebody’s brain in a little automated lesson. But the producers at the BBC tasked with kicking off the _Computer Literacy Project_ also had this problem; they recognized that they were limited by their medium and that “the amount of learning that would take place as a result of the television programmes themselves would be limited.”8 With similar constraints on the volume of information they could convey, they chose to emphasize general principles over learning BASIC. Couldn’t Codecademy replace a lesson or two with an interactive visualization of a Jacquard loom weaving together warp and weft threads? - -I’m banging the drum for “general principles” loudly now, so let me just explain what I think they are and why they are important. There’s a book by J. Clark Scott about computers called _But How Do It Know?_ The title comes from the anecdote that opens the book. A salesman is explaining to a group of people that a thermos can keep hot food hot and cold food cold. A member of the audience, astounded by this new invention, asks, “But how do it know?” The joke of course is that the thermos is not perceiving the temperature of the food and then making a decision—the thermos is just constructed so that cold food inevitably stays cold and hot food inevitably stays hot. People anthropomorphize computers in the same way, believing that computers are digital brains that somehow “choose” to do one thing or another based on the code they are fed. But learning a few things about how computers work, even at a rudimentary level, takes the homunculus out of the machine. That’s why the Jacquard loom is such a good go-to illustration. It may at first seem like an incredible device. It reads punch cards and somehow “knows” to weave the right pattern! The reality is mundane: Each row of holes corresponds to a thread, and where there is a hole in that row the corresponding thread gets lifted. Understanding this may not help you do anything new with computers, but it will give you the confidence that you are not dealing with something magical. We should impart this sense of confidence to beginners as soon as we can. - -Alas, it’s possible that the real problem is that nobody wants to learn about the Jacquard loom. Judging by how Codecademy emphasizes the professional applications of what it teaches, many people probably start using Codecademy because they believe it will help them “level up” their careers. They believe, not unreasonably, that the primary challenge will be understanding the gobbledygook, so they want to “learn to code.” And they want to do it as quickly as possible, in the hour or two they have each night between dinner and collapsing into bed. Codecademy, which after all is a business, gives these people what they are looking for—not some roundabout explanation involving a machine invented in the 18th century. - -The _Computer Literacy Project_ , on the other hand, is what a bunch of producers and civil servants at the BBC thought would be the best way to educate the nation about computing. I admit that it is a bit elitist to suggest we should laud this group of people for teaching the masses what they were incapable of seeking out on their own. But I can’t help but think they got it right. Lots of people first learned about computing using a BBC Micro, and many of these people went on to become successful software developers or game designers. 
[As I’ve written before][6], I suspect learning about computing at a time when computers were relatively simple was a huge advantage. But perhaps another advantage these people had is shows like _The Computer Programme_ , which strove to teach not just programming but also how and why computers can run programs at all. After watching _The Computer Programme_ , you may not understand all the gobbledygook on a computer screen, but you don’t really need to because you know that, whatever the “code” looks like, the computer is always doing the same basic thing. After a course or two on Codecademy, you understand some flavors of gobbledygook, but to you a computer is just a magical machine that somehow turns gobbledygook into running software. That isn’t computer literacy. - -_If you enjoyed this post, more like it come out every four weeks! Follow [@TwoBitHistory][7] on Twitter or subscribe to the [RSS feed][8] to make sure you know when a new post is out._ - -_Previously on TwoBitHistory…_ - -> FINALLY some new damn content, amirite? -> -> Wanted to write an article about how Simula brought us object-oriented programming. It did that, but early Simula also flirted with a different vision for how OOP would work. Wrote about that instead! -> -> — TwoBitHistory (@TwoBitHistory) [February 1, 2019][9] - - 1. Robert Albury and David Allen, Microelectronics, report (1979). ↩ - - 2. Gregg Williams, “Microcomputing, British Style”, Byte Magazine, 40, January 1983, accessed on March 31, 2019, . ↩ - - 3. John Radcliffe, “Towards Computer Literacy,” Computer Literacy Project Archive, 42, accessed March 31, 2019, [https://computer-literacy-project.pilots.bbcconnectedstudio.co.uk/media/Towards Computer Literacy.pdf][10]. ↩ - - 4. David Allen, “About the Computer Literacy Project,” Computer Literacy Project Archive, accessed March 31, 2019, . ↩ - - 5. ibid. ↩ - - 6. Williams, 51. ↩ - - 7. Radcliffe, 11. ↩ - - 8. Radcliffe, 5.
↩ - - - - -------------------------------------------------------------------------------- - -via: https://twobithistory.org/2019/03/31/bbc-micro.html - -作者:[Two-Bit History][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://twobithistory.org -[b]: https://github.com/lujun9972 -[1]: https://computer-literacy-project.pilots.bbcconnectedstudio.co.uk/ -[2]: /2018/07/22/dawn-of-the-microcomputer.html -[3]: /images/beeb.jpg -[4]: https://twitter.com/TwoBitHistory/status/1112372000742404098 -[5]: https://twitter.com/TwoBitHistory/status/1111305774939234304 -[6]: /2018/09/02/learning-basic.html -[7]: https://twitter.com/TwoBitHistory -[8]: https://twobithistory.org/feed.xml -[9]: https://twitter.com/TwoBitHistory/status/1091148050221944832?ref_src=twsrc%5Etfw -[10]: https://computer-literacy-project.pilots.bbcconnectedstudio.co.uk/media/Towards%20Computer%20Literacy.pdf diff --git a/sources/talk/20190417 Cisco Talos details exceptionally dangerous DNS hijacking attack.md b/sources/talk/20190417 Cisco Talos details exceptionally dangerous DNS hijacking attack.md deleted file mode 100644 index db534e4457..0000000000 --- a/sources/talk/20190417 Cisco Talos details exceptionally dangerous DNS hijacking attack.md +++ /dev/null @@ -1,130 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Cisco Talos details exceptionally dangerous DNS hijacking attack) -[#]: via: (https://www.networkworld.com/article/3389747/cisco-talos-details-exceptionally-dangerous-dns-hijacking-attack.html#tk.rss_all) -[#]: author: (Michael Cooney https://www.networkworld.com/author/Michael-Cooney/) - -Cisco Talos details exceptionally dangerous DNS hijacking attack -====== -Cisco Talos says state-sponsored attackers are battering DNS to gain access to sensitive networks and systems -![Peshkova / Getty][1] - -Security experts at Cisco Talos have released a [report detailing][2] what they call the “first known case of a domain name registry organization that was compromised for cyber espionage operations.” - -Talos calls the ongoing cyber threat campaign “Sea Turtle” and says that state-sponsored attackers are abusing DNS to harvest credentials to gain access to sensitive networks and systems in a way that victims are unable to detect, an approach that displays unique knowledge of how to manipulate DNS. - -**More about DNS:** - - * [DNS in the cloud: Why and why not][3] - * [DNS over HTTPS seeks to make internet use more private][4] - * [How to protect your infrastructure from DNS cache poisoning][5] - * [ICANN housecleaning revokes old DNS security key][6] - - - -By obtaining control of victims’ DNS, the attackers can change or falsify any data on the Internet, illicitly modify DNS name records to point users to actor-controlled servers; users visiting those sites would never know, Talos reported. - -DNS, commonly known as the Internet’s phonebook, is part of the global internet infrastructure that translates between familiar names and the numbers computers need to access a website or send an email. - -### Threat to DNS could spread - -At this point Talos says Sea Turtle isn't compromising organizations in the U.S.
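-
-That “phonebook” role is easy to make concrete, and it also shows why record tampering is invisible to victims: applications simply trust whatever answer the resolver returns. Below is a minimal lookup sketch in Python (standard library only; example.com is just a stand-in name):
-
-```
-import socket
-
-def resolve(hostname):
-    """Ask the system resolver (and ultimately DNS) for a host's addresses."""
-    infos = socket.getaddrinfo(hostname, 443, proto=socket.IPPROTO_TCP)
-    # The last field of each entry is the socket address; keep unique IPs.
-    return sorted({info[4][0] for info in infos})
-
-# The application only ever sees the numbers that come back. If an attacker
-# controls the name's records, this same call silently returns the
-# attacker's server, and nothing here looks any different.
-print(resolve("example.com"))
-```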
- -“While this incident is limited to targeting primarily national security organizations in the Middle East and North Africa, and we do not want to overstate the consequences of this specific campaign, we are concerned that the success of this operation will lead to actors more broadly attacking the global DNS system,” Talos stated. - -Talos reports that the ongoing operation likely began as early as January 2017 and has continued through the first quarter of 2019. “Our investigation revealed that approximately 40 different organizations across 13 different countries were compromised during this campaign,” Talos stated. “We assess with high confidence that this activity is being carried out by an advanced, state-sponsored actor that seeks to obtain persistent access to sensitive networks and systems.” - -**[[Prepare to become a Certified Information Systems Security Professional with this comprehensive online course from PluralSight. Now offering a 10-day free trial!][7] ]** - -Talos says the attackers directing the Sea Turtle campaign show signs of being highly sophisticated and have continued their attacks despite public reports of their activities. In most cases, threat actors typically stop or slow down their activities once their campaigns are publicly revealed, suggesting the Sea Turtle actors are unusually brazen and may be difficult to deter going forward, Talos stated. - -In January the Department of Homeland Security (DHS) [issued an alert][8] about this activity, warning that an attacker could redirect user traffic and obtain valid encryption certificates for an organization’s domain names. - -At that time the DHS’s [Cybersecurity and Infrastructure Security Agency][9] said in its [Emergency Directive][9] that it was tracking a series of incidents targeting DNS infrastructure. CISA wrote that it “is aware of multiple executive branch agency domains that were impacted by the tampering campaign and has notified the agencies that maintain them.” - -### DNS hijacking - -CISA said that attackers have managed to intercept and redirect web and mail traffic and could target other networked services. The agency said the attacks start with compromising user credentials of an account that can make changes to DNS records. Then the attacker alters DNS records, like Address, Mail Exchanger, or Name Server records, replacing the legitimate address of the services with an address the attacker controls. - -To achieve their nefarious goals, Talos stated, the Sea Turtle accomplices: - - * Use DNS hijacking via actor-controlled name servers. - * Are aggressive in their pursuit, targeting DNS registries and a number of registrars, including those that manage country-code top-level domains (ccTLD). - - - * Use Let’s Encrypt, Comodo, Sectigo, and self-signed certificates in their man-in-the-middle (MitM) servers to gain the initial round of credentials. - - - * Steal a victim organization’s legitimate SSL certificate and use it on actor-controlled servers. - - - -Such actions also distinguish Sea Turtle from an earlier DNS exploit known as DNSpionage, which [Talos reported][10] on in November 2018. - -Talos noted “with high confidence” that these operations are distinctly different and independent from the operations performed by [DNSpionage.][11] - -In that report, Talos said a DNSpionage campaign utilized two fake, malicious websites containing job postings that were used to compromise targets via malicious Microsoft Office documents with embedded macros.
The malware supported HTTP and DNS communication with the attackers. - -In a separate DNSpionage campaign, the attackers used the same IP address to redirect the DNS of legitimate .gov and private company domains. During each DNS compromise, the actor carefully generated Let's Encrypt certificates for the redirected domains. Let's Encrypt provides X.509 certificates for [Transport Layer Security (TLS)][12] free of charge to the user, Talos said. - -The Sea Turtle campaign gained initial access either by exploiting known vulnerabilities or by sending spear-phishing emails. Talos said it believes the attackers have exploited multiple known common vulnerabilities and exposures (CVEs) to either gain initial access or to move laterally within an affected organization. Talos research further shows that the known exploits used by Sea Turtle include: - - * CVE-2009-1151: PHP code injection vulnerability affecting phpMyAdmin - * CVE-2014-6271: RCE affecting the GNU Bash system, specifically via SMTP (this was part of the Shellshock CVEs) - * CVE-2017-3881: RCE by an unauthenticated user with elevated privileges on Cisco switches - * CVE-2017-6736: Remote code execution (RCE) for the Cisco Integrated Services Router 2811 - * CVE-2017-12617: RCE affecting Apache Tomcat web servers - * CVE-2018-0296: Directory traversal allowing unauthorized access to Cisco Adaptive Security Appliances (ASAs) and firewalls - * CVE-2018-7600: RCE for websites built with Drupal, aka “Drupalgeddon” - - - -“As with any initial access involving a sophisticated actor, we believe this list of CVEs to be incomplete,” Talos stated. “The actor in question can leverage known vulnerabilities as they encounter a new threat surface. This list only represents the observed behavior of the actor, not their complete capabilities.” - -Talos says that the Sea Turtle campaign continues to be highly successful for several reasons. “First, the actors employ a unique approach to gain access to the targeted networks. Most traditional security products such as IDS and IPS systems are not designed to monitor and log DNS requests,” Talos stated. “The threat actors were able to achieve this level of success because the DNS domain space system added security into the equation as an afterthought. Had more ccTLDs implemented security features such as registrar locks, attackers would be unable to redirect the targeted domains.” - -Talos said the attackers also used previously undisclosed techniques such as certificate impersonation. “This technique was successful in part because the SSL certificates were created to provide confidentiality, not integrity. The attackers stole organizations’ SSL certificates associated with security appliances such as [Cisco's Adaptive Security Appliance] to obtain VPN credentials, allowing the actors to gain access to the targeted network, and have long-term persistent access,” Talos stated. - -### Cisco Talos DNS attack mitigation strategy - -To protect against Sea Turtle, Cisco recommends: - - * Use a registry lock service, which will require an out-of-band message before any changes can occur to an organization's DNS records. - * If your registrar does not offer a registry-lock service, Talos recommends implementing multi-factor authentication, such as Duo, to access your organization's DNS records. - * If you suspect you were targeted by this type of intrusion, Talos recommends instituting a network-wide password reset, preferably from a computer on a trusted network.
- * Apply patches, especially on internet-facing machines. Network administrators can monitor passive DNS records on their domains to check for abnormalities. - - - -Join the Network World communities on [Facebook][13] and [LinkedIn][14] to comment on topics that are top of mind. - -------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3389747/cisco-talos-details-exceptionally-dangerous-dns-hijacking-attack.html#tk.rss_all - -作者:[Michael Cooney][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Michael-Cooney/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2019/02/man-in-boat-surrounded-by-sharks_risk_fear_decision_attack_threat_by-peshkova-getty-100786972-large.jpg -[2]: https://blog.talosintelligence.com/2019/04/seaturtle.html -[3]: https://www.networkworld.com/article/3273891/hybrid-cloud/dns-in-the-cloud-why-and-why-not.html -[4]: https://www.networkworld.com/article/3322023/internet/dns-over-https-seeks-to-make-internet-use-more-private.html -[5]: https://www.networkworld.com/article/3298160/internet/how-to-protect-your-infrastructure-from-dns-cache-poisoning.html -[6]: https://www.networkworld.com/article/3331606/security/icann-housecleaning-revokes-old-dns-security-key.html -[7]: https://pluralsight.pxf.io/c/321564/424552/7490?u=https%3A%2F%2Fwww.pluralsight.com%2Fpaths%2Fcertified-information-systems-security-professional-cisspr -[8]: https://www.networkworld.com/article/3336201/batten-down-the-dns-hatches-as-attackers-strike-feds.html -[9]: https://cyber.dhs.gov/ed/19-01/ -[10]: https://blog.talosintelligence.com/2018/11/dnspionage-campaign-targets-middle-east.html -[11]: https://krebsonsecurity.com/tag/dnspionage/ -[12]: https://www.networkworld.com/article/2303073/lan-wan-what-is-transport-layer-security-protocol.html -[13]: https://www.facebook.com/NetworkWorld/ -[14]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190424 Cisco- DNSpionage attack adds new tools, morphs tactics.md b/sources/talk/20190424 Cisco- DNSpionage attack adds new tools, morphs tactics.md deleted file mode 100644 index e202384558..0000000000 --- a/sources/talk/20190424 Cisco- DNSpionage attack adds new tools, morphs tactics.md +++ /dev/null @@ -1,97 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Cisco: DNSpionage attack adds new tools, morphs tactics) -[#]: via: (https://www.networkworld.com/article/3390666/cisco-dnspionage-attack-adds-new-tools-morphs-tactics.html#tk.rss_all) -[#]: author: (Michael Cooney https://www.networkworld.com/author/Michael-Cooney/) - -Cisco: DNSpionage attack adds new tools, morphs tactics -====== -Cisco's Talos security group says DNSpionage tools have been upgraded to be more stealthy -![Calvin Dexter / Getty Images][1] - -The group behind the Domain Name System attacks known as DNSpionage has upped its dark actions with new tools and malware to focus its attacks and better hide its activities. - -Cisco Talos security researchers, who discovered [DNSpionage][2] in November, this week warned of new exploits and capabilities of the nefarious campaign.
- -**More about DNS:** - - * [DNS in the cloud: Why and why not][3] - * [DNS over HTTPS seeks to make internet use more private][4] - * [How to protect your infrastructure from DNS cache poisoning][5] - * [ICANN housecleaning revokes old DNS security key][6] - - - -“The threat actor's ongoing development of DNSpionage malware shows that the attacker continues to find new ways to avoid detection. DNS tunneling is a popular method of exfiltration for some actors and recent examples of DNSpionage show that we must ensure DNS is monitored as closely as an organization's normal proxy or weblogs,” [Talos wrote][7]. “DNS is essentially the phonebook of the internet, and when it is tampered with, it becomes difficult for anyone to discern whether what they are seeing online is legitimate.” - -In Talos’ initial report, researchers said a DNSpionage campaign targeted various businesses in the Middle East as well as United Arab Emirates government domains. It also utilized two malicious websites containing job postings that were used to compromise targets via crafted Microsoft Office documents with embedded macros. The malware supported HTTP and DNS communication with the attackers. - -In a separate DNSpionage campaign, the attackers used the same IP address to redirect the DNS of legitimate .gov and private company domains. During each DNS compromise, the actor carefully generated “Let's Encrypt” certificates for the redirected domains. Let's Encrypt provides X.509 certificates for [Transport Layer Security (TLS)][8] free of charge to the user, Talos said. - -This week Cisco said DNSpionage actors have created a new remote administrative tool that supports HTTP and DNS communication with the attackers' command and control (C2). - -“In our previous post concerning DNSpionage, we showed that the malware author used malicious macros embedded in a Microsoft Word document. In the new sample from Lebanon identified at the end of February, the attacker used an Excel document with a similar macro.” - -**[[Prepare to become a Certified Information Systems Security Professional with this comprehensive online course from PluralSight. Now offering a 10-day free trial!][9] ]** - -Talos wrote: “The malware supports HTTP and DNS communication to the C2 server. The HTTP communication is hidden in the comments in the HTML code. This time, however, the C2 server mimics the GitHub platform instead of Wikipedia. While the DNS communication follows the same method we described in our previous article, the developer added some new features in this latest version and, this time, the actor removed the debug mode.” - -Talos added that the domain used for the C2 campaign is “bizarre.” - -“The previous version of DNSpionage used legitimate-looking domains in an attempt to remain undetected. However, this newer version uses the domain ‘coldfart[.]com,’ which would be easier to spot than the domains of other APT campaigns, which generally try to blend in with traffic more typical of enterprise environments. The domain was also hosted in the U.S., which is unusual for any espionage-style attack.” - -Talos researchers said they discovered that DNSpionage added a reconnaissance phase that ensures the payload is being dropped on specific targets rather than indiscriminately downloaded on every machine.
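-
-Talos’s point that DNS deserves the same scrutiny as proxy or web logs is worth making concrete: tunneled C2 traffic has to smuggle data through query names, which tend to come out longer and more random-looking than ordinary hostnames. The toy heuristic below (Python; the sample query names are invented for illustration, and real detection would weigh far more signals than label length and character mix) sketches the kind of check a DNS log pipeline can run:
-
-```
-import math
-from collections import Counter
-
-def entropy(s):
-    """Shannon entropy in bits per character; random-looking strings score high."""
-    counts = Counter(s)
-    return -sum(c / len(s) * math.log2(c / len(s)) for c in counts.values())
-
-def looks_like_tunneling(query, max_label=30, min_entropy=3.5):
-    """Flag a query whose leftmost label is unusually long or random-looking."""
-    label = query.split(".")[0]
-    return len(label) > max_label or entropy(label) > min_entropy
-
-# Invented examples: an ordinary lookup and one carrying hex-encoded data.
-for q in ["mail.example.com",
-          "4a6f1c9b2ee870d3c1559b4428a1f0beef01.c2.example"]:
-    print(q, "->", "suspicious" if looks_like_tunneling(q) else "ok")
-```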
- -This level of attack also returns information about the workstation environment, including platform-specific information, the name of the domain and the local computer, and information concerning the operating system, Talos wrote. This information is key to helping the malware select only its intended victims and avoid researchers or sandboxes. Again, it shows the actor's improved abilities, as they now fingerprint the victim. - -This new tactic indicates an improved level of sophistication and is likely in response to the significant amount of public interest in the campaign. - -Talos noted that there have been several other public reports of DNSpionage attacks, and in January, the U.S. Department of Homeland Security issued an [alert][10] warning users about this threat activity. - -“In addition to increased reports of threat activity, we have also discovered new evidence that the threat actors behind the DNSpionage campaign continue to change their tactics, likely in an attempt to improve the efficacy of their operations,” Talos stated. - -In April, Cisco Talos identified an undocumented malware developed in .NET. On the analyzed samples, the malware author left two different internal names in plain text: "DropperBackdoor" and "Karkoff." - -“The malware is lightweight compared to other malware due to its small size and allows remote code execution from the C2 server. There is no obfuscation and the code can be easily disassembled,” Talos wrote. - -The Karkoff malware searches for two specific anti-virus platforms, Avira and Avast, and will work around them. - -“The discovery of Karkoff also shows the actor is pivoting and is increasingly attempting to avoid detection while remaining very focused on the Middle Eastern region,” Talos wrote. - -Talos distinguished DNSpionage from another DNS attack method, “[Sea Turtle][11],” which it detailed this month. Sea Turtle involves state-sponsored attackers that are abusing DNS to target organizations and harvest credentials to gain access to sensitive networks and systems in a way that victims are unable to detect. This displays unique knowledge about how to manipulate DNS, Talos stated. - -By obtaining control of victims’ DNS, attackers can change or falsify any data victims receive from the Internet, illicitly modify DNS name records to point users to actor-controlled servers; users visiting those sites would never know, Talos reported. - -“While this incident is limited to targeting primarily national security organizations in the Middle East and North Africa, and we do not want to overstate the consequences of this specific campaign, we are concerned that the success of this operation will lead to actors more broadly attacking the global DNS system,” Talos stated about Sea Turtle. - -Join the Network World communities on [Facebook][12] and [LinkedIn][13] to comment on topics that are top of mind.
- -------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3390666/cisco-dnspionage-attack-adds-new-tools-morphs-tactics.html#tk.rss_all - -作者:[Michael Cooney][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Michael-Cooney/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2019/02/cyber_attack_threat_danger_breach_hack_security_by_calvindexter_gettyimages-860363294_2400x800-100788395-large.jpg -[2]: https://blog.talosintelligence.com/2018/11/dnspionage-campaign-targets-middle-east.html -[3]: https://www.networkworld.com/article/3273891/hybrid-cloud/dns-in-the-cloud-why-and-why-not.html -[4]: https://www.networkworld.com/article/3322023/internet/dns-over-https-seeks-to-make-internet-use-more-private.html -[5]: https://www.networkworld.com/article/3298160/internet/how-to-protect-your-infrastructure-from-dns-cache-poisoning.html -[6]: https://www.networkworld.com/article/3331606/security/icann-housecleaning-revokes-old-dns-security-key.html -[7]: https://blog.talosintelligence.com/2019/04/dnspionage-brings-out-karkoff.html -[8]: https://www.networkworld.com/article/2303073/lan-wan-what-is-transport-layer-security-protocol.html -[9]: https://pluralsight.pxf.io/c/321564/424552/7490?u=https%3A%2F%2Fwww.pluralsight.com%2Fpaths%2Fcertified-information-systems-security-professional-cisspr -[10]: https://www.us-cert.gov/ncas/alerts/AA19-024A -[11]: https://www.networkworld.com/article/3389747/cisco-talos-details-exceptionally-dangerous-dns-hijacking-attack.html -[12]: https://www.facebook.com/NetworkWorld/ -[13]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190501 Vapor IO provides direct, high-speed connections from the edge to AWS.md b/sources/talk/20190501 Vapor IO provides direct, high-speed connections from the edge to AWS.md deleted file mode 100644 index 0ddef36770..0000000000 --- a/sources/talk/20190501 Vapor IO provides direct, high-speed connections from the edge to AWS.md +++ /dev/null @@ -1,69 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Vapor IO provides direct, high-speed connections from the edge to AWS) -[#]: via: (https://www.networkworld.com/article/3391922/vapor-io-provides-direct-high-speed-connections-from-the-edge-to-aws.html#tk.rss_all) -[#]: author: (Andy Patrizio https://www.networkworld.com/author/Andy-Patrizio/) - -Vapor IO provides direct, high-speed connections from the edge to AWS -====== -With a direct fiber line, latency between the edge and the cloud can be dramatically reduced. -![Vapor IO][1] - -Edge computing startup Vapor IO now offers a direct connection between its edge containers and Amazon Web Services (AWS) via a high-speed fiber network link. - -The company said the connection between its Kinetic Edge containers and AWS will be provided by Crown Castle's Cloud Connect fiber network, which uses Amazon Direct Connect Services. This would help reduce network latency by essentially drawing a straight fiber line from Vapor IO's edge computing data centers to Amazon's cloud computing data centers.
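-
-The latency argument is mostly geometry: light in optical fiber propagates at roughly 200,000 km/s (about two-thirds of its speed in a vacuum), so round-trip time scales directly with path length. A back-of-the-envelope sketch in Python (the two distances are made-up examples, not Vapor IO's or Crown Castle's actual routes):
-
-```
-# Light in fiber covers roughly 200 km per millisecond.
-FIBER_KM_PER_MS = 200.0
-
-def propagation_rtt_ms(path_km):
-    """Propagation-only round-trip time over a fiber path, in milliseconds."""
-    return 2 * path_km / FIBER_KM_PER_MS
-
-# Made-up distances: a meandering route through intermediate exchange
-# points versus a direct line to the nearest cloud region.
-for label, km in [("indirect path", 1600), ("direct fiber", 400)]:
-    print(f"{label}: {km} km -> ~{propagation_rtt_ms(km):.0f} ms round trip")
-```
-
-Queuing and processing delays come on top of this, but the physical path is the one component a straighter fiber line removes outright.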
- -“When combined with Crown Castle’s high-speed Cloud Connect fiber, the Kinetic Edge lets AWS developers build applications that span the entire continuum from core to edge. By enabling new classes of applications at the edge, we make it possible for any AWS developer to unlock the next generation of real-time, innovative use cases,” wrote Matt Trifiro, chief marketing officer of Vapor IO, in a [blog post][2]. - -**[ Read also:[What is edge computing and how it’s changing the network][3] ]** - -Vapor IO claims that the connection will lower latency by as much as 75%. “Connecting workloads and data at the Kinetic Edge with workloads and data in centralized AWS data centers makes it possible to build edge applications that leverage the full power of AWS,” wrote Trifiro. - -Developers building applications at the Kinetic Edge will have access to the full suite of AWS cloud computing services, including Amazon Simple Storage Service (Amazon S3), Amazon Elastic Compute Cloud (Amazon EC2), Amazon Virtual Private Cloud (Amazon VPC), and Amazon Relational Database Service (Amazon RDS). - -Crown Castle is the largest provider of shared communications infrastructure in the U.S., with 40,000 cell towers and 60,000 miles of fiber, offering 1Gbps to 10Gbps private fiber connectivity between the Kinetic Edge and AWS. - -AWS Direct Connect is essentially a private connection between Amazon's AWS customers and the AWS data centers, so customers don’t have to route their traffic over the public internet and compete with Netflix and YouTube, for example, for bandwidth. - -### How edge computing works - -The structure of [edge computing][3] is the reverse of the standard internet design. Rather than sending all the data up to central servers, as much processing as possible is done at the edge. This is to reduce the sheer volume of data coming upstream and thus reduce latency. - -With things like smart cars, even if 95% of the data is eliminated, the remaining 5% can still be a lot, so moving it fast is essential. Vapor IO said it will shuttle workloads to Amazon’s USEAST and USWEST data centers, depending on location. - -This shows how the edge is up-ending the traditional internet design and moving more computing outside the traditional data center, although a connection upstream is still important because it allows for rapid movement of necessary data from the edge to the cloud, where it can be stored or processed. - -**More about edge networking:** - - * [How edge networking and IoT will reshape data centers][4] - * [Edge computing best practices][5] - * [How edge computing can help secure the IoT][6] - - - -Join the Network World communities on [Facebook][7] and [LinkedIn][8] to comment on topics that are top of mind.
- -------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3391922/vapor-io-provides-direct-high-speed-connections-from-the-edge-to-aws.html#tk.rss_all - -作者:[Andy Patrizio][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Andy-Patrizio/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2018/09/vapor-io-kinetic-edge-data-center-100771510-large.jpg -[2]: https://www.vapor.io/powering-amazon-web-services-at-the-kinetic-edge/ -[3]: https://www.networkworld.com/article/3224893/what-is-edge-computing-and-how-it-s-changing-the-network.html -[4]: https://www.networkworld.com/article/3291790/data-center/how-edge-networking-and-iot-will-reshape-data-centers.html -[5]: https://www.networkworld.com/article/3331978/lan-wan/edge-computing-best-practices.html -[6]: https://www.networkworld.com/article/3331905/internet-of-things/how-edge-computing-can-help-secure-the-iot.html -[7]: https://www.facebook.com/NetworkWorld/ -[8]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190506 Cisco boosts SD-WAN with multicloud-to-branch access system.md b/sources/talk/20190506 Cisco boosts SD-WAN with multicloud-to-branch access system.md deleted file mode 100644 index c676e5effb..0000000000 --- a/sources/talk/20190506 Cisco boosts SD-WAN with multicloud-to-branch access system.md +++ /dev/null @@ -1,89 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Cisco boosts SD-WAN with multicloud-to-branch access system) -[#]: via: (https://www.networkworld.com/article/3393232/cisco-boosts-sd-wan-with-multicloud-to-branch-access-system.html#tk.rss_all) -[#]: author: (Michael Cooney https://www.networkworld.com/author/Michael-Cooney/) - -Cisco boosts SD-WAN with multicloud-to-branch access system -====== -Cisco's SD-WAN Cloud onRamp for CoLocation can tie branch offices to private data centers in regional corporate headquarters via colocation facilities for shorter, faster, possibly more secure connections. -![istock][1] - -Cisco is looking to give traditional or legacy wide-area network users another reason to move to the [software-defined WAN world][2]. - -The company has rolled out an integrated hardware/software package called SD-WAN Cloud onRamp for CoLocation that lets customers tie distributed multicloud applications back to a local branch office or local private data center. The idea is that a cloud-to-branch link would be shorter, faster and possibly more secure than tying cloud-based applications directly all the way to the data center.
- -**More about SD-WAN** - - * [How to buy SD-WAN technology: Key questions to consider when selecting a supplier][3] - * [How to pick an off-site data-backup method][4] - * [SD-Branch: What it is and why you’ll need it][5] - * [What are the options for securing SD-WAN?][6] - - - -“With Cisco SD-WAN Cloud onRamp for CoLocation operating regionally, connections from colocation facilities to branches are set up and configured according to traffic loads (such as video vs. web browsing vs. email), SLAs (requirements for low latency/jitter), and Quality of Experience for optimizing cloud application performance,” wrote Anand Oswal, senior vice president of engineering in Cisco’s Enterprise Networking Business, in a [blog about the new service][7]. - -According to Oswal, each branch or private data center is equipped with a network interface that provides a secure tunnel to the regional colocation facility. In turn, the Cloud onRamp for CoLocation establishes secure tunnels to SaaS application platforms, multi-cloud platform services, and enterprise data centers, he stated. - -Traffic is securely routed through the Cloud onRamp for CoLocation stack which includes security features such as application-aware firewalls, URL-filtering, intrusion detection/prevention, DNS-layer security, and Advanced Malware Protection (AMP) Threat Grid, as well as other network services such as load-balancing and Wide Area Application Services, Oswal wrote. - -A typical use case for the package is an enterprise that has dozens of distributed branch offices, clustered around major cities, spread over several countries. The goal is to tie each branch to enterprise data center databases, SaaS applications, and multi-cloud services while meeting service level agreements and application quality of experience, Oswal stated. - -“With virtualized Cisco SD-WAN running on regional colocation centers, the branch workforce has access to applications and data residing in AWS, Azure, and Google cloud platforms as well as SaaS providers such as Microsoft 365 and Salesforce—transparently and securely,” Oswal said. “Distributing SD-WAN features over a regional architecture also brings processing power closer to where data is being generated—at the cloud edge.” - -The idea is that paths to designated SaaS applications will be monitored continuously for performance, and the application traffic will be dynamically routed to the best-performing path, without requiring human intervention, Oswal stated. - -For a typical configuration, a region covering a target city uses a colocation IaaS provider that hosts the Cisco Cloud onRamp for CoLocation, which includes: - - * Cisco vManage software that lets customers manage applications and provision, monitor, and troubleshoot the WAN. - * [Cisco Cloud Services Platform (CSP) 5000][8]. The systems are x86 Linux Kernel-based Virtual Machine (KVM) software and hardware platforms for the data center, regional hub, and colocation Network Functions Virtualization (NFV). The platforms let enterprise IT teams or service providers deploy any Cisco or third-party network virtual service with Cisco’s [Network Services Orchestrator (NSO)][9] or any other northbound management and orchestration system. - * The Cisco [Catalyst 9500 Series][10] aggregation switches. Based on an x86 CPU, the Catalyst 9500 Series is Cisco’s lead purpose-built fixed core and aggregation enterprise switching platform, built for security, IoT, and cloud.
The switches come with a 4-core x86, 2.4-GHz CPU, 16-GB DDR4 memory, and 16-GB internal storage. - - - -If the features of the package sound familiar, that’s because the [Cloud onRamp for CoLocation][11] package is the second generation of a similar SD-WAN package offered by Viptela, which Cisco [bought in 2017][12]. - -SD-WAN's driving principle is to simplify the way big companies turn up new links to branch offices, better manage the way those links are utilized – for data, voice or video – and potentially save money in the process. - -It's a profoundly hot market with tons of players including [Cisco][13], VMware, Silver Peak, Riverbed, Aryaka, Fortinet, Nokia and Versa. IDC says the SD-WAN infrastructure market will hit $4.5 billion by 2022, growing at a more than 40% yearly clip between now and then. - -[SD-WAN][14] lets networks route traffic based on centrally managed roles and rules, no matter what the entry and exit points of the traffic are, and with full security. For example, if a user in a branch office is working in Office365, SD-WAN can route their traffic directly to the closest cloud data center for that app, improving network responsiveness for the user and lowering bandwidth costs for the business. - -"SD-WAN has been a promised technology for years, but in 2019 it will be a major driver in how networks are built and re-built," Oswal said in a Network World [article][15] earlier this year. - -Join the Network World communities on [Facebook][16] and [LinkedIn][17] to comment on topics that are top of mind. - -------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3393232/cisco-boosts-sd-wan-with-multicloud-to-branch-access-system.html#tk.rss_all - -作者:[Michael Cooney][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Michael-Cooney/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2018/02/istock-578801262-100750453-large.jpg -[2]: https://www.networkworld.com/article/3209131/what-sdn-is-and-where-its-going.html -[3]: https://www.networkworld.com/article/3323407/sd-wan/how-to-buy-sd-wan-technology-key-questions-to-consider-when-selecting-a-supplier.html -[4]: https://www.networkworld.com/article/3328488/backup-systems-and-services/how-to-pick-an-off-site-data-backup-method.html -[5]: https://www.networkworld.com/article/3250664/lan-wan/sd-branch-what-it-is-and-why-youll-need-it.html -[6]: https://www.networkworld.com/article/3285728/sd-wan/what-are-the-options-for-securing-sd-wan.html?nsdr=true -[7]: https://blogs.cisco.com/enterprise/cisco-sd-wan-cloud-onramp-for-colocation-multicloud -[8]: https://www.cisco.com/c/en/us/products/collateral/switches/cloud-services-platform-5000/nb-06-csp-5k-data-sheet-cte-en.html#ProductOverview -[9]: https://www.cisco.com/go/nso -[10]: https://www.cisco.com/c/en/us/products/collateral/switches/catalyst-9500-series-switches/data_sheet-c78-738978.html -[11]: https://www.networkworld.com/article/3207751/viptela-cloud-onramp-optimizes-cloud-access.html -[12]: https://www.networkworld.com/article/3193784/cisco-grabs-up-sd-wan-player-viptela-for-610m.html?nsdr=true -[13]: https://www.networkworld.com/article/3322937/what-will-be-hot-for-cisco-in-2019.html -[14]: https://www.networkworld.com/article/3031279/sd-wan/sd-wan-what-it-is-and-why-you-ll-use-it-one-day.html
-[15]: https://www.networkworld.com/article/3332027/cisco-touts-5-technologies-that-will-change-networking-in-2019.html -[16]: https://www.facebook.com/NetworkWorld/ -[17]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190507 Server shipments to pick up in the second half of 2019.md b/sources/talk/20190507 Server shipments to pick up in the second half of 2019.md deleted file mode 100644 index 8169c594ef..0000000000 --- a/sources/talk/20190507 Server shipments to pick up in the second half of 2019.md +++ /dev/null @@ -1,56 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Server shipments to pick up in the second half of 2019) -[#]: via: (https://www.networkworld.com/article/3393167/server-shipments-to-pick-up-in-the-second-half-of-2019.html#tk.rss_all) -[#]: author: (Andy Patrizio https://www.networkworld.com/author/Andy-Patrizio/) - -Server shipments to pick up in the second half of 2019 -====== -Server sales slowed in anticipation of the new Intel Xeon processors, but they are expected to start up again before the end of the year. -![Thinkstock][1] - -Global server shipments are not expected to return to growth momentum until the third quarter or even the fourth quarter of 2019, according to Taiwan-based tech news site DigiTimes, which cited unnamed server supply chain sources. The one bright spot remains cloud providers like Amazon, Google, and Facebook, which continue their buying binge. - -Normally I’d be reluctant to cite such a questionable source, but given most of the OEMs and ODMs are based in Taiwan and DigiTimes (the article is behind a paywall so I cannot link) has shown it has connections to them, I’m inclined to believe them. - -Quanta Computer chairman Barry Lam told the publication that Quanta's shipments of cloud servers have risen steadily, compared to sharp declines in shipments of enterprise servers. Lam continued that enterprise servers command only 1-2% of the firm's total server shipments. - -**[ Also read:[Gartner: IT spending to drop due to falling equipment prices][2] ]** - -[Server shipments began to slow down in the first quarter][3] thanks in part to the impending arrival of second-generation Xeon Scalable processors from Intel. And since it takes a while to get parts and qualify them, this quarter won’t be much better. - -In its latest quarterly earnings, Intel's data center group (DCG) said sales declined 6% year over year, the first decline of its kind since the first quarter of 2012 and reversing an average growth of over 20% in the past. - -[The Osborne Effect][4] wasn’t the sole reason. An economic slowdown in China and the trade war, which will add significant tariffs to Chinese-made products, are also hampering sales. - -DigiTimes says Inventec, Intel's largest server motherboard supplier, expects shipments of enterprise server motherboards to further lose steam for the rest of the year, while sales of data center servers are expected to grow 10-15% year over year in 2019.
- -**[[Get certified as an Apple Technical Coordinator with this seven-part online course from PluralSight.][5] ]** - -It went on to say server shipments may be concentrated in the second half or even the fourth quarter of the year, while cloud-based data center servers for the cloud giants will remain positive as demand for edge computing, new artificial intelligence (AI) applications, and the proliferation of 5G applications begin in 2020. - -Join the Network World communities on [Facebook][6] and [LinkedIn][7] to comment on topics that are top of mind. - -------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3393167/server-shipments-to-pick-up-in-the-second-half-of-2019.html#tk.rss_all - -作者:[Andy Patrizio][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Andy-Patrizio/ -[b]: https://github.com/lujun9972 -[1]: https://images.techhive.com/images/article/2017/04/2_data_center_servers-100718306-large.jpg -[2]: https://www.networkworld.com/article/3391062/it-spending-to-drop-due-to-falling-equipment-prices-gartner-predicts.html -[3]: https://www.networkworld.com/article/3332144/server-sales-projected-to-slow-while-memory-prices-drop.html -[4]: https://en.wikipedia.org/wiki/Osborne_effect -[5]: https://pluralsight.pxf.io/c/321564/424552/7490?u=https%3A%2F%2Fwww.pluralsight.com%2Fpaths%2Fapple-certified-technical-trainer-10-11 -[6]: https://www.facebook.com/NetworkWorld/ -[7]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190509 Cisco adds AMP to SD-WAN for ISR-ASR routers.md b/sources/talk/20190509 Cisco adds AMP to SD-WAN for ISR-ASR routers.md deleted file mode 100644 index a5ec6212d8..0000000000 --- a/sources/talk/20190509 Cisco adds AMP to SD-WAN for ISR-ASR routers.md +++ /dev/null @@ -1,74 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Cisco adds AMP to SD-WAN for ISR/ASR routers) -[#]: via: (https://www.networkworld.com/article/3394597/cisco-adds-amp-to-sd-wan-for-israsr-routers.html#tk.rss_all) -[#]: author: (Michael Cooney https://www.networkworld.com/author/Michael-Cooney/) - -Cisco adds AMP to SD-WAN for ISR/ASR routers -====== -Cisco SD-WAN now sports Advanced Malware Protection on its popular edge routers, adding to their routing, segmentation, security, policy and orchestration capabilities. -![vuk8691 / Getty Images][1] - -Cisco has added support for Advanced Malware Protection (AMP) to its million-plus ISR/ASR edge routers, in an effort to [reinforce branch and core network malware protection][2] across the SD-WAN. - -Cisco last year added its Viptela SD-WAN technology to the IOS XE version 16.9.1 software that runs its core ISR/ASR routers such as the ISR models 1000, 4000 and ASR 5000, in use by organizations worldwide. Cisco bought Viptela in 2017.
- -**More about SD-WAN** - - * [How to buy SD-WAN technology: Key questions to consider when selecting a supplier][3] - * [How to pick an off-site data-backup method][4] - * [SD-Branch: What it is and why you’ll need it][5] - * [What are the options for securing SD-WAN?][6] - - - -The release of Cisco IOS XE offered an instant upgrade path for creating cloud-controlled SD-WAN fabrics to connect distributed offices, people, devices and applications operating on the installed base, Cisco said. At the time Cisco said that Cisco SD-WAN on edge routers builds a secure virtual IP fabric by combining routing, segmentation, security, policy and orchestration. - -With the recent release of [IOS-XE SD-WAN 16.11][7], Cisco has brought AMP and other enhancements to its SD-WAN. - -“Together with Cisco Talos [Cisco’s security-intelligence arm], AMP imbues your SD-WAN branch, core and campus locations with threat intelligence from millions of worldwide users, honeypots, sandboxes, and extensive industry partnerships,” wrote Cisco’s Patrick Vitalone, a product marketing manager, in a [blog][8] about the security portion of the new software. “In total, AMP identifies more than 1.1 million unique malware samples a day." When AMP in Cisco SD-WAN spots malicious behavior, it automatically blocks it, he wrote. - -The idea is to use integrated preventative engines, exploit prevention and intelligent signature-based antivirus to stop malicious attachments and fileless malware before they execute, Vitalone wrote. - -AMP support is added to a menu of security features already included in the SD-WAN software including support for URL filtering, [Cisco Umbrella][9] DNS security, Snort Intrusion Prevention, the ability to segment users across the WAN and embedded platform security, including the [Cisco Trust Anchor][10] module. - -**[[Prepare to become a Certified Information Systems Security Professional with this comprehensive online course from PluralSight. Now offering a 10-day free trial!][11] ]** - -The software also supports [SD-WAN Cloud onRamp for CoLocation][12], which lets customers tie distributed multicloud applications back to a local branch office or local private data center. That way a cloud-to-branch link would be shorter, faster and possibly more secure than tying cloud-based applications directly to the data center. - -“The idea that this kind of security technology is now integrated into Cisco’s SD-WAN offering is critical for Cisco and customers looking to evaluate SD-WAN offerings,” said Lee Doyle, principal analyst at Doyle Research. - -IOS-XE SD-WAN 16.11 is available now. - -Join the Network World communities on [Facebook][13] and [LinkedIn][14] to comment on topics that are top of mind.
- --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3394597/cisco-adds-amp-to-sd-wan-for-israsr-routers.html#tk.rss_all - -作者:[Michael Cooney][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Michael-Cooney/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2018/09/shimizu_island_el_nido_palawan_philippines_by_vuk8691_gettyimages-155385042_1200x800-100773533-large.jpg -[2]: https://www.networkworld.com/article/3285728/what-are-the-options-for-securing-sd-wan.html -[3]: https://www.networkworld.com/article/3323407/sd-wan/how-to-buy-sd-wan-technology-key-questions-to-consider-when-selecting-a-supplier.html -[4]: https://www.networkworld.com/article/3328488/backup-systems-and-services/how-to-pick-an-off-site-data-backup-method.html -[5]: https://www.networkworld.com/article/3250664/lan-wan/sd-branch-what-it-is-and-why-youll-need-it.html -[6]: https://www.networkworld.com/article/3285728/sd-wan/what-are-the-options-for-securing-sd-wan.html?nsdr=true -[7]: https://www.cisco.com/c/en/us/td/docs/routers/sdwan/release/notes/xe-16-11/sd-wan-rel-notes-19-1.html -[8]: https://blogs.cisco.com/enterprise/enabling-amp-in-cisco-sd-wan -[9]: https://www.networkworld.com/article/3167837/cisco-umbrella-cloud-service-shapes-security-for-cloud-mobile-resources.html -[10]: https://www.cisco.com/c/dam/en_us/about/doing_business/trust-center/docs/trustworthy-technologies-datasheet.pdf -[11]: https://pluralsight.pxf.io/c/321564/424552/7490?u=https%3A%2F%2Fwww.pluralsight.com%2Fpaths%2Fcertified-information-systems-security-professional-cisspr -[12]: https://www.networkworld.com/article/3393232/cisco-boosts-sd-wan-with-multicloud-to-branch-access-system.html -[13]: https://www.facebook.com/NetworkWorld/ -[14]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190510 Supermicro moves production from China.md b/sources/talk/20190510 Supermicro moves production from China.md deleted file mode 100644 index 21739fa416..0000000000 --- a/sources/talk/20190510 Supermicro moves production from China.md +++ /dev/null @@ -1,58 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Supermicro moves production from China) -[#]: via: (https://www.networkworld.com/article/3394404/supermicro-moves-production-from-china.html#tk.rss_all) -[#]: author: (Andy Patrizio https://www.networkworld.com/author/Andy-Patrizio/) - -Supermicro moves production from China -====== -Supermicro was cleared of any activity related to the Chinese government and secret chips in its motherboards, but it is taking no chances and is moving its facilities. -![Frank Schwichtenberg \(CC BY 4.0\)][1] - -Server maker Supermicro, based in Fremont, California, is reportedly moving production out of China over customer concerns that the Chinese government had secretly inserted chips for spying into its motherboards. - -The claims were made by Bloomberg late last year in a story that cited more than 100 sources in government and private industry, including Apple and Amazon Web Services (AWS). However, Apple CEO Tim Cook and AWS CEO Andy Jassy denied the claims and called for Bloomberg to retract the article. 
And a few months later, the third-party investigations firm Nardello & Co examined the claims and [cleared Supermicro][2] of any surreptitious activity.
-
-At first it seemed like Supermicro was weathering the storm, but the story did have a negative impact. Server sales have fallen since the Bloomberg story, and the company is forecasting a nearly 10% decline in total revenues for the March quarter compared to the previous three months.
-
-**[ Also read: [Who's developing quantum computers][3] ]**
-
-And now, Nikkei Asian Review reports that despite the strong rebuttals, some customers remain cautious about the company's products. To address those concerns, Nikkei says Supermicro has told suppliers to [move production out of China][4], citing industry sources familiar with the matter.
-
-Moving production also has the side benefit of mitigating the impact of the U.S.-China trade war, which is only getting worse. Since the tariffs are on the dollar amount of the product, the cost can quickly add up even for a low-end system, as Serve The Home noted in [this analysis][5]; a back-of-the-envelope illustration appears at the end of this article.
-
-Supermicro is the world's third-largest server maker by shipments, selling primarily to cloud providers like Amazon and Facebook. It does its own assembly in its Fremont facility but outsources motherboard production to numerous suppliers, mostly in China and Taiwan.
-
-"We have to be more self-reliant [to build in-house manufacturing] without depending only on those outsourcing partners whose production previously has mostly been in China," an executive told Nikkei.
-
-Nikkei notes that roughly 90% of the motherboards shipped worldwide in 2017 were made in China, but that percentage dropped to less than 50% in 2018, according to Digitimes Research, a tech supply chain specialist based in Taiwan.
-
-Supermicro just held a groundbreaking ceremony in Taiwan for an 800,000-square-foot manufacturing plant and is expanding its San Jose, California, plant as well. The company must be anxious to be free of China if it is willing to expand in one of the most expensive real estate markets in the world.
-
-A Supermicro spokesperson said via email, “We have been expanding our manufacturing capacity for many years to meet increasing customer demand. We are currently constructing a new Green Computing Park building in Silicon Valley, where we are the only Tier 1 solutions vendor manufacturing in Silicon Valley, and we proudly broke ground this week on a new manufacturing facility in Taiwan. To support our continued global growth, we look forward to expanding in Europe as well.”
-
-Join the Network World communities on [Facebook][6] and [LinkedIn][7] to comment on topics that are top of mind.
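-
-To make the tariff arithmetic concrete, here is a minimal sketch with hypothetical figures (the $1,500 price and the 25% rate are illustrative assumptions, not numbers from the article):
-
-```
-# A percentage tariff applies to the dollar value of the imported
-# product, so the added cost scales directly with the price.
-def landed_cost(product_value_usd, tariff_rate):
-    """Return the cost of an imported product after a percentage tariff."""
-    return product_value_usd * (1 + tariff_rate)
-
-# A hypothetical $1,500 low-end server under a 25% tariff:
-print(landed_cost(1500.0, 0.25))  # 1875.0 -- an extra $375 per unit
-```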
-
---------------------------------------------------------------------------------
-
-via: https://www.networkworld.com/article/3394404/supermicro-moves-production-from-china.html#tk.rss_all
-
-作者:[Andy Patrizio][a]
-选题:[lujun9972][b]
-译者:[译者ID](https://github.com/译者ID)
-校对:[校对者ID](https://github.com/校对者ID)
-
-本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
-
-[a]: https://www.networkworld.com/author/Andy-Patrizio/
-[b]: https://github.com/lujun9972
-[1]: https://images.idgesg.net/images/article/2019/05/supermicro_-_x11sae__cebit_2016_01-100796121-large.jpg
-[2]: https://www.networkworld.com/article/3326828/investigator-finds-no-evidence-of-spy-chips-on-super-micro-motherboards.html
-[3]: https://www.networkworld.com/article/3275385/who-s-developing-quantum-computers.html
-[4]: https://asia.nikkei.com/Economy/Trade-war/Server-maker-Super-Micro-to-ditch-made-in-China-parts-on-spy-fears
-[5]: https://www.servethehome.com/how-tariffs-hurt-intel-xeon-d-atom-and-amd-epyc-3000/
-[6]: https://www.facebook.com/NetworkWorld/
-[7]: https://www.linkedin.com/company/network-world
diff --git a/sources/talk/20190514 Brillio and Blue Planet Partner to Bring Network Automation to the Enterprise.md b/sources/talk/20190514 Brillio and Blue Planet Partner to Bring Network Automation to the Enterprise.md
deleted file mode 100644
index e821405199..0000000000
--- a/sources/talk/20190514 Brillio and Blue Planet Partner to Bring Network Automation to the Enterprise.md
+++ /dev/null
@@ -1,56 +0,0 @@
-[#]: collector: (lujun9972)
-[#]: translator: ( )
-[#]: reviewer: ( )
-[#]: publisher: ( )
-[#]: url: ( )
-[#]: subject: (Brillio and Blue Planet Partner to Bring Network Automation to the Enterprise)
-[#]: via: (https://www.networkworld.com/article/3394687/brillio-and-blue-planet-partner-to-bring-network-automation-to-the-enterprise.html)
-[#]: author: (Rick Hamilton, Senior Vice President, Blue Planet Software)
-
-Brillio and Blue Planet Partner to Bring Network Automation to the Enterprise
-======
-Rick Hamilton, senior vice president of Blue Planet, a division of Ciena, explains how partnering with Brillio brings the next generation of network capabilities to enterprises—just when they need it most.
-![Kritchanut][1]
-
-![][2]
-
-_Rick Hamilton, senior vice president of Blue Planet, a division of Ciena, explains how partnering with Brillio brings the next generation of network capabilities to enterprises—just when they need it most._
-
-In February 2019, we announced that Blue Planet was evolving into a more independent division, helping us increase our focus on innovative intelligent automation solutions that help our enterprise and service provider customers accelerate and achieve their business transformation goals.
-
-Today we’re excited to make another leap forward in delivering these benefits to enterprises of all types via our partnership with digital transformation services and solutions leader Brillio. Together, we are co-creating intelligent cloud and network management solutions that increase service visibility and improve service assurance by effectively leveraging the convergence of cloud, IoT, and AI.
-
-**Accelerating digital transformation in the enterprise**
-
-Enterprises continue to look toward cloud services to create new and incremental revenue streams based on innovative solution offerings and on-demand product/solution delivery models, and to optimize their infrastructure investments.
In fact, Gartner predicts that enterprise IT spending for cloud-based offerings will continue to grow faster than non-cloud IT offerings, making up 28% of spending by 2022, up from 19% in 2018.
-
-As enterprises adopt cloud, they realize there are many challenges associated with traditional approaches to operating and managing complex and hybrid multi-cloud environments. Our partnership with Brillio enables us to help these organizations across industries such as manufacturing, logistics, retail, and financial services meet their technical and business needs with high-impact solutions that improve customer experiences, drive operational efficiencies, and improve quality of service.
-
-This is achieved by combining the Blue Planet intelligent automation platform with the Brillio CLIP™ services delivery excellence platform and user-centered design (UCD)-led solution framework. Together, we offer end-to-end visibility of application and infrastructure assets in a hybrid multi-cloud environment and provide service assurance and self-healing capabilities that improve network and service availability.
-
-**Partnering on research and development**
-
-Brillio will also partner with Blue Planet on longer-term R&D efforts. As a preferred product engineering services provider, Brillio will work closely with our engineering team to develop and deliver network intelligence and automation solutions to help enterprises build dynamic, programmable infrastructure that leverages analytics and automation to realize the Adaptive Network vision.
-
-Of course, a partnership like this is a two-way street, and we consider Brillio’s choice to work with us to be a testament to our expertise, vision, and execution. In the words of Brillio Chairman and CEO Raj Mamodia, “Blue Planet’s experience in end-to-end service orchestration coupled with Brillio’s expertise in cloudification, user-centered enterprise solutions design, and rapid software development delivers distinct advantages to the industry. Through integration of technologies like cloud, IoT, and AI into our combined solutions, our partnership spurs greater innovation and helps us address the large and growing enterprise networking automation market.”
-
-Co-creating intelligent hybrid cloud and network management solutions with Brillio is key to advancing enterprise digital transformation initiatives. Partnering with Brillio helps us address the plethora of challenges facing enterprises today on their digital journey. Our partnership enables Blue Planet to achieve faster time-to-market and greater efficiency in developing new solutions to enable enterprises to continue to thrive and grow.
-
-[Learn more about Blue Planet here][3]
-
---------------------------------------------------------------------------------
-
-via: https://www.networkworld.com/article/3394687/brillio-and-blue-planet-partner-to-bring-network-automation-to-the-enterprise.html
-
-作者:[Rick Hamilton, Senior Vice President, Blue Planet Software][a]
-选题:[lujun9972][b]
-译者:[译者ID](https://github.com/译者ID)
-校对:[校对者ID](https://github.com/校对者ID)
-
-本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
-
-[a]:
-[b]: https://github.com/lujun9972
-[1]: https://images.idgesg.net/images/article/2019/05/istock-952625346-100796314-large.jpg
-[2]: https://images.idgesg.net/images/article/2019/05/rick-100796315-small.jpg
-[3]: https://www.blueplanet.com/?utm_campaign=X1058319&utm_source=NWW&utm_term=BPWeb_Brillio&utm_medium=sponsoredpost3Q19
diff --git a/sources/talk/20190514 Las Vegas targets transport, public safety with IoT deployments.md b/sources/talk/20190514 Las Vegas targets transport, public safety with IoT deployments.md
deleted file mode 100644
index 84a563c8bc..0000000000
--- a/sources/talk/20190514 Las Vegas targets transport, public safety with IoT deployments.md
+++ /dev/null
@@ -1,65 +0,0 @@
-[#]: collector: (lujun9972)
-[#]: translator: ( )
-[#]: reviewer: ( )
-[#]: publisher: ( )
-[#]: url: ( )
-[#]: subject: (Las Vegas targets transport, public safety with IoT deployments)
-[#]: via: (https://www.networkworld.com/article/3395536/las-vegas-targets-transport-public-safety-with-iot-deployments.html)
-[#]: author: (Jon Gold https://www.networkworld.com/author/Jon-Gold/)
-
-Las Vegas targets transport, public safety with IoT deployments
-======
-
-![Franck V. \(CC0\)][1]
-
-The city of Las Vegas’ pilot program with NTT and Dell, designed to crack down on wrong-way driving on municipal roads, is just part of the big plans that Sin City has for leveraging IoT tech in the future, according to the city’s director of technology, Michael Sherwood, who sat down with Network World at the IoT World conference in Silicon Valley this week.
-
-The system uses smart cameras and does most of its processing at the edge, according to Sherwood. The only information that gets sent back to the city’s private cloud is metadata – aggregated information about overall patterns, for decision-making and targeting purposes, not data about individual traffic incidents and wrong-way drivers.
-
-**[ Also see [What is edge computing?][2] and [How edge networking and IoT will reshape data centers][3]. ]**
-
-It’s an important public safety consideration, he said, but it’s a small part of the larger IoT-enabled framework that the city envisions for the future.
-
-“Our goal is to make our data open to the public, not only for transparency purposes, but to help spur development and create new applications to make Vegas a better place to live,” said Sherwood.
-
-[The city’s public data repository][4] already boasts a range of relevant data, some IoT-generated, some not. And efforts to make that data store more open have already begun to bear fruit, according to Sherwood. For example, one hackathon about a year ago resulted in an Alexa app that tells users how many traffic lights are out, by tracking energy usage data via the city’s portal, among other applications.
-
-As with IoT in general, Sherwood said that the city’s efforts have been bolstered by an influx of operational talent.
Rather than hiring additional IT staff to run the new systems, the city has brought in experts from the traffic department to help get the most out of the framework.
-
-Another idea for leveraging the city’s traffic data involves tracking the status of the curb. Given the rise of Uber and Lyft and other on-demand transportation services, linking a piece of camera-generated information like “rideshares are parked along both sides of this street” directly into a navigation app could help truck drivers avoid gridlock.
-
-“We’re really looking to make the roads a living source of information,” Sherwood said.
-
-**Safer parks**
-
-Las Vegas is also pursuing related public safety initiatives. One pilot project aims to make public parks safer by installing infrared cameras so authorities can tell whether people are in parks after hours without incurring undue privacy concerns, given that facial recognition is very tricky in infrared.
-
-It’s the test-and-see method of IoT development, according to Sherwood.
-
-“That’s a way of starting with an IoT project: start with one park. The cost to do something like this is not astronomical, and it allows you to gauge some other information from it,” he said.
-
-The city has also worked to keep the costs of these projects low or even show a return on investment, Sherwood added. Workforce development programs could train municipal workers to do simple maintenance on smart cameras in parks or along roadways, and the economic gains made from the successful use of the systems ought to outweigh deployment and operational outlay.
-
-“If it’s doing its job, those efficiencies should cover the system’s cost,” he said.
-
-Join the Network World communities on [Facebook][5] and [LinkedIn][6] to comment on topics that are top of mind.
-
---------------------------------------------------------------------------------
-
-via: https://www.networkworld.com/article/3395536/las-vegas-targets-transport-public-safety-with-iot-deployments.html
-
-作者:[Jon Gold][a]
-选题:[lujun9972][b]
-译者:[译者ID](https://github.com/译者ID)
-校对:[校对者ID](https://github.com/校对者ID)
-
-本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
-
-[a]: https://www.networkworld.com/author/Jon-Gold/
-[b]: https://github.com/lujun9972
-[1]: https://images.idgesg.net/images/article/2018/07/pedestrian-walk-sign_go_start_begin_traffic-light_by-franck-v-unsplaash-100765089-large.jpg
-[2]: https://www.networkworld.com/article/3224893/internet-of-things/what-is-edge-computing-and-how-it-s-changing-the-network.html
-[3]: https://www.networkworld.com/article/3291790/data-center/how-edge-networking-and-iot-will-reshape-data-centers.html
-[4]: https://opendata.lasvegasnevada.gov/
-[5]: https://www.facebook.com/NetworkWorld/
-[6]: https://www.linkedin.com/company/network-world
diff --git a/sources/talk/20190523 Cisco ties its security-SD-WAN gear with Teridion-s cloud WAN service.md b/sources/talk/20190523 Cisco ties its security-SD-WAN gear with Teridion-s cloud WAN service.md
deleted file mode 100644
index 2638987b16..0000000000
--- a/sources/talk/20190523 Cisco ties its security-SD-WAN gear with Teridion-s cloud WAN service.md
+++ /dev/null
@@ -1,74 +0,0 @@
-[#]: collector: (lujun9972)
-[#]: translator: ( )
-[#]: reviewer: ( )
-[#]: publisher: ( )
-[#]: url: ( )
-[#]: subject: (Cisco ties its security/SD-WAN gear with Teridion’s cloud WAN service)
-[#]: via: (https://www.networkworld.com/article/3396628/cisco-ties-its-securitysd-wan-gear-with-teridions-cloud-wan-service.html)
-[#]: author: 
(Michael Cooney https://www.networkworld.com/author/Michael-Cooney/)
-
-Cisco ties its security/SD-WAN gear with Teridion’s cloud WAN service
-======
-An agreement links Cisco Meraki MX Security/SD-WAN appliances and their Auto VPN technology to Teridion’s cloud-based WAN service, which claims to accelerate TCP-based applications by up to 5X.
-![istock][1]
-
-Cisco and Teridion have tied the knot to deliver faster enterprise [software-defined WAN][2] services.
-
-The agreement pairs [Cisco Meraki][3] MX Security/SD-WAN appliances, along with their Auto [VPN][4] technology that lets users quickly bring up and configure secure sessions between branches and data centers, with [Teridion’s cloud-based WAN service][5]. Teridion’s service promises customers better performance and control over traffic running from remote offices over the public internet to the [data center][6]. The service features what Teridion calls “Curated Routing,” which fuses WAN acceleration techniques with route optimization to speed traffic.
-
-**More about SD-WAN**
-
-  * [How to buy SD-WAN technology: Key questions to consider when selecting a supplier][7]
-  * [How to pick an off-site data-backup method][8]
-  * [SD-Branch: What it is and why you’ll need it][9]
-  * [What are the options for securing SD-WAN?][10]
-
-
-
-For example, Teridion says its WAN service can accelerate TCP-based applications like file transfers, backups and page loads by as much as three to five times.
-
-“[The service] improves network performance for UDP based applications like voice, video, RDP, and VDI. Enterprises can get carrier grade performance over broadband and dedicated internet access. Depending on the locations of the sites, [customers] can expect to see a 15 to 30 percent reduction in latency. That’s the difference between a great quality video conference and an unworkable, choppy mess,” Teridion [stated][11].
-
-Teridion says the Meraki integration creates an IPSec connection from the Cisco Meraki MX to the Teridion edge. “Customers create locations in the Teridion portal and apply the preconfigured Meraki template to them, or just upload a csv file if you have a lot of locations. Then, from each Meraki MX, create a 3rd party IPSec tunnel to the Teridion edge IP addresses that are generated as part of the Teridion configuration.”
-
-The combined Cisco Meraki and Teridion offering brings SD-WAN and security capabilities at the WAN edge that are tightly integrated with a WAN service delivered over cost-effective broadband or dedicated Internet access, said Raviv Levi, director of product management at Cisco Meraki, in a statement. “This brings better reliability and consistency to the enterprise WAN across multiple sites, as well as high performance access to all SaaS applications and cloud workloads.”
-
-Meraki’s MX family supports everything from SD-WAN and [Wi-Fi][12] features to next-generation [firewall][13] and intrusion prevention in a single package.
-
-Some studies show that by 2021 over 75 percent of enterprise traffic will be SaaS-oriented, so giving branch offices SD-WAN’s reliable, secure transportation options will be a necessity, Cisco said when it [upgraded the Meraki][3] boxes last year.
-
-Cisco Meraki isn’t the only SD-WAN service Teridion supports. The company also has agreements with Citrix, Silver Peak and VMware (VeloCloud). Teridion also has partnerships with over 25 cloud partners, including Google, Amazon Web Services and Microsoft Azure.
-
-[Teridion for Cisco Meraki][14] is available now from authorized Teridion resellers.
Pricing starts at $50 per site per month.
-
-Join the Network World communities on [Facebook][15] and [LinkedIn][16] to comment on topics that are top of mind.
-
---------------------------------------------------------------------------------
-
-via: https://www.networkworld.com/article/3396628/cisco-ties-its-securitysd-wan-gear-with-teridions-cloud-wan-service.html
-
-作者:[Michael Cooney][a]
-选题:[lujun9972][b]
-译者:[译者ID](https://github.com/译者ID)
-校对:[校对者ID](https://github.com/校对者ID)
-
-本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
-
-[a]: https://www.networkworld.com/author/Michael-Cooney/
-[b]: https://github.com/lujun9972
-[1]: https://images.idgesg.net/images/article/2018/02/istock-820219662-100749695-large.jpg
-[2]: https://www.networkworld.com/article/3031279/sd-wan-what-it-is-and-why-you-ll-use-it-one-day.html
-[3]: https://www.networkworld.com/article/3301169/cisco-meraki-amps-up-throughput-wi-fi-to-sd-wan-family.html
-[4]: https://www.networkworld.com/article/3138952/5-things-you-need-to-know-about-virtual-private-networks.html
-[5]: https://www.networkworld.com/article/3284285/teridion-enables-higher-performing-and-more-responsive-saas-applications.html
-[6]: https://www.networkworld.com/article/3223692/what-is-a-data-centerhow-its-changed-and-what-you-need-to-know.html
-[7]: https://www.networkworld.com/article/3323407/sd-wan/how-to-buy-sd-wan-technology-key-questions-to-consider-when-selecting-a-supplier.html
-[8]: https://www.networkworld.com/article/3328488/backup-systems-and-services/how-to-pick-an-off-site-data-backup-method.html
-[9]: https://www.networkworld.com/article/3250664/lan-wan/sd-branch-what-it-is-and-why-youll-need-it.html
-[10]: https://www.networkworld.com/article/3285728/sd-wan/what-are-the-options-for-securing-sd-wan.html?nsdr=true
-[11]: https://www.teridion.com/blog/teridion-announces-deep-integration-with-cisco-meraki-mx/
-[12]: https://www.networkworld.com/article/3318119/what-to-expect-from-wi-fi-6-in-2019.html
-[13]: https://www.networkworld.com/article/3230457/what-is-a-firewall-perimeter-stateful-inspection-next-generation.html
-[14]: https://www.teridion.com/meraki
-[15]: https://www.facebook.com/NetworkWorld/
-[16]: https://www.linkedin.com/company/network-world
diff --git a/sources/talk/20190528 With Cray buy, HPE rules but does not own the supercomputing market.md b/sources/talk/20190528 With Cray buy, HPE rules but does not own the supercomputing market.md
deleted file mode 100644
index 07f9eea10c..0000000000
--- a/sources/talk/20190528 With Cray buy, HPE rules but does not own the supercomputing market.md
+++ /dev/null
@@ -1,59 +0,0 @@
-[#]: collector: (lujun9972)
-[#]: translator: ( )
-[#]: reviewer: ( )
-[#]: publisher: ( )
-[#]: url: ( )
-[#]: subject: (With Cray buy, HPE rules but does not own the supercomputing market)
-[#]: via: (https://www.networkworld.com/article/3397087/with-cray-buy-hpe-rules-but-does-not-own-the-supercomputing-market.html)
-[#]: author: (Andy Patrizio https://www.networkworld.com/author/Andy-Patrizio/)
-
-With Cray buy, HPE rules but does not own the supercomputing market
-======
-In buying supercomputer vendor Cray, HPE has strengthened its high-performance-computing technology, but serious competitors remain.
-![Cray Inc.][1]
-
-Hewlett Packard Enterprise was already the leader in the high-performance computing (HPC) sector before its announced acquisition of supercomputer maker Cray earlier this month.
Now it has a commanding lead, but there are still competitors to the giant.
-
-The news that HPE would shell out $1.3 billion to buy the company came just as Cray had announced plans to build three of the biggest systems yet — all exascale, and all slated for deployment in 2021.
-
-Sales had been slowing for HPC systems, but the U.S. government, with its endless supply of money, came to the rescue, throwing hundreds of millions at Cray for systems to be built at Lawrence Berkeley National Laboratory, Argonne National Laboratory and Oak Ridge National Laboratory.
-
-**[ Read also: [How to plan a software-defined data-center network][2] ]**
-
-And HPE sees a big revenue opportunity in HPC, a market that was $2 billion in 1990 and is now nearly $30 billion, according to Steve Conway, senior vice president with Hyperion Research, which follows the HPC market. HPE thinks the HPC market will grow to $35 billion by 2021, and it hopes to earn a big chunk of that pie.
-
-“They were solidly in the lead without Cray. They were already in a significant lead over the No. 2 company, Dell. This adds to their lead and gives them access to [the] very high end of [the] market, especially government supercomputers that sell for $300 million to $600 million each,” said Conway.
-
-He’s not exaggerating. Earlier this month the U.S. Department of Energy announced a contract with Cray to build Frontier, an exascale supercomputer at Oak Ridge National Laboratory, sometime in 2021, with a $600 million price tag. Frontier will be powered by AMD Epyc processors and Radeon GPUs, which must have them doing backflips at AMD.
-
-With Cray, HPE is sitting on a lot of technology for the supercomputing and even the high-end, non-HPC market. It had the ProLiant business, the bulk of server sales (and proof the Compaq acquisition wasn’t such a bad idea), Integrity NonStop mission-critical servers, the SGI business it acquired in 2016, plus a variety of systems running everything from Arm to Xeon Scalable processors.
-
-Conway thinks all of those technologies fit in different spaces, so he doubts HPE will try to consolidate any of it. All HPE has said so far is it will keep the supercomputer products it has now under the Cray business unit.
-
-But the company is still getting something it didn’t have. “It takes a certain kind of technical experience [to do HPC right] and only a few companies [are] able to play at that level. Before this deal, HPE was not one of them,” said Conway.
-
-And in the process, HPE takes Cray away from its many competitors: IBM, Lenovo, Dell/EMC, Huawei (well, not so much now), Super Micro, NEC, Hitachi, Fujitsu, and Atos.
-
-“[The acquisition] doesn’t fundamentally change things because there’s still enough competitors that buyers can have competitive bids. But it’s gotten to be a much bigger market,” said Conway.
-
-Cray sells a lot to government, but Conway thinks there is a new opportunity in the ever-expanding AI race. “Because HPC is indispensable at the forefront of AI, there is a new area for expanding the market,” he said.
-
-Join the Network World communities on [Facebook][3] and [LinkedIn][4] to comment on topics that are top of mind.
-
---------------------------------------------------------------------------------
-
-via: https://www.networkworld.com/article/3397087/with-cray-buy-hpe-rules-but-does-not-own-the-supercomputing-market.html
-
-作者:[Andy Patrizio][a]
-选题:[lujun9972][b]
-译者:[译者ID](https://github.com/译者ID)
-校对:[校对者ID](https://github.com/校对者ID)
-
-本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
-
-[a]: https://www.networkworld.com/author/Andy-Patrizio/
-[b]: https://github.com/lujun9972
-[1]: https://images.idgesg.net/images/article/2018/06/the_cray_xc30_piz_daint_system_at_the_swiss_national_supercomputing_centre_via_cray_inc_3x2_978x652-100762113-large.jpg
-[2]: https://www.networkworld.com/article/3284352/data-center/how-to-plan-a-software-defined-data-center-network.html
-[3]: https://www.facebook.com/NetworkWorld/
-[4]: https://www.linkedin.com/company/network-world
diff --git a/sources/talk/20190529 Cisco security spotlights Microsoft Office 365 e-mail phishing increase.md b/sources/talk/20190529 Cisco security spotlights Microsoft Office 365 e-mail phishing increase.md
deleted file mode 100644
index c1e0493e63..0000000000
--- a/sources/talk/20190529 Cisco security spotlights Microsoft Office 365 e-mail phishing increase.md
+++ /dev/null
@@ -1,92 +0,0 @@
-[#]: collector: (lujun9972)
-[#]: translator: ( )
-[#]: reviewer: ( )
-[#]: publisher: ( )
-[#]: url: ( )
-[#]: subject: (Cisco security spotlights Microsoft Office 365 e-mail phishing increase)
-[#]: via: (https://www.networkworld.com/article/3398925/cisco-security-spotlights-microsoft-office-365-e-mail-phishing-increase.html)
-[#]: author: (Michael Cooney https://www.networkworld.com/author/Michael-Cooney/)
-
-Cisco security spotlights Microsoft Office 365 e-mail phishing increase
-======
-A Cisco blog follows a DHS Cybersecurity and Infrastructure Security Agency (CISA) report detailing risks around Office 365 and other cloud services.
-![weerapatkiatdumrong / Getty Images][1]
-
-It’s no secret that if you have a cloud-based e-mail service, fighting off the barrage of security issues has become a maddening daily routine.
-
-The leading e-mail service – in [Microsoft’s Office 365][2] package – seems to be getting the most attention from those attackers hellbent on stealing enterprise data or your private information via phishing attacks. Amazon and Google see their share of phishing attempts in their cloud-based services as well.
-
-**[ Also see [What to consider when deploying a next generation firewall][3]. | Get regularly scheduled insights by [signing up for Network World newsletters][4]. ]**
-
-But attackers are crafting and launching phishing campaigns targeting Office 365 users, [wrote][5] Ben Nahorney, a threat intelligence analyst for Cisco Security, in a blog post on the Office 365 phishing issue.
-
-Nahorney cited research from security vendor [Agari Data][6] that found a steady increase over the last few quarters in the number of phishing emails impersonating Microsoft. While Microsoft has long been the most commonly impersonated brand, it now accounts for more than half of all brand impersonations seen in the last quarter.
-
-Cloud security firm Avanan recently wrote in its [annual phishing report][7] that one in every 99 emails is a phishing attack, with malicious links and attachments as the main vectors.
“Of the phishing attacks we analyzed, 25 percent bypassed Office 365 security, a number that is likely to increase as attackers design new obfuscation methods that take advantage of zero-day vulnerabilities on the platform,” Avanan wrote.
-
-The attackers attempt to steal a user’s login credentials with the goal of taking over accounts. If successful, attackers can often log into the compromised accounts and perform a wide variety of malicious activities: spread malware, spam and phishing emails from within the internal network; carry out tailored attacks such as spear phishing and [business email compromise][8] [a long-standing business scam that uses spear phishing, social engineering, identity theft and e-mail spoofing]; and target partners and customers, Nahorney wrote.
-
-Nahorney wrote that at first glance, this may not seem very different from external email-based attacks. However, there is one critical distinction: The malicious emails are now coming from legitimate accounts.
-
-**[ [Prepare to become a Certified Information Systems Security Professional with this comprehensive online course from PluralSight. Now offering a 10-day free trial!][9] ]**
-
-“For the recipient, it’s often even someone that they know, eliciting trust in a way that would not necessarily be afforded to an unknown source. To make things more complicated, attackers often leverage ‘conversation hijacking,’ where they deliver their payload by replying to an email that’s already located in the compromised inbox,” Nahorney stated.
-
-The methods used by attackers to gain access to an Office 365 account are fairly straightforward, Nahorney wrote.
-
-“The phishing campaigns usually take the form of an email from Microsoft. The email contains a request to log in, claiming the user needs to reset their password, hasn’t logged in recently or that there’s a problem with the account that needs their attention. A URL is included, enticing the reader to click to remedy the issue,” Nahorney wrote.
-
-Once logged in, nefarious activities can go on unnoticed as the attacker has what look like authorized credentials.
-
-“This gives the attacker time for reconnaissance: a chance to observe and plan additional attacks. Nor will this type of attack set off a security alert in the same way something like a brute-force attack against a webmail client will, where the attacker guesses password after password until they get in or are detected,” Nahorney stated.
-
-Nahorney suggested the following steps customers can take to protect email:
-
-  * Use multi-factor authentication. If a login attempt requires a secondary authorization before someone is allowed access to an inbox, this will stop many attackers, even with phished credentials.
-  * Deploy advanced anti-phishing technologies. Some machine-learning technologies can use local identity and relationship modeling alongside behavioral analytics to spot deception-based threats.
-  * Run regular phishing exercises. Regular, mandated phishing exercises across the entire organization will help to train employees to recognize phishing emails, so that they don’t click on malicious URLs or enter their credentials into a malicious website.
-
-
-
-### Homeland Security flags Office 365, other cloud email services
-
-The U.S. government, too, has been warning customers of Office 365 and other cloud-based email services that they should be on alert for security risks.
The U.S. Department of Homeland Security’s Cybersecurity and Infrastructure Security Agency (CISA) this month [issued a report targeting][10] Office 365 and other cloud services, saying:
-
-“Organizations that used a third party have had a mix of configurations that lowered their overall security posture (e.g., mailbox auditing disabled, unified audit log disabled, multi-factor authentication disabled on admin accounts). In addition, the majority of these organizations did not have a dedicated IT security team to focus on their security in the cloud. These security oversights have led to user and mailbox compromises and vulnerabilities.”
-
-The agency also posted remediation suggestions, including:
-
-  * Enable unified audit logging in the Security and Compliance Center.
-  * Enable mailbox auditing for each user.
-  * Ensure Azure AD password sync is planned for and configured correctly, prior to migrating users.
-  * Disable legacy email protocols, if not required, or limit their use to specific users.
-
-
-
-Join the Network World communities on [Facebook][11] and [LinkedIn][12] to comment on topics that are top of mind.
-
---------------------------------------------------------------------------------
-
-via: https://www.networkworld.com/article/3398925/cisco-security-spotlights-microsoft-office-365-e-mail-phishing-increase.html
-
-作者:[Michael Cooney][a]
-选题:[lujun9972][b]
-译者:[译者ID](https://github.com/译者ID)
-校对:[校对者ID](https://github.com/校对者ID)
-
-本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
-
-[a]: https://www.networkworld.com/author/Michael-Cooney/
-[b]: https://github.com/lujun9972
-[1]: https://images.idgesg.net/images/article/2019/05/cso_phishing_social_engineering_security_threat_by_weerapatkiatdumrong_gettyimages-489433130_3x2_2400x1600-100796450-large.jpg
-[2]: https://docs.microsoft.com/en-us/office365/securitycompliance/security-roadmap
-[3]: https://www.networkworld.com/article/3236448/lan-wan/what-to-consider-when-deploying-a-next-generation-firewall.html
-[4]: https://www.networkworld.com/newsletters/signup.html
-[5]: https://blogs.cisco.com/security/office-365-phishing-threat-of-the-month
-[6]: https://www.agari.com/
-[7]: https://www.avanan.com/hubfs/2019-Global-Phish-Report.pdf
-[8]: https://www.networkworld.com/article/3195072/fbi-ic3-vile-5b-business-e-mail-scam-continues-to-breed.html
-[9]: https://pluralsight.pxf.io/c/321564/424552/7490?u=https%3A%2F%2Fwww.pluralsight.com%2Fpaths%2Fcertified-information-systems-security-professional-cisspr
-[10]: https://www.us-cert.gov/ncas/analysis-reports/AR19-133A
-[11]: https://www.facebook.com/NetworkWorld/
-[12]: https://www.linkedin.com/company/network-world
diff --git a/sources/talk/20190529 Nvidia launches edge computing platform for AI processing.md b/sources/talk/20190529 Nvidia launches edge computing platform for AI processing.md
deleted file mode 100644
index f608db970c..0000000000
--- a/sources/talk/20190529 Nvidia launches edge computing platform for AI processing.md
+++ /dev/null
@@ -1,53 +0,0 @@
-[#]: collector: (lujun9972)
-[#]: translator: ( )
-[#]: reviewer: ( )
-[#]: publisher: ( )
-[#]: url: ( )
-[#]: subject: (Nvidia launches edge computing platform for AI processing)
-[#]: via: (https://www.networkworld.com/article/3397841/nvidia-launches-edge-computing-platform-for-ai-processing.html)
-[#]: author: (Andy Patrizio https://www.networkworld.com/author/Andy-Patrizio/)
-
-Nvidia launches edge computing platform for AI processing
-======
-EGX platform goes to the edge to
do as much processing there as possible before sending data upstream to major data centers.
-![Leo Wolfert / Getty Images][1]
-
-Nvidia is launching a new platform called EGX, designed to bring real-time artificial intelligence (AI) to edge networks. The idea is to put AI computing closer to where sensors collect data, before it is sent to larger data centers.
-
-The edge serves as a buffer for data sent to data centers. It whittles down the data collected and sends only what is relevant up to major data centers for processing. This can mean discarding more than 90% of the data collected, but the trick is knowing which data to keep and which to discard; a toy illustration of this kind of edge filtering appears at the end of this article.
-
-“AI is required in this data-driven world,” said Justin Boitano, senior director for enterprise and edge computing at Nvidia, on a press call last Friday. “We analyze data near the source, capture anomalies and report anomalies back to the mothership for analysis.”
-
-**[ Now read [20 hot jobs ambitious IT pros should shoot for][2]. ]**
-
-Boitano said we are hitting a crossover point where there is more compute at the edge than in the cloud because more work needs to be done there.
-
-EGX comes from 14 server vendors in a range of form factors, combining AI with network, security and storage from Mellanox. Boitano said that the systems will fit in any industry-standard rack, so they will fit into edge containers from the likes of Vapor IO and Schneider Electric.
-
-EGX starts with Nvidia’s low-power Jetson Nano processor but scales all the way up to Nvidia T4 processors that can deliver more than 10,000 trillion operations per second (TOPS) for real-time speech recognition and other real-time AI tasks.
-
-Nvidia is also working on a software stack called Nvidia Edge Stack that can be updated constantly, and the software runs in containers, so no reboots are required, just a restart of the container. EGX runs enterprise-grade Kubernetes container platforms like Red Hat OpenShift.
-
-Edge Stack is optimized software that includes Nvidia drivers, a CUDA Kubernetes plugin, a CUDA container runtime, CUDA-X libraries and containerized AI frameworks and applications, including TensorRT, TensorRT Inference Server and DeepStream.
-
-The company is boasting more than 40 early adopters, including BMW Group Logistics, which uses EGX and its own Isaac robotic platforms to handle increasingly complex logistics with real-time efficiency.
-
-Join the Network World communities on [Facebook][3] and [LinkedIn][4] to comment on topics that are top of mind.
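-
-To make the edge-filtering idea concrete, here is a minimal sketch (the readings, threshold and function name are illustrative assumptions, not part of Nvidia’s stack):
-
-```
-import statistics
-
-# Toy edge filter: summarize raw sensor readings locally and forward
-# only aggregate metadata plus the readings that look anomalous.
-def filter_at_edge(readings, z_threshold=2.0):
-    mean = statistics.mean(readings)
-    stdev = statistics.stdev(readings)
-    anomalies = [r for r in readings
-                 if stdev and abs(r - mean) / stdev > z_threshold]
-    # Only this small summary travels upstream; the raw stream stays local.
-    return {"count": len(readings), "mean": round(mean, 2),
-            "anomalies": anomalies}
-
-sensor = [20.1, 20.3, 19.9, 20.0, 20.2, 35.7, 20.1, 19.8]
-print(filter_at_edge(sensor))  # forwards a summary plus the 35.7 outlier
-```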
-
---------------------------------------------------------------------------------
-
-via: https://www.networkworld.com/article/3397841/nvidia-launches-edge-computing-platform-for-ai-processing.html
-
-作者:[Andy Patrizio][a]
-选题:[lujun9972][b]
-译者:[译者ID](https://github.com/译者ID)
-校对:[校对者ID](https://github.com/校对者ID)
-
-本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
-
-[a]: https://www.networkworld.com/author/Andy-Patrizio/
-[b]: https://github.com/lujun9972
-[1]: https://images.idgesg.net/images/article/2019/02/industry_4-0_industrial_iot_smart_factory_by_leowolfert_gettyimages-689799380_2400x1600-100788464-large.jpg
-[2]: https://www.networkworld.com/article/3276025/careers/20-hot-jobs-ambitious-it-pros-should-shoot-for.html
-[3]: https://www.facebook.com/NetworkWorld/
-[4]: https://www.linkedin.com/company/network-world
diff --git a/sources/talk/20190601 HPE Synergy For Dummies.md b/sources/talk/20190601 HPE Synergy For Dummies.md
deleted file mode 100644
index 1b7ddbe2e7..0000000000
--- a/sources/talk/20190601 HPE Synergy For Dummies.md
+++ /dev/null
@@ -1,77 +0,0 @@
-[#]: collector: (lujun9972)
-[#]: translator: ( )
-[#]: reviewer: ( )
-[#]: publisher: ( )
-[#]: url: ( )
-[#]: subject: (HPE Synergy For Dummies)
-[#]: via: (https://www.networkworld.com/article/3399618/hpe-synergy-for-dummies.html)
-[#]: author: (HPE https://www.networkworld.com/author/Michael-Cooney/)
-
-HPE Synergy For Dummies
-======
-
-![istock/venimo][1]
-
-Businesses must move fast today to keep up with competitive forces. That means IT must provide an agile — anytime, anywhere, any workload — infrastructure that ensures growth, boosts productivity, enhances innovation, improves the customer experience, and reduces risk.
-
-A composable infrastructure helps organizations achieve these important objectives, which are difficult, if not impossible, to achieve via traditional means. It does so by providing the ability to do the following:
-
-  * Deploy quickly with simple flexing, scaling, and updating
-  * Run workloads anywhere — on physical servers, on virtual servers, or in containers
-  * Operate any workload upon which the business depends, without worrying about infrastructure resources or compatibility
-  * Ensure the infrastructure is able to provide the right service levels so the business can stay in business
-
-
-
-In other words, IT must inherently become part of the fabric of products and services that are rapidly innovated at every company, with an anytime, anywhere, any workload infrastructure.
-
-**The anytime paradigm**
-
-For organizations that seek to embrace DevOps, collaboration is the cultural norm. Development and operations staff work side-by-side to support software across its entire life cycle, from initial idea to production support.
-
-To provide DevOps groups, as well as other stakeholders, the IT infrastructure required at the rate at which it is demanded, enterprise IT must increase its speed, agility, and flexibility to enable anytime composition and re-composition of resources. Composable infrastructure enables this anytime paradigm.
-
-**The anywhere ability**
-
-Bare metal and virtualized workloads are just two application foundations that need to be supported in the modern data center. Today, containers are emerging as a compelling construct, providing significant benefits for certain kinds of workloads.
Unfortunately, with traditional infrastructure approaches, IT needs to build out custom, unique infrastructure to support them, at least until an infrastructure is deployed that can seamlessly handle physical, virtual, and container-based workloads.
-
-Each environment would need its own hardware and software and might even need its own staff members supporting it.
-
-Composable infrastructure provides an environment that supports the ability to run physical, virtual, or containerized workloads.
-
-**Support any workload**
-
-Do you have a legacy on-premises application that you have to keep running? Do you have enterprise resource planning (ERP) software that currently powers your business but that will take ten years to phase out? At the same time, do you have an emerging DevOps philosophy under which you’d like to empower developers to dynamically create computing environments as a part of their development efforts?
-
-All these things can be accomplished simultaneously on the right kind of infrastructure. Composable infrastructure enables any workload to operate as a part of the architecture.
-
-**HPE Synergy**
-
-HPE Synergy brings to life the architectural principles of composable infrastructure. It is a single, purpose-built platform that reduces operational complexity for workloads and increases operational velocity for applications and services.
-
-Download a copy of the [HPE Synergy for Dummies eBook][2] to learn how to:
-
-  * Infuse the IT architecture with the ability to enable agility, flexibility, and speed
-  * Apply composable infrastructure concepts to support both traditional and cloud-native applications
-  * Deploy HPE Synergy infrastructure to revolutionize workload support in the data center
-
-
-
-Also, you will find more information about HPE Synergy [here][3].
-
---------------------------------------------------------------------------------
-
-via: https://www.networkworld.com/article/3399618/hpe-synergy-for-dummies.html
-
-作者:[HPE][a]
-选题:[lujun9972][b]
-译者:[译者ID](https://github.com/译者ID)
-校对:[校对者ID](https://github.com/校对者ID)
-
-本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
-
-[a]: https://www.networkworld.com/author/Michael-Cooney/
-[b]: https://github.com/lujun9972
-[1]: https://images.idgesg.net/images/article/2019/06/istock-1026657600-100798064-large.jpg
-[2]: https://www.hpe.com/us/en/resources/integrated-systems/synergy-for-dummies.html
-[3]: http://hpe.com/synergy
diff --git a/sources/talk/20190605 Cisco will use AI-ML to boost intent-based networking.md b/sources/talk/20190605 Cisco will use AI-ML to boost intent-based networking.md
deleted file mode 100644
index 29d2acd519..0000000000
--- a/sources/talk/20190605 Cisco will use AI-ML to boost intent-based networking.md
+++ /dev/null
@@ -1,87 +0,0 @@
-[#]: collector: (lujun9972)
-[#]: translator: ( )
-[#]: reviewer: ( )
-[#]: publisher: ( )
-[#]: url: ( )
-[#]: subject: (Cisco will use AI/ML to boost intent-based networking)
-[#]: via: (https://www.networkworld.com/article/3400382/cisco-will-use-aiml-to-boost-intent-based-networking.html)
-[#]: author: (Michael Cooney https://www.networkworld.com/author/Michael-Cooney/)
-
-Cisco will use AI/ML to boost intent-based networking
-======
-Cisco explains how artificial intelligence and machine learning fit into a feedback loop that implements and maintains desired network conditions to optimize network performance for workloads using real-time data.
-![xijian / Getty Images][1]
-
-Artificial intelligence and machine learning are expected to be some of the big topics at next week’s Cisco Live event, and the company is already talking about how those technologies will help drive the next generation of [Intent-Based Networking][2].
-
-“Artificial intelligence will change how we manage networks, and it’s a change we need,” wrote John Apostolopoulos, Cisco CTO and vice president of enterprise networking, in a [blog][3] about how Cisco says these technologies impact the network.
-
-**[ Now see [7 free network tools you must have][4]. ]**
-
-AI is the next major step for networking capabilities, and while researchers have talked in the past about how great AI would be, now the compute power and algorithms exist to make it possible, Apostolopoulos told Network World.
-
-To understand how AI and ML can boost IBN, Cisco says it’s necessary to understand four key factors an IBN environment needs: infrastructure, translation, activation and assurance.
-
-Infrastructure can be virtual or physical and include wireless access points, switches, routers, compute and storage. “To make the infrastructure do what we want, we use the translation function to convert the intent, or what we are trying to make the network accomplish, from a person or computer into the correct network and security policies. These policies then must be activated on the network,” Apostolopoulos said.
-
-The activation step takes the network and security policies and couples them with a deep understanding of the network infrastructure that includes both real-time and historic data about its behavior. It then activates or automates the policies across all of the network infrastructure elements, ideally optimizing for performance, reliability and security, Apostolopoulos wrote.
-
-Finally, assurance maintains a continuous validation-and-verification loop. IBN improves on translation and assurance to form a valuable feedback loop about what’s going on in the network that wasn’t available before.
-
-Apostolopoulos used the example of an international company that wanted to set up a worldwide video all-hands meeting. Everyone on the call had to have high-quality, low-latency video, and also needed the capability to send high-quality video into the call when it was time for Q&A.
-
-“By applying machine learning and related machine reasoning, assurance can also sift through the massive amount of data related to such a global event to correctly identify if there are any problems arising. We can then get solutions to these issues – and even automatically apply solutions – more quickly and more reliably than before,” Apostolopoulos said.
-
-In this case, assurance could identify that the use of WAN bandwidth to certain sites is increasing at a rate that will saturate the network paths and could proactively reroute some of the WAN flows through alternative paths to prevent congestion from occurring, Apostolopoulos wrote.
-
-“In prior systems, this problem would typically only be recognized after the bandwidth bottleneck occurred and users experienced a drop in call quality or even lost their connection to the meeting. It would be challenging or impossible to identify the issue in real time, much less to fix it before it distracted from the experience of the meeting.
Accurate and fast identification through ML and MR coupled with intelligent automation through the feedback loop is key to a successful outcome.”
-
-Apostolopoulos said AI can accelerate the path from intent into translation and activation, and then examine network and behavior data in the assurance step to make sure everything is working correctly. Activation uses the insights to drive more intelligent actions for improved performance, reliability and security, creating a cycle of network optimization.
-
-So what might an implementation of this look like? Applications that run on Cisco’s DNA Center may be the central component in an IBN environment. Introduced in 2017 as the heart of its IBN initiative, [Cisco DNA Center][5] features automation capabilities, assurance settings, fabric provisioning and policy-based segmentation for enterprise networks.
-
-“DNA Center can bring together AI and ML in a unified manner,” Apostolopoulos said. “It can store data from across the network and then customers can do AI and ML on that data.”
-
-Central to Cisco’s push is being able to gather metadata about traffic as it passes without slowing the traffic, which is accomplished through the use of ASICs in its campus and data-center switches.
-
-“We have designed our networking gear from the ASIC, OS and software levels to gather key data via our IBN architecture, which provides unified data collection and performs algorithmic analysis across the entire network (wired, wireless, LAN, WAN, datacenter),” Apostolopoulos said. “We have a massive collection of network data, including a database of problems and associated root causes, from being the world’s top enterprise network vendor over the past 20-plus years. And we have been investing for many years to create innovative network-data analysis and ML, MR, and other AI techniques to identify and solve key problems.”
-
-Machine learning and AI can then be applied to all that data to help network operators handle everything from policy setting and network control to security.
-
-“I also want to stress that the feedback the IT user gets from the IBN system with AI is not overwhelming telemetry data,” Apostolopoulos said. Instead, it is valuable and actionable insights at scale, derived from immense data and behavioral analytics using AI.
-
-Managing and developing new AI/ML-based applications from enormous data sets beyond what Cisco already has is a key driver behind the company’s Unified Computing System (UCS) server that was rolled out last September. While the new server, the UCS C480 ML, is powerful – it includes eight Nvidia Tesla V100-32G GPUs with 128GB of DDR4 RAM, 24 SATA hard drives and more – it is the ecosystem of vendors – Cloudera, Hortonworks and others – that will end up being more important.
-
-[Earlier this year Cisco forecast][6] that [AI and ML][7] will significantly boost network management this year.
-
-“In 2019, companies will start to adopt Artificial Intelligence, in particular Machine Learning, to analyze the telemetry coming off networks to see these patterns, in an attempt to get ahead of issues from performance optimization, to financial efficiency, to security,” said [Anand Oswal][8], senior vice president of engineering in Cisco’s Enterprise Networking Business. The pattern-matching capabilities of ML will be used to spot anomalies in network behavior that might otherwise be missed, while also de-prioritizing alerts that otherwise nag network operators but that aren’t critical, Oswal said. A minimal sketch of this kind of anomaly scoring follows.
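-
-Here is that sketch: z-score-based detection over telemetry samples (the numbers, threshold and function name are illustrative assumptions, not Cisco’s implementation):
-
-```
-import statistics
-
-def flag_anomalies(baseline, samples, z_threshold=3.0):
-    """Keep only samples that deviate sharply from a learned baseline."""
-    mean = statistics.mean(baseline)
-    stdev = statistics.stdev(baseline) or 1.0
-    # Low-scoring samples are de-prioritized; high scores get surfaced.
-    return [(s, round(abs(s - mean) / stdev, 1)) for s in samples
-            if abs(s - mean) / stdev > z_threshold]
-
-# Interface utilization (%) observed during normal operation ...
-baseline = [41, 44, 39, 42, 40, 43, 41, 42]
-# ... and fresh telemetry, with one link suddenly saturating.
-print(flag_anomalies(baseline, [42, 44, 97]))  # only (97, 34.6) is flagged
-```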
-
-“We will also start to use these tools to categorize and cluster device and user types, which can help us create profiles for use cases as well as spot outlier activities that could indicate security incursions,” he said.
-
-The first application of AI in network management will be smarter alerts that simply report on activities that break normal patterns, but as the technology advances it will react to more situations autonomously. The idea is to give customers more information so they and the systems can make better network decisions. Workable tools should appear later in 2019, Oswal said.
-
-Join the Network World communities on [Facebook][9] and [LinkedIn][10] to comment on topics that are top of mind.
-
---------------------------------------------------------------------------------
-
-via: https://www.networkworld.com/article/3400382/cisco-will-use-aiml-to-boost-intent-based-networking.html
-
-作者:[Michael Cooney][a]
-选题:[lujun9972][b]
-译者:[译者ID](https://github.com/译者ID)
-校对:[校对者ID](https://github.com/校对者ID)
-
-本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
-
-[a]: https://www.networkworld.com/author/Michael-Cooney/
-[b]: https://github.com/lujun9972
-[1]: https://images.idgesg.net/images/article/2019/05/ai-vendor-relationship-management_bar-code_purple_artificial-intelligence_hand-on-virtual-screen-100795252-large.jpg
-[2]: http://www.networkworld.com/cms/article/3202699
-[3]: https://blogs.cisco.com/enterprise/improving-networks-with-ai
-[4]: https://www.networkworld.com/article/2825879/7-free-open-source-network-monitoring-tools.html
-[5]: https://www.networkworld.com/article/3280988/cisco-opens-dna-center-network-control-and-management-software-to-the-devops-masses.html
-[6]: https://www.networkworld.com/article/3332027/cisco-touts-5-technologies-that-will-change-networking-in-2019.html
-[7]: https://www.networkworld.com/article/3320978/data-center/network-operations-a-new-role-for-ai-and-ml.html
-[8]: https://blogs.cisco.com/author/anandoswal
-[9]: https://www.facebook.com/NetworkWorld/
-[10]: https://www.linkedin.com/company/network-world
diff --git a/sources/talk/20190606 Juniper- Security could help drive interest in SDN.md b/sources/talk/20190606 Juniper- Security could help drive interest in SDN.md
deleted file mode 100644
index b140969eb5..0000000000
--- a/sources/talk/20190606 Juniper- Security could help drive interest in SDN.md
+++ /dev/null
@@ -1,89 +0,0 @@
-[#]: collector: (lujun9972)
-[#]: translator: ( )
-[#]: reviewer: ( )
-[#]: publisher: ( )
-[#]: url: ( )
-[#]: subject: (Juniper: Security could help drive interest in SDN)
-[#]: via: (https://www.networkworld.com/article/3400739/juniper-sdn-snapshot-finds-security-legacy-network-tech-impacts-core-network-changes.html)
-[#]: author: (Michael Cooney https://www.networkworld.com/author/Michael-Cooney/)
-
-Juniper: Security could help drive interest in SDN
-======
-Juniper finds that enterprise interest in software-defined networking (SDN) is influenced by other factors, including artificial intelligence (AI) and machine learning (ML).
-![monsitj / Getty Images][1]
-
-Security challenges and developing artificial intelligence/machine learning (AI/ML) technologies are among the key issues driving [software-defined networking][2] (SDN) implementations, according to a new Juniper survey of 500 IT decision makers.
-
-And SDN interest abounds – 98% of the 500 said they were already using or considering an SDN implementation.
Juniper said it had [Wakefield Research][3] poll IT decision makers at companies with 500 or more employees about their SDN strategies between May 7 and May 14, 2019.
-
-**More about SD-WAN**
-
-  * [How to buy SD-WAN technology: Key questions to consider when selecting a supplier][4]
-  * [How to pick an off-site data-backup method][5]
-  * [SD-Branch: What it is and why you’ll need it][6]
-  * [What are the options for securing SD-WAN?][7]
-
-
-
-SDN includes technologies that separate the network control plane from the forwarding plane to enable more automated provisioning and policy-based management of network resources.
-
-IDC estimates that the worldwide data-center SDN market will be worth more than $12 billion in 2022, recording a CAGR of 18.5% during the 2017-2022 period. The market generated revenue of nearly $5.15 billion in 2017, up more than 32.2% from 2016.
-
-There are many ideas driving the development of SDN. For example, it promises to reduce the complexity of statically defined networks; make automating network functions much easier; and allow for simpler provisioning and management of networked resources from the data center to the campus or wide area network.
-
-While the evolution of SDN is ongoing, Juniper’s study pointed out an issue that was perhaps not unexpected – many users are still managing operations via the command line interface (CLI). CLI is the primary text-based user interface used for configuring, monitoring and maintaining most networked devices.
-
-“If SDN is as attractive as it is, then why manage the network with the same legacy technology of the past?” said Michael Bushong, vice president of enterprise and cloud marketing at Juniper Networks. “If you deploy SDN and don’t adjust the operational model, then it is difficult to reap all the benefits SDN can bring. It’s the difference between managing devices individually, which you may have done in the past, and managing fleets of devices via SDN – it simplifies and reduces operational expenses.”
-
-Juniper pointed to a [Gartner prediction][8] that stated “by 2020, only 30% of network operations teams will use the command line interface (CLI) as their primary interface, down from 85% at year-end 2016.” Gartner stated that poll results from a recent Gartner conference found some 71% still using CLI as the primary way to make network changes.
-
-Gartner [wrote][9] in the past that CLI has remained the primary operational tool for mainstream network operations teams for easily the past 15-20 years, but that “moving away from the CLI is a good thing for the networking industry, and while it won’t disappear completely (advanced/nuanced troubleshooting for example), it will be supplanted as the main interface into networking infrastructure.”
-
-Juniper’s study found that 87% of businesses are still doing most or some of their network management at the device level.
-
-What all of this shows is that customers are obviously interested in SDN but are still grappling with the best ways to get there, Bushong said.
-
-The Juniper study also found users interested in SDN because of the potential for a security boost.
-
-SDN can enable a variety of security benefits. A customer can split up a network connection between an end user and the data center and have different security settings for the various types of network traffic. A network could have one public-facing, low-security segment that does not touch any sensitive information.
Another segment could have much more fine-grained remote-access control with software-based [firewall][10] and encryption policies on it, which allow sensitive data to traverse over it. SDN users can roll out security policies across the network from the data center to the edge much more rapidly than in traditional network environments.
-
-“Many enterprises see security—not speed—as the biggest consequence of not making this transition in the next five years, with nearly 40 percent identifying the inability to quickly address new threats as one of their main concerns,” wrote Manoj Leelanivas, chief product officer at Juniper Networks, in a blog about the survey.
-
-“SDN is not often associated with greater security but this makes sense when we remember this is an operational transformation. In security, the challenge lies not in identifying threats or creating solutions, but in applying these solutions to a fragmented network. Streamlining complex security operations, touching many different departments and managing multiple security solutions, is where a software-defined approach can provide the answer,” Leelanivas stated.
-
-Some of the other key findings from Juniper included:
-
- * **The future of AI**: The deployment of artificial intelligence is about changing the operational model, Bushong said. “The ability to more easily manage workflows over groups of devices and derive usable insights to help customers be more proactive rather than reactive is the direction we are moving. Everything will ultimately be AI-driven,” he said.
 * **Automation**: While automation is often considered a threat, Juniper said its respondents see it positively within the context of SDN, with 38% reporting it will improve security and 25% saying it will enhance their jobs by streamlining manual operations.
 * **Flexibility**: Agility is the #1 benefit respondents considering SDN want to gain (48%), followed by improved reliability (43%) and greater simplicity (38%).
 * **SD-WAN**: The majority, 54%, have rolled out or are in the process of rolling out SD-WAN, while an additional 34% have it under current consideration.
-
-
-
-Join the Network World communities on [Facebook][11] and [LinkedIn][12] to comment on topics that are top of mind.
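To make the segmentation scenario described above concrete, here is a minimal sketch of what per-segment security policy can look like. The segment names and attribute fields are invented purely for illustration; they do not correspond to any vendor's API or schema.

```python
# Invented segment definitions, for illustration only.
SEGMENTS = {
    # Public-facing, low-security segment that never touches sensitive data.
    "guest-public": {"sensitive_data": False, "firewall": "basic",
                     "encryption": False, "remote_access": "open"},
    # Segment with fine-grained remote-access control, software-based
    # firewalling and encryption, over which sensitive data may traverse.
    "corp-remote": {"sensitive_data": True, "firewall": "software-defined",
                    "encryption": True, "remote_access": "fine-grained"},
}

def segment_for(traffic_class: str) -> str:
    """Steer each traffic class onto the segment whose policy fits it."""
    sensitive = {"hr", "finance", "crm"}
    return "corp-remote" if traffic_class in sensitive else "guest-public"

for tc in ("web-browsing", "finance"):
    name = segment_for(tc)
    print(f"{tc} -> {name}: {SEGMENTS[name]}")
```

The point of the sketch is that policy lives with the segment, not the individual device, which is what lets an SDN roll changes out from data center to edge quickly.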
-
--------------------------------------------------------------------------------
-
-via: https://www.networkworld.com/article/3400739/juniper-sdn-snapshot-finds-security-legacy-network-tech-impacts-core-network-changes.html
-
-作者:[Michael Cooney][a]
-选题:[lujun9972][b]
-译者:[译者ID](https://github.com/译者ID)
-校对:[校对者ID](https://github.com/校对者ID)
-
-本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
-
-[a]: https://www.networkworld.com/author/Michael-Cooney/
-[b]: https://github.com/lujun9972
-[1]: https://images.idgesg.net/images/article/2019/03/sdn_software-defined-network_architecture-100791938-large.jpg
-[2]: https://www.networkworld.com/article/3209131/what-sdn-is-and-where-its-going.html
-[3]: https://www.wakefieldresearch.com/
-[4]: https://www.networkworld.com/article/3323407/sd-wan/how-to-buy-sd-wan-technology-key-questions-to-consider-when-selecting-a-supplier.html
-[5]: https://www.networkworld.com/article/3328488/backup-systems-and-services/how-to-pick-an-off-site-data-backup-method.html
-[6]: https://www.networkworld.com/article/3250664/lan-wan/sd-branch-what-it-is-and-why-youll-need-it.html
-[7]: https://www.networkworld.com/article/3285728/sd-wan/what-are-the-options-for-securing-sd-wan.html?nsdr=true
-[8]: https://blogs.gartner.com/andrew-lerner/2018/01/04/checking-in-on-the-death-of-the-cli/
-[9]: https://blogs.gartner.com/andrew-lerner/2016/11/22/predicting-the-death-of-the-cli/
-[10]: https://www.networkworld.com/article/3230457/what-is-a-firewall-perimeter-stateful-inspection-next-generation.html
-[11]: https://www.facebook.com/NetworkWorld/
-[12]: https://www.linkedin.com/company/network-world
diff --git a/sources/talk/20190611 Cisco launches a developer-community cert program.md b/sources/talk/20190611 Cisco launches a developer-community cert program.md
deleted file mode 100644
index 92ce486e6d..0000000000
--- a/sources/talk/20190611 Cisco launches a developer-community cert program.md
+++ /dev/null
@@ -1,66 +0,0 @@
-[#]: collector: (lujun9972)
-[#]: translator: ( )
-[#]: reviewer: ( )
-[#]: publisher: ( )
-[#]: url: ( )
-[#]: subject: (Cisco launches a developer-community cert program)
-[#]: via: (https://www.networkworld.com/article/3401524/cisco-launches-a-developer-community-cert-program.html)
-[#]: author: (Michael Cooney https://www.networkworld.com/author/Michael-Cooney/)
-
-Cisco launches a developer-community cert program
-======
-Cisco has revamped some of its most critical certification and career-development programs in an effort to address the emerging software-oriented network environment.
-![Getty Images][1]
-
-SAN DIEGO – Cisco revamped some of its most critical certification and career-development tools in an effort to address the emerging software-oriented network environment.
-
-Perhaps one of the biggest additions – rolled out here at the company’s Cisco Live customer event – is the new set of professional certifications for developers utilizing Cisco’s growing DevNet developer community.
-
-**[ Also see [4 job skills that can boost networking salaries][2] and [20 hot jobs ambitious IT pros should shoot for][3].]**
-
-The Cisco Certified DevNet Associate, Specialist and Professional certifications will cover software development for applications, automation, DevOps, cloud and IoT. They will also target software developers and network engineers who are building software proficiency to develop applications and automated workflows for operational networks and infrastructure.
-
-“This certification evolution is the next step to reflect the critical skills network engineers must have to be at the leading edge of network-enabled business disruption and delivering customer excellence,” said Mike Adams, vice president and general manager of Learning@Cisco. “To perform effectively in this new world, every IT professional needs skills that are broader, deeper and more agile than ever before. And they have to be comfortable working as a multidisciplinary team including infrastructure network engineers, DevOps and automation specialists, and software professionals.”
-
-Other Cisco certification changes include:
-
- * Streamlined certifications to validate engineering professionals with Cisco Certified Network Associate (CCNA) and Cisco Specialist certifications as well as Cisco Certified Network Professional (CCNP) and Cisco Certified Internetwork Expert (CCIE) certifications in enterprise, data center, service provider, security and collaboration.
 * For more senior professionals, the CCNP will give learners a choice of five tracks, covering enterprise technologies including infrastructure and wireless, service provider, data center, security and collaboration. Candidates will be able to further specialize in a particular focus area within those technologies.
 * Cisco says it will eliminate prerequisites for certifications, meaning engineers can change career options without having to take a defined path.
 * Expansion of Cisco Networking Academy offerings to train entry-level network professionals and software developers. Courses prepare students to earn CCNA and Certified DevNet Associate certifications, equipping them for high-demand jobs in IT.
-
-
-
-New network technologies such as intent-based networking, multi-domain networking, and programmability fundamentally change the capabilities of the network, giving network engineers the opportunity to architect solutions that utilize the programmable network in new and exciting ways, wrote Susie Wee, senior vice president and chief technology officer of DevNet.
-
-“DevOps practices can be applied to the network, making the network more agile and enabling automation at scale. The new network provides more than just connectivity, it can now use policy and intent to securely connect applications, users, devices and data across multiple environments – from the data center and cloud, to the campus and branch, to the edge, and to the device,” Wee wrote.
-
-**[[Looking to upgrade your career in tech? This comprehensive online course teaches you how.][4] ]**
-
-She also announced the DevNet Automation Exchange, a community that will offer shared code, best practices and technology tools for users, developers or channel partners interested in developing automation apps.
-
-Wee said Cisco seeded the Automation Exchange with over 50 shared code repositories.
-
-“It is becoming increasingly clear that network ops can be handled much more efficiently with automation, and offering the tools to develop better applications is crucial going forward,” said Zeus Kerravala, founder and principal analyst with ZK Research.
-
-Join the Network World communities on [Facebook][5] and [LinkedIn][6] to comment on topics that are top of mind.
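As a rough illustration of the automation-at-scale idea Wee describes, the sketch below queries a controller's REST API for a device inventory and pushes one declarative policy to every device, instead of configuring each box by hand over the CLI. The controller URL, endpoint paths, payload shape and token header are hypothetical placeholders, not a real Cisco (or DevNet) API.

```python
# Illustrative only: endpoints, payloads and auth are hypothetical,
# not a real Cisco API. Requires the third-party "requests" package.
import requests

CONTROLLER = "https://controller.example.net"  # hypothetical controller
HEADERS = {"X-Auth-Token": "placeholder-token"}  # placeholder credential

def get_devices() -> list:
    """Fetch the device inventory from the controller."""
    resp = requests.get(f"{CONTROLLER}/api/v1/devices",
                        headers=HEADERS, timeout=10)
    resp.raise_for_status()
    return resp.json()["devices"]

def apply_policy(device_id: str, policy: dict) -> None:
    """Push one declarative policy to one device."""
    resp = requests.post(f"{CONTROLLER}/api/v1/devices/{device_id}/policy",
                         json=policy, headers=HEADERS, timeout=10)
    resp.raise_for_status()

if __name__ == "__main__":
    qos_policy = {"app": "voice", "priority": "high"}  # example intent
    for device in get_devices():
        apply_policy(device["id"], qos_policy)  # fleet-wide, in one loop
```

The design point is the loop at the bottom: the same change that once meant logging into each device now runs once against an inventory, which is what makes "fleets of devices" manageable.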
-
--------------------------------------------------------------------------------
-
-via: https://www.networkworld.com/article/3401524/cisco-launches-a-developer-community-cert-program.html
-
-作者:[Michael Cooney][a]
-选题:[lujun9972][b]
-译者:[译者ID](https://github.com/译者ID)
-校对:[校对者ID](https://github.com/校对者ID)
-
-本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
-
-[a]: https://www.networkworld.com/author/Michael-Cooney/
-[b]: https://github.com/lujun9972
-[1]: https://images.idgesg.net/images/article/2019/01/run_digital-vanguard_business-executive-with-briefcase_career-growth-100786736-large.jpg
-[2]: https://www.networkworld.com/article/3227832/lan-wan/4-job-skills-that-can-boost-networking-salaries.html
-[3]: https://www.networkworld.com/article/3276025/careers/20-hot-jobs-ambitious-it-pros-should-shoot-for.html
-[4]: https://pluralsight.pxf.io/c/321564/424552/7490?u=https%3A%2F%2Fwww.pluralsight.com%2Fpaths%2Fupgrading-your-technology-career
-[5]: https://www.facebook.com/NetworkWorld/
-[6]: https://www.linkedin.com/company/network-world
diff --git a/sources/talk/20190612 Cisco offers cloud-based security for SD-WAN resources.md b/sources/talk/20190612 Cisco offers cloud-based security for SD-WAN resources.md
deleted file mode 100644
index a6cd0c73b4..0000000000
--- a/sources/talk/20190612 Cisco offers cloud-based security for SD-WAN resources.md
+++ /dev/null
@@ -1,95 +0,0 @@
-[#]: collector: (lujun9972)
-[#]: translator: ( )
-[#]: reviewer: ( )
-[#]: publisher: ( )
-[#]: url: ( )
-[#]: subject: (Cisco offers cloud-based security for SD-WAN resources)
-[#]: via: (https://www.networkworld.com/article/3402079/cisco-offers-cloud-based-security-for-sd-wan-resources.html)
-[#]: author: (Michael Cooney https://www.networkworld.com/author/Michael-Cooney/)
-
-Cisco offers cloud-based security for SD-WAN resources
-======
-Cisco adds support for its cloud-based security gateway Umbrella to SD-WAN software
-![Thinkstock][1]
-
-SAN DIEGO— As many companies look to [SD-WAN][2] technology to reduce costs, improve connectivity and streamline branch office access, one of the key requirements will be solid security technologies to protect corporate resources.
-
-At its Cisco Live customer event here this week, the company took aim at that need by telling customers it added support for its cloud-based security gateway – known as Umbrella – to its SD-WAN software offerings.
-
-**More about SD-WAN**
-
- * [How to buy SD-WAN technology: Key questions to consider when selecting a supplier][3]
 * [How to pick an off-site data-backup method][4]
 * [SD-Branch: What it is and why you’ll need it][5]
 * [What are the options for security SD-WAN?][6]
-
-
-
-At its most basic, SD-WAN lets companies aggregate a variety of network connections – including MPLS, 4G LTE and DSL – into a branch or network-edge location and provides management software that can turn up new sites, prioritize traffic and set security policies. SD-WAN's driving principle is to simplify the way big companies turn up new links to branch offices, better manage the way those links are utilized – for data, voice or video – and potentially save money in the process.
-
-According to Cisco, Umbrella can provide the first line of defense against threats on the internet.
By analyzing and learning from internet activity patterns, Umbrella automatically uncovers attacker infrastructure and proactively blocks requests to malicious destinations before a connection is even established — without adding latency for users. With Umbrella, customers can stop phishing and malware infections earlier, identify already infected devices faster and prevent data exfiltration, Cisco says.
-
-Branch offices and roaming users are more vulnerable to attacks, and attackers are looking to exploit them, said Gee Rittenhouse, senior vice president and general manager of Cisco's Security Business Group. He pointed to Enterprise Strategy Group research that says 68 percent of branch offices and roaming users were the source of compromise in recent attacks. And as organizations move to more direct internet access, this becomes an even greater risk, Rittenhouse said.
-
-“Scaling security at every location often means more appliances to ship and manage, more policies to separately maintain, which translates into more money and resources needed – but Umbrella offers an alternative to all that,” he said. “Umbrella provides simple deployment and management, and in a single cloud platform, it unifies multiple layers of security, including DNS, secure web gateway, firewall and cloud-access security.”
-
-“It also acts as your secure onramp to the internet by offering secure internet access and controlled SaaS usage across all locations and roaming users.”
-
-Basically users can set up Umbrella support via the SD-WAN dashboard vManage, and the system automatically creates a secure tunnel to the cloud. Once the SD-WAN traffic is pointed at the cloud, firewall and other security policies can be set. Customers can then see traffic and collect information about patterns or set policies and respond to anomalies, Rittenhouse said.
-
-Analysts said the Umbrella offering is another important security option offered by Cisco for SD-WAN customers.
-
-“Since it is cloud-based, using Umbrella is a great option for customers with lots of branch or SD-WAN locations who don’t want or need to have a security gateway on premises,” said Rohit Mehra, vice president of Network Infrastructure at IDC. “One of the largest requirements for large customers going forward will be the need for all manner of security technologies for the SD-WAN environment, and Cisco has a big menu of offerings that can address those requirements.”
-
-IDC says the SD-WAN infrastructure market will hit $4.5 billion by 2022, growing at a more than 40 percent yearly clip between now and then.
-
-The Umbrella announcement is on top of other recent SD-WAN security enhancements the company has made. In May [Cisco added support for Advanced Malware Protection (AMP) to its million-plus ISR/ASR edge routers][7] in an effort to reinforce branch- and core-network malware protection across the SD-WAN.
-
-“Together with Cisco Talos [Cisco’s security-intelligence arm], AMP imbues your SD-WAN branch, core and campus locations with threat intelligence from millions of worldwide users, honeypots, sandboxes and extensive industry partnerships,” Cisco said.
-
-In total, AMP identifies more than 1.1 million unique malware samples a day, and when AMP in the Cisco SD-WAN platform spots malicious behavior it automatically blocks it, Cisco said.
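DNS-layer enforcement of the kind Umbrella performs is easy to observe from a client. The sketch below, which requires the third-party dnspython package and uses Umbrella's well-known public resolver addresses, resolves a domain and flags answers that look like a block page. The block-page address range shown is an assumption; the actual addresses vary by deployment and policy.

```python
# Sketch: observe DNS-layer filtering from a client. Requires dnspython
# (pip install dnspython). The block-page network below is an assumption;
# real block-page addresses depend on the Umbrella deployment and policy.
import ipaddress
import dns.resolver

UMBRELLA_RESOLVERS = ["208.67.222.222", "208.67.220.220"]
ASSUMED_BLOCK_NET = ipaddress.ip_network("146.112.61.104/29")  # assumption

def check(domain: str) -> str:
    resolver = dns.resolver.Resolver(configure=False)
    resolver.nameservers = UMBRELLA_RESOLVERS
    answers = resolver.resolve(domain, "A")
    ips = [ipaddress.ip_address(r.address) for r in answers]
    if any(ip in ASSUMED_BLOCK_NET for ip in ips):
        return f"{domain}: redirected to a block page {ips}"
    return f"{domain}: resolved normally {ips}"

if __name__ == "__main__":
    print(check("example.com"))
```

Because the filtering happens at resolution time, a blocked destination never gets a real address back, which is why Cisco can claim the connection is stopped "before a connection is even established."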
-
-Last year Cisco added its [Viptela SD-WAN technology to the IOS XE][8] version 16.9.1 software that runs its core ISR/ASR routers such as the ISR models 1000, 4000 and ASR 1000, in use by organizations worldwide. Cisco bought Viptela in 2017.
-
-The release of Cisco IOS XE offered an instant upgrade path for creating cloud-controlled SD-WAN fabrics to connect distributed offices, people, devices and applications operating on the installed base, Cisco said. At the time Cisco said that Cisco SD-WAN on edge routers builds a secure virtual IP fabric by combining routing, segmentation, security, policy and orchestration.
-
-With the recent release of IOS-XE SD-WAN 16.11, Cisco has brought AMP and other enhancements to its SD-WAN.
-
-AMP support is added to a menu of security features already included in Cisco's SD-WAN software including support for URL filtering, Snort Intrusion Prevention, the ability to segment users across the WAN and embedded platform security, including the Cisco Trust Anchor module.
-
-The software also supports SD-WAN Cloud onRamp for CoLocation, which lets customers tie distributed multicloud applications back to a local branch office or local private data center. That way a cloud-to-branch link would be shorter, faster and possibly more secure than tying cloud-based applications directly to the data center.
-
-Also in May [Cisco and Teridion][9] said they would team to deliver faster enterprise software-defined WAN services. The integration links Cisco Meraki MX Security/SD-WAN appliances and its Auto VPN technology, which lets users quickly bring up and configure secure sessions between branches and data centers, with Teridion’s cloud-based WAN service. Teridion’s service promises customers better performance and control over traffic running from remote offices over the public internet to the data center.
-
-Teridion said the Meraki integration creates an IPSec connection from the Cisco Meraki MX to the Teridion edge. Customers create locations in the Teridion portal and apply the preconfigured Meraki template to them, or just upload a CSV file if they have a lot of locations. Then, from each Meraki MX, they can create a third-party IPSec tunnel to the Teridion edge IP addresses that are generated as part of the Teridion configuration, the company stated.
-
-The combined Cisco Meraki and Teridion offering brings SD-WAN and security capabilities at the WAN edge that are tightly integrated with a WAN service delivered over cost-effective broadband or dedicated Internet access. Meraki’s MX family supports everything from SD-WAN and [Wi-Fi][10] features to next-generation [firewall][11] and intrusion prevention in a single package.
-
-Join the Network World communities on [Facebook][12] and [LinkedIn][13] to comment on topics that are top of mind.
-
--------------------------------------------------------------------------------
-
-via: https://www.networkworld.com/article/3402079/cisco-offers-cloud-based-security-for-sd-wan-resources.html
-
-作者:[Michael Cooney][a]
-选题:[lujun9972][b]
-译者:[译者ID](https://github.com/译者ID)
-校对:[校对者ID](https://github.com/校对者ID)
-
-本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
-
-[a]: https://www.networkworld.com/author/Michael-Cooney/
-[b]: https://github.com/lujun9972
-[1]: https://images.techhive.com/images/article/2015/10/cloud-security-ts-100622309-large.jpg
-[2]: https://www.networkworld.com/article/3209131/what-sdn-is-and-where-its-going.html
-[3]: https://www.networkworld.com/article/3323407/sd-wan/how-to-buy-sd-wan-technology-key-questions-to-consider-when-selecting-a-supplier.html
-[4]: https://www.networkworld.com/article/3328488/backup-systems-and-services/how-to-pick-an-off-site-data-backup-method.html
-[5]: https://www.networkworld.com/article/3250664/lan-wan/sd-branch-what-it-is-and-why-youll-need-it.html
-[6]: https://www.networkworld.com/article/3285728/sd-wan/what-are-the-options-for-securing-sd-wan.html?nsdr=true
-[7]: https://www.networkworld.com/article/3394597/cisco-adds-amp-to-sd-wan-for-israsr-routers.html
-[8]: https://www.networkworld.com/article/3296007/cisco-upgrade-enables-sd-wan-in-1m-israsr-routers.html
-[9]: https://www.networkworld.com/article/3396628/cisco-ties-its-securitysd-wan-gear-with-teridions-cloud-wan-service.html
-[10]: https://www.networkworld.com/article/3318119/what-to-expect-from-wi-fi-6-in-2019.html
-[11]: https://www.networkworld.com/article/3230457/what-is-a-firewall-perimeter-stateful-inspection-next-generation.html
-[12]: https://www.facebook.com/NetworkWorld/
-[13]: https://www.linkedin.com/company/network-world
diff --git a/sources/talk/20190612 Dell and Cisco extend VxBlock integration with new features.md b/sources/talk/20190612 Dell and Cisco extend VxBlock integration with new features.md
deleted file mode 100644
index 30e225de98..0000000000
--- a/sources/talk/20190612 Dell and Cisco extend VxBlock integration with new features.md
+++ /dev/null
@@ -1,72 +0,0 @@
-[#]: collector: (lujun9972)
-[#]: translator: ( )
-[#]: reviewer: ( )
-[#]: publisher: ( )
-[#]: url: ( )
-[#]: subject: (Dell and Cisco extend VxBlock integration with new features)
-[#]: via: (https://www.networkworld.com/article/3402036/dell-and-cisco-extend-vxblock-integration-with-new-features.html)
-[#]: author: (Andy Patrizio https://www.networkworld.com/author/Andy-Patrizio/)
-
-Dell and Cisco extend VxBlock integration with new features
-======
-Dell EMC and Cisco took another step in their alliance, announcing plans to expand VxBlock 1000 integration across servers, networking, storage, and data protection.
-![Dell EMC][1]
-
-Just two months ago [Dell EMC and Cisco renewed their converged infrastructure][2] vows, and now the two have taken another step in the alliance. At this year’s [Cisco Live][3] event in San Diego, the two announced plans to expand VxBlock 1000 integration across servers, networking, storage, and data protection.
-
-This is done through support of NVMe over Fabrics (NVMe-oF), which allows enterprise SSDs to talk to each other directly through a high-speed fabric. NVMe is an important advance because SATA and PCI Express SSDs could never talk directly to other drives until NVMe came along.
-
-To leverage NVMe-oF to its fullest extent, Dell EMC has unveiled new integrated Cisco compute (UCS) and storage (MDS) 32G options, extending PowerMax capabilities to deliver NVMe performance across the VxBlock stack.
-
-**More news from Cisco Live 2019:**
-
- * [Cisco offers cloud-based security for SD-WAN resources][4]
 * [Cisco software to make networks smarter, safer, more manageable][5]
 * [Cisco launches a developer-community cert program][6]
-
-
-
-Dell EMC said this will enhance the consistency, availability, and scalability of the VxBlock architecture and let its customers run high-performance, end-to-end mission-critical workloads with microsecond response times.
-
-These new compute and storage options will be available to order sometime later this month.
-
-### Other VxBlock news from Dell EMC
-
-Dell EMC also announced it is extending its factory-integrated, on-premises data-protection solutions for VxBlock to hybrid and multi-cloud environments, such as Amazon Web Services (AWS). This update will help protect VMware workloads and data via the company’s Data Domain Virtual Edition and Cloud Disaster Recovery software options. This will be available in July.
-
-The company also plans to release VxBlock Central 2.0 software next month. VxBlock Central is designed to help customers simplify CI administration through converged awareness, automation, and analytics.
-
-New to version 2.0 is modular licensing that matches workflow automation, advanced analytics, and life-cycle management/upgrade options to your needs.
-
-VxBlock Central 2.0 has a variety of license features, including the following:
-
- * **Base** – Free with purchase of a VxBlock, the base license allows you to manage your system and improve compliance with inventory reporting and alerting.
 * **Workflow Automation** – Provision infrastructure on-demand using engineered workflows through vRealize Orchestrator. New workflows available with this package include Cisco UCS server expansion with Unity and XtremIO storage arrays.
 * **Advanced Analytics** – View capacity and KPIs to discover deeper actionable insights through vRealize Operations.
 * **Lifecycle Management** (new, available later in 2019) – Apply “guided path” software upgrades to optimize system performance.
-
- * Lifecycle Management includes a new multi-tenant, cloud-based database built on Cloud IQ that will collect and store the CI component inventory structured by the customer, extending the value and ease of use of the cloud-based analytics monitoring Cloud IQ already provides for individual Dell EMC storage arrays.
-
-
-
-Join the Network World communities on [Facebook][7] and [LinkedIn][8] to comment on topics that are top of mind.
-
--------------------------------------------------------------------------------
-
-via: https://www.networkworld.com/article/3402036/dell-and-cisco-extend-vxblock-integration-with-new-features.html
-
-作者:[Andy Patrizio][a]
-选题:[lujun9972][b]
-译者:[译者ID](https://github.com/译者ID)
-校对:[校对者ID](https://github.com/校对者ID)
-
-本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
-
-[a]: https://www.networkworld.com/author/Andy-Patrizio/
-[b]: https://github.com/lujun9972
-[1]: https://images.idgesg.net/images/article/2019/04/dell-emc-vxblock-1000-100794721-large.jpg
-[2]: https://www.networkworld.com/article/3391071/dell-emc-and-cisco-renew-converged-infrastructure-alliance.html
-[3]: https://www.ciscolive.com/global/
-[4]: https://www.networkworld.com/article/3402079/cisco-offers-cloud-based-security-for-sd-wan-resources.html
-[5]: https://www.networkworld.com/article/3401523/cisco-software-to-make-networks-smarter-safer-more-manageable.html
-[6]: https://www.networkworld.com/article/3401524/cisco-launches-a-developer-community-cert-program.html
-[7]: https://www.facebook.com/NetworkWorld/
-[8]: https://www.linkedin.com/company/network-world
diff --git a/sources/talk/20190613 Oracle updates Exadata at long last with AI and machine learning abilities.md b/sources/talk/20190613 Oracle updates Exadata at long last with AI and machine learning abilities.md
deleted file mode 100644
index 280cfd1a4a..0000000000
--- a/sources/talk/20190613 Oracle updates Exadata at long last with AI and machine learning abilities.md
+++ /dev/null
@@ -1,68 +0,0 @@
-[#]: collector: (lujun9972)
-[#]: translator: ( )
-[#]: reviewer: ( )
-[#]: publisher: ( )
-[#]: url: ( )
-[#]: subject: (Oracle updates Exadata at long last with AI and machine learning abilities)
-[#]: via: (https://www.networkworld.com/article/3402559/oracle-updates-exadata-at-long-last-with-ai-and-machine-learning-abilities.html)
-[#]: author: (Andy Patrizio https://www.networkworld.com/author/Andy-Patrizio/)
-
-Oracle updates Exadata at long last with AI and machine learning abilities
-======
-Oracle is updating the Oracle Exadata Database Machine X8 server line to include artificial intelligence (AI) and machine learning capabilities, plus support for hybrid cloud.
-![Magdalena Petrova][1]
-
-After a rather [long period of silence][2], Oracle announced an update to its server line, the Oracle Exadata Database Machine X8, which features hardware and software enhancements that include artificial intelligence (AI) and machine learning capabilities, as well as support for hybrid cloud.
-
-Oracle acquired a hardware business nine years ago with the purchase of Sun Microsystems. It steadily whittled down the offerings, getting out of the commodity hardware business in favor of high-end mission-critical hardware. Whereas the Exalogic line is more of a general-purpose appliance running Oracle’s own version of Linux, Exadata is a purpose-built database server, and they really made some upgrades.
-
-The Exadata X8 comes with the latest Intel Xeon Scalable processors and PCIe NVMe flash technology to drive performance improvements: Oracle promises a 60% increase in I/O throughput for all-Flash storage and a 25% increase in IOPS per storage server compared with the Exadata X7. The X8 offers a 60% performance improvement over the previous generation for analytics with up to 560GB per second throughput. It can scan a 1TB table in under two seconds.
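The table-scan claim follows directly from the quoted throughput figure; a quick arithmetic check (assuming decimal units, i.e. 1TB = 1,000GB):

```python
# Back-of-the-envelope check of the quoted Exadata X8 figures.
throughput_gb_s = 560            # claimed scan throughput, GB per second
table_gb = 1000                  # a 1TB table, in decimal units

scan_seconds = table_gb / throughput_gb_s
print(f"{scan_seconds:.2f} s")   # ~1.79 s, i.e. "under two seconds"
```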
-
-**[ Also read: [What is quantum computing (and why enterprises should care)][3] ]**
-
-The company also enhanced the storage server to offload Oracle Database processing, and the X8 features 60% more cores and 40% higher-capacity disk drives than the X7.
-
-But the real enhancements come on the software side. With Exadata X8, Oracle introduces new machine-learning capabilities, such as Automatic Indexing, which continuously learns and tunes the database as usage patterns change. The indexing technology originated with the Oracle Autonomous Database, the cloud-based software designed to automate management of Oracle databases.
-
-And no, MySQL is not included in the stack. This is for Oracle databases only.
-
-“We’re taking code from Autonomous Database and making it available on prem for our customers,” said Steve Zivanic, vice president for converged infrastructure at Oracle’s Cloud Business Group. “That enables companies rather than doing manual indexing for various Oracle databases to automate it with machine learning.”
-
-In one test, Oracle took a 15-year-old NetSuite database with over 9,000 indexes built up over the lifespan of the database, and in 24 hours its AI indexer rebuilt the indexes with just 6,000, reducing storage space and greatly increasing performance of the database, since the number of indexes to search was smaller.
-
-### Performance improvements with Exadata
-
-Zivanic cited several examples of server consolidation done with Exadata but would not identify companies by name. He told of a large healthcare company that achieved a 10-fold performance improvement over IBM Power servers and consolidated 600 Power servers with 50 Exadata systems.
-
-A financial services company replaced 4,000 Dell servers running Red Hat Linux and VMware with 100 Exadata systems running 6,000 production Oracle databases. Not only did it reduce its power footprint, but patching was down 99%. An unnamed retailer with 28 racks of hardware from five vendors went from installing 1,400 patches per year to 16 patches on four Exadata racks.
-
-Because Oracle owns the entire stack, from hardware to OS to middleware and database, Exadata can roll all of its patch components – 640 in all – into a single bundle.
-
-“The trend we’ve noticed is you see these [IT hardware] companies who try to maintain an erector set mentality,” said Zivanic. “And you have people saying why are we trying to build pods? Why don’t we buy finished goods and focus on our core competency rather than build erector sets?”
-
-### Oracle Zero Data Loss Recovery Appliance X8 now available
-
-Oracle also announced the availability of the Oracle Zero Data Loss Recovery Appliance X8, its database backup appliance, which offers up to 10 times faster data recovery of an Oracle Database than conventional data deduplication appliances while providing sub-second recoverability of all transactions.
-
-The new Oracle Recovery Appliance X8 now features 30% larger capacity, nearly a petabyte in a single rack, for the same price, Oracle says.
-
-Join the Network World communities on [Facebook][4] and [LinkedIn][5] to comment on topics that are top of mind.
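Oracle has not published the Automatic Indexing algorithm here, so the following is only a toy illustration of the general idea behind usage-driven index tuning (my sketch, not Oracle's method): watch which columns actually appear in query predicates, flag hot unindexed columns as candidates, and flag indexes that usage no longer justifies.

```python
# Toy illustration of usage-driven index tuning -- NOT Oracle's algorithm.
from collections import Counter

def index_review(predicate_log, existing_indexes, hot_threshold=100):
    """predicate_log: iterable of column names seen in WHERE clauses."""
    usage = Counter(predicate_log)
    candidates = {col for col, n in usage.items()
                  if n >= hot_threshold and col not in existing_indexes}
    unused = {idx for idx in existing_indexes if usage[idx] == 0}
    return candidates, unused

# Example: one heavily queried, unindexed column; one index never touched.
log = ["customer_id"] * 500 + ["order_date"] * 20
create, drop = index_review(log, existing_indexes={"legacy_code", "order_date"})
print(create)  # {'customer_id'}
print(drop)    # {'legacy_code'}
```

Retiring unused indexes is what produced the storage and performance win in the NetSuite anecdote above: fewer indexes means less to store and less to maintain on every write.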
-
--------------------------------------------------------------------------------
-
-via: https://www.networkworld.com/article/3402559/oracle-updates-exadata-at-long-last-with-ai-and-machine-learning-abilities.html
-
-作者:[Andy Patrizio][a]
-选题:[lujun9972][b]
-译者:[译者ID](https://github.com/译者ID)
-校对:[校对者ID](https://github.com/校对者ID)
-
-本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
-
-[a]: https://www.networkworld.com/author/Andy-Patrizio/
-[b]: https://github.com/lujun9972
-[1]: https://images.techhive.com/images/article/2017/03/vid-still-79-of-82-100714308-large.jpg
-[2]: https://www.networkworld.com/article/3317564/is-oracles-silence-on-its-on-premises-servers-cause-for-concern.html
-[3]: https://www.networkworld.com/article/3275367/what-s-quantum-computing-and-why-enterprises-need-to-care.html
-[4]: https://www.facebook.com/NetworkWorld/
-[5]: https://www.linkedin.com/company/network-world
diff --git a/sources/talk/20190614 Report- Mirai tries to hook its tentacles into SD-WAN.md b/sources/talk/20190614 Report- Mirai tries to hook its tentacles into SD-WAN.md
deleted file mode 100644
index d4a3a9a927..0000000000
--- a/sources/talk/20190614 Report- Mirai tries to hook its tentacles into SD-WAN.md
+++ /dev/null
@@ -1,71 +0,0 @@
-[#]: collector: (lujun9972)
-[#]: translator: ( )
-[#]: reviewer: ( )
-[#]: publisher: ( )
-[#]: url: ( )
-[#]: subject: (Report: Mirai tries to hook its tentacles into SD-WAN)
-[#]: via: (https://www.networkworld.com/article/3403016/report-mirai-tries-to-hook-its-tentacles-into-sd-wan.html)
-[#]: author: (Jon Gold https://www.networkworld.com/author/Jon-Gold/)
-
-Report: Mirai tries to hook its tentacles into SD-WAN
-======
-
-Mirai – the software that has hijacked hundreds of thousands of internet-connected devices to launch massive DDoS attacks – now goes beyond recruiting just IoT products; it also includes code that seeks to exploit a vulnerability in corporate SD-WAN gear.
-
-That specific equipment – VMware’s SDX line of SD-WAN appliances – now has an updated software version that fixes the vulnerability, but by targeting it Mirai’s authors show that they now look beyond enlisting security cameras and set-top boxes and seek out any vulnerable connected devices, including enterprise networking gear.
-
-**More about SD-WAN**
-
- * [How to buy SD-WAN technology: Key questions to consider when selecting a supplier][1]
 * [How to pick an off-site data-backup method][2]
 * [SD-Branch: What it is and why you’ll need it][3]
 * [What are the options for security SD-WAN?][4]
-
-
-
-“I assume we’re going to see Mirai just collecting as many devices as it can,” said Jen Miller-Osborn, deputy director of threat research at Palo Alto Networks’ Unit 42, which recently issued [a report][5] about Mirai.
-
-### Exploiting SD-WAN gear is new
-
-While the exploit against the SD-WAN appliances was a departure for Mirai, it doesn’t represent a sea-change in the way its authors are approaching their work, according to Miller-Osborn.
-
-The idea, she said, is simply to add any devices to the botnet, regardless of what they are. The fact that SD-WAN devices were targeted is more about those particular devices having a vulnerability than anything to do with their SD-WAN capabilities.
-
-### Responsible disclosure headed off execution of exploits
-
-[The vulnerability][6] itself was discovered last year by independent researchers who responsibly disclosed it to VMware, which then fixed it in a later software version.
But the means to exploit the weakness nevertheless is included in a recently discovered new variant of Mirai, according to the Unit 42 report.
-
-The authors behind Mirai periodically update the software to add new targets to the list, according to Unit 42, and the bot herders’ original tactic of simply targeting devices running default credentials has given way to a strategy that also exploits vulnerabilities in a wide range of different devices. The updated variant of the malicious software includes a total of eight new-to-Mirai exploits.
-
-**[[Prepare to become a Certified Information Systems Security Professional with this comprehensive online course from PluralSight. Now offering a 10-day free trial!][7] ]**
-
-The remediated version of the VMware SD-WAN is SD-WAN Edge 3.1.2. The vulnerability still affects SD-WAN Edge 3.1.1 and earlier, [according to a VMware security advisory][8]. After the Unit 42 report came out, VMware posted [a blog][9] that says it is conducting its own investigation into the matter.
-
-Detecting whether a given SD-WAN implementation has been compromised depends heavily on the degree of monitoring in place on the network. Any products that give IT staff the ability to notice unusual traffic to or from an affected appliance could flag that activity. Otherwise, it could be difficult to tell if anything’s wrong, Miller-Osborn said. “You honestly might not notice it unless you start seeing a hit in performance or an outside actor notifies you about it.”
-
-Join the Network World communities on [Facebook][10] and [LinkedIn][11] to comment on topics that are top of mind.
-
--------------------------------------------------------------------------------
-
-via: https://www.networkworld.com/article/3403016/report-mirai-tries-to-hook-its-tentacles-into-sd-wan.html
-
-作者:[Jon Gold][a]
-选题:[lujun9972][b]
-译者:[译者ID](https://github.com/译者ID)
-校对:[校对者ID](https://github.com/校对者ID)
-
-本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出

-[a]: https://www.networkworld.com/author/Jon-Gold/
-[b]: https://github.com/lujun9972
-[1]: https://www.networkworld.com/article/3323407/sd-wan/how-to-buy-sd-wan-technology-key-questions-to-consider-when-selecting-a-supplier.html
-[2]: https://www.networkworld.com/article/3328488/backup-systems-and-services/how-to-pick-an-off-site-data-backup-method.html
-[3]: https://www.networkworld.com/article/3250664/lan-wan/sd-branch-what-it-is-and-why-youll-need-it.html
-[4]: https://www.networkworld.com/article/3285728/sd-wan/what-are-the-options-for-securing-sd-wan.html?nsdr=true
-[5]: https://unit42.paloaltonetworks.com/new-mirai-variant-adds-8-new-exploits-targets-additional-iot-devices/
-[6]: https://www.exploit-db.com/exploits/44959
-[7]: https://pluralsight.pxf.io/c/321564/424552/7490?u=https%3A%2F%2Fwww.pluralsight.com%2Fpaths%2Fcertified-information-systems-security-professional-cisspr
-[8]: https://www.vmware.com/security/advisories/VMSA-2018-0011.html
-[9]: https://blogs.vmware.com/security/2019/06/vmsa-2018-0011-revisited.html
-[10]: https://www.facebook.com/NetworkWorld/
-[11]: https://www.linkedin.com/company/network-world
diff --git a/sources/talk/20190614 Western Digital launches open-source zettabyte storage initiative.md b/sources/talk/20190614 Western Digital launches open-source zettabyte storage initiative.md
deleted file mode 100644
index 9c31358d47..0000000000
--- a/sources/talk/20190614 Western Digital launches open-source zettabyte storage initiative.md
+++ /dev/null
@@ -1,60 +0,0 @@
-[#]: collector: (lujun9972)
-[#]: translator: ( )
-[#]: reviewer: ( )
-[#]: publisher: ( )
-[#]: url: ( )
-[#]: subject: (Western Digital launches open-source zettabyte storage initiative)
-[#]: via: (https://www.networkworld.com/article/3402318/western-digital-launches-open-source-zettabyte-storage-initiative.html)
-[#]: author: (Andy Patrizio https://www.networkworld.com/author/Andy-Patrizio/)
-
-Western Digital launches open-source zettabyte storage initiative
-======
-Western Digital's Zoned Storage initiative leverages new technology to create more efficient zettabyte-scale data storage for data centers by improving how data is organized when it is stored.
-![monsitj / Getty Images][1]
-
-Western Digital has announced a project called the Zoned Storage initiative that leverages new technology to create more efficient zettabyte-scale data storage for data centers by improving how data is organized when it is stored.
-
-As part of this, the company also launched a [developer site][2] that will host open-source, standards-based tools and other resources.
-
-The Zoned Storage architecture is designed for Western Digital hardware and its shingled magnetic recording (SMR) HDDs, which hold up to 15TB of data, as well as the emerging zoned namespaces (ZNS) standard for NVMe SSDs, designed to deliver better endurance and predictability.
-
-**[ Now read: [What is quantum computing (and why enterprises should care)][3] ]**
-
-This initiative is not being retrofitted for non-SMR drives or non-NVMe SSDs. Western Digital estimates that by 2023 half of all its HDD shipments will be SMR. And that will be needed because IDC predicts data will be generated at a rate of 103 zettabytes a year by 2023.
-
-With this project Western Digital is targeting cloud and hyperscale providers and anyone building a large data center who has to manage a large amount of data, according to Eddie Ramirez, senior director of product marketing for Western Digital.
-
-Western Digital is changing how data is written and stored from the traditional random 4K block writes to large blocks of sequential data, like Big Data workloads and video streams, which are rapidly growing in size and use in the digital age.
-
-“We are now looking at a one-size-fits-all architecture that leaves a lot of TCO [total cost of ownership] benefits on the table if you design for a single architecture,” Ramirez said. “We are looking at workloads that don’t rely on small block randomization of data but large block sequential write in nature.”
-
-Because drives use 4K write blocks, that leads to overprovisioning of storage, especially around SSDs. This is true of consumer and enterprise SSDs alike. My 1TB SSD drive has only 930GB available. And that loss scales. An 8TB SSD has only 6.4TB available, according to Ramirez. SSDs also have to be built with DRAM for caching of small block random writes. You need about 1GB of DRAM per 1TB of NAND to act as a buffer, according to Ramirez.
-
-### The benefits of Zoned Storage
-
-Zoned Storage allows for 15-20% more storage on an HDD than the traditional storage mechanism. It eliminates the overprovisioning of SSDs, so you get all the NAND flash the drive has, and you need far fewer DRAM chips on an SSD. Additionally, Western Digital promises you will need up to one-eighth as much DRAM to act as a cache in future SSD drives, lowering the cost.
-
-Ramirez also said quality of service will improve: not that peak performance will necessarily be better, but the system will manage latency outliers better.
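The overhead Ramirez describes is easy to quantify from the numbers quoted above; a short sanity check (assuming decimal units throughout):

```python
# Back-of-the-envelope check using the figures quoted in the article.
def unusable_pct(advertised_tb: float, usable_tb: float) -> float:
    return (1 - usable_tb / advertised_tb) * 100

print(f"1TB SSD: {unusable_pct(1, 0.93):.0f}% unusable")   # ~7%
print(f"8TB SSD: {unusable_pct(8, 6.4):.0f}% unusable")    # ~20%

# DRAM rule of thumb from the article: ~1GB of DRAM per 1TB of NAND.
nand_tb = 8
print(f"{nand_tb}TB of NAND needs roughly {nand_tb}GB of DRAM as a buffer")
```

The jump from roughly 7% to roughly 20% lost capacity as drives grow is the scaling problem Zoned Storage is aimed at, along with the DRAM that grows linearly with NAND capacity.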
-
-Western Digital has not disclosed what, if any, pricing is associated with the project. It plans to work with the open-source community, customers, and industry players to help accelerate application development around Zoned Storage through its website.
-
-Join the Network World communities on [Facebook][4] and [LinkedIn][5] to comment on topics that are top of mind.
-
--------------------------------------------------------------------------------
-
-via: https://www.networkworld.com/article/3402318/western-digital-launches-open-source-zettabyte-storage-initiative.html
-
-作者:[Andy Patrizio][a]
-选题:[lujun9972][b]
-译者:[译者ID](https://github.com/译者ID)
-校对:[校对者ID](https://github.com/校对者ID)
-
-本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
-
-[a]: https://www.networkworld.com/author/Andy-Patrizio/
-[b]: https://github.com/lujun9972
-[1]: https://images.idgesg.net/images/article/2019/02/big_data_center_server_racks_storage_binary_analytics_by_monsitj_gettyimages-951389152_3x2-100787358-large.jpg
-[2]: http://ZonedStorage.io
-[3]: https://www.networkworld.com/article/3275367/what-s-quantum-computing-and-why-enterprises-need-to-care.html
-[4]: https://www.facebook.com/NetworkWorld/
-[5]: https://www.linkedin.com/company/network-world
diff --git a/sources/talk/20190619 Cisco connects with IBM in to simplify hybrid cloud deployment.md b/sources/talk/20190619 Cisco connects with IBM in to simplify hybrid cloud deployment.md
deleted file mode 100644
index b3344c5eb2..0000000000
--- a/sources/talk/20190619 Cisco connects with IBM in to simplify hybrid cloud deployment.md
+++ /dev/null
@@ -1,85 +0,0 @@
-[#]: collector: (lujun9972)
-[#]: translator: ( )
-[#]: reviewer: ( )
-[#]: publisher: ( )
-[#]: url: ( )
-[#]: subject: (Cisco connects with IBM in to simplify hybrid cloud deployment)
-[#]: via: (https://www.networkworld.com/article/3403363/cisco-connects-with-ibm-in-to-simplify-hybrid-cloud-deployment.html)
-[#]: author: (Michael Cooney https://www.networkworld.com/author/Michael-Cooney/)
-
-Cisco connects with IBM to simplify hybrid cloud deployment
-======
-Cisco and IBM are working to develop a hybrid-cloud architecture that melds Cisco’s data-center, networking and analytics platforms with IBM’s cloud offerings.
-![Ilze Lucero \(CC0\)][1]
-
-Cisco and IBM said the companies would meld their [data-center][2] and cloud technologies to help customers more easily and securely build and support on-premises and [hybrid-cloud][3] applications.
-
-Cisco, IBM Cloud and IBM Global Technology Services (the professional services business of IBM) said they will work to develop a hybrid-cloud architecture that melds Cisco’s data-center, networking and analytics platforms with IBM’s cloud offerings. IBM's contribution includes a heavy emphasis on Kubernetes-based offerings such as Cloud Foundry and Cloud Private as well as a catalog of [IBM enterprise software][4] such as WebSphere and open source software such as OpenWhisk, Knative, Istio and Prometheus.
-
-**[ Read also: [How to plan a software-defined data-center network][5] ]**
-
-Cisco said customers deploying its Virtual Application Centric Infrastructure (ACI) technologies can now extend that network fabric from on-premises to the IBM Cloud. ACI is Cisco’s [software-defined networking (SDN)][6] data-center package, but it also delivers the company’s Intent-Based Networking technology, which brings customers the ability to automatically implement network and policy changes on the fly and ensure data delivery.
-
-[IBM said Cisco ACI Virtual Pod][7] (vPOD) software can now run on IBM Cloud bare-metal servers. “vPOD consists of virtual spines and leafs and supports up to eight instances of ACI Virtual Edge. These elements are often deployed on VMware services on the IBM Cloud to support hybrid deployments from on-premises environments to the IBM Cloud,” the company stated.
-
-“Through a new relationship with IBM’s Global Technology Services team, customers can implement Virtual ACI on their IBM Cloud,” wrote Cisco’s Kaustubh Das, vice president of strategy and product development, in a [blog][8] about the agreement. “Virtual ACI is a software-only solution that you can deploy wherever you have at least two servers on which you can run the VMware ESXi hypervisor. In the future, the ability to deploy IBM Cloud Pak for Applications in a Cisco ACI environment will also be supported,” he stated.
-
-IBM’s prepackaged Cloud Paks include a secured Kubernetes container and containerized IBM middleware designed to let customers quickly spin up enterprise-ready containers, Big Blue said.
-
-Additionally, IBM said it would add support for its IBM Cloud Private, which manages Kubernetes and other containers, on Cisco HyperFlex and HyperFlex Edge hyperconverged infrastructure (HCI) systems. HyperFlex is Cisco's HCI offering, which combines computing, networking and storage resources in a single system. The package can be managed via Cisco’s Intersight software-as-a-service cloud management platform that offers a central dashboard of HyperFlex operations.
-
-IBM said it was adding HyperFlex support to its IBM Cloud Pak for Applications as well.
-
-The Paks include IBM Multicloud Manager, which is a Kubernetes-based platform that runs on the company’s [IBM Cloud Private][9] platform and lets customers manage and integrate workloads on clouds from other providers such as Amazon, Red Hat and Microsoft.
-
-At the heart of Multicloud Manager is a dashboard interface for managing thousands of Kubernetes applications and huge volumes of data regardless of where in the organization they are located.
-
-The idea is that Multicloud Manager lets operations and development teams get visibility of Kubernetes applications and components across the different clouds and clusters via a single control pane.
-
-“With IBM Multicloud Manager, enterprises can have a single place to manage multiple clusters running across multiple on-premises, public and private cloud environments, providing consistent visibility, governance and automation from on-premises to the edge,” wrote IBM’s Evaristus Mainsah, general manager of IBM Cloud Private Ecosystem, in a [blog][7] about the relationship.
-
-Distributed workloads can be pushed out and managed directly at the device at a much larger scale across multiple public clouds and on-premises locations. Visibility, compliance and governance are provided with extended MCM capabilities that will be available at the lightweight device layer, with a connection back to the central server/gateway, Mainsah stated.
-
-In addition, Cisco’s AppDynamics can be tied in to monitor infrastructure and business performance, Cisco stated. Cisco recently added [AppDynamics for Kubernetes][10], which Cisco said will reduce the time it takes to identify and troubleshoot performance issues across Kubernetes clusters.
-
-The companies said the hybrid-cloud architecture they envision will help reduce the complexity of setting up and managing hybrid-cloud environments.
-
-Cisco and IBM are both aggressively pursuing cloud customers.
Cisco [ramped up][11] its own cloud presence in 2018 with all manner of support stemming from an [agreement with Amazon Web Services][12] (AWS) that will offer enterprise customers an integrated platform to help them more simply build, secure and connect [Kubernetes][13] clusters across private [data centers][14] and the AWS cloud.
-
-Cisco and Google in [April expanded their joint cloud-development][15] activities to help customers more easily build secure multicloud and hybrid applications everywhere from on-premises data centers to public clouds.
-
-IBM is waiting to close [its $34 billion Red Hat deal][16], which it expects will give it a huge presence in the hotly contested hybrid-cloud arena and help it make inroads against competitors – Google, Amazon and Microsoft, among others. Gartner says that market will be worth $240 billion by next year.
-
-Join the Network World communities on [Facebook][17] and [LinkedIn][18] to comment on topics that are top of mind.
-
--------------------------------------------------------------------------------
-
-via: https://www.networkworld.com/article/3403363/cisco-connects-with-ibm-in-to-simplify-hybrid-cloud-deployment.html
-
-作者:[Michael Cooney][a]
-选题:[lujun9972][b]
-译者:[译者ID](https://github.com/译者ID)
-校对:[校对者ID](https://github.com/校对者ID)
-
-本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
-
-[a]: https://www.networkworld.com/author/Michael-Cooney/
-[b]: https://github.com/lujun9972
-[1]: https://images.idgesg.net/images/article/2018/03/cubes_blocks_squares_containers_ilze_lucero_cc0_via_unsplash_1200x800-100752172-large.jpg
-[2]: https://www.networkworld.com/article/3223692/what-is-a-data-centerhow-its-changed-and-what-you-need-to-know.html
-[3]: https://www.networkworld.com/article/3233132/what-is-hybrid-cloud-computing.html
-[4]: https://www.networkworld.com/article/3340043/ibm-marries-on-premises-private-and-public-cloud-data.html
-[5]: https://www.networkworld.com/article/3284352/data-center/how-to-plan-a-software-defined-data-center-network.html
-[6]: https://www.networkworld.com/article/3209131/what-sdn-is-and-where-its-going.html
-[7]: https://www.ibm.com/blogs/cloud-computing/2019/06/18/ibm-cisco-collaborating-hybrid-cloud-modern-enterprise/
-[8]: https://blogs.cisco.com/datacenter/cisco-and-ibm-cloud-announce-hybrid-cloud-partnership
-[9]: https://www.ibm.com/cloud/private
-[10]: https://blog.appdynamics.com/product/kubernetes-monitoring-with-appdynamics/
-[11]: https://www.networkworld.com/article/3322937/lan-wan/what-will-be-hot-for-cisco-in-2019.html?nsdr=true
-[12]: https://www.networkworld.com/article/3319782/cloud-computing/cisco-aws-marriage-simplifies-hybrid-cloud-app-development.html?nsdr=true
-[13]: https://www.networkworld.com/article/3269848/cloud-computing/cisco-embraces-kubernetes-pushing-container-software-into-mainstream.html
-[14]: https://www.networkworld.com/article/3223692/data-center/what-is-a-data-centerhow-its-changed-and-what-you-need-to-know.html
-[15]: https://www.networkworld.com/article/3388218/cisco-google-reenergize-multicloudhybrid-cloud-joint-development.html
-[16]: https://www.networkworld.com/article/3316960/ibm-says-buying-red-hat-makes-it-the-biggest-in-hybrid-cloud.html
-[17]: https://www.facebook.com/NetworkWorld/
-[18]: https://www.linkedin.com/company/network-world
diff --git a/sources/talk/20190619 Cisco issues critical security warnings on SD-WAN, DNA Center.md b/sources/talk/20190619 Cisco issues critical security warnings on SD-WAN, DNA Center.md
deleted file mode 100644
index 187e883706..0000000000
--- a/sources/talk/20190619 Cisco issues critical security warnings on SD-WAN, DNA Center.md
+++ /dev/null
@@ -1,111 +0,0 @@
-[#]: collector: (lujun9972)
-[#]: translator: ( )
-[#]: reviewer: ( )
-[#]: publisher: ( )
-[#]: url: ( )
-[#]: subject: (Cisco issues critical security warnings on SD-WAN, DNA Center)
-[#]: via: (https://www.networkworld.com/article/3403349/cisco-issues-critical-security-warnings-on-sd-wan-dna-center.html)
-[#]: author: (Michael Cooney https://www.networkworld.com/author/Michael-Cooney/)
-
-Cisco issues critical security warnings on SD-WAN, DNA Center
-======
-Vulnerabilities in Cisco's SD-WAN and DNA Center software top a list of nearly 30 security advisories issued by the company.
-![zajcsik \(CC0\)][1]
-
-Cisco has released two critical warnings about security issues with its SD-WAN and DNA Center software packages.
-
-The worst, with a Common Vulnerability Scoring System (CVSS) rating of 9.3 out of 10, is a vulnerability in its [Digital Network Architecture][2] (DNA) Center software that could let an unauthenticated attacker connect an unauthorized network device to the subnet designated for cluster services.
-
-**More about SD-WAN**
-
- * [How to buy SD-WAN technology: Key questions to consider when selecting a supplier][3]
 * [How to pick an off-site data-backup method][4]
 * [SD-Branch: What it is and why you’ll need it][5]
 * [What are the options for security SD-WAN?][6]
-
-
-
-A successful exploit could let an attacker reach internal services that are not hardened for external access, Cisco [stated][7]. The vulnerability is due to insufficient access restriction on ports necessary for system operation, and the company discovered the issue during internal security testing, Cisco stated.
-
-Cisco DNA Center gives IT teams the ability to control access through policies using Software-Defined Access, automatically provision through Cisco DNA Automation, virtualize devices through Cisco Network Functions Virtualization (NFV), and lower security risks through segmentation and Encrypted Traffic Analysis.
-
-This vulnerability affects Cisco DNA Center Software releases prior to 1.3, and it is fixed in version 1.3 and releases after that.
-
-Cisco wrote that system updates are available from the Cisco cloud but not from the [Software Center][8] on Cisco.com. To upgrade to a fixed release of Cisco DNA Center Software, administrators can use the “System Updates” feature of the software.
-
-A second critical warning – with a CVSS score of 7.8 – is a weakness in the command-line interface of the Cisco SD-WAN Solution that could let an authenticated local attacker elevate lower-level privileges to the root user on an affected device.
-
-Cisco [wrote][9] that the vulnerability is due to insufficient authorization enforcement. An attacker could exploit this vulnerability by authenticating to the targeted device and executing commands that could lead to elevated privileges. A successful exploit could let the attacker make configuration changes to the system as the root user, the company stated.
- -This vulnerability affects a range of Cisco products running a release of the Cisco SD-WAN Solution prior to Releases 18.3.6, 18.4.1, and 19.1.0 including: - - * vBond Orchestrator Software - * vEdge 100 Series Routers - * vEdge 1000 Series Routers - * vEdge 2000 Series Routers - * vEdge 5000 Series Routers - * vEdge Cloud Router Platform - * vManage Network Management Software - * vSmart Controller Software - - - -Cisco said it has released free [software updates][10] that address the vulnerability described in this advisory. Cisco wrote that it fixed this vulnerability in Release 18.4.1 of the Cisco SD-WAN Solution. - -The two critical warnings were included in a dump of [nearly 30 security advisories][11]. - -There were two other “High” impact rated warnings involving the SD-WAN software. - -One, a vulnerability in the vManage web-based UI (Web UI) of the Cisco SD-WAN Solution could let an authenticated, remote attacker gain elevated privileges on an affected vManage device, Cisco [wrote][12]. - -The vulnerability is due to a failure to properly authorize certain user actions in the device configuration. An attacker could exploit this vulnerability by logging in to the vManage Web UI and sending crafted HTTP requests to vManage. A successful exploit could let attackers gain elevated privileges and make changes to the configuration that they would not normally be authorized to make, Cisco stated. - -Another vulnerability in the vManage web-based UI could let an authenticated, remote attacker inject arbitrary commands that are executed with root privileges. - -This exposure is due to insufficient input validation, Cisco [wrote][13]. An attacker could exploit this vulnerability by authenticating to the device and submitting crafted input to the vManage Web UI. - -Both vulnerabilities affect Cisco vManage Network Management Software that is running a release of the Cisco SD-WAN Solution prior to Release 18.4.0 and Cisco has released free [software updates][10] to correct them. - -Other high-rated vulnerabilities Cisco disclosed included: - - * A [vulnerability][14] in the Cisco Discovery Protocol (CDP) implementation for the Cisco TelePresence Codec (TC) and Collaboration Endpoint (CE) Software could allow an unauthenticated, adjacent attacker to inject arbitrary shell commands that are executed by the device. - * A [weakness][15] in the internal packet-processing functionality of the Cisco StarOS operating system running on virtual platforms could allow an unauthenticated, remote attacker to cause an affected device to stop processing traffic, resulting in a denial of service (DoS) condition. - * A [vulnerability][16] in the web-based management interface of the Cisco RV110W Wireless-N VPN Firewall, Cisco RV130W Wireless-N Multifunction VPN Router, and Cisco RV215W Wireless-N VPN Router could allow an unauthenticated, remote attacker to cause a reload of an affected device, resulting in a denial of service (DoS) condition. - - - -Cisco has [released software][10] fixes for those advisories as well. - -Join the Network World communities on [Facebook][17] and [LinkedIn][18] to comment on topics that are top of mind. 
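For reference, CVSS v3 defines standard qualitative bands for numeric scores, and vendors such as Cisco layer their own severity labels on top of them. A small sketch applying the standard bands to the 9.3 and 7.8 scores above:

```python
# Standard CVSS v3 qualitative severity bands.
def cvss_severity(score: float) -> str:
    if score == 0.0:
        return "None"
    if score <= 3.9:
        return "Low"
    if score <= 6.9:
        return "Medium"
    if score <= 8.9:
        return "High"
    return "Critical"

for s in (9.3, 7.8):
    print(s, cvss_severity(s))   # 9.3 -> Critical, 7.8 -> High
```

Under these bands 7.8 falls in the High range, which is a reminder that a vendor's "critical warning" label and the numeric CVSS band are related but not identical scales.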
- --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3403349/cisco-issues-critical-security-warnings-on-sd-wan-dna-center.html - -作者:[Michael Cooney][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Michael-Cooney/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2018/04/lightning_storm_night_gyorgy_karoly_toth_aka_zajcsik_cc0_via_pixabay_1200x800-100754504-large.jpg -[2]: https://www.networkworld.com/article/3401523/cisco-software-to-make-networks-smarter-safer-more-manageable.html -[3]: https://www.networkworld.com/article/3323407/sd-wan/how-to-buy-sd-wan-technology-key-questions-to-consider-when-selecting-a-supplier.html -[4]: https://www.networkworld.com/article/3328488/backup-systems-and-services/how-to-pick-an-off-site-data-backup-method.html -[5]: https://www.networkworld.com/article/3250664/lan-wan/sd-branch-what-it-is-and-why-youll-need-it.html -[6]: https://www.networkworld.com/article/3285728/sd-wan/what-are-the-options-for-securing-sd-wan.html?nsdr=true -[7]: https://tools.cisco.com/security/center/content/CiscoSecurityAdvisory/cisco-sa-20190619-dnac-bypass -[8]: https://software.cisco.com/download/home -[9]: https://tools.cisco.com/security/center/content/CiscoSecurityAdvisory/cisco-sa-20190619-sdwan-privesca -[10]: https://tools.cisco.com/security/center/resources/security_vulnerability_policy.html#fixes -[11]: https://tools.cisco.com/security/center/publicationListing.x?product=Cisco&sort=-day_sir&limit=50#~Vulnerabilities -[12]: https://tools.cisco.com/security/center/content/CiscoSecurityAdvisory/cisco-sa-20190619-sdwan-privilescal -[13]: https://tools.cisco.com/security/center/content/CiscoSecurityAdvisory/cisco-sa-20190619-sdwan-cmdinj -[14]: https://tools.cisco.com/security/center/content/CiscoSecurityAdvisory/cisco-sa-20190619-tele-shell-inj -[15]: https://tools.cisco.com/security/center/content/CiscoSecurityAdvisory/cisco-sa-20190619-staros-asr-dos -[16]: https://tools.cisco.com/security/center/content/CiscoSecurityAdvisory/cisco-sa-20190619-rvrouters-dos -[17]: https://www.facebook.com/NetworkWorld/ -[18]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190625 You Can-t Afford Not to Use a Business-Driven SD-WAN.md b/sources/talk/20190625 You Can-t Afford Not to Use a Business-Driven SD-WAN.md deleted file mode 100644 index eb2dc6f18e..0000000000 --- a/sources/talk/20190625 You Can-t Afford Not to Use a Business-Driven SD-WAN.md +++ /dev/null @@ -1,78 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (You Can’t Afford Not to Use a Business-Driven SD-WAN) -[#]: via: (https://www.networkworld.com/article/3404618/you-can-t-afford-not-to-use-a-business-driven-sd-wan.html) -[#]: author: (Rami Rammaha https://www.networkworld.com/author/Rami-Rammaha/) - -You Can’t Afford Not to Use a Business-Driven SD-WAN -====== - -![maxkabakov/istock][1] - -Digital transformation and cloud initiatives are changing the way IT organizations are thinking about and architecting the wide area network. It is estimated that over 70 percent of applications have already moved to the cloud. 
Yet, the transformational promise of the cloud is falling short as conventional networks can’t keep pace with the demands of the cloud. Why? Because today’s router-centric and basic [SD-WAN][2] architectures have either hit the wall or can’t keep up with traffic pattern shifts, distributed applications and the open security perimeters inherent to the cloud. This blog will explore the limitations of today’s WAN approaches, offering a better way forward with a business-first networking model. - -### **Traditional Router-centric WAN** - -The traditional router-centric model is network-driven, where businesses are forced to conform to the constraints of the network. Enterprises struggle trying to stretch the old router-centric WAN – it’s too cumbersome and complicated and is simply unable to meet the business needs of a cloud-first enterprise. Cloud-first enterprise business requirements include: - - * Using the internet to connect users directly to cloud applications - * Delivering new applications to thousands of sites, across multiple clouds, in 10 percent of the time - * Delivering 10x more bandwidth at the edge, for the same budget - * Protecting the business when the cloud is open, accessible and everything is connected - * Continuously delivering a WOW application performance for every business-critical application - - - -![][3] - -### **Basic SD-WAN Solutions** - -To address the requirements of cloud-first businesses, a plethora of SD-WAN solutions have emerged in the past few years. Basic SD-WAN solutions are a step in the right direction but fall well short of the goal of a fully automated business-driven network. A basic SD-WAN provides some level of automation and intelligence, but it is unable to continuously and automatically adapt to changing network conditions. A basic SD-WAN solution can’t deliver a consistent WOW experience for real-time voice and video applications, especially over broadband. Furthermore, with a basic SD-WAN, IT is unable to deliver simplified end-to-end secure segmentation across the LAN-WAN-LAN/Data Center to minimize the attack surface.  A basic SD-WAN also won’t deliver on the promised savings in operational costs. The graphic below illustrates the shortfalls of a basic SD-WAN offering.  - -![][4] - -### **The Time is Now to Shift to a Business-driven SD-WAN** - -With a [business-driven SD-WAN][5], the network becomes a business enabler, not a constraint. It acts as a business accelerant with a top-down approach that starts with business intent. Business intent defines how applications should be delivered to end users. Business intent can include performance, priority, security, resiliency, routing, and other policies that should be applied to different classes of applications. With a business-driven SD-WAN, network resources are matched – automatically – based on the business priority and security requirements for every application. The network continuously monitors the performance of applications and transport resources and automatically adapts to any changes to remain in compliance with application QoS and security policies. A business-driven SD-WAN delivers the highest quality of experience for users with consistent, reliable application performance – including the highest quality voice and video over broadband.  - -The highest quality of experience doesn’t stop with users. With [centralized orchestration][6], a business-driven SD-WAN minimizes human error, makes changes easier and enables faster response to business needs.
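To make the business-intent idea concrete, the toy sketch below maps application classes to delivery policies the way an orchestrator might. The classes, policy fields, and values are purely illustrative assumptions, not Silver Peak's actual schema:

```python
# Illustrative only: a toy "business intent" table in the spirit described
# above. The application classes, policy fields, and values are assumptions,
# not any vendor's actual configuration schema.
INTENT = {
    "voice":  {"priority": "high",   "path": "lowest-latency",  "encrypt": True},
    "erp":    {"priority": "high",   "path": "mpls-preferred",  "encrypt": True},
    "saas":   {"priority": "medium", "path": "internet-direct", "encrypt": True},
    "backup": {"priority": "low",    "path": "any-available",   "encrypt": True},
}

DEFAULT = {"priority": "low", "path": "any-available", "encrypt": True}

def policy_for(app_class: str) -> dict:
    """Return the delivery policy to apply for a class of application."""
    return INTENT.get(app_class, DEFAULT)

print(policy_for("voice"))    # routed for low latency, high priority
print(policy_for("unknown"))  # falls back to the default policy
```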
A business-driven SD-WAN goes beyond the automation and templates of basic SD-WAN solutions to power a self-driving wide area network™ that learns and adapts to the changing requirements of the business to ensure the highest levels of end user and application performance. It eliminates the impact of brownouts and blackouts as monitoring and analytics detect changing conditions and trigger immediate adjustments. Built-in monitoring, alarms/alerts and reporting enable faster troubleshooting when issues occur. With a highly available, resilient, business-driven SD-WAN, IT can reclaim their weekends and sleep through the night!  A unified platform is designed as one system, unifying and orchestrating network functions such as SD-WAN, firewall, segmentation, routing, WAN optimization, and application visibility and control based on business requirements. With service chaining to ecosystem partners (security, cloud and service providers), existing investments can be fully leveraged, with rapid deployment and interoperation through full and open APIs.     - -![][7] - -In this table, a comparison of router-centric, basic SD-WAN and business-driven SD-WAN shows that enterprises get the most value and benefits from shifting to a business-first networking model.  - -![Full Harvey Ball: Most; Empty Harvey Ball: Least][8] - -Click on the [infographic][9] for a full summary of the WAN edge architecture approaches. - -![][10] - -With an interactive ROI calculator, you can compare the savings of a business-driven SD-WAN from Silver Peak with those of a traditional router-centric WAN. Click [here][11] to calculate your savings right now. - --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3404618/you-can-t-afford-not-to-use-a-business-driven-sd-wan.html - -作者:[Rami Rammaha][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Rami-Rammaha/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2019/06/istock-1073941846-100800084-large.jpg -[2]: https://www.silver-peak.com/sd-wan/sd-wan-explained -[3]: https://images.idgesg.net/images/article/2019/06/illo_1-100800095-large.jpg -[4]: https://images.idgesg.net/images/article/2019/06/illo_2-100800097-large.jpg -[5]: https://www.silver-peak.com/products/unity-edge-connect -[6]: https://www.silver-peak.com/products/unity-orchestrator -[7]: https://images.idgesg.net/images/article/2019/06/illo_3-100800099-large.jpg -[8]: https://images.idgesg.net/images/article/2019/06/sd-wan-comparison-chart4-100800100-large.jpg -[9]: https://www.silver-peak.com/sites/default/files/infoctr/sd-wan-comparison-diagram-0119.pdf -[10]: https://images.idgesg.net/images/article/2019/06/acomparisonoftodayswanedgeapproaches-100800113-large.jpg -[11]: https://www.silver-peak.com/sd-wan-interactive-roi-calculator diff --git a/sources/talk/20190626 Juniper-s Mist adds WiFi 6, AI-based cloud services to enterprise edge.md b/sources/talk/20190626 Juniper-s Mist adds WiFi 6, AI-based cloud services to enterprise edge.md deleted file mode 100644 index 0548e968fe..0000000000 --- a/sources/talk/20190626 Juniper-s Mist adds WiFi 6, AI-based cloud services to enterprise edge.md +++ /dev/null @@ -1,94 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Juniper’s
Mist adds WiFi 6, AI-based cloud services to enterprise edge) -[#]: via: (https://www.networkworld.com/article/3405123/juniper-s-mist-adds-wifi-6-ai-based-cloud-services-to-enterprise-edge.html) -[#]: author: (Michael Cooney https://www.networkworld.com/author/Michael-Cooney/) - -Juniper’s Mist adds WiFi 6, AI-based cloud services to enterprise edge -====== -Mist, a Juniper Networks company, has rolled out an artificial-intelligence, cloud-based appliance and a WiFi 6 access point aimed at helping users roll out smart, high-density wireless networks. -![Getty Images][1] - -Mist, now a Juniper Networks company, has rolled out an artificial-intelligence, cloud-based appliance and a WiFi 6 access point that together aim at helping users deploy smart, high-density wireless networks. - -Leading the rollout is the Mist Edge appliance that extends Mist’s cloud services to the branch and lets enterprises manage the distributed Wi-Fi infrastructure from a central location.  - -**More about 802.11ax (Wi-Fi 6)** - - * [Why 802.11ax is the next big thing in wireless][2] - * [FAQ: 802.11ax Wi-Fi][3] - * [Wi-Fi 6 (802.11ax) is coming to a router near you][4] - * [Wi-Fi 6 with OFDMA opens a world of new wireless possibilities][5] - * [802.11ax preview: Access points and routers that support Wi-Fi 6 are on tap][6] - - - -The Mist Edge device features the company’s artificial-intelligence engine that helps automate tasks such as adjusting Wi-Fi signal strength and troubleshooting.  According to Mist, some other potential use cases for Mist Edge include: - - * Seamless roaming for large campus networks through on-premises tunnel termination of traffic to/from access points. - * Extending virtual LANs (VLANs) to distributed branches and telecommuters to replace remote virtual private network (VPN) technology. - * Dynamic traffic segmentation for IoT devices. - * Split tunneling to keep guest access and corporate traffic separate. - - - -The company says a software-only version of Mist Edge will be available in the future.  - -[Mist’s][7] strength is its AI-based wireless platform, which makes Wi-Fi more predictable, reliable and measurable. Mist is also unique in how it has delivered applications via cloud microservices and containers, which could be attractive to enterprise users looking to reduce wireless operational costs, experts say.  - -Mist’s cloud-based system brings patented dynamic packet capture and machine learning technology to automatically identify, adapt and fix network issues, Gartner wrote in a recent Magic Quadrant report. The Mist system is delivered and managed via cloud services. - -“Mist's AI-driven Wi-Fi provides guest access, network management, policy applications and a virtual network assistant as well as analytics, IoT segmentation, and behavioral analysis at scale,” Gartner stated.  “Mist offers a new and unique approach to high-accuracy location services through a cloud-based machine-learning engine that uses Wi-Fi and Bluetooth Low Energy (BLE)-based signals from its multielement directional-antenna access points. The same platform can be used for Real Time Location System (RTLS) usage scenarios, static or zonal applications, and engagement use cases like wayfinding and proximity notifications.” - -Juniper bought Mist in March for $405 million for this AI-based WiFi technology.  For Juniper, the Mist buy was significant as it had depended on agreements with partners such as Aerohive and Aruba to deliver wireless, according to Gartner.
- -Mist, too, has partners and recently announced joint product development with VMware that integrates Mist WLAN technology and VMware’s VeloCloud-based NSX SD-WAN. - -“Mist has focused on large enterprises and has won some very well known brands,” said Chris Depuy, technology analyst with the 650 Group.  “The [Mist/Juniper] combination is a good fit because both product lines are focusing on larger enterprises and over time, we expect Mist AI will be used to benefit the entire Juniper campus portfolio.” - -The other part of the company’s rollout is a WiFi 6 (802.11ax) access point, the Mist AP43, a cloud-managed WiFi 6 access point with integrated support for Mist’s AI automation and manageability. - -“The new access point gets Juniper to 802.11ax on the same time frame as other major competitors like Cisco,” said Depuy. “Juniper could not address customers who were upgrading wireless and wired at the same time without Mist. With 802.11ax, we expect new switches to be necessary because 1 GB isn’t fast enough to support these new APs. Thus, Juniper can now upgrade customers to 802.11ax and MultiGig switches instead of bringing in another vendor. “ - -WiFi 6 is designed for high-density public or private environments. But it also will be beneficial in internet of things (IoT) deployments, and in offices that use bandwidth-hogging applications like videoconferencing. Products promising WiFi 6 support have been rolling out across the industry with [HPE][8], [Cisco][9], [Arista][10] and others recently tossing their hats into the ring. - -The enterprise WLAN is now dominated by the 802.11ac standard, which makes up 86.4% of dependent access point (AP) shipments and 93.1% of enterprise WLAN dependent AP revenues. The next iteration of the standard, 802.11ax or WiFi 6, will increase in the market throughout the rest of 2019 and into 2020. In the consumer WLAN market, the 802.11ac standard accounted for 58.0% of shipments and 79.2% of revenue in 1Q19, according to IDC’s most recent [Worldwide Quarterly WLAN Tracker][11] report. - -"The WLAN market continues to see steady, moderate growth as enterprises invest in wireless connectivity to support the continued demand for access technology," said [Brandon Butler][12], senior research analyst, Network Infrastructure at IDC in the report. "Meanwhile, the coming Wi-Fi 6 standard will be a major driver of growth in the WLAN market in the coming years, especially in the advanced enterprise segments of the market." - -The AP43 lists at $1,585. - -Mist also announced a strategic relationship with ForeScout to automate management and security control of Wi-Fi client and Internet of Things (IoT) devices.  The Juniper and ForeScout mashup lets customers monitor and profile devices and mobile clients including smartphones, tablets, laptops, robots and IoT devices (HVAC systems, security devices, displays, sensors, lights) based on their network traffic patterns. Then, if anomalous or threatening behavior is observed, customers can launch trouble tickets, remediate software on devices as needed or quarantine devices. - -Join the Network World communities on [Facebook][13] and [LinkedIn][14] to comment on topics that are top of mind.
- -------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3405123/juniper-s-mist-adds-wifi-6-ai-based-cloud-services-to-enterprise-edge.html - -作者:[Michael Cooney][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Michael-Cooney/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2019/02/wifi_cloud_wireless-100787113-large.jpg -[2]: https://www.networkworld.com/article/3215907/mobile-wireless/why-80211ax-is-the-next-big-thing-in-wi-fi.html -[3]: https://www.networkworld.com/article/3048196/mobile-wireless/faq-802-11ax-wi-fi.html -[4]: https://www.networkworld.com/article/3311921/mobile-wireless/wi-fi-6-is-coming-to-a-router-near-you.html -[5]: https://www.networkworld.com/article/3332018/wi-fi/wi-fi-6-with-ofdma-opens-a-world-of-new-wireless-possibilities.html -[6]: https://www.networkworld.com/article/3309439/mobile-wireless/80211ax-preview-access-points-and-routers-that-support-the-wi-fi-6-protocol-on-tap.html -[7]: https://www.networkworld.com/article/3089038/why-one-cisco-shop-is-willing-to-give-wifi-startup-mist-a-shot.html -[8]: https://www.arubanetworks.com/products/networking/802-11ax/ -[9]: https://www.networkworld.com/article/3391919/cisco-goes-all-in-on-wifi-6.html -[10]: https://www.networkworld.com/article/3400905/new-switches-wi-fi-gear-to-advance-aristas-campus-architecture.html -[11]: http://www.idc.com/tracker/showproductinfo.jsp?prod_id=262 -[12]: https://www.idc.com/getdoc.jsp?containerId=PRF005027 -[13]: https://www.facebook.com/NetworkWorld/ -[14]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190701 Tempered Networks simplifies secure network connectivity and microsegmentation.md b/sources/talk/20190701 Tempered Networks simplifies secure network connectivity and microsegmentation.md deleted file mode 100644 index 5cf40e865d..0000000000 --- a/sources/talk/20190701 Tempered Networks simplifies secure network connectivity and microsegmentation.md +++ /dev/null @@ -1,99 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Tempered Networks simplifies secure network connectivity and microsegmentation) -[#]: via: (https://www.networkworld.com/article/3405853/tempered-networks-simplifies-secure-network-connectivity-and-microsegmentation.html) -[#]: author: (Linda Musthaler https://www.networkworld.com/author/Linda-Musthaler/) - -Tempered Networks simplifies secure network connectivity and microsegmentation -====== -Tempered Networks’ Identity Defined Network platform uses the Host Identity Protocol to partition and isolate the network into trusted microsegments, providing an easy and cost-effective way to secure the network. -![Thinkstock][1] - -The TCP/IP protocol is the foundation of the internet and pretty much every single network out there. The protocol was designed 45 years ago and was originally created only for connectivity. There’s nothing in the protocol for security, mobility, or trusted authentication. - -The fundamental problem with TCP/IP is that the IP address within the protocol represents both the device location and the device identity on a network.
This dual functionality of the address lacks the basic mechanisms for security and mobility of devices on a network. - -This is one of the reasons networks are so complicated today. To connect to things on a network or over the internet, you need VPNs, firewalls, routers, cell modems, etc., and you have all the configurations that come with ACLs, VLANs, certificates, and so on. The nightmare grows exponentially when you factor in internet of things (IoT) device connectivity and security. It’s all unsustainable at scale. - -Clearly, we need a more efficient and effective way to take on network connectivity, mobility, and security. - -**[ Also read: [What is microsegmentation? How getting granular improves network security][2] | Get regularly scheduled insights: [Sign up for Network World newsletters][3] ]** - -The Internet Engineering Task Force (IETF) tackled this problem with the Host Identity Protocol (HIP). It provides a method of separating the endpoint identifier and the locator roles of IP addresses. It introduces a new Host Identity (HI) name space, based on public keys, from which endpoint identifiers are taken. HIP uses existing IP addressing and forwarding for locators and packet delivery. The protocol is compatible with IPv4 and IPv6 applications and utilizes a customized IPsec tunnel mode for confidentiality, authentication, and integrity of network applications. - -Ratified by the IETF in 2015, HIP represents a new security networking layer within the OSI stack. Think of it as Layer 3.5. It’s a flip of the trust model where TCP/IP is inherently promiscuous and will answer to anything that wants to talk to a device on that network. In contrast, HIP is a trust protocol that will not answer to anything on the network unless that connection has been authenticated and authorized based on its cryptographic identity. It is, in effect, a form of a [software-defined perimeter][4] around specific network resources. This is also known as [microsegmentation][5]. - -![][6] - -### Tempered Networks’ IDN platform creates segmented, encrypted network - -[Tempered Networks][7] has created a platform utilizing the HIP and a variety of technologies that partitions and isolates the network into trusted microsegments. Tempered Networks’ Identity Defined Networking (IDN) platform is deployed as an overlay technology that layers on top of any IP network. The HIP was designed to be both forward and backward compatible with any IP network without having to make any changes to the underlay network. The overlay network creates a direct tunnel between the two things you want to connect. - -**[ [Prepare to become a Certified Information Security Systems Professional with this comprehensive online course from PluralSight. Now offering a 10-day free trial!][8] ]** - -The IDN platform uses three components to create a segmented and encrypted network: an orchestration engine called the Conductor, the HIPrelay identity-based router, and HIP Services enforcement points. - -The Conductor is a centralized orchestration and intelligence engine that connects, protects, and disconnects any resource globally through a single pane of glass. The Conductor is used to define and enforce policies for HIP Services. Policy configuration is done in a simple point-and-click manner. The Conductor is available as a physical or virtual appliance or in the Amazon Web Services (AWS) cloud.
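As a rough sketch of the identifier/locator split described above, the toy code below derives a stable host identity from a public key while letting the IP locator change freely. The class, hash choice, and tag format are simplified assumptions for illustration, not the wire format defined in the HIP RFCs:

```python
# A toy model of HIP's identifier/locator split -- simplified assumptions,
# not the actual HIP wire format. Identity comes from a public key; the IP
# locator can change without changing who the host is.
import hashlib

class Host:
    def __init__(self, public_key: bytes, locator: str):
        # Host Identity Tag: a fixed-size digest of the public key.
        self.hit = hashlib.sha256(public_key).hexdigest()[:32]
        self.locator = locator  # current IP address; free to change

    def move(self, new_locator: str) -> None:
        """Mobility: the locator changes, the identity does not."""
        self.locator = new_locator

peer = Host(public_key=b"...peer public key bytes...", locator="192.0.2.10")
trusted = {peer.hit}  # policy keyed on cryptographic identity, not IP

peer.move("198.51.100.7")   # the device roams to another network
assert peer.hit in trusted  # still authorized; no ACL rewrite needed
print(peer.hit, "now at", peer.locator)
```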
- -HIP Services provide software-based policy enforcement, enabling secure connectivity among IDN-protected devices, as well as cloaking, segmentation, identity-based routing, and IP mobility. They can be deployed on or in-line to any device or system and come in the form of HIPswitch hardware, HIPserver, HIPclient, Cloud HIPswitch, or Virtual HIPswitch. HIP Services also can be embedded in customer hardware or applications. - -Placing HIPswitches in front of any connected device renders the device HIP-enabled and immediately microsegments the traffic, isolating inbound and outbound traffic from the underlying network. HIPswitches deployed on the network automatically register with the Conductor using their cryptographic identity. - -HIPrelay works with the HIP Service-enabled endpoints to deliver peer-to-peer connectivity for any device or system across all networks and transport options. Rather than using Layer 3 or 4 rule sets or traditional routing protocols, HIPrelay routes and connects encrypted communications based on provable cryptographic identities traversing existing infrastructure. - -It sounds complicated, but it really isn’t. A use case example should demonstrate the ease and power of this solution. - -### Use case: Smart Ships - -An international cruise line recently installed Tempered Networks’ IDN solution to provide tighter security around its critical maritime systems. Prior to deployment, the systems for fuel, propulsion, navigation, ballast, weather, and incinerators were on a flat Layer 2 network, which basically allowed authorized users of the network to see everything. - -Given that vendors of the different maritime systems had access to their own system, the lack of microsegmentation allowed them to see the other systems as well. The cruise line needed a simple way to segment access to these different systems — isolating them from each other — and they wanted to do it without having to put the ships in dry dock for the network reconfiguration. - -The original configuration looked like this: - -![][9] - -The company implemented microsegmentation of the network based on the functionality of the systems. This isolated and segmented vendor access to only their own systems — everything else was hidden from them. The implementation involved installing HIPrelay identity routing in the cloud, several HIPswitch wireless devices onboard the ships, and HIPclient software on the vendors’ and crew members’ devices. The Conductor appliance that managed the entire deployment was installed in AWS. - -All of that was done without impacting the underlying network, and no dry dock time was required for the deployment. In addition, the cruise line was able to eliminate internal firewalls and VPNs that had previously been used for segmentation and remote access. The resulting configuration looks like this: - -![][10] - -The color coding of the illustration above indicates what systems are now able to directly see and communicate with their corresponding controllers and sensors. Everything else on the network is hidden from view of those systems. - -The acquisition cost of the Tempered Networks’ solution was one-tenth that of a traditional microsegmentation solution. The deployment time was 2 FTE days per ship compared to the 40 FTE days a traditional solution would have needed. No additional staffing was required to support the solution, and no changes were made to the underlying network.
- -### A time-tested microsegmentation solution - -This technology came out of Boeing and was deployed for over 12 years within its manufacturing facilities until 2014, when Boeing allowed the technology to become commercialized. Tempered Networks took the HIP and developed the full platform with easy, centralized management. It was purpose-built to provide secure connectivity to networks. The solution has been successfully deployed in industrial domains such as the utilities sector, oil and gas, electricity generation, and aircraft manufacturing, as well as in enterprise domains and healthcare. - -Join the Network World communities on [Facebook][11] and [LinkedIn][12] to comment on topics that are top of mind. - --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3405853/tempered-networks-simplifies-secure-network-connectivity-and-microsegmentation.html - -作者:[Linda Musthaler][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Linda-Musthaler/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2018/01/network_security_hacker_virus_crime-100745979-large.jpg -[2]: https://www.networkworld.com/article/3236448/lan-wan/what-to-consider-when-deploying-a-next-generation-firewall.html -[3]: https://www.networkworld.com/newsletters/signup.html -[4]: https://www.networkworld.com/article/3359363/software-defined-perimeter-brings-trusted-access-to-multi-cloud-applications-network-resources.html -[5]: https://www.networkworld.com/article/3247672/what-is-microsegmentation-how-getting-granular-improves-network-security.html -[6]: https://images.idgesg.net/images/article/2019/07/hip-slide-100800735-large.jpg -[7]: https://www.temperednetworks.com/ -[8]: https://pluralsight.pxf.io/c/321564/424552/7490?u=https%3A%2F%2Fwww.pluralsight.com%2Fpaths%2Fcertified-information-systems-security-professional-cisspr -[9]: https://images.idgesg.net/images/article/2019/07/cruise-ship-before-100800736-large.jpg -[10]: https://images.idgesg.net/images/article/2019/07/cruise-ship-after-100800738-large.jpg -[11]: https://www.facebook.com/NetworkWorld/ -[12]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190709 Cisco goes deeper into photonic, optical technology with -2.6B Acacia buy.md b/sources/talk/20190709 Cisco goes deeper into photonic, optical technology with -2.6B Acacia buy.md deleted file mode 100644 index 96f4b055cd..0000000000 --- a/sources/talk/20190709 Cisco goes deeper into photonic, optical technology with -2.6B Acacia buy.md +++ /dev/null @@ -1,72 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Cisco goes deeper into photonic, optical technology with $2.6B Acacia buy) -[#]: via: (https://www.networkworld.com/article/3407706/cisco-goes-deeper-into-photonic-optical-technology-with-2-6b-acacia-buy.html) -[#]: author: (Michael Cooney https://www.networkworld.com/author/Michael-Cooney/) - -Cisco goes deeper into photonic, optical technology with $2.6B Acacia buy -====== -Cisco: Optical-interconnect technologies are becoming increasingly strategic for data centers and service providers -![KTSimage / Getty Images][1] - -Looking to bulk up its optical systems portfolio, Cisco says it intends to buy Acacia Communications
for approximately $2.6 billion.  The deal is Cisco’s largest since it [laid out $3.7B for AppDynamics][2] in 2017. - -Acacia develops, manufactures and sells high-speed [coherent optical][3] interconnect products that are designed to transform networks linking data centers, cloud and service providers. Cisco is familiar with Acacia as it has been a “significant” customer of the optical firm for about five years, Cisco said. - -**[ Also see [How to plan a software-defined data-center network][4] and [Efficient container use requires data-center software networking][5].]** - -Acacia’s other customers include Nokia Oyj, Huawei and ZTE. Cisco accounts for about 18% of its revenue, [according to Bloomberg’s supply-chain analysis][6]. - -"With the explosion of bandwidth in the multi-cloud era, optical interconnect technologies are becoming increasingly strategic,” said David Goeckeler, executive vice president and general manager of Cisco's networking and security business, in a statement. “The acquisition of Acacia will allow us to build on the strength of our switching, routing and optical networking portfolio to address our customers' most demanding requirements." - -For Cisco, one of the key drivers for making this deal was Acacia’s coherent technology – “a fancy term that means the ability to send optical signals over long distances,” said Bill Gartner, senior vice president of Cisco’s Optical Systems and Optics business.  “That technology today is typically delivered via a line card on a big chassis in a separate optical layer but with Acacia’s digital signal processing, ASIC and other technology we are looking to move that from a line card to a pluggable module that increases network capacity, but also reduces complexity and costs.” - -In addition, Acacia uses silicon photonics as the platform for integration of multiple photonic functions for coherent optics, Gartner wrote in a [blog][7] about the acquisition. “Leveraging the advances in silicon photonics, each new generation of coherent optics products has enabled higher data transmission rates, lower power and higher performance than the one before.” - -Recent research from [IHS Markit][8] shows that data center interconnections are the fastest growing segment for coherent transceivers. - -“Acacia’s digital signal processing and small form-factor long-distance communications technology is strong and will be very valuable to Cisco in the long and short term,” said Jimmy Yu, vice president of the Dell'Oro Group. - -The question many analysts have is the impact the sale will have on other Acacia customers, Yu said.   “If it wasn’t for Acacia selling to others [such as Huawei, ZTE and Infinera], I don’t think these vendors would have done as well as they have, and when Cisco owns Acacia it could be a different story,” Yu said. - -The Acacia buy will significantly boost Cisco’s optical portfolio for applications outside the data center.  In February [Cisco closed a deal to buy optical-semiconductor firm Luxtera][9] for $660 million, bringing it the advanced optical technology customers will need for speed and throughput for future data center and webscale networks. - -The combination of Cisco’s and Luxtera’s capabilities in 100GbE/400GbE optics, silicon and process technology will help customers build future-proof networks optimized for performance, reliability and cost, Cisco stated.
- -The reason Cisco snatched up Luxtera was its silicon photonics technology that moves data among computer chips optically, which is far quicker than today's electrical transfer, Cisco said. Photonics will be the underpinning of future switches and other networking devices. - -"It seems that Cisco is going all in on being a supplier of optical components and optical pluggables: Luxtera (client-side optical components and pluggables) and Acacia (line-side optical components and pluggables)," Yu said. - -"Unless Cisco captures more of the optical systems market share and coherent shipment volume, I think Cisco will need to continue selling Acacia products to the broader market and other system vendors due to the high cost of product development," Yu said. - -The acquisition is expected to close during the second half of Cisco's FY2020, and upon close, Acacia employees will join Cisco's Optical Systems and Optics business within its networking and security business under Goeckeler. - -Join the Network World communities on [Facebook][10] and [LinkedIn][11] to comment on topics that are top of mind. - --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3407706/cisco-goes-deeper-into-photonic-optical-technology-with-2-6b-acacia-buy.html - -作者:[Michael Cooney][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Michael-Cooney/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2019/02/money_currency_printing_press_us_100-dollar_bills_by_ktsimage_gettyimages-1015664778_2400x1600-100788423-large.jpg -[2]: https://www.networkworld.com/article/3184027/cisco-closes-appdynamics-deal-increases-software-weight.html -[3]: https://www.ciena.com/insights/what-is/What-Is-Coherent-Optics.html -[4]: https://www.networkworld.com/article/3284352/data-center/how-to-plan-a-software-defined-data-center-network.html -[5]: https://www.networkworld.com/article/3297379/data-center/efficient-container-use-requires-data-center-software-networking.html -[6]: https://www.bloomberg.com/news/articles/2019-07-09/cisco-to-acquire-acacia-communications-for-2-6-billion-jxvs6rva?utm_source=twitter&utm_medium=social&cmpid=socialflow-twitter-business&utm_content=business&utm_campaign=socialflow-organic -[7]: https://blogs.cisco.com/news/cisco-news-announcement-07191234 -[8]: https://technology.ihs.com/ -[9]: https://www.networkworld.com/article/3339360/cisco-pushes-silicon-photonics-for-enterprise-webscale-networking.html -[10]: https://www.facebook.com/NetworkWorld/ -[11]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190710 The Titan supercomputer is being decommissioned- a costly, time-consuming project.md b/sources/talk/20190710 The Titan supercomputer is being decommissioned- a costly, time-consuming project.md deleted file mode 100644 index 1cee0a981d..0000000000 --- a/sources/talk/20190710 The Titan supercomputer is being decommissioned- a costly, time-consuming project.md +++ /dev/null @@ -1,73 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (The Titan supercomputer is being decommissioned: a costly, time-consuming project) -[#]: via:
(https://www.networkworld.com/article/3408176/the-titan-supercomputer-is-being-decommissioned-a-costly-time-consuming-project.html) -[#]: author: (Andy Patrizio https://www.networkworld.com/author/Andy-Patrizio/) - -The Titan supercomputer is being decommissioned: a costly, time-consuming project -====== -The old gives way to new at Oak Ridge National Labs. The Titan supercomputer is being replaced by Frontier, and it's a super-sized task. -![Oak Ridge National Laboratory][1] - -A supercomputer deployed in 2012 is going into retirement after seven years of hard work, but the task of decommissioning it is not trivial. - -The Cray XK7 “Titan” supercomputer at the Department of Energy’s (DOE) Oak Ridge National Laboratory (ORNL) is scheduled to be decommissioned on August 1 and disassembled for recycling. - -At 27 petaflops, or 27 quadrillion calculations per second, Titan was the fastest supercomputer in the world at its debut in 2012 and remained in the top 10 worldwide until June 2019. - -**[ Also read: [10 of the world's fastest supercomputers][2] | Get regularly scheduled insights: [Sign up for Network World newsletters][3] ]** - -But time marches on. This beast is positively ancient by computing standards. It uses 16-core AMD Opteron CPUs and Nvidia Kepler generation processors. You can buy a gaming PC with better than that today. - -“Titan has run its course,” Operations Manager Stephen McNally at ORNL said in an [article][4] published by ORNL. “The reality is, in electronic years, Titan is ancient. Think of what a cell phone was like seven years ago compared to the cell phones available today. Technology advances rapidly, including supercomputers.” - -In its seven years, Titan generated more than 26 billion core hours of computing time for hundreds of research teams around the world, not just the DOE. It was one of the first to use GPUs, a groundbreaking move at the time but now commonplace. - -The Oak Ridge Leadership Computing Facility (OLCF) actually houses Titan in a 60,000-sq.-ft. facility, 20,000 of which is occupied by Titan, the Eos cluster that supports Titan, and the Atlas file system that holds 32 petabytes of data. - -**[ [Get certified as an Apple Technical Coordinator with this seven-part online course from PluralSight.][5] ]** - -June 30 was the last day users could submit jobs to Titan or Eos, another supercomputer, which is also 7 years old.  - -### Decommissioning a supercomputer is a super-sized task - -Decommissioning a computer the size of Titan is more than turning off a switch. ORNL didn’t have a dollar estimate of the cost involved, but it did discuss the scale, which should give some idea of how costly this will be. - -The decommissioning of Titan will involve about 41 people, including staff from ORNL, Cray, and external subcontractors. OLCF staff are supporting users who need to complete runs, save data, or transition their projects to other resources. - -Electricians will safely shut down the 9 megawatt-capacity system, and Cray staff will disassemble and recycle Titan’s electronics and its metal components and cabinets. A separate crew will handle the cooling system. All told, 350 tons of equipment and 10,800 pounds of refrigerant are being removed from the site. - -What becomes of the old gear is unclear. Even ORNL has no idea what Cray will do with it. McNally said there is no value in Titan’s parts: “It’s simply not worth the cost to a data center or university of powering and cooling even fragments of Titan.
Titan’s value lies in the system as a whole.” - -The 20,000-sq.-ft. data center that is currently home to Titan will be gutted and expanded in preparation for [Frontier][6], an exascale system scheduled for delivery in 2021 running AMD Epyc processors and AMD GPUs. - -A power, cooling, and data center upgrade is already underway ahead of the Titan decommissioning to prepare for Frontier. The whole removal process will take about a month but has been in the works for several months to ensure a smooth transition for people still using the old machine. - -**[ Now read this: [10 of the world's fastest supercomputers][2] ]** - -Join the Network World communities on [Facebook][7] and [LinkedIn][8] to comment on topics that are top of mind. - --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3408176/the-titan-supercomputer-is-being-decommissioned-a-costly-time-consuming-project.html - -作者:[Andy Patrizio][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Andy-Patrizio/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2018/06/titan_supercomputer_at_ornl_oak_ridge_national_laboratory_1200x800-100762120-large.jpg -[2]: https://www.networkworld.com/article/3236875/embargo-10-of-the-worlds-fastest-supercomputers.html -[3]: https://www.networkworld.com/newsletters/signup.html -[4]: https://www.olcf.ornl.gov/2019/06/28/farewell-titan/ -[5]: https://pluralsight.pxf.io/c/321564/424552/7490?u=https%3A%2F%2Fwww.pluralsight.com%2Fpaths%2Fapple-certified-technical-trainer-10-11 -[6]: https://www.olcf.ornl.gov/frontier/ -[7]: https://www.facebook.com/NetworkWorld/ -[8]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190710 Will IBM-s acquisition be the end of Red Hat.md b/sources/talk/20190710 Will IBM-s acquisition be the end of Red Hat.md deleted file mode 100644 index a1a2e4cad5..0000000000 --- a/sources/talk/20190710 Will IBM-s acquisition be the end of Red Hat.md +++ /dev/null @@ -1,66 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Will IBM’s acquisition be the end of Red Hat?) -[#]: via: (https://www.networkworld.com/article/3407746/will-ibms-acquisition-be-the-end-of-red-hat.html) -[#]: author: (Sandra Henry-Stocker https://www.networkworld.com/author/Sandra-Henry_Stocker/) - -Will IBM’s acquisition be the end of Red Hat? -====== -IBM's acquisition of Red Hat is a big deal -- a 34 billion dollar big deal -- and many Linux professionals are wondering how it's going to change Red Hat's role in the Linux world. Here are some thoughts. -![Stephen Lawson/IDG][1] - -[IBM's acquisition of Red Hat for $34 billion][2] is now a done deal, and statements from the leadership of both companies sound extremely promising. But some in the Linux community have expressed concern. - -Questions being asked by some Linux professionals and devotees include: - - * Will Red Hat lose customer confidence now that it’s part of IBM and not an independent company? - * Will IBM continue putting funds into open source after paying such a huge price for Red Hat? Will they curtail what Red Hat is able to invest?
- * Both companies’ leaders are saying all the right things now, but can they predict how their business partners and customers will react as they move forward? Will their good intentions be derailed? - - - -Part of the worry simply comes from the size of this deal. Thirty-four billion dollars is a _lot_ of money. This is probably the largest cloud computing acquisition to date. What kind of strain will that price tag put on how the new IBM functions going forward? Other worries come from the character of the acquisition – whether Red Hat will be able to continue operating independently and what will change if they cannot. In addition, a few Linux devotees hark back to Oracle’s acquisition of Sun Microsystems in 2010 and Sun’s slow death in its aftermath. - -**[ Also read: [The IBM-Red Hat deal: What it means for enterprises][3] | Get daily insights: [Sign up for Network World newsletters][4] ]** - -The good news is that this merger of IBM and Red Hat appears to offer each of the companies some significant benefits. IBM makes a strong move into cloud computing, and Red Hat gains a broader international footing. - -The other good news relates to the pace at which this acquisition occurred. Initially announced on October 28, 2018, it is now more than eight months later. It’s clear that the leadership of each company has not rushed headlong into this new relationship. Both parties to the acquisition appear to be moving ahead with trust and optimism. IBM promises to ensure Red Hat's independence and will allow it to continue to be "Red Hat" both in name and business activity. - -### The end of Red Hat highly unlikely - -Will this acquisition be the end of Red Hat? That outcome is not impossible, but it seems extremely unlikely. For one thing, both companies stand to gain significantly from the other’s strong points. IBM is likely to be revitalized in ways that allow it to be more successful, and Red Hat is starting from a very strong position. While it’s a huge gamble by some measurements, I think most of us Linux enthusiasts are cautiously optimistic at worst. - -IBM seems intent on allowing Red Hat to work independently and seems to be taking the time required to work out the kinks in their plans. - -As for the eventual demise of Sun Microsystems, the circumstances were very different. As this [coverage in Network World in 2017][5] suggests, Sun was in an altogether different position when it was acquired. The future for IBM and Red Hat appears to be considerably brighter – even to a former (decades earlier) member of the Sun User Group Board of Directors. - -The answer to the question posed by the title of this post is “probably not.” Only time will tell, but leadership seems committed to doing things the right way – preserving Red Hat's role in the Linux world and making the arrangement pay off for both organizations. And I, for one, expect good things to come from the merger – for IBM, for Red Hat and likely even for Linux enthusiasts like myself. - -**[ Now read this: [The IBM-Red Hat deal: What it means for enterprises][3] ]** - -Join the Network World communities on [Facebook][6] and [LinkedIn][7] to comment on topics that are top of mind. 
- -------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3407746/will-ibms-acquisition-be-the-end-of-red-hat.html - -作者:[Sandra Henry-Stocker][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Sandra-Henry_Stocker/ -[b]: https://github.com/lujun9972 -[1]: https://images.techhive.com/images/article/2015/10/20151027-red-hat-logo-100625237-large.jpg -[2]: https://www.networkworld.com/article/3316960/ibm-closes-34b-red-hat-deal-vaults-into-multi-cloud.html -[3]: https://www.networkworld.com/article/3317517/the-ibm-red-hat-deal-what-it-means-for-enterprises.html -[4]: https://www.networkworld.com/newsletters/signup.html -[5]: https://www.networkworld.com/article/3222707/the-sun-sets-on-solaris-and-sparc.html -[6]: https://www.facebook.com/NetworkWorld/ -[7]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190717 MPLS is hanging on in this SD-WAN world.md b/sources/talk/20190717 MPLS is hanging on in this SD-WAN world.md deleted file mode 100644 index 159c8598db..0000000000 --- a/sources/talk/20190717 MPLS is hanging on in this SD-WAN world.md +++ /dev/null @@ -1,71 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (MPLS is hanging on in this SD-WAN world) -[#]: via: (https://www.networkworld.com/article/3409070/mpls-is-hanging-on-in-this-sd-wan-world.html) -[#]: author: (Andy Patrizio https://www.networkworld.com/author/Andy-Patrizio/) - -MPLS is hanging on in this SD-WAN world -====== -The legacy networking protocol is still viable and there is no need to replace it in certain use cases, argues one cloud provider. -![jamesteohart][1] - -The [SD-WAN networking market is booming and is expected to grow][2] to $17 billion by 2025, and no wonder. Software-defined wide-area networking eliminates the need for expensive routers and does all the network connectivity in the cloud. - -Among its advantages is the support for secure cloud connectivity, one area where multiprotocol label switching (MPLS) falls short. MPLS is a data protocol from before the internet took off, and while ideal for communications within the corporate firewall, it doesn’t lend itself to cloud and outside communications well. - -You would think that would seal MPLS’s fate, but just like IPv6 is ever so slowly replacing IPv4, MPLS is hanging on and some IT pros are even increasing their investment. - -**[ Related: [MPLS explained – What you need to know about multi-protocol label switching][3] ]** - -Avant Communications, a cloud services provider that specializes in SD-WAN, recently issued a report entitled [State of Disruption][4] that found that 83% of enterprises that use or are familiar with MPLS plan to increase their MPLS network infrastructure this year, and 40% say they will “significantly increase” their use of it. - -The report did not find one protocol winning at the expense of another. Just as 83% plan to use MPLS, 78% acknowledged plans to use SD-WAN in their corporate networks by the end of the year. Although SD-WAN is on the rise, MPLS is clearly not going away anytime soon. Both SD-WAN and MPLS can live together in harmony, adding value to each other. - -“SD-WAN is the most disruptive technology in our study.
It’s not surprising that adoption of new technologies is slowest among the largest companies. The wave of SD-WAN disruption has not fully hit larger companies yet, but our belief is that it is moving quickly upmarket,” the report stated. - -While SD-WAN is much better suited for the job of cloud connectivity, 50% of network traffic is still staying within the corporate firewall. So while SD-WAN can solve the connection issues, so can MPLS. And if you have it deployed, rip and replace makes no sense. - -“MPLS continues to have a strong role in modern networks, and we expect that to continue,” the report stated. “This is especially true among larger enterprises that have larger networks depending on MPLS. While you’ll find MPLS at the core for a long time to come, we expect to see a shared environment with SD-WAN at the edge, enabled by broadband Internet and other lower cost networks. “ - -And MPLS isn’t without its advantages; most notably, it can [guarantee performance][5] while SD-WAN, at the mercy of the public internet, cannot. - -As broadband networks continue to improve in performance, SD-WAN will allow companies to reduce their reliance on MPLS, especially as equipment ages and is replaced. Avant expects that, for the foreseeable future, there will continue to be a very viable role for both. - -**More about SD-WAN:** - - * [How to buy SD-WAN technology: Key questions to consider when selecting a supplier][6] - * [How to pick an off-site data-backup method][7] - * [SD-Branch: What it is and why you’ll need it][8] - * [What are the options for securing SD-WAN?][9] - - - -Join the Network World communities on [Facebook][10] and [LinkedIn][11] to comment on topics that are top of mind. - --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3409070/mpls-is-hanging-on-in-this-sd-wan-world.html - -作者:[Andy Patrizio][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Andy-Patrizio/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2019/07/the-latest-in-innovation-in-the-sd-wan-managed-services-market1400-100801684-large.jpg -[2]: https://www.prnewswire.com/news-releases/software-defined-wide-area-network-sd-wan-market-to-hit-17bn-by-2025-global-market-insights-inc-300795304.html -[3]: https://www.networkworld.com/article/2297171/sd-wan/network-security-mpls-explained.html -[4]: https://www.goavant.net/Disruption -[5]: https://www.networkworld.com/article/2297171/network-security-mpls-explained.html -[6]: https://www.networkworld.com/article/3323407/sd-wan/how-to-buy-sd-wan-technology-key-questions-to-consider-when-selecting-a-supplier.html -[7]: https://www.networkworld.com/article/3328488/backup-systems-and-services/how-to-pick-an-off-site-data-backup-method.html -[8]: https://www.networkworld.com/article/3250664/lan-wan/sd-branch-what-it-is-and-why-youll-need-it.html -[9]: https://www.networkworld.com/article/3285728/sd-wan/what-are-the-options-for-securing-sd-wan.html?nsdr=true -[10]: https://www.facebook.com/NetworkWorld/ -[11]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190718 Smart cities offer window into the evolution of enterprise IoT technology.md b/sources/talk/20190718 Smart cities offer window into the evolution of enterprise IoT technology.md deleted file mode
100644 index 06b5726379..0000000000 --- a/sources/talk/20190718 Smart cities offer window into the evolution of enterprise IoT technology.md +++ /dev/null @@ -1,102 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Smart cities offer window into the evolution of enterprise IoT technology) -[#]: via: (https://www.networkworld.com/article/3409787/smart-cities-offer-window-into-the-evolution-of-enterprise-iot-technology.html) -[#]: author: (Fredric Paul https://www.networkworld.com/author/Fredric-Paul/) - -Smart cities offer window into the evolution of enterprise IoT technology -====== -Smart-city technologies such as 0G networking hold clues for successful large-scale implementations of the internet of things in enterprise settings. -![Benjamin Hung modified by IDG Comm. \(CC0\)][1] - -Powering smart cities is one of the most ambitious use cases for the internet of things (IoT), combining a wide variety of IoT technologies to create coherent systems that span not just individual buildings or campuses but entire metropolises. As such, smart cities offer a window into the evolution of enterprise IoT technologies and implementations on the largest scale. - -And that’s why I connected with [Christophe Fourtet][2], CSO and co-founder of [Sigfox][3], a French global network operator, to learn more about using wireless networks to connect large numbers of low-power objects, ranging from smartwatches to electricity meters. (And I have to admit I was intrigued by the 0G network moniker, which conjured visions of weightless IoT devices floating in space, or maybe [OG-][4]style old-school authenticity. That’s not at all what it’s about, of course.) - -**[ Learn more: [Download a PDF bundle of five essential articles about IoT in the enterprise][5] ]** - -According to Fourtet, "Sigfox’s global 0G network specializes in inexpensively conveying small amounts of data over long ranges—without sacrificing quality. Whereas other networks aim to collect and transmit as much data as possible, as quickly as possible, we deliver small packets of information at regular intervals, giving customers only the critical information they need." - -The software-based wireless 0G network listens to devices without the need to establish and maintain a network connection, eliminating signaling overhead. With network and computing complexity managed in the cloud, energy consumption and costs of connected devices are dramatically reduced, [the company says][6]. Just as important, the low power requirements can also dramatically cut battery requirements for IoT devices. - -Around the world, customers like Michelin, General Motors, and Airbus use the 0G network to connect IoT devices, and the network is supported by more than 660 partner organizations, including device makers and service providers such as Urbansense and Bosch. Sigfox cited [0G-connected IoT devices enabling Danish cities][7] to monitor quality of life data, from detecting defects in buildings to tracking garbage collection. - -### 0G applications beyond smart cities - -In addition to smart cities applications, Sigfox serves several industry verticals, including manufacturing, agriculture, and retail. Common use cases include supply-chain management and asset tracking, both within factory/warehouse environments and between locations as containers/shipments move through the supply chain around the globe.
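To make the "small packets at regular intervals" model concrete, here is a minimal device-side sketch of that duty cycle. The 12-byte payload cap reflects the limit commonly cited for Sigfox uplinks, and the `Radio` class stands in for a real module driver; both are assumptions for illustration:

```python
# Sketch of a low-power periodic uplink, in the spirit described above.
# The Radio class is a hypothetical stand-in for a real module driver.
import struct
import time

MAX_PAYLOAD = 12  # bytes; the commonly cited Sigfox uplink payload limit

class Radio:
    def send(self, frame: bytes) -> None:
        # A real driver would hand the frame to the modem, fire-and-forget.
        print(f"uplink {len(frame)} bytes: {frame.hex()}")

def encode_reading(temp_c: float, humidity_pct: int, battery_mv: int) -> bytes:
    # Pack three readings into 5 bytes: int16 tenths-of-a-degree,
    # uint8 percent, uint16 millivolts -- well under the payload cap.
    payload = struct.pack(">hBH", int(temp_c * 10), humidity_pct, battery_mv)
    assert len(payload) <= MAX_PAYLOAD
    return payload

radio = Radio()
for _ in range(3):  # a real device would loop forever
    radio.send(encode_reading(temp_c=21.4, humidity_pct=48, battery_mv=3100))
    time.sleep(1)   # a real reporting interval would be minutes or hours
```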
The network is uniquely equipped for supply chain use cases due to its cost-efficiency, long-lasting batteries with totally predictable autonomy, and wide-range reach. - -In facilities management, the 0G network can connect IoT devices that track ambient factors such as temperature, humidity, and occupancy. Doing so helps managers leverage occupancy data to adjust the amount of space a company needs to rent, reducing overhead costs. It can also help farmers optimize the planting, care, and harvesting of crops. - -Operating as a backup solution to ensure connectivity during a broadband network outage, 0G networking built into a cable box or router could allow service providers to access hardware even when the primary network is down, Fourtet said. - -“The 0G network does not promise a continuation of these services,” Fourtet noted, “but it can provide access to the necessary information to solve challenges associated with outages.” - -In a more dire example in the home and commercial building security market, sophisticated burglars could use cellular and Wi-Fi jammers to block a security system’s access to a network so even though alarms were issued, the service might never receive them, Fourtet said. But the 0G network can send an alert to the alarm system provider even if it has been jammed or blocked, he said. - -### How 0G networks are used today - -Current 0G implementations include helping [Louis Vuitton track luggage][8] for its traveling customers. Using a luggage tracker powered by [Sigfox’s Monarch service][9], a suitcase can stay connected to the 0G network throughout a trip, automatically recognizing and adapting to local radio frequency standards. The idea is for travelers to track the location of their bags at major airports in multiple countries, Fourtet said, while low energy consumption promises a six-month battery life with a very small battery. - -At the Special Olympics World Games Abu Dhabi 2019, [iWire, LITE-ON and Sigfox worked together][10] to create a tracking solution designed to help safeguard 10,000 athletes and delegates. Sensors connected to the Sigfox 0G network and outfitted with Wi-Fi capabilities were equipped with tiny batteries designed to provide uninterrupted service throughout the weeklong event. The devices “periodically transmitted messages that helped to identify the location of athletes and delegates in case they went off course,” Fourtet said, while LITE-ON incorporated a panic button for use in case of emergencies. In fact, during the event, the system was used to locate a lost athlete and return them to the Games without incident, he said. - -French car manufacturer [Groupe PSA][11] uses the 0G network to optimize shipping container routes between suppliers and assembly plants. [Track&Trace][11] works with IBM’s cloud-based IoT technologies to track container locations and alert Groupe PSA when issues crop up, Fourtet said. - -### 0G is still growing - -“It takes time to build a new network,” Fourtet said. So while Sigfox has delivered 0G network coverage in 60 countries across five continents, covering 1 billion people (including 51 U.S. metropolitan areas covering 30% of the population), Fourtet acknowledged, “[We] still have a ways to go to build our global network.” In the meantime, the company is expanding its Connectivity-as-a-Service (CaaS) solutions to enable coverage in areas where the 0G network does not yet exist. - -**More on IoT:** - - * [What is the IoT?
How the internet of things works][12] - * [What is edge computing and how it’s changing the network][13] - * [Most powerful Internet of Things companies][14] - * [10 Hot IoT startups to watch][15] - * [The 6 ways to make money in IoT][16] - * [What is digital twin technology? [and why it matters]][17] - * [Blockchain, service-centric networking key to IoT success][18] - * [Getting grounded in IoT networking and security][5] - * [Building IoT-ready networks must become a priority][19] - * [What is the Industrial IoT? [And why the stakes are so high]][20] - - - -Join the Network World communities on [Facebook][21] and [LinkedIn][22] to comment on topics that are top of mind. - --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3409787/smart-cities-offer-window-into-the-evolution-of-enterprise-iot-technology.html - -作者:[Fredric Paul][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Fredric-Paul/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2018/07/tokyo_asia_smart-city_iot_networking_by-benjamin-hung-unsplash-100764249-large.jpg -[2]: https://www.sigfox.com/en/sigfox-story -[3]: https://www.sigfox.com/en -[4]: https://www.dictionary.com/e/slang/og/ -[5]: https://www.networkworld.com/article/3269736/internet-of-things/getting-grounded-in-iot-networking-and-security.html -[6]: https://www.sigfox.com/en/sigfox-iot-technology-overview -[7]: https://www.youtube.com/watch?v=WXc722WGjnE&t=1s -[8]: https://www.sigfox.com/en/news/sigfox-and-louis-vuitton-partner-innovative-luggage-tracker -[9]: https://www.sigfox.com/en/solutions/sigfox-services -[10]: https://www.sigfox.com/en/news/case-study-special-olympics-2019 -[11]: https://www.sigfox.com/en/news/ibm-revolutionizes-container-tracking-groupe-psa-sigfox -[12]: https://www.networkworld.com/article/3207535/internet-of-things/what-is-the-iot-how-the-internet-of-things-works.html -[13]: https://www.networkworld.com/article/3224893/internet-of-things/what-is-edge-computing-and-how-it-s-changing-the-network.html -[14]: https://www.networkworld.com/article/2287045/internet-of-things/wireless-153629-10-most-powerful-internet-of-things-companies.html -[15]: https://www.networkworld.com/article/3270961/internet-of-things/10-hot-iot-startups-to-watch.html -[16]: https://www.networkworld.com/article/3279346/internet-of-things/the-6-ways-to-make-money-in-iot.html -[17]: https://www.networkworld.com/article/3280225/internet-of-things/what-is-digital-twin-technology-and-why-it-matters.html -[18]: https://www.networkworld.com/article/3276313/internet-of-things/blockchain-service-centric-networking-key-to-iot-success.html -[19]: https://www.networkworld.com/article/3276304/internet-of-things/building-iot-ready-networks-must-become-a-priority.html -[20]: https://www.networkworld.com/article/3243928/internet-of-things/what-is-the-industrial-iot-and-why-the-stakes-are-so-high.html -[21]: https://www.facebook.com/NetworkWorld/ -[22]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190724 How BMW-s new annual fee for Apple CarPlay could define the IoT.md b/sources/talk/20190724 How BMW-s new annual fee for Apple CarPlay could define the IoT.md deleted file mode 100644 index 6a73b041d5..0000000000 --- a/sources/talk/20190724 How BMW-s new annual fee 
for Apple CarPlay could define the IoT.md +++ /dev/null @@ -1,73 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (How BMW’s new annual fee for Apple CarPlay could define the IoT) -[#]: via: (https://www.networkworld.com/article/3411478/how-bmws-new-annual-fee-for-apple-carplay-could-define-the-iot.html) -[#]: author: (Fredric Paul https://www.networkworld.com/author/Fredric-Paul/) - -How BMW’s new annual fee for Apple CarPlay could define the IoT -====== -BMW's plans to charge for Apple CarPlay access illustrate the promise—and the pitfalls—of the internet of things (IoT). -![Apple][1] - -Apple calls CarPlay “[the ultimate co-pilot][2].” BMW calls it the “smart and fast way to conveniently use your iPhone features while in your car. ... You can control your iPhone and use apps with the touchscreen display, the iDrive Controller or voice commands.” - -However you describe it, though, Apple’s CarPlay system suddenly finds itself in the center of what could be a defining conversation about the future of the internet of things (IoT). - -You see, the German luxury carmaker’s plans to charge $80 a year to access CarPlay have suddenly become the talk of the internet, from [tech blogs][3] to [car sites][4]. The hue and cry makes CarPlay the perfect illustration of the promise—and the pitfalls—of the IoT. - -**[ [Learn more:][5] Download a PDF bundle of five essential articles about IoT in the enterprise ]** - -First, the facts: BMW’s website now reveals that beginning with the 2019 model year, it’s turning the CarPlay interface between iPhones and the vehicle’s infotainment system into a subscription service. While most car manufacturers that offer CarPlay make it available free of charge, owners of the “ultimate driving machine” will get free access for only the first year. After that, [BMW drivers will need to pony up $80 a year—or $300 for 20 years][6]—to keep using it. - -### An “outrageous” fee? - -Some observers are labeling the new fee “[outrageous][7],” and it’s not yet clear what Apple thinks about BMW’s pricing policy. For me, though, it’s both a shining example of the amazing new revenue opportunities generated by the IoT, and a terrifying warning of how the IoT could add new cost and complexity to everyday activities. - -Look at this as a glass half full, and BMW is cleverly finding a new revenue stream by offering valuable functionality to a target market that has already demonstrated a willingness to pay for high-end functionality. The IoT and connected cars offer a new and better experience, and BMW is leveraging that to boost its business. It’s the power of capitalism at work, and if BMW drivers don’t value the CarPlay functionality, no one is forcing them to buy it. - -In some ways, the subscription business model is similar to that of [satellite radio][8] or GM’s [OnStar][9] system. The automaker builds in the equipment needed to offer the service, and car owners can choose to avail themselves of it if they feel it’s worthwhile. Or not. - -### A particular bit of usury - -But that’s only one perspective on what’s happening here. Look at it another way, and you could paint a very different picture. For one thing, as noted above, other car makers that offer CarPlay do not charge anything extra for it. BMWs are relatively expensive vehicles, and nickel-and-diming affluent consumers does not seem like a path to great customer loyalty.
Think of the annoyance surrounding the fact that budget motels typically make Wi-Fi available for free, while luxury properties charge guests through the nose. (With the [rise of 5G networks][10], though, that particular bit of usury may not last much longer.) - -Making matters worse, CarPlay is really just internal connectivity between your iPhone and your car’s infotainment system. There’s no actual _service_ involved, and no real justification for a separate fee, other than the fact that BMW _can_ charge for it. It seems more like getting charged a monthly fee to connect your own phone to your own big-screen TV (like Apple’s AirPlay) or hooking up your smart light fixture to your home assistant or—I don’t know—putting your lamp on your coffee table! It just doesn’t feel right. - -### Dangerous long-term implications? - -Sure, if this kind of thing takes off in the larger world of the IoT, it could lead to a significant amount of new revenue—at least in the short run. But over time, it could easily backfire, encouraging consumers to view IoT vendors as greedy and to question the costs and benefits of everything from smart houses to connected enterprises. That could turn out to be a drag on the overall IoT market. - -That would be a shame, and it doesn’t have to be that way. If BMW had merely buried the CarPlay costs in the price of the equipment or options, or in the sticker cost of the car itself, nobody would be worrying about it. But just like breaking out the costs of checked baggage on airplane flights, charging a subscription for CarPlay makes it seem like a combination of bait-and-switch and price gouging. And that’s exactly what the IoT industry _doesn’t_ need. If the goal is to maximize the growth and acceptance of the IoT, vendors should strive to make IoT users feel like they’re getting great functionality at a fair price. - -That’s often exactly what many IoT devices and IoT-based services do, so it shouldn’t be too hard to avoid screwing it up. - -Join the Network World communities on [Facebook][11] and [LinkedIn][12] to comment on topics that are top of mind. 
- -------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3411478/how-bmws-new-annual-fee-for-apple-carplay-could-define-the-iot.html - -作者:[Fredric Paul][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Fredric-Paul/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2019/06/ios13-carplay-waze-100799546-large.jpg -[2]: https://www.apple.com/ios/carplay/ -[3]: https://www.engadget.com/2019/07/24/bmw-adds-a-80-yearly-subscription-for-apples-carplay/ -[4]: https://www.caranddriver.com/news/a15530125/bmw-to-treat-apple-carplay-as-a-subscription-service-and-charge-customers-an-annual-fee/ -[5]: https://www.networkworld.com/article/3269736/internet-of-things/getting-grounded-in-iot-networking-and-security.html -[6]: https://connecteddrive.bmwusa.com/app/index.html#/portal/store/Base_CarPlay -[7]: https://www.cultofmac.com/640578/bmw-carplay-annual-fee/ -[8]: https://www.siriusxm.com/ -[9]: https://www.onstar.com/us/en/home/?ppc=GOOGLE_700000001302986_71700000048879287_58700004855294718_p41772767724&gclid=EAIaIQobChMIi7qn4IDO4wIVJRh9Ch1mlw6tEAAYASAAEgKQf_D_BwE&gclsrc=aw.ds -[10]: http://www.networkworld.com/cms/article/17%20predictions%20about%205G%20networks%20and%20devices -[11]: https://www.facebook.com/NetworkWorld/ -[12]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190730 Google Cloud to offer VMware data-center tools natively.md b/sources/talk/20190730 Google Cloud to offer VMware data-center tools natively.md deleted file mode 100644 index e882248bf8..0000000000 --- a/sources/talk/20190730 Google Cloud to offer VMware data-center tools natively.md +++ /dev/null @@ -1,69 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Google Cloud to offer VMware data-center tools natively) -[#]: via: (https://www.networkworld.com/article/3428497/google-cloud-to-offer-vmware-data-center-tools-natively.html) -[#]: author: (Michael Cooney https://www.networkworld.com/author/Michael-Cooney/) - -Google Cloud to offer VMware data-center tools natively -====== -Google is enlisting VMware and CloudSimple to serve up vSphere, NSX and vSAN software on Google Cloud to ease the transition of enterprise workloads to the cloud. -![Thinkstock / Google][1] - -Google this week said it would for the first time natively support VMware workloads in its Cloud service, giving customers more options for deploying enterprise applications. - -The hybrid cloud service, called Google Cloud VMware Solution by CloudSimple, will use VMware software-defined data center (SDDC) technologies including VMware vSphere, NSX and vSAN software deployed on a platform administered by CloudSimple for GCP. - -[RELATED: How to make hybrid cloud work][2] - -“Users will have full, native access to the full VMware stack including vCenter, vSAN and NSX-T. Google Cloud will provide the first line of support, working closely with CloudSimple to help ensure customers receive a streamlined product support experience and that their business-critical applications are supported with the SLAs that enterprise customers need,” Thomas Kurian, CEO of Google Cloud [wrote in a blog outlining the deal][3].
- -“With VMware on Google Cloud Platform, customers will be able to leverage all of the familiarity of VMware tools and training, and protect their investments, as they execute on their cloud strategies and rapidly bring new services to market and operate them seamlessly and more securely across a hybrid cloud environment,” said Sanjay Poonen, chief operating officer, customer operations at VMware [in a statement][4]. - -The move further integrates Google and VMware software as both have teamed up multiple times in the past including: - - * Google Cloud integration for VMware NSX Service Mesh and SD-WAN by VeloCloud that lets customers deploy and gain visibility into their hybrid workloads—wherever they’re running. - * Google Cloud’s Anthos on VMware vSphere, including validations for vSAN, as the preferred hyperconverged infrastructure, to provide customers a multi-cloud offering and providing Kubernetes users the ability to create and manage persistent storage volumes for stateful workloads on-premises. - * A Google Cloud plug-in for VMware vRealize Automation providing customers with a seamless way to deploy, orchestrate and manage Google Cloud resources from within their vRealize Automation environment. - - - -Google is just one key cloud relationship VMware relies on.  It has a deep integration with Amazon Web Services that began in 2017.  With that flagship agreement, VMware customers can run workloads in the AWS cloud.  And more recently, VMware cloud offerings can be bought directly through the AWS service.  - -VMware also has a hybrid cloud partnership with [Microsoft’s Azure cloud service][5].  That package, called Azure VMware Solutions is built on VMware Cloud Foundation, which  is a packaging of the company’s traditional compute virtualization software vSphere with its NSX network virtualization product and its VSAN software-defined storage area network product. - -More recently VMware bulked up its cloud offerings by [buying Avi Networks][6]' load balancing, analytics and application-delivery technology for an undisclosed amount. - -Founded in 2012 by a group of Cisco engineers and executives, Avi offers a variety of software-defined products and services including a software-based application delivery controller (ADC) and intelligent web-application firewall.  The software already integrates with VMware vCenter and NSX, OpenStack, third party [SDN][7] controllers, as well as Amazon AWS and Google Cloud Platform, Red Hat OpenShift and container orchestration platforms such as Kubernetes and Docker. - -According to the company,  the VMware and Avi Networks teams will work together to advance VMware’s Virtual Cloud Network plan, build out full stack Layer 2-7 services, and deliver the public-cloud experience for on-prem environments and data centers, said Tom Gillis, VMware's senior vice president and general manager of its networking and security business unit. - -Combining Avi Networks with [VMware NSX][8] will further enable organizations to respond to new opportunities and threats, create new business models and deliver services to all applications and data, wherever they are located, VMware stated. - -Join the Network World communities on [Facebook][9] and [LinkedIn][10] to comment on topics that are top of mind. 
- -------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3428497/google-cloud-to-offer-vmware-data-center-tools-natively.html - -作者:[Michael Cooney][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Michael-Cooney/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2018/07/google-cloud-services-100765812-large.jpg -[2]: https://www.networkworld.com/article/3119362/hybrid-cloud/how-to-make-hybrid-cloud-work.html#tk.nww-fsb -[3]: https://cloud.google.com/blog/topics/partners/vmware-cloud-foundation-comes-to-google-cloud -[4]: https://www.vmware.com/company/news/releases/vmw-newsfeed.Google-Cloud-and-VMware-Extend-Strategic-Partnership.1893625.html -[5]: https://www.networkworld.com/article/3113394/vmware-cloud-foundation-integrates-virtual-compute-network-and-storage-systems.html -[6]: https://www.networkworld.com/article/3402981/vmware-eyes-avi-networks-for-data-center-software.html -[7]: https://www.networkworld.com/article/3209131/what-sdn-is-and-where-its-going.html -[8]: https://www.networkworld.com/article/3346017/vmware-preps-milestone-nsx-release-for-enterprise-cloud-push.html -[9]: https://www.facebook.com/NetworkWorld/ -[10]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190731 Cisco simplifies Kubernetes container deployment with Microsoft Azure collaboration.md b/sources/talk/20190731 Cisco simplifies Kubernetes container deployment with Microsoft Azure collaboration.md deleted file mode 100644 index 423cd7180a..0000000000 --- a/sources/talk/20190731 Cisco simplifies Kubernetes container deployment with Microsoft Azure collaboration.md +++ /dev/null @@ -1,67 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Cisco simplifies Kubernetes container deployment with Microsoft Azure collaboration) -[#]: via: (https://www.networkworld.com/article/3429116/cisco-simplifies-kubernetes-container-deployment-with-microsoft-azure-collaboration.html) -[#]: author: (Michael Cooney https://www.networkworld.com/author/Michael-Cooney/) - -Cisco simplifies Kubernetes container deployment with Microsoft Azure collaboration -====== -Microsoft's Azure Kubernetes Service (AKS) has been added to the Kubernetes managed services that natively integrate with the Cisco Container Platform. -![Viti / Getty Images][1] - -Cisco seeks to enhance container deployment with a service to let enterprise customers run containerized applications across both Cisco-based on-premises environments and the Microsoft Azure cloud. - -Customers can now further simplify deploying and managing Kubernetes clusters on-premises and in Azure Kubernetes Service (AKS) with one tool, using common identity and control policies, reducing manual tasks and ultimately time-to-market for their application environments, wrote Cisco’s Kip Compton, senior vice president of the company’s Cloud Platform and Solutions group, in a [blog][2] about the work. - -[RELATED: How to make hybrid cloud work][3] - -Specifically, AKS has been added to Kubernetes managed services that natively integrate with the [Cisco Container Platform][4].
Cisco introduced its Kubernetes-based Container Platform in January 2018 and said it allows for self-service deployment and management of container clusters. - -Cisco has added multivendor support to the platform, including support of SAP’s Data Hub to integrate large data sets that may be in public clouds, such as Amazon Web Services, Hadoop, Microsoft or Google, and integrate them with private cloud or enterprise apps such as SAP S/4 HANA. - -Kubernetes, originally designed by Google, is an open-source-based system for developing and orchestrating containerized applications. Containers can be deployed across multiple server hosts and Kubernetes orchestration lets customers build application services that span multiple containers, schedule those containers across a cluster, scale those containers and manage the container health. - -Cisco has been working to further integrate with Azure services for quite a while now. For example, the [Cisco Integrated System for Microsoft Azure Stack][5] lets organizations access development tools, data repositories, and related Azure services to reinvent applications and gain new information from secured data. Azure Stack provides the same APIs and user interface as the Azure public cloud. - -In future phases, the Cisco Container Platform will integrate more features to support Microsoft Windows container applications with the potential to leverage virtual-kubelet or Windows node pools in Azure, Compton stated. “In addition, we will support Azure Active Directory common identity integration for both on-prem and AKS clusters so customer/applications experience a single consistent environment across hybrid cloud.” - -In addition, Cisco has a substantial portfolio of offerings running in the Azure cloud and available in the Azure Marketplace. For example, the company offers its Cloud Services Router, the CSR 1000v, as well as Meraki vMX, Stealthwatch Cloud, the Adaptive Security Virtual Appliance and its Next Generation Firewall. - -The Azure work broadens Cisco’s drive into cloud. For example, Cisco and [Amazon Web Services (AWS) offer][6] enterprise customers an integrated platform that promises to help them more simply build, secure and connect Kubernetes clusters across private data centers and the AWS cloud. - -The package, Cisco Hybrid Solution for Kubernetes on AWS, combines Cisco, AWS and open-source technologies to simplify complexity and helps eliminate challenges for customers who use Kubernetes to enable deploying applications on premises and across the AWS cloud in a secure, consistent manner. The hybrid service integrates Cisco Container Platform (CCP) and Amazon Elastic Container Service for Kubernetes (EKS), so customers can provision clusters on premises and on EKS in the cloud. - -Cisco [also released a cloud-service program][7] on its flagship software-defined networking (SDN) software that will let customers manage and secure applications running in the data center or in Amazon Web Service cloud environments. The service, Cisco Cloud application centric infrastructure (ACI) for AWS, lets users configure inter-site connectivity, define policies and monitor the health of network infrastructure across hybrid environments, Cisco said. - -Meanwhile, Cisco and Google have done extensive work on their own joint cloud-development activities to help customers more easily build secure multicloud and hybrid applications everywhere from on-premises data centers to public clouds.
- -Cisco and Google have been working closely together since October 2017, when the companies said they were working on an open hybrid cloud platform that bridges on-premises and cloud environments. That package, [Cisco Hybrid Cloud Platform for Google Cloud][8], became generally available in September 2018. It lets customers develop enterprise-grade capabilities from Google Cloud-managed Kubernetes containers that include Cisco networking and security technology as well as service mesh monitoring from Istio. - -Join the Network World communities on [Facebook][9] and [LinkedIn][10] to comment on topics that are top of mind. - -------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3429116/cisco-simplifies-kubernetes-container-deployment-with-microsoft-azure-collaboration.html - -作者:[Michael Cooney][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Michael-Cooney/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2019/07/africa_guinea_conakry_harbor_harbour_shipping_containers_cranes_by_viti_gettyimages-1154922310_2400x1600-100802866-large.jpg -[2]: https://www.networkworld.com/cms/article/%20https:/blogs.cisco.com/news/cisco-microsoft%20%E2%80%8E -[3]: https://www.networkworld.com/article/3119362/hybrid-cloud/how-to-make-hybrid-cloud-work.html#tk.nww-fsb -[4]: https://www.networkworld.com/article/3252810/cisco-unveils-container-management-on-hyperflex.html -[5]: https://blogs.cisco.com/datacenter/cisco-integrated-system-for-microsoft-azure-stack-it-is-here-and-shipping -[6]: https://www.networkworld.com/article/3319782/cisco-aws-marriage-simplifies-hybrid-cloud-app-development.html -[7]: https://www.networkworld.com/article/3388679/cisco-taps-into-aws-for-data-center-cloud-applications.html -[8]: https://cloud.google.com/cisco/ -[9]: https://www.facebook.com/NetworkWorld/ -[10]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190731 Remote code execution is possible by exploiting flaws in Vxworks.md b/sources/talk/20190731 Remote code execution is possible by exploiting flaws in Vxworks.md deleted file mode 100644 index 7fa6eaa226..0000000000 --- a/sources/talk/20190731 Remote code execution is possible by exploiting flaws in Vxworks.md +++ /dev/null @@ -1,85 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Remote code execution is possible by exploiting flaws in Vxworks) -[#]: via: (https://www.networkworld.com/article/3428996/remote-code-execution-is-possible-by-exploiting-flaws-in-vxworks.html) -[#]: author: (Jon Gold https://www.networkworld.com/author/Jon-Gold/) - -Remote code execution is possible by exploiting flaws in Vxworks -====== - -![Thinkstock][1] - -Eleven zero-day vulnerabilities in Wind River’s VxWorks, a real-time operating system in use across an advertised 2 billion connected devices, have been discovered by network security vendor Armis. - -Six of the vulnerabilities could enable remote attackers to access unpatched systems without any user interaction, even through a firewall, according to Armis. - -**About IoT:** - - * [What is the IoT?
How the internet of things works][2] - * [What is edge computing and how it’s changing the network][3] - * [Most powerful Internet of Things companies][4] - * [10 Hot IoT startups to watch][5] - * [The 6 ways to make money in IoT][6] - * [What is digital twin technology? [and why it matters]][7] - * [Blockchain, service-centric networking key to IoT success][8] - * [Getting grounded in IoT networking and security][9] - * [Building IoT-ready networks must become a priority][10] - * [What is the Industrial IoT? [And why the stakes are so high]][11] - - - -The vulnerabilities affect all devices running VxWorks version 6.5 and later with the exception of VxWorks 7, issued July 19, which patches the flaws. That means the attack windows may have been open for more than 13 years. - -Armis Labs said that affected devices included SCADA controllers, patient monitors, MRI machines, VoIP phones and even network firewalls, specifying that users in the medical and industrial fields should be particularly quick about patching the software. - -Thanks to remote-code-execution vulnerabilities, unpatched devices can be compromised by a maliciously crafted IP packet that doesn’t need device-specific tailoring, and every vulnerable device on a given network can be targeted more or less simultaneously. - -The Armis researchers said that, because the most severe of the issues targets “esoteric parts of the TCP/IP stack that are almost never used by legitimate applications,” specific rules for the open source Snort security framework can be imposed to detect exploits. - -VxWorks, which has been in use since the 1980s, is a popular real-time OS, used in industrial, medical and many other applications that require extremely low latency and response time. While highly reliable, the inability to install a security agent alongside the operating system makes it vulnerable, said Armis, and the proprietary source code makes it more difficult to detect problems. - -**[ [Prepare to become a Certified Information Systems Security Professional with this comprehensive online course from PluralSight. Now offering a 10-day free trial!][12] ]** - -Armis argued that more attention has to be paid by security researchers to real-time operating systems, particularly given the explosive growth in IoT usage – for one thing, the researchers said, any software that doesn’t get thoroughly researched runs a higher risk of having serious vulnerabilities go unaddressed. For another, the critical nature of many IoT use cases means that the consequences of a compromised device are potentially very serious. - -“It is inconvenient to have your phone put out of use, but it’s an entirely different story to have your manufacturing plant shut down,” the Armis team wrote. “A compromised industrial controller could shut down a factory, and a pwned patient monitor could have a life-threatening effect.” - -In addition to the six headlining vulnerabilities, five somewhat less serious security holes were found. These could lead to consequences ranging from denial of service and leaked information to logic flaws and memory issues. - -More technical details and a fuller overview of the problem can be found at the Armis Labs blog post here, and there are partial lists of companies and devices that run VxWorks available [on Wikipedia][13] and at [Wind River’s customer page][14]. Wind River itself issued a security advisory [here][15], which contains some potential mitigation techniques.
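Wind River’s advisory URL (“ipnet-urgent11”) hints at where several of the flaws live: the rarely exercised TCP urgent-pointer machinery, exactly the kind of “esoteric, almost never used” stack feature the researchers describe. As a purely illustrative sketch (my own toy in Python with scapy, not one of the Snort rules Armis published), a capture script can flag urgent-pointer usage that well-behaved traffic almost never produces:

```
from scapy.all import sniff, TCP  # assumes scapy is installed; capturing needs root

URG = 0x20  # TCP URG flag bit

def check(pkt):
    """Flag TCP segments whose urgent-pointer usage looks anomalous."""
    if TCP not in pkt:
        return
    tcp = pkt[TCP]
    urg_set = bool(int(tcp.flags) & URG)
    if urg_set and tcp.urgptr == 0:
        # URG set with a zero urgent pointer: a corner case that
        # legitimate applications almost never generate.
        print(f"suspicious: URG flag with urgent pointer 0: {pkt.summary()}")
    elif not urg_set and tcp.urgptr != 0:
        print(f"suspicious: urgent pointer {tcp.urgptr} without URG: {pkt.summary()}")

sniff(filter="tcp", prn=check, store=False)
```

Real detection belongs to the published Snort signatures; the point of the toy is only that a protocol feature nobody legitimately uses makes an unusually clean anomaly signal.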
- -Join the Network World communities on [Facebook][16] and [LinkedIn][17] to comment on topics that are top of mind. - --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3428996/remote-code-execution-is-possible-by-exploiting-flaws-in-vxworks.html - -作者:[Jon Gold][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Jon-Gold/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2017/09/iot-security11-100735405-large.jpg -[2]: https://www.networkworld.com/article/3207535/internet-of-things/what-is-the-iot-how-the-internet-of-things-works.html -[3]: https://www.networkworld.com/article/3224893/internet-of-things/what-is-edge-computing-and-how-it-s-changing-the-network.html -[4]: https://www.networkworld.com/article/2287045/internet-of-things/wireless-153629-10-most-powerful-internet-of-things-companies.html -[5]: https://www.networkworld.com/article/3270961/internet-of-things/10-hot-iot-startups-to-watch.html -[6]: https://www.networkworld.com/article/3279346/internet-of-things/the-6-ways-to-make-money-in-iot.html -[7]: https://www.networkworld.com/article/3280225/internet-of-things/what-is-digital-twin-technology-and-why-it-matters.html -[8]: https://www.networkworld.com/article/3276313/internet-of-things/blockchain-service-centric-networking-key-to-iot-success.html -[9]: https://www.networkworld.com/article/3269736/internet-of-things/getting-grounded-in-iot-networking-and-security.html -[10]: https://www.networkworld.com/article/3276304/internet-of-things/building-iot-ready-networks-must-become-a-priority.html -[11]: https://www.networkworld.com/article/3243928/internet-of-things/what-is-the-industrial-iot-and-why-the-stakes-are-so-high.html -[12]: https://pluralsight.pxf.io/c/321564/424552/7490?u=https%3A%2F%2Fwww.pluralsight.com%2Fpaths%2Fcertified-information-systems-security-professional-cisspr -[13]: https://en.wikipedia.org/wiki/VxWorks#Notable_uses -[14]: https://www.windriver.com/customers/ -[15]: https://www.windriver.com/security/announcements/tcp-ip-network-stack-ipnet-urgent11/security-advisory-ipnet/ -[16]: https://www.facebook.com/NetworkWorld/ -[17]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190731 VMware-s Bitfusion acquisition could be a game-changer for GPU computing.md b/sources/talk/20190731 VMware-s Bitfusion acquisition could be a game-changer for GPU computing.md deleted file mode 100644 index 4946824820..0000000000 --- a/sources/talk/20190731 VMware-s Bitfusion acquisition could be a game-changer for GPU computing.md +++ /dev/null @@ -1,58 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (VMware’s Bitfusion acquisition could be a game-changer for GPU computing) -[#]: via: (https://www.networkworld.com/article/3429036/vmwares-bitfusion-acquisition-could-be-a-game-changer-for-gpu-computing.html) -[#]: author: (Andy Patrizio https://www.networkworld.com/author/Andy-Patrizio/) - -VMware’s Bitfusion acquisition could be a game-changer for GPU computing -====== -VMware will integrate Bitfusion technology into vSphere, bolstering VMware’s strategy of supporting AI- and ML-based workloads by virtualizing hardware accelerators. 
-![Vladimir Timofeev / Getty Images][1] - -In a low-key move that went under the radar of a lot of us, last week VMware snapped up a startup called Bitfusion, which makes virtualization software for accelerated computing. It improves performance of virtual machines by offloading processing to accelerator chips, such as GPUs, FPGAs, or other custom ASICs. - -Bitfusion provides sharing of GPU resources among isolated GPU compute workloads, allowing workloads to be shared across the customer’s network. This way workloads are not tied to one physical server but shared as a pool of resources, and if multiple GPUs are brought to bear, performance naturally increases. - -“In many ways, Bitfusion offers for hardware acceleration what VMware offered to the compute landscape several years ago. Bitfusion also aligns well with VMware’s ‘Any Cloud, Any App, Any Device’ vision with its ability to work across AI frameworks, clouds, networks, and formats such as virtual machines and containers,” said Krish Prasad, senior vice president and general manager of the Cloud Platform Business Unit at VMware, in a [blog post][2] announcing the deal. - -**[ Also read: [After virtualization and cloud, what's left on premises?][3] ]** - -When the acquisition closes, VMware will integrate Bitfusion technology into vSphere. Prasad said the inclusion of Bitfusion will bolster VMware’s strategy of supporting artificial intelligence- and machine learning-based workloads by virtualizing hardware accelerators. - -“Multi-vendor hardware accelerators and the ecosystem around them are key components for delivering modern applications. These accelerators can be used regardless of location in the environment—on-premises and/or in the cloud,” he wrote. The platform can be extended to support other accelerator chips, such as FPGAs and ASICs, he wrote. - -Prasad noted that hardware accelerators today are deployed “with bare-metal practices, which force poor utilization, poor efficiencies, and limit organizations from sharing, abstracting, and automating the infrastructure. This provides a perfect opportunity to virtualize them—providing increased sharing of resources and lowering costs.” - -He added: “The platform can share GPUs in a virtualized infrastructure as a pool of network-accessible resources rather than isolated resources per server.” - -This is a real game-changer, much the way VMware added storage virtualization and software-defined networks (SDN) to expand the use of vSphere. It gives them a major competitive advantage over Microsoft Hyper-V and Linux’s KVM now as well. - -By virtualizing and pooling GPUs, it lets users bring multiple GPUs to bear rather than locking one physical processor to a server and application. The same applies to FPGAs and the numerous AI processor chips either on or coming to market. - -### VMware also buys Uhana - -That wasn’t VMware’s only purchase. The company also acquired Uhana, which provides an AI engine specifically for telcos and other carriers that discovers anomalies in the network or application, prioritizes them based on their potential impact, and automatically recommends optimization strategies. That means improved network operations and operational efficiency. - -Join the Network World communities on [Facebook][4] and [LinkedIn][5] to comment on topics that are top of mind.
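To make the pooling idea described above concrete, here is a minimal toy model (my own sketch with invented names, not Bitfusion's actual API) of a broker that leases fractional slices of a network-wide GPU pool instead of pinning a workload to the card in its own server:

```
from dataclasses import dataclass
from typing import List, Optional

@dataclass
class PooledGPU:
    host: str       # server that physically owns the card
    free_mib: int   # unallocated device memory, in MiB

class GpuBroker:
    """Toy broker: hands out fractional GPU leases from a network-wide pool."""

    def __init__(self, gpus: List[PooledGPU]):
        self.gpus = gpus

    def lease(self, mib: int) -> Optional[PooledGPU]:
        # First fit across every host's card; the requesting VM's own server
        # doesn't need a GPU at all, which is the point of remoting the device.
        for gpu in self.gpus:
            if gpu.free_mib >= mib:
                gpu.free_mib -= mib
                return gpu
        return None

pool = GpuBroker([PooledGPU("esx-a", 16384), PooledGPU("esx-b", 16384)])
grant = pool.lease(4096)  # any workload can take a 4 GiB slice wherever room exists
print(grant.host if grant else "pool exhausted")
```

A real implementation also has to forward the accelerator calls over the network and reclaim leases, but the utilization benefit (many small slices instead of one idle dedicated card) falls out of the pooling alone.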
- -------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3429036/vmwares-bitfusion-acquisition-could-be-a-game-changer-for-gpu-computing.html - -作者:[Andy Patrizio][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Andy-Patrizio/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2018/08/clouded_view_of_data_center_server_virtualization_by_vladimir_timofeev_gettyimages-600404124_1200x800-100768156-large.jpg -[2]: https://blogs.vmware.com/vsphere/2019/07/vmware-to-acquire-bitfusion.html -[3]: https://www.networkworld.com/article/3232626/virtualization/extreme-virtualization-impact-on-enterprises.html -[4]: https://www.facebook.com/NetworkWorld/ -[5]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190801 Cisco assesses the top enterprise SD-WAN technology drivers.md b/sources/talk/20190801 Cisco assesses the top enterprise SD-WAN technology drivers.md deleted file mode 100644 index b6f845b4a7..0000000000 --- a/sources/talk/20190801 Cisco assesses the top enterprise SD-WAN technology drivers.md +++ /dev/null @@ -1,96 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Cisco assesses the top enterprise SD-WAN technology drivers) -[#]: via: (https://www.networkworld.com/article/3429186/cisco-assesses-the-top-enterprise-sd-wan-technology-drivers.html) -[#]: author: (Michael Cooney https://www.networkworld.com/author/Michael-Cooney/) - -Cisco assesses the top enterprise SD-WAN technology drivers -====== -Cisco SD-WAN customer National Instruments touts benefits of the technology: Speed, efficiency, security, cost savings -![Getty Images][1] - -Cisco this week celebrated the [second anniversary][2] of its purchase of SD-WAN vendor Viptela and reiterated its expectation that 2019 will see the [technology change][3] enterprise networks in major ways. - -In a blog outlining trends in the SD-WAN world, Anand Oswal, Cisco senior vice president, engineering, in the company’s Enterprise Networking Business described how SD-WAN technology has changed the network for one of its customers, test and measurement systems vendor National Instruments. - -**More about SD-WAN** - - * [How to buy SD-WAN technology: Key questions to consider when selecting a supplier][4] - * [How to pick an off-site data-backup method][5] - * [SD-Branch: What it is and why you’ll need it][6] - * [What are the options for security SD-WAN?][7] - - - -“The existing WAN greatly constrained video conferencing, slowed large software transfers, and couldn’t provide acceptable application performance,” [Oswal wrote][8].
Implementing SD-WAN turned those issues around by: - - * Reducing MPLS spending by 25% while increasing bandwidth by 3,075% - * Categorizing traffic by function and type, sending backup traffic over the Internet under an SLA, eliminating bandwidth bottlenecks on MPLS circuits - * Reducing the time for software updates to replicate across the network from 8 hours to 10 minutes - * Adding new internet-based services used to take months, but with the agility of SD-WAN, new services can be deployed in the cloud immediately - * Eliminating the need for call-admission controls and limiting video quality for conferencing - - - -National Instruments’ bandwidth requirements were growing 10 to 25 percent per year, overwhelming the budget, Luis Castillo, global network team manager, told Cisco in a [case study][9] of the SD-WAN project. “Part of the problem was that these sites can have very different requirements. R&D shops need lots of bandwidth. One site may have a special customer that requires unique segmentation and security. Our contact centers need to support mission-critical voice services. All of that is dependent on the WAN, which means escalating complexity and constantly growing costs.” - -After the shift to SD-WAN, the company no longer has 80 people with diverse IT workloads competing for a single 10-Mbit circuit, Castillo says. - -It’s not just cost savings by supplementing or replacing MPLS with direct internet connections that is motivating the transition to software-defined WAN architecture, Oswal said. “It’s also about gaining flexibility and stability with intelligent, continuously monitored connections to multicloud resources and SaaS applications that are fueling the current SD-WAN transition.” - -In its most recent [SD-WAN Infrastructure Forecast][10], IDC researchers talked about a number of other factors driving SD-WAN evolution. - -"First, traditional enterprise WANs are increasingly not meeting the needs of today's modern digital businesses, especially as it relates to supporting SaaS apps and multi- and hybrid-cloud usage. Second, enterprises are interested in easier management of multiple connection types across their WAN to improve application performance and end-user experience," said [Rohit Mehra][11], vice president, [Network Infrastructure][12] at IDC. "Combined with the rapid embrace of SD-WAN by leading communications service providers globally, these trends continue to drive deployments of SD-WAN, providing enterprises with dynamic management of hybrid WAN connections and the ability to guarantee high levels of quality of service on a per-application basis." - -IDC also said that the SD-WAN infrastructure market continues to be highly competitive with sales increasing 64.9% in 2018 to $1.37 billion. IDC stated Cisco holds the largest share of the SD-WAN infrastructure market, with VMware coming in second followed by Silver Peak, Nokia-Nuage, and Riverbed. - -IDC also [recently wrote][13] about how security is also a key driver in recent SD-WAN deployments. - -“With SD-WAN, mission-critical traffic and assets can be partitioned and protected against vulnerabilities in other parts of the enterprise. This use case appears to be especially popular in verticals such as retail, healthcare, and financial,” IDC wrote. - -"SD-WAN can also protect application traffic from threats within the enterprise and from outside by leveraging a full stack of security solutions included in SD-WAN such as next-gen firewalls, IPS, URL filtering, malware protection, and cloud security."
- -These security features can enable Layer 3-7 protection for WAN traffic regardless of where it's headed - to the cloud or to the data center, IDC wrote. - -Application traffic to the cloud straight from the branch can now be secured using an internet or cloud gateway, IDC wrote. Users, applications and their data at the branch edge can be protected by the stack of security solutions incorporated into the SD-WAN on-premises appliance, vCPE or router, which typically includes  next-gen firewall, intrusion protection, malware protection and URL filtering, IDC wrote. - -Cisco [most recently][14] added support for its cloud-based security gateway – known as Umbrella – to its SD-WAN software offerings.  According to Cisco, Umbrella can provide the first line of defense against threats on the internet. By analyzing and learning from internet activity patterns, Umbrella automatically uncovers attacker infrastructure and blocks requests to malicious destinations before a connection is even established — without adding latency for users. With Umbrella, customers can stop phishing and malware infections earlier, identify already infected devices faster and prevent data exfiltration, Cisco says. - -The Umbrella announcement is on top of other recent SD-WAN security enhancements the company has made. In May Cisco added support for Advanced Malware Protection (AMP) to its million-plus ISR/ASR edge routers in an effort to reinforce branch- and core-network malware protection across the SD-WAN. AMP support is added to a menu of security features already included in Cisco's SD-WAN software including support for URL filtering, Snort Intrusion Prevention, the ability to segment users across the WAN and embedded platform security, including the Cisco Trust Anchor module. - -Last year Cisco added its Viptela SD-WAN technology to the IOS XE version 16.9.1 software that runs its core ISR/ASR routers. - -Join the Network World communities on [Facebook][15] and [LinkedIn][16] to comment on topics that are top of mind. 
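The National Instruments results above all flow from one mechanism: classifying traffic by application and steering each class to whichever transport (MPLS or broadband internet) its policy and current link health allow. A rough sketch of that selection loop, with invented policy names rather than anything from Cisco's implementation:

```
import random  # stand-in for real per-tunnel loss/latency/jitter probes

# Hypothetical policy table: application class -> preferred transport.
POLICY = {
    "voice":  "mpls",      # latency-sensitive, keep on the SLA-backed circuit
    "backup": "internet",  # bulk replication, cheap broadband is fine
    "saas":   "internet",  # cloud-bound, break out locally
}

def link_healthy(link: str, max_loss_pct: float = 1.0) -> bool:
    """Fake probe; a real SD-WAN edge measures each tunnel continuously."""
    return random.uniform(0.0, 2.0) < max_loss_pct

def pick_path(app_class: str) -> str:
    preferred = POLICY.get(app_class, "mpls")
    fallback = "internet" if preferred == "mpls" else "mpls"
    # Fail over when the preferred transport degrades below policy.
    return preferred if link_healthy(preferred) else fallback

for app in ("voice", "backup", "saas"):
    print(app, "->", pick_path(app))
```

Swap the random probe for real measurements and the same loop accounts for both the MPLS savings and the per-application quality-of-service guarantees IDC describes: nothing forces a single transport once path selection is per-application.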
- -------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3429186/cisco-assesses-the-top-enterprise-sd-wan-technology-drivers.html - -作者:[Michael Cooney][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Michael-Cooney/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2018/08/2_networks_smart-city_iot_connected-100769196-large.jpg -[2]: https://www.networkworld.com/article/3193888/why-cisco-needs-sd-wan-vendor-viptela.html -[3]: https://blog.cimicorp.com/?p=3781 -[4]: https://www.networkworld.com/article/3323407/sd-wan/how-to-buy-sd-wan-technology-key-questions-to-consider-when-selecting-a-supplier.html -[5]: https://www.networkworld.com/article/3328488/backup-systems-and-services/how-to-pick-an-off-site-data-backup-method.html -[6]: https://www.networkworld.com/article/3250664/lan-wan/sd-branch-what-it-is-and-why-youll-need-it.html -[7]: https://www.networkworld.com/article/3285728/sd-wan/what-are-the-options-for-securing-sd-wan.html?nsdr=true -[8]: https://blogs.cisco.com/author/anandoswal -[9]: https://www.cisco.com/c/dam/en_us/services/it-case-studies/ni-case-study.pdf -[10]: https://www.idc.com/getdoc.jsp?containerId=prUS45380319 -[11]: https://www.idc.com/getdoc.jsp?containerId=PRF003513 -[12]: https://www.idc.com/getdoc.jsp?containerId=IDC_P2 -[13]: https://www.cisco.com/c/dam/en/us/solutions/collateral/enterprise-networks/intelligent-wan/idc-tangible-benefits.pdf -[14]: https://www.networkworld.com/article/3402079/cisco-offers-cloud-based-security-for-sd-wan-resources.html -[15]: https://www.facebook.com/NetworkWorld/ -[16]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190801 IBM fuses its software with Red Hat-s to launch hybrid-cloud juggernaut.md b/sources/talk/20190801 IBM fuses its software with Red Hat-s to launch hybrid-cloud juggernaut.md deleted file mode 100644 index c1c3ba375e..0000000000 --- a/sources/talk/20190801 IBM fuses its software with Red Hat-s to launch hybrid-cloud juggernaut.md +++ /dev/null @@ -1,68 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (IBM fuses its software with Red Hat’s to launch hybrid-cloud juggernaut) -[#]: via: (https://www.networkworld.com/article/3429596/ibm-fuses-its-software-with-red-hats-to-launch-hybrid-cloud-juggernaut.html) -[#]: author: (Michael Cooney https://www.networkworld.com/author/Michael-Cooney/) - -IBM fuses its software with Red Hat’s to launch hybrid-cloud juggernaut -====== -IBM is starting a potentially huge run at hybrid cloud by tying more than 100 of its products to the Red Hat OpenShift platform. -![Hans \(CC0\)][1] - -IBM has wasted no time aligning its own software with its newly acquired [Red Hat technology][2], saying its portfolio would be transformed to work cloud natively and augmented to run on Red Hat’s OpenShift platform. - -IBM in July [finalized its $34 billion][3] purchase of Red Hat and says it will use the Linux powerhouse's open-source know-how and Linux expertise to grow larger scale hybrid-cloud customer projects and to create a web of partnerships to simplify carrying them out. - -**[ Check out [What is hybrid cloud computing][4] and learn [what you need to know about multi-cloud][5].
| Get regularly scheduled insights by [signing up for Network World newsletters][6]. ]** - -The effort has started with IBM bundling Red Hat’s Kubernetes-based OpenShift Container Platform with more than 100 IBM products in what it calls Cloud Paks. OpenShift lets enterprise customers deploy and manage containers on their infrastructure of choice, be it private or public clouds, including AWS, Microsoft Azure, Google Cloud Platform, Alibaba and IBM Cloud. - -The prepackaged Cloud Paks include a secured Kubernetes container and containerized IBM middleware designed to let customers quickly spin up enterprise-ready containers, the company said. - -Five Cloud Paks exist today: Cloud Pak for Data, Application, Integration, Automation and Multicloud Management. The Paks will ultimately include IBM’s DB2, WebSphere, [API Connect][7], Watson Studio, [Cognos Analytics][8] and more. - -In addition, IBM said it will bring the Red Hat OpenShift Container Platform over to IBM Z mainframes and IBM LinuxONE. Together these two platforms power about 30 billion transactions a day globally, [IBM said][9]. Some of the goals here are to increase container density and help customers build containerized applications that can scale vertically and horizontally. - -“The vision is for OpenShift-enabled IBM software to become the foundational building blocks clients can use to transform their organizations and build across hybrid, multicloud environments,” Hillery Hunter, VP & CTO of IBM Cloud, said in an [IBM blog][10] about the announcement. - -OpenShift is the underlying Kubernetes and container orchestration layer that supports the containerized software, she wrote, and placing the Cloud Paks atop Red Hat OpenShift gives IBM a broad reach immediately. “OpenShift is also where the common services such as logging, metering, and security that IBM Cloud Paks leverage let businesses effectively manage and understand their workloads,” Hunter stated. - -Analysts said the moves were expected but still extremely important for the company to ensure this acquisition is successful. - -“We expect IBM and Red Hat will do the obvious stuff first, and that’s what this mostly is,” said Lee Doyle, principal analyst at Doyle Research. “The challenge will be getting deeper integrations and taking the technology to the next level. What they do in the next six months to a year will be critical.” - -Over the last few years IBM has been evolving its strategy to major on cloud computing and cognitive computing. Its argument against cloud providers like AWS, Microsoft Azure, and Google Cloud is that only 20 percent of enterprise workloads have so far moved to the cloud – the easy 20 percent. The rest are the difficult 80 percent of workloads that are complex, legacy applications, often mainframe based, that have run banking and big business for decades, wrote David Terrar, executive advisor for [Bloor Research][11]. "How do you transform those?" - -That background gives IBM enterprise expertise and customer relationships competitors don't. “IBM has been talking hybrid cloud and multicloud to these customers for a while, and the Red Hat move is like an injection of steroids to the strategy,” Terrar wrote.
"When you add in its automation and cognitive positioning with Watson, and the real-world success with enterprise-grade blockchain implementations like TradeLens and the Food Trust network, I’d argue that IBM is positioning itself as the ‘Enterprise Cloud Company’.” - -Join the Network World communities on [Facebook][12] and [LinkedIn][13] to comment on topics that are top of mind. - --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3429596/ibm-fuses-its-software-with-red-hats-to-launch-hybrid-cloud-juggernaut.html - -作者:[Michael Cooney][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Michael-Cooney/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2017/06/moon-2117426_1280-100726933-large.jpg -[2]: https://www.networkworld.com/article/3317517/the-ibm-red-hat-deal-what-it-means-for-enterprises.html -[3]: https://www.networkworld.com/article/3316960/ibm-closes-34b-red-hat-deal-vaults-into-multi-cloud.html -[4]: https://www.networkworld.com/article/3233132/cloud-computing/what-is-hybrid-cloud-computing.html -[5]: https://www.networkworld.com/article/3252775/hybrid-cloud/multicloud-mania-what-to-know.html -[6]: https://www.networkworld.com/newsletters/signup.html -[7]: https://www.ibm.com/cloud/api-connect -[8]: https://www.ibm.com/products/cognos-analytics -[9]: https://www.ibm.com/blogs/systems/announcing-our-direction-for-red-hat-openshift-for-ibm-z-and-linuxone/?cm_mmc=OSocial_Twitter-_-Systems_Systems+-+LinuxONE-_-WW_WW-_-OpenShift+IBM+Z+and+LinuxONE+BLOG+still+image&cm_mmca1=000001BT&cm_mmca2=10009456&linkId=71365692 -[10]: https://www.ibm.com/blogs/think/2019/08/ibm-software-on-any-cloud/ -[11]: https://www.bloorresearch.com/ -[12]: https://www.facebook.com/NetworkWorld/ -[13]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190809 Goodbye, Linux Journal.md b/sources/talk/20190809 Goodbye, Linux Journal.md deleted file mode 100644 index dd0964db66..0000000000 --- a/sources/talk/20190809 Goodbye, Linux Journal.md +++ /dev/null @@ -1,67 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Goodbye, Linux Journal) -[#]: via: (https://opensource.com/article/19/8/goodbye-linux-journal) -[#]: author: (Jim Hall https://opensource.com/users/jim-hallhttps://opensource.com/users/scottnesbitthttps://opensource.com/users/alanfdoss) - -Goodbye, Linux Journal -====== -Linux Journal's coverage from 1994 to 2019 highlighted Linux’s rise to -an enterprise platform that runs a majority of the world’s servers and -services. -![Linux keys on the keyboard for a desktop computer][1] - -I first discovered Linux in 1993, when I was an undergraduate physics student who wanted the power of Big Unix on my home PC. I remember installing my first Linux distribution, SoftLanding Systems (SLS), and exploring the power of Linux on my ‘386 PC. I was immediately impressed. Since then, I’ve run Linux at home—and even at work. - -In those early days, it felt like I was the only person who knew about Linux. Certainly, there was an online community via Usenet, but there weren’t many other ways to get together with other Linux users—unless you had a local Linux User Group in your area. 
I shared what I knew about Linux with those around me, and we pooled our Linux fu. - -So, it was awesome to learn about a print magazine that was dedicated to all things Linux. In March 1994, Phil Hughes and Red Hat co-founder Bob Young published a new magazine about Linux, named _Linux Journal_. The [first issue][2] featured an "[Interview With Linus, The Author of Linux][3]" by Robert Young, and an article comparing "[Linux Vs. Windows NT and OS/2][4]" by Bernie Thompson. - -From the start, _Linux Journal_ aimed to be a community-driven magazine. Hughes and Young were not the only contributors to the magazine. Instead, they invited others to write about Linux and share what they had learned. In a way, _Linux Journal_ used a model similar to open source software. Anyone could contribute, and the editors acted as "maintainers" to ensure content was top quality and informative. - -_Linux Journal_ also went for a broad audience. The editors realized that a purely technical magazine would lose too many new users, while a magazine written for "newbies" would not attract a more focused audience. In the first issue, [Hughes highlighted][5] both groups of users as the audience _Linux Journal_ was looking for, writing: "We see this part of our audience as being two groups. Lots of the current Linux users have worked professionally with Unix. The other segment is the DOS user who wants to upgrade to a multi-user system. With a combination of tutorials and technical articles, we hope to satisfy the needs of both these groups." - -I was glad to discover _Linux Journal_ in those early days, and I quickly became a subscriber. In time, I contributed my own stories to _Linux Journal_. I’ve written several articles including essays on usability in open source software, Bash shell scripting tricks, and C programming how-tos. - -But my contributions to Linux Journal are meager compared to others. Over the years, I have enjoyed reading many article series from regular contributors. I loved Dave Taylor's "Work the Shell" series about practical and sometimes magical scripts written for the Bash shell. I always turned to Kyle Rankin's "Hack and /" series about cool projects with Linux. And I have enjoyed reading articles from the latest Linux Journal deputy editor Bryan Lunduke, especially a recent geeky article about "[How to Live Entirely in a Terminal][6]" that showed you can still do daily tasks on Linux without a graphical environment. - -Many years later, things took a turn. Linux Journal’s Publisher Carlie Fairchild wrote a seemingly terminal essay [_Linux Journal Ceases Publication_][7] in December 2017 that indicated _Linux Journal_ had "run out of money, and options along with it." But a month later, Carlie updated the news item to report that "*Linux Journal *was saved and brought back to life" by an angel investor. London Trust Media, the parent company of Private Internet Access, injected new funds into Linux Journal to get the magazine back on its feet. _Linux Journal_ resumed regular issues in March 2018. - -But it seems the rescue was not enough. Late in the evening of August 7, 2019, _Linux Journal_ posted a final, sudden goodbye. Kyle Rankin’s essay [_Linux Journal Ceases Publication: An Awkward Goodbye_][8] was preceded with this announcement: - -**IMPORTANT NOTICE FROM LINUX JOURNAL, LLC:** -_On August 7, 2019, Linux Journal shut its doors for good. All staff were laid off and the company is left with no operating funds to continue in any capacity. 
The website will continue to stay up for the next few weeks, hopefully longer for archival purposes if we can make it happen. -–Linux Journal, LLC_ - -The announcement came as a surprise to readers and staff alike. I reached out to Bryan Lunduke, who commented the shutdown was a "total surprise. Was writing an article the night before for an upcoming issue... No indication that things were preparing to fold." The next morning, on August 7, Lunduke said he "had a series of frantic messages from our Editor (Jill) and Publisher (Carlie). They had just found out, effective the night before... _Linux Journal_ was shut down. So we weren't so much being told that Linux Journal is shutting down... as _Linux Journal_ had already been shut down the day before... and we just didn't know it." - -It's the end of an era. And as we salute the passing of _Linux Journal_, I’d like to recognize the indelible mark the magazine has left on the Linux landscape. _Linux Journal_ was the first publication to highlight Linux as a serious platform, and I think that made people take notice. - -And with that seriousness, that maturity, _Linux Journal_ helped Linux shake its early reputation of being a hobby project. _Linux Journal's_ coverage from 1994 to 2019 highlighted Linux’s rise to an enterprise platform that runs a majority of the world’s servers and services. - -I tip my hat to everyone at _Linux Journal_ and any contributor who was part of its journey. It has been a pleasure to work with you over the years. You kept the spirit alive. This may be a painful experience, but I hope everyone ends up in a good place. - --------------------------------------------------------------------------------- - -via: https://opensource.com/article/19/8/goodbye-linux-journal - -作者:[Jim Hall][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://opensource.com/users/jim-hallhttps://opensource.com/users/scottnesbitthttps://opensource.com/users/alanfdoss -[b]: https://github.com/lujun9972 -[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/linux_keyboard_desktop.png?itok=I2nGw78_ (Linux keys on the keyboard for a desktop computer) -[2]: https://www.linuxjournal.com/issue/1 -[3]: https://www.linuxjournal.com/article/2736 -[4]: https://www.linuxjournal.com/article/2734 -[5]: https://www.linuxjournal.com/article/2735 -[6]: https://www.linuxjournal.com/content/without-gui-how-live-entirely-terminal -[7]: https://www.linuxjournal.com/content/linux-journal-ceases-publication -[8]: https://www.linuxjournal.com/content/linux-journal-ceases-publication-awkward-goodbye diff --git a/sources/talk/20190812 Xilinx launches new FPGA cards that can match GPU performance.md b/sources/talk/20190812 Xilinx launches new FPGA cards that can match GPU performance.md deleted file mode 100644 index d53ca0253f..0000000000 --- a/sources/talk/20190812 Xilinx launches new FPGA cards that can match GPU performance.md +++ /dev/null @@ -1,69 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Xilinx launches new FPGA cards that can match GPU performance) -[#]: via: (https://www.networkworld.com/article/3430763/xilinx-launches-new-fpga-cards-that-can-match-gpu-performance.html) -[#]: author: (Andy Patrizio https://www.networkworld.com/author/Andy-Patrizio/) - -Xilinx launches new FPGA cards 
that can match GPU performance -====== -Xilinx says its new FPGA card, the Alveo U50, can match the performance of a GPU in areas of artificial intelligence (AI) and machine learning. -![Thinkstock][1] - -Xilinx has launched a new FPGA card, the Alveo U50, that it claims can match the performance of a GPU in areas of artificial intelligence (AI) and machine learning. - -The company claims the card is the industry’s first low-profile adaptable accelerator with PCIe Gen 4 support, which offers double the throughput over PCIe Gen3. It was finalized in 2017, but cards and motherboards to support it have been slow to come to market. - -The Alveo U50 provides customers with a programmable low-profile and low-power accelerator platform built for scale-out architectures and domain-specific acceleration of any server deployment, on premises, in the cloud, and at the edge. - -**[ Also read: [What is quantum computing (and why enterprises should care)][2] ]** - -Xilinx claims the Alveo U50 delivers 10 to 20 times improvements in throughput and latency as compared to a CPU. One thing's for sure, it beats the competition on power draw. It has a 75 watt power envelope, which is comparable to a desktop CPU and vastly better than a Xeon or GPU. - -For accelerated networking and storage workloads, the U50 card helps developers identify and eliminate latency and data movement bottlenecks by moving compute closer to the data. - -![Xilinx Alveo U50][3] - -The Alveo U50 card is the first in the Alveo portfolio to be packaged in a half-height, half-length form factor. It runs the Xilinx UltraScale+ FPGA architecture, features high-bandwidth memory (HBM2), 100 gigabits per second (100 Gbps) networking connectivity, and support for the PCIe Gen 4 and CCIX interconnects. Thanks to the 8GB of HBM2 memory, data transfer speeds can reach 400Gbps. It also supports NVMe-over-Fabric for high-speed SSD transfers. - -That’s a lot of performance packed into a small card. - -**[ [Get certified as an Apple Technical Coordinator with this seven-part online course from PluralSight.][4] ]** - -### What the Xilinx Alveo U50 can do - -Xilinx is making some big boasts about Alveo U50's capabilities: - - * Deep learning inference acceleration (speech translation): delivers up to 25x lower latency, 10x higher throughput, and significantly improved power efficiency per node compared to GPU-only for speech translation performance. - * Data analytics acceleration (database query): running the TPC-H Query benchmark, Alveo U50 delivers 4x higher throughput per hour and reduced operational costs by 3x compared to in-memory CPU. - * Computational storage acceleration (compression): delivers 20x more compression/decompression throughput, faster Hadoop and big data analytics, and over 30% lower cost per node compared to CPU-only nodes. - * Network acceleration (electronic trading): delivers 20x lower latency and sub-500ns trading time compared to CPU-only latency of 10us. - * Financial modeling (grid computing): running the Monte Carlo simulation, Alveo U50 delivers 7x greater power efficiency compared to GPU-only performance for a faster time to insight, deterministic latency and reduced operational costs. - - - -The Alveo U50 is sampling now with OEM system qualifications in process. General availability is slated for fall 2019. - -Join the Network World communities on [Facebook][5] and [LinkedIn][6] to comment on topics that are top of mind. 
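-
-For a rough sense of what the PCIe Gen 4 claim above means in raw numbers, here is a quick back-of-the-envelope sketch. It assumes the standard published PCIe signaling rates and a x16 slot; these are generic PCIe figures, not Xilinx's own numbers.
-
-```
-# Back-of-the-envelope check of the PCIe Gen3 -> Gen4 doubling claim,
-# using standard per-lane rates (assumed, not taken from the article).
-GEN3_GTS, GEN4_GTS = 8.0, 16.0           # giga-transfers/s per lane
-ENCODING = 128.0 / 130.0                 # 128b/130b line encoding
-LANES = 16                               # a x16 slot
-
-def x16_bandwidth_gbs(gts):
-    """Usable bandwidth of a x16 link in GB/s."""
-    return gts * ENCODING * LANES / 8.0  # bits -> bytes
-
-print(f"PCIe Gen3 x16: {x16_bandwidth_gbs(GEN3_GTS):.1f} GB/s")  # ~15.8
-print(f"PCIe Gen4 x16: {x16_bandwidth_gbs(GEN4_GTS):.1f} GB/s")  # ~31.5
-```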
- --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3430763/xilinx-launches-new-fpga-cards-that-can-match-gpu-performance.html - -作者:[Andy Patrizio][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Andy-Patrizio/ -[b]: https://github.com/lujun9972 -[1]: https://images.techhive.com/images/article/2014/04/bolts-of-light-speeding-through-the-acceleration-tunnel-95535268-100264665-large.jpg -[2]: https://www.networkworld.com/article/3275367/what-s-quantum-computing-and-why-enterprises-need-to-care.html -[3]: https://images.idgesg.net/images/article/2019/08/xilinx-alveo-u50-100808003-medium.jpg -[4]: https://pluralsight.pxf.io/c/321564/424552/7490?u=https%3A%2F%2Fwww.pluralsight.com%2Fpaths%2Fapple-certified-technical-trainer-10-11 -[5]: https://www.facebook.com/NetworkWorld/ -[6]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190815 Extreme-s acquisitions have prepped it to better battle Cisco, Arista, HPE, others.md b/sources/talk/20190815 Extreme-s acquisitions have prepped it to better battle Cisco, Arista, HPE, others.md deleted file mode 100644 index cfe30ea142..0000000000 --- a/sources/talk/20190815 Extreme-s acquisitions have prepped it to better battle Cisco, Arista, HPE, others.md +++ /dev/null @@ -1,64 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Extreme's acquisitions have prepped it to better battle Cisco, Arista, HPE, others) -[#]: via: (https://www.networkworld.com/article/3432173/extremes-acquisitions-have-prepped-it-to-better-battle-cisco-arista-hpe-others.html) -[#]: author: (Michael Cooney https://www.networkworld.com/author/Michael-Cooney/) - -Extreme's acquisitions have prepped it to better battle Cisco, Arista, HPE, others -====== -Extreme has bought cloud, SD-WAN and data center technologies that make it more prepared to take on its toughest competitors. -Extreme Networks has in recent months restyled the company with data-center networking technology acquisitions and upgrades, but now comes the hard part – executing with enterprise customers and effectively competing with the likes of Cisco, VMware, Arista, Juniper, HPE and others. - -The company’s latest and perhaps most significant long-term move was closing the [acquisition of wireless-networking vendor Aerohive][1] for about $210 million.  The deal brings Extreme Aerohive’s wireless-networking technology – including its WiFi 6 gear, SD-WAN software and cloud-management services. - -**More about edge networking** - - * [How edge networking and IoT will reshape data centers][2] - * [Edge computing best practices][3] - * [How edge computing can help secure the IoT][4] - - - -With the Aerohive technology, Extreme says customers and partners will be able to mix and match a broader array of software, hardware, and services to create networks that support their unique needs, and that can be managed and automated from the enterprise edge to the cloud. - -The Aerohive buy is just the latest in a string of acquisitions that have reshaped the company. In the past few years the company has acquired networking and data-center technology from Avaya and Brocade, and it bought wireless player Zebra Technologies in 2016 for $55 million. 
-
-While it has been a battle to integrate and get solid sales footing for those acquisitions – particularly Brocade and Avaya – the company says those challenges are behind it and that the Aerohive integration will be much smoother.
-
-“After scaling Extreme’s business to $1B in revenue [for FY 2019, which ended in June] and expanding our portfolio to include end-to-end enterprise networking solutions, we are now taking the next step to transform our business to add sustainable, subscription-oriented cloud-based solutions that will enable us to drive recurring revenue and improved cash-flow generation,” said Extreme CEO Ed Meyercord at the firm’s [FY 19 financial analysts][5] call.
-
-The strategy of moving more toward software-oriented, cloud-based revenue generation and technology development is brand new for Extreme. The company says it expects to generate as much as 30 percent of revenues from recurring charges in the near future. The tactic was enabled in large part by the Aerohive buy, which doubled Extreme’s customer base to 60,000 and its sales partners to 11,000, and whose revenues are recurring and cloud-based. The acquisition also created the number-three enterprise Wireless LAN company behind Cisco and HPE/Aruba.
-
-“We are going to take this Aerohive system and expand across our entire portfolio and use it to deliver common, simplified software with feature packages for on-premises or in-cloud based on customers' use case,” added Norman Rice, Extreme’s Chief Marketing, Development and Product Operations Officer. “We have never really been in any cloud conversations before so for us this will be a major add.”
-
-Indeed, the Aerohive move is key for the company’s future, analysts say.
-
---------------------------------------------------------------------------------
-
-via: https://www.networkworld.com/article/3432173/extremes-acquisitions-have-prepped-it-to-better-battle-cisco-arista-hpe-others.html
-
-作者:[Michael Cooney][a]
-选题:[lujun9972][b]
-译者:[译者ID](https://github.com/译者ID)
-校对:[校对者ID](https://github.com/校对者ID)
-
-本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
-
-[a]: https://www.networkworld.com/author/Michael-Cooney/
-[b]: https://github.com/lujun9972
-[1]: https://www.networkworld.com/article/3405440/extreme-targets-cloud-services-sd-wan-wifi-6-with-210m-aerohive-grab.html
-[2]: https://www.networkworld.com/article/3291790/data-center/how-edge-networking-and-iot-will-reshape-data-centers.html
-[3]: https://www.networkworld.com/article/3331978/lan-wan/edge-computing-best-practices.html
-[4]: https://www.networkworld.com/article/3331905/internet-of-things/how-edge-computing-can-help-secure-the-iot.html
-[5]: https://seekingalpha.com/article/4279527-extreme-networks-inc-extr-ceo-ed-meyercord-q4-2019-results-earnings-call-transcript
diff --git a/sources/talk/20190815 Nvidia rises to the need for natural language processing.md b/sources/talk/20190815 Nvidia rises to the need for natural language processing.md
deleted file mode 100644
index c2b64d7f63..0000000000
--- a/sources/talk/20190815 Nvidia rises to the need for natural language processing.md
+++ /dev/null
@@ -1,71 +0,0 @@
-[#]: collector: (lujun9972)
-[#]: translator: ( )
-[#]: reviewer: ( )
-[#]: publisher: ( )
-[#]: url: ( )
-[#]: subject: (Nvidia rises to the need for natural language processing)
-[#]: via: (https://www.networkworld.com/article/3432203/nvidia-rises-to-the-need-for-natural-language-processing.html)
-[#]: author: (Andy Patrizio https://www.networkworld.com/author/Andy-Patrizio/)
-
-Nvidia rises to the need for natural language processing
-======
-As the demand for natural language processing grows for chatbots and AI-powered interactions, more companies will need systems that can provide it. Nvidia says its platform can handle it.
-![andy.brandon50 \(CC BY-SA 2.0\)][1]
-
-Nvidia is boasting of a breakthrough in conversational natural language processing (NLP) training and inference, enabling more complex interchanges between customers and chatbots with immediate responses.
-
-The need for such technology is expected to grow, as the number of digital voice assistants alone is expected to climb from 2.5 billion to 8 billion within the next four years, according to Juniper Research, while Gartner predicts that by 2021, 15% of all customer service interactions will be completely handled by AI, an increase of 400% from 2017.
-
-The company said its DGX-2 AI platform trained the BERT-Large AI language model in less than an hour and performed AI inference in 2+ milliseconds, making it possible “for developers to use state-of-the-art language understanding for large-scale applications.”
-
-**[ Also read: [What is quantum computing (and why enterprises should care)][2] ]**
-
-BERT, or Bidirectional Encoder Representations from Transformers, is a Google-powered AI language model that many developers say has better accuracy than humans in some performance evaluations. It’s all discussed [here][3].
-
-### Nvidia sets natural language processing records
-
-All told, Nvidia is claiming three NLP records:
-
-**1\. Training:** Running the largest version of the BERT language model, an Nvidia DGX SuperPOD with 92 Nvidia DGX-2H systems running 1,472 V100 GPUs cut training from several days to 53 minutes. A single DGX-2 system, which is about the size of a tower PC, trained BERT-Large in 2.8 days.
-
-“The quicker we can train a model, the more models we can train, the more we learn about the problem, and the better the results get,” said Bryan Catanzaro, vice president of applied deep learning research, in a statement.
-
-**2\. Inference:** Using Nvidia T4 GPUs on its TensorRT deep learning inference platform, Nvidia performed inference on the BERT-Base SQuAD dataset in 2.2 milliseconds, well under the 10 millisecond processing threshold for many real-time applications, and far ahead of the 40 milliseconds measured with highly optimized CPU code.
-
-**3\. Model:** Nvidia said its new custom model, called Megatron, has 8.3 billion parameters, making it 24 times larger than BERT-Large, and the world's largest language model based on Transformers, the building block used for BERT and other natural language AI models.
-
-In a move sure to make FOSS advocates happy, Nvidia is also making a ton of source code available via [GitHub][4].
-
- * NVIDIA GitHub BERT training code with PyTorch
 * NGC model scripts and check-points for TensorFlow
 * TensorRT optimized BERT Sample on GitHub
 * Faster Transformer: C++ API, TensorRT plugin, and TensorFlow OP
 * MXNet Gluon-NLP with AMP support for BERT (training and inference)
 * TensorRT optimized BERT Jupyter notebook on AI Hub
 * Megatron-LM: PyTorch code for training massive Transformer models
-
-
-
-Not that any of this is easily consumed. We’re talking very advanced AI code. Very few people will be able to make heads or tails of it. But the gesture is a positive one.
-
-Join the Network World communities on [Facebook][5] and [LinkedIn][6] to comment on topics that are top of mind.
-
---------------------------------------------------------------------------------
-
-via: https://www.networkworld.com/article/3432203/nvidia-rises-to-the-need-for-natural-language-processing.html
-
-作者:[Andy Patrizio][a]
-选题:[lujun9972][b]
-译者:[译者ID](https://github.com/译者ID)
-校对:[校对者ID](https://github.com/校对者ID)
-
-本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
-
-[a]: https://www.networkworld.com/author/Andy-Patrizio/
-[b]: https://github.com/lujun9972
-[1]: https://images.idgesg.net/images/article/2019/04/alphabetic_letters_characters_language_by_andybrandon50_cc_by-sa_2-0_1500x1000-100794409-large.jpg
-[2]: https://www.networkworld.com/article/3275367/what-s-quantum-computing-and-why-enterprises-need-to-care.html
-[3]: https://medium.com/ai-network/state-of-the-art-ai-solutions-1-google-bert-an-ai-model-that-understands-language-better-than-92c74bb64c
-[4]: https://github.com/NVIDIA/TensorRT/tree/release/5.1/demo/BERT/
-[5]: https://www.facebook.com/NetworkWorld/
-[6]: https://www.linkedin.com/company/network-world
diff --git a/sources/talk/20190826 VMware plan elevates Kubernetes to star enterprise status.md b/sources/talk/20190826 VMware plan elevates Kubernetes to star enterprise status.md
deleted file mode 100644
index f98bdf351d..0000000000
--- a/sources/talk/20190826 VMware plan elevates Kubernetes to star enterprise status.md
+++ /dev/null
@@ -1,68 +0,0 @@
-[#]: collector: (lujun9972)
-[#]: translator: ( )
-[#]: reviewer: ( )
-[#]: publisher: ( )
-[#]: url: ( )
-[#]: subject: (VMware plan elevates Kubernetes to star enterprise status)
-[#]: via: (https://www.networkworld.com/article/3434063/vmware-plan-elevates-kubernetes-to-star-enterprise-status.html)
-[#]: author: (Michael Cooney https://www.networkworld.com/author/Michael-Cooney/)
-
-VMware plan elevates Kubernetes to star enterprise status
-======
-VMware rolls out its Tanzu platform to help customers build, run and manage Kubernetes containers
-![Daniel Masaoka][1]
-
-San Francisco – VMware has announced an initiative that will help make it easier for current vSphere customers to build and manage Kubernetes containers as the platform evolves.
-
-The company, at its VMworld customer event, announced VMware Tanzu, which is made up of myriad new and existing VMware technologies that create a portfolio of products and services aimed at enterprises looking to more quickly build software in Kubernetes containers.
-
-[Learn how to make hybrid cloud work][2]
-
-VMware believes that Kubernetes has emerged as the infrastructure layer to accommodate a diversity of applications. VMware says that from 2018 to 2023 – with new tools/platforms, more developers, agile methods, and lots of code reuse – 500 million new logical apps will be created, serving the needs of many application types and spanning all types of environments.
-
-“We view Tanzu as a comprehensive environment for customers to bridge between the development and operational world. It’ll be a super-powerful, enterprise-grade Kubernetes platform. Kubernetes is the main tool for this transition and we now have a lot of work to do to make it work,” said Pat Gelsinger, CEO of VMware, at the VMworld event.
-
-Gelsinger noted that VMware’s investments in Kubernetes technologies, including its buy of Heptio, Bitnami and [now Pivotal,][3] make the company a top-three open-source contributor to Kubernetes.
-
-Key to the grand Tanzu plan is technology VMware calls Project Pacific, which will add Kubernetes to vSphere – the company’s flagship virtualization software. By embedding Kubernetes into the control plane of vSphere, it will enable the convergence of containers and VMs onto a single platform. Project Pacific will also add a container runtime into the hypervisor, VMware stated.
-
-The new native pods for VMware's bare-metal hypervisor ESXi will combine the best properties of Kubernetes pods and VMs to help deliver a secure and high-performance runtime for mission-critical workloads. Additionally, Project Pacific will deliver a native virtual network spanning VMs and containers, VMware stated.
-
-IT operators will use vSphere tools to deliver Kubernetes clusters to developers, who can then use Kubernetes APIs to access VMware’s [software-defined data-center][4] (SDDC) infrastructure. With Project Pacific, both developers and IT operators will gain a consistent view via Kubernetes constructs within vSphere.
-
-“Project Pacific will embed Kubernetes into the control plane of vSphere, for unified access to compute, storage and networking resources, and also converge VMs and containers using the new Native Pods that are high performing, secure and easy to consume," wrote Kit Colbert, vice president and CTO of VMware’s Cloud Platform business unit, in a [blog about Project Pacific][5]. “Concretely this will mean that IT Ops can see and manage Kubernetes objects (e.g. pods) from the vSphere Client. It will also mean all the various vSphere scripts, third-party tools, and more will work against Kubernetes.”
-
-Tanzu will also feature a single management package – VMware Tanzu Mission Control – which will function as a single point of control where customers can manage Kubernetes clusters regardless of where they are running, the company stated.
-
-Tanzu also utilizes technology VMware bought from Bitnami, which offers a catalog of pre-built, scanned, tested and maintained Kubernetes application content. The Bitnami application catalog supports and has been certified for all major Kubernetes platforms, including VMware PKS.
-
-Tanzu also integrates Pivotal Container Service (PKS), the container technology VMware currently develops with Pivotal – a company it said just last week it intends to acquire. PKS delivers Kubernetes-based container services for multi-cloud enterprises and service providers.
-
-With Project Pacific, IT will have unified visibility into vCenter Server for Kubernetes clusters, containers and existing VMs, as well as apply enterprise-grade vSphere capabilities (like high availability, Distributed Resource Scheduler, and vMotion) at the app level, Colbert wrote.
-
-VMware didn’t say when Tanzu will become part of vSphere, but as features get baked into the platform and tested, customers could expect it “soon,” VMware executives said.
-
-“Kubernetes can help organizations achieve consistency and drive developer velocity across a variety of infrastructures, but enterprises also require effective control, policy and security capabilities. Building on its acquisitions, organic innovation and open-source contributions, VMware has staked out its place as a leader in this rapidly evolving cloud-native industry,” said 451 Research Principal Analyst Jay Lyman in a statement.
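-
-To make the developer side of that model concrete, here is a minimal sketch of what consuming an IT-provisioned cluster through plain Kubernetes APIs could look like. It uses the standard open-source Python `kubernetes` client; the namespace is hypothetical and nothing below is VMware-specific.
-
-```
-# Minimal sketch: a developer talking plain Kubernetes to a cluster
-# that IT provisioned through vSphere. Assumes `pip install kubernetes`
-# and a kubeconfig handed out by the ops team; "demo-ns" is hypothetical.
-from kubernetes import client, config
-
-config.load_kube_config()  # reads ~/.kube/config
-v1 = client.CoreV1Api()
-
-# List the pods the developer can see, exactly as on any other cluster.
-for pod in v1.list_namespaced_pod(namespace="demo-ns").items:
-    print(pod.metadata.name, pod.status.phase)
-```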
- -Join the Network World communities on [Facebook][6] and [LinkedIn][7] to comment on topics that are top of mind. - --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3434063/vmware-plan-elevates-kubernetes-to-star-enterprise-status.html - -作者:[Michael Cooney][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Michael-Cooney/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2017/08/nwin_016_vmwareceo_edge-100733116-large.jpg -[2]: https://www.networkworld.com/article/3119362/hybrid-cloud/how-to-make-hybrid-cloud-work.html#tk.nww-fsb -[3]: https://www.networkworld.com/article/3433916/vmware-spends-48b-to-grab-pivotal-carbon-black-to-secure-develop-integrated-cloud-world.html?nsdr=true -[4]: https://www.networkworld.com/article/3340259/vmware-s-transformation-takes-hold.html -[5]: https://blogs.vmware.com/vsphere/2019/08/introducing-project-pacific.html -[6]: https://www.facebook.com/NetworkWorld/ -[7]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190827 VMware boosts load balancing, security intelligence, analytics.md b/sources/talk/20190827 VMware boosts load balancing, security intelligence, analytics.md deleted file mode 100644 index dfd7da4969..0000000000 --- a/sources/talk/20190827 VMware boosts load balancing, security intelligence, analytics.md +++ /dev/null @@ -1,83 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (VMware boosts load balancing, security intelligence, analytics) -[#]: via: (https://www.networkworld.com/article/3434576/vmware-boosts-load-balancing-security-intelligence-analytics.html) -[#]: author: (Michael Cooney https://www.networkworld.com/author/Michael-Cooney/) - -VMware boosts load balancing, security intelligence, analytics -====== -At VMworld, VMware says its NSX networking software looks to help customers boost performance, management of virtualized cloud resources. -![Thinkstock][1] - -SAN FRANCISCO – VMware has added new features to its core networking software that will let customers more securely control cloud application traffic running on virtual machines, containers or bare metal.  - -At its VMworld event, the company announced a new version of the company’s NSX networking software with support for the cloud-based advanced load balancer technology it recently acquired from Avi Networks. - -**[ Also see [How to plan a software-defined data-center network][2] and [Efficient container use requires data-center software networking][3].]** - -The load balancer is included in VMware vRealize Network Insight 5.0 and tied to NSX Intelligence software that lets customers optimize network performance and availability in virtual and physical networks. The load balancer includes a web application firewall and analytics features to help customers securely control and manage traffic.  - -[VMware bought Avi in June][4] with the plan to punch up its data-center network-virtualization capabilities by adding Avi’s load balancing, analytics and application-delivery technology to NSX. 
Avi’s integration with VMware NSX delivers an application-services fabric that synchronizes with the NSX controller to provide automated, elastic load balancing, including real-time analytics for applications deployed in a software-defined network environment. The Avi technology also monitors, scales and reconfigures application services in real time in response to changing performance requirements.
-
-“The load balancer uses a modern interface and architecture to deliver and optimize application delivery in a dynamic fashion," said Rohit Mehra, vice president, Network Infrastructure for IDC. "Leveraging inbuilt advanced analytics and monitoring to deliver scale that is much needed for cloud applications and micro-services, the advanced load balancer will essentially be a nice add-on option to VMware’s NSX networking portfolio. While many customers may benefit from its integration into NSX, VMware will likely keep it as an optional add-on, given the vast majority of its networking clients currently use other ADC platforms.”
-
-NSX-T Data Center software is targeted at organizations looking to support multivendor cloud-native applications, [bare-metal][5] workloads, [hypervisor][6] environments and the growing hybrid and multi-cloud worlds. The software offers a range of Layer 2 to Layer 7 services for workloads running on all types of infrastructure – virtual machines, containers, physical servers and both private and public clouds. NSX-T is the underpinning technology for VMware’s overarching Virtual Cloud Network portfolio, which offers a communications-software layer to connect everything from the data center to cloud and edge.
-
-“NSX now provides a complete set of networking services offered in software. Customers don’t need dedicated hardware systems to do switching, routing or traffic load balancing as NSX treats VM, container and app traffic all the same from the cloud to data center and network edge,” said Tom Gillis, VMware senior vice president and general manager, networking and security business unit.
-
-Now customers can distribute workloads uniformly across the network, improving capacity, efficiency and reliability, he said.
-
-Speaking at the event, a VMware customer said VMware NSX-T Data Center is helping the company secure workloads at a granular level with micro-segmentation, and to fundamentally re-think network design. "We are looking to develop apps as quickly as possible and use NSX to do automation and move faster,” said [Andrew Hrycaj][7], principal network engineer at IHS Markit – a business information provider headquartered in London.
-
-NSX also helps IT manage a common security policy across different platforms, from containers, to the public cloud with AWS and Azure, to on-prem, simplifying operations and helping with regulatory compliance, while fostering a pervasive security strategy, Hrycaj said.
-
-At VMworld the company announced version 2.5 of NSX, which includes a distributed analytics engine called NSX Intelligence that VMware says will help eliminate blind spots to reduce security risk and accelerate security-incident remediation through visualization and deep insight into every flow across the entire data center.
-
-“Traditional approaches involve sending extensive packet data and telemetry to multiple disparate centralized engines for analysis, which increase cost, operational complexity, and limit the depth of analytics,” wrote VMware’s Umesh Mahajan, a senior vice president and general manager, networking and security, in a [blog about version 2.5][8].
-
-“In contrast, NSX Intelligence, built natively within the NSX platform, distributes the analytics within the hypervisor on each host, sending back relevant metadata… [and providing] detailed application-topology visualization, automated security-policy recommendations, continuous monitoring of every flow, and an audit trail of security policies, all built into the NSX management console.”
-
-IDC’s Mehra said: “The NSX Intelligence functionality is indeed very interesting, in that it delivers on the emerging need for deeper visibility and analytics capabilities in cloud IT environments. This can then be used either for network and app optimization goals, or in many cases, will facilitate NSX security and policy enforcement via micro-segmentation and other tools. This functionality, built into NSX, runs parallel to vRealize Network Insight, so it will be interesting to see how they mirror, or rather, complement each other,” he said.
-
-NSX-T 2.5 also introduces a new deployment and operational approach VMware calls Native Cloud Enforced mode.
-
-“This mode provides a consistent policy model across the hybrid cloud network and reduces overhead by eliminating the need to install NSX tools in workload VMs in the public cloud,” Mahajan wrote. “The NSX security policies are translated into the cloud provider’s native security constructs via APIs, enabling common and centralized policy enforcement across clouds.”
-
-Networking software vendor Apstra got into the NSX act by announcing it had more deeply integrated the Apstra Operating System (AOS) with NSX.
-
-AOS provides tighter design and operational interoperability between the underlying physical network and software-defined overlay networks, with a solution that frees customers from being locked into any specific network hardware vendor, said Mansour Karam, CEO and founder of Apstra.
-
-AOS 3.1 adds automation to provide consistent network and security policy for workloads across the physical and virtual/NSX infrastructure, Apstra said. AOS supports VMware vSphere and allows for automatic remediation of network anomalies. AOS’ intent-based analytics perform regular network checks to ensure the configurations of the Apstra-managed environment and the vSphere servers stay in sync.
-
-Like other AOS releases, version 3.1 is hardware agnostic and integrates with other networking vendors, including Cisco, Arista, Dell and Juniper, as well as other vendors such as Microsoft and Cumulus.
-
-Big Switch also announced that it has extended its Enterprise Virtual Private Cloud (E-VPC) integration to the VMware Cloud Foundation (VCF) and NSX-T. The company's Big Cloud Fabric (BCF) underlay now fully integrates with VMware’s software-defined data center (SDDC) portfolio, including NSX-T, vSphere, VxRail and vSAN, providing what the company calls unmatched automation, visibility and troubleshooting capabilities.
-
-Join the Network World communities on [Facebook][9] and [LinkedIn][10] to comment on topics that are top of mind.
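-
-Mahajan's point about translating NSX policies into a cloud provider's native security constructs is easier to picture with a toy example. The sketch below is purely illustrative – the rule schema and the security-group-style output are invented for illustration, not VMware's or AWS's actual APIs.
-
-```
-# Toy illustration of "translate an abstract segmentation rule into a
-# cloud provider's native construct." Both the input rule format and
-# the security-group-style output are invented for illustration only.
-def to_cloud_native(rule: dict) -> dict:
-    """Map a generic allow-rule onto a security-group-like dict."""
-    return {
-        "IpProtocol": rule["protocol"],
-        "FromPort": rule["port"],
-        "ToPort": rule["port"],
-        "UserIdGroupPairs": [{"GroupId": rule["source_group"]}],
-    }
-
-# Hypothetical rule: allow HTTPS from the web tier.
-rule = {"protocol": "tcp", "port": 443, "source_group": "sg-web-tier"}
-print(to_cloud_native(rule))
-```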
- --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3434576/vmware-boosts-load-balancing-security-intelligence-analytics.html - -作者:[Michael Cooney][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Michael-Cooney/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2017/09/networking-100735059-large.jpg -[2]: https://www.networkworld.com/article/3284352/data-center/how-to-plan-a-software-defined-data-center-network.html -[3]: https://www.networkworld.com/article/3297379/data-center/efficient-container-use-requires-data-center-software-networking.html -[4]: https://www.networkworld.com/article/3402981/vmware-eyes-avi-networks-for-data-center-software.html -[5]: https://www.networkworld.com/article/3261113/why-a-bare-metal-cloud-provider-might-be-just-what-you-need.html?nsdr=true -[6]: https://www.networkworld.com/article/3243262/what-is-a-hypervisor.html?nsdr=true -[7]: https://www.networkworld.com/article/3223189/how-network-automation-can-speed-deployments-and-improve-security.html -[8]: https://blogs.vmware.com/networkvirtualization/2019/08/nsx-t-2-5.html/ -[9]: https://www.facebook.com/NetworkWorld/ -[10]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190828 VMware touts hyperscale SD-WAN.md b/sources/talk/20190828 VMware touts hyperscale SD-WAN.md deleted file mode 100644 index 77c42a98b5..0000000000 --- a/sources/talk/20190828 VMware touts hyperscale SD-WAN.md +++ /dev/null @@ -1,88 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (VMware touts hyperscale SD-WAN) -[#]: via: (https://www.networkworld.com/article/3434619/vmware-touts-hyperscale-sd-wan.html) -[#]: author: (Michael Cooney https://www.networkworld.com/author/Michael-Cooney/) - -VMware touts hyperscale SD-WAN -====== -VMware is teaming up with Dell/EMC to offer a hardware/software package rolled up into a managed SD-WAN service. -BlueBay2014 / Getty Images - -SAN FRANCISCO –  VMware teamed with Dell/EMC this week to deliver an SD-WAN service that promises to greatly simplify setting up and supporting wide-area-network connectivity. - -The Dell EMC SD-WAN Solution is a package of VMware software with Dell hardware and software that will be managed by Dell and sold as a package by both companies and their partners. - -The package, introduced at the [VMworld event][1] here, includes VMware SD-WAN by VeloCloud software available as a subscription coupled with appliances available in multiple configurations capable of handling 10Mbps to 10Gbps of traffic, depending on customer need, said [Sanjay Uppal,][2] vice president and general manager of VMware’s VeloCloud Business Unit. - -**More about SD-WAN** - - * [How to buy SD-WAN technology: Key questions to consider when selecting a supplier][3] - * [How to pick an off-site data-backup method][4] - * [SD-Branch: What it is and why you’ll need it][5] - * [What are the options for security SD-WAN?][6] - - - -“The package is a much simpler way for customers to quickly set up a modern SD-WAN, especially for those customers who don’t have a lot of IT personnel to handle setting up and configuring an SD-WAN,” Uppal said. 
“Branch office networking can be complex and expensive, and this package uses subscription pricing and supports cloud-like capabilities and economics.”
-
-Dell EMC and VMware also announced SmartFabric Director, software that can be part of the service offering. Director enables data-center operators to build, operate and monitor an open network-underlay fabric based on Dell EMC PowerSwitch switches.
-
-According to Dell, organizations that have embraced overlay software-defined networks need to make sure their physical, underlay networks are tuned to work with the SDN. "A lack of visibility between the two layers can lead to provisioning and configuration errors, hampering network performance,” Dell stated.
-
-The Director also supports flexible streaming telemetry to gather key operational data and statistics from the fabric switches it oversees, so customers can use it in security and other day-to-day operations, Dell said.
-
-Analysts said the key to the VMware/Dell package isn’t so much the technology but the fact that it can be sold by so many of Dell and VMware’s partners.
-
-"Dell will lead on the sales motion with an SD-WAN-as-a-Service offering leveraging its [customer premises equipment] platforms and global service and support capabilities, leveraging SD-WAN technology from VMware/VeloCloud,” said Rohit Mehra, vice president, Network Infrastructure for IDC.
-
-VMware also used its VMworld event to say its VeloCloud SD-WAN platform and the aggregate data gathered from customer networks will let the company offer more powerful network-health and control mechanisms in the future.
-
-“The SD-WAN VMware/VeloCloud has actually achieved a milestone we think is significant across multiple dimensions. One is architecture. We have proven that we can get to tens of thousands of edges with a single network. In the aggregate, we are crossing 150,000 gateways, over 120 points-of-presence,” Uppal said.
-
-VMware/VeloCloud supports gateways across major cloud providers including Amazon Web Services, Microsoft Azure, Google Cloud Platform and IBM Cloud, as well as multiple carrier underlay networks.
-
-“From all of those endpoints we can see how the underlay network is performing, what applications are running on it and security threat information. Right now we can use that information to help IT intervene and fix problems manually,” Uppal said. Long-term, the goal is to use the data to train algorithms that VMware is developing to promote self-healing networks that could, for example, detect outages and automatically reroute traffic around them.
-
-The amount of data VMware gathers from cloud, branch-office and SD-WAN endpoints amounts to a treasure trove. “That is all part of the hyperscale idea," Uppal said.
-
-There are a number of trends driving the increased use of SD-WAN technologies, Uppal said, a major one being the increased use of containers and cloud-based applications that need access from the edge. “The scope of clients needing SD-WAN service access to the data center or cloud resources is growing and changing rapidly,” he said.
-
-In the most recent IDC [SD-WAN Infrastructure Forecast][7] report, Mehra wrote about a number of other factors driving SD-WAN evolution. For example:
-
- * Traditional enterprise WANs are increasingly not meeting the needs of today's digital businesses, especially as it relates to supporting SaaS apps and multi- and hybrid-cloud usage.
- * Enterprises are interested in easier management of multiple connection types across their WAN to improve application performance and end-user experience. - - - -“Combined with the rapid embrace of SD-WAN by leading communications service providers globally, these trends continue to drive deployments of SD-WAN, providing enterprises with dynamic management of hybrid WAN connections and the ability to guarantee high levels of quality of service on a per-application basis,” Mehra wrote in the report. - -The report also said that the SD-WAN infrastructure market continues to be highly competitive with sales increasing 64.9% in 2018 to $1.37 billion. IDC stated Cisco holds the largest share of the SD-WAN infrastructure market, with VMware coming in second followed by Silver Peak, Nokia-Nuage, and Riverbed. - -Join the Network World communities on [Facebook][8] and [LinkedIn][9] to comment on topics that are top of mind. - --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3434619/vmware-touts-hyperscale-sd-wan.html - -作者:[Michael Cooney][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Michael-Cooney/ -[b]: https://github.com/lujun9972 -[1]: https://www.networkworld.com/article/3434576/vmware-boosts-load-balancing-security-intelligence-analytics.html -[2]: https://www.networkworld.com/article/3387641/beyond-sd-wan-vmwares-vision-for-the-network-edge.html -[3]: https://www.networkworld.com/article/3323407/sd-wan/how-to-buy-sd-wan-technology-key-questions-to-consider-when-selecting-a-supplier.html -[4]: https://www.networkworld.com/article/3328488/backup-systems-and-services/how-to-pick-an-off-site-data-backup-method.html -[5]: https://www.networkworld.com/article/3250664/lan-wan/sd-branch-what-it-is-and-why-youll-need-it.html -[6]: https://www.networkworld.com/article/3285728/sd-wan/what-are-the-options-for-securing-sd-wan.html?nsdr=true -[7]: https://www.idc.com/getdoc.jsp?containerId=prUS45380319 -[8]: https://www.facebook.com/NetworkWorld/ -[9]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190903 IT Leaders Need to Get Aggressive with SD-WAN.md b/sources/talk/20190903 IT Leaders Need to Get Aggressive with SD-WAN.md deleted file mode 100644 index 67f2bc4a5c..0000000000 --- a/sources/talk/20190903 IT Leaders Need to Get Aggressive with SD-WAN.md +++ /dev/null @@ -1,47 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (IT Leaders Need to Get Aggressive with SD-WAN) -[#]: via: (https://www.networkworld.com/article/3435119/it-leaders-need-to-get-aggressive-with-sd-wan.html) -[#]: author: (Zeus Kerravala https://www.networkworld.com/author/Zeus-Kerravala/) - -IT Leaders Need to Get Aggressive with SD-WAN -====== - -grynold - -Late last year I moderated a MicroScope roundtable in the UK on the challenges and opportunities of [SD-WAN][1]. The representatives included 12 leading SD-WAN vendors, including Michael O’Brien, vice president of worldwide channel sales for [Silver Peak][2]. I started off the discussion by introducing a data point from a TechTarget survey (TechTarget owns MicroScope) that only 26 percent of companies surveyed had an SD-WAN deployment underway. 
This spans any stage of the deployment cycle, including testing. Given the hype around SD-WAN and how many conversations I have with IT leaders about it, this number seemed low to me, so I wanted to get a better feel for what the leading vendors thought about it.
-
-Going into the roundtable, I wasn’t sure if the vendor community would think this number was too high or too low, but I did expect to get uniformity in their responses. Instead, their responses were all over the map. The most pessimistic view came from a smaller and relatively new entrant into the market, who felt that less than five percent of companies had an SD-WAN deployment underway. The most optimistic was Silver Peak’s O’Brien, who felt that the number was a bit low and should be closer to around one third. Another industry leader supported O’Brien when he said that 55 percent of its customers plan to make an SD-WAN decision in the next nine months. Everyone else provided a perspective that fell somewhere in the middle.
-
-Based on my own research and anecdotal discussions, I think 26 percent is just about right. The smaller vendor’s outlook on the industry is more a reflection of its late entry into the market. As a corollary to this, Silver Peak jumped into the space early and would have an overly positive opinion of customer adoption. The other industry leader is an interesting case: now that it finally has a viable offering, it will be pushing its install base hard, which should create a “rising tide” for all vendors.
-
-So, what does all this data tell us? Whether the number is five percent or 33 percent (I’m not including the 55% number here as it’s a projection), the fact is, given the strong value proposition and maturity of SD-WAN technology, it’s something all businesses should carefully evaluate. Not for the cost savings, but rather for the increased network agility that enables tighter alignment with digital transformation initiatives.
-
-The next obvious question is, “Why haven’t more companies adopted SD-WAN?” The answer is likely that many network engineers are still clinging to the past and aren’t ready to make the shift. Most current SD-WAN solutions are built on the concept of simplicity and use high amounts of automation, enabling the network to learn and adapt to changing requirements to ensure the highest levels of performance for an organization’s users and applications. For example, the Silver Peak [Unity EdgeConnect™][3] SD-WAN edge platform constantly monitors network and application performance, applying a number of optimization techniques to maintain application performance and availability. In the past, network professionals would endlessly fiddle with network configurations to accomplish the same thing. That worked when traffic volumes were lower and only a few applications depended on the network. Today, due to the rise of cloud and mobility, almost all applications require a reliable, high-quality network connection to deliver a high quality of experience to users.
-
-Based on the results of the TechTarget survey and the feedback from the MicroScope roundtable, I’m appealing to all CIOs and IT leaders. If your company isn’t at least piloting an SD-WAN, why not? Several senior IT people I have talked to tell me that’s a decision left in the hands of the network engineers. But that’s like asking a traditional auto mechanic if people should buy an electric car. Of course, a router jockey whose livelihood is tied up in hunting and pecking on a command line all day is going to be resistant to change.
-
-If the network team isn’t ready to modernize the network, it will hold the company back, so it’s really up to IT leadership to mandate the change. Again, not because of cost, but because it’s too risky to sit idle while your competitors get jiggy with SD-WAN and are able to do things your business can’t. Instead, it makes far more sense to be aggressive and leapfrog the field to maintain a competitive edge. SD-WAN is the biggest evolutionary step in the WAN since the invention of the WAN, and the time to move is now.
-
-**Silver Peak was named a leader in Gartner’s 2018 Magic Quadrant for WAN Edge Infrastructure. If you are rethinking your WAN edge (and we believe you should be), this [report][4] is a must-read.**
-
---------------------------------------------------------------------------------
-
-via: https://www.networkworld.com/article/3435119/it-leaders-need-to-get-aggressive-with-sd-wan.html
-
-作者:[Zeus Kerravala][a]
-选题:[lujun9972][b]
-译者:[译者ID](https://github.com/译者ID)
-校对:[校对者ID](https://github.com/校对者ID)
-
-本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
-
-[a]: https://www.networkworld.com/author/Zeus-Kerravala/
-[b]: https://github.com/lujun9972
-[1]: https://www.silver-peak.com/sd-wan/sd-wan-explained
-[2]: https://www.silver-peak.com/
-[3]: https://www.silver-peak.com/products/unity-edge-connect
-[4]: https://www.silver-peak.com/sd-wan-edge-gartner-magic-quadrant-2018
diff --git a/sources/talk/20190905 HPE-s vision for the intelligent edge.md b/sources/talk/20190905 HPE-s vision for the intelligent edge.md
deleted file mode 100644
index 44161337ba..0000000000
--- a/sources/talk/20190905 HPE-s vision for the intelligent edge.md
+++ /dev/null
@@ -1,88 +0,0 @@
-[#]: collector: (lujun9972)
-[#]: translator: ( )
-[#]: reviewer: ( )
-[#]: publisher: ( )
-[#]: url: ( )
-[#]: subject: (HPE's vision for the intelligent edge)
-[#]: via: (https://www.networkworld.com/article/3435790/hpes-vision-for-the-intelligent-edge.html)
-[#]: author: (Michael Cooney https://www.networkworld.com/author/Michael-Cooney/)
-
-HPE's vision for the intelligent edge
-======
-HPE plans to incorporate segmentation, artificial intelligence and automation into its wired and wireless gear in order to deal with the increased network challenges imposed by IoT and SD-WAN.
-HPE
-
-It’s not just speeds and feeds anymore; it's intelligent software, integrated security and automation that will drive the networks of the future.
-
-That about sums up the networking areas that Keerti Melkote, HPE's President, Intelligent Edge, thinks are ripe for innovation in the next few years. He has a broad perspective because his role puts him in charge of the company's networking products, both wired and wireless.
-
-[Now see how AI can boost data-center availability and efficiency][1]
-
-“On the wired side, we are seeing an evolution in terms of manageability," said Melkote, who founded Aruba, now part of HPE. "I think the last couple of decades of wired networking have been about faster connectivity. How do you go from a 10G to 100G Ethernet inside data centers? That will continue, but the bigger picture that we’re beginning to see is really around automation.”
-
-[For an edited version of Network World's wide-ranging interview with Melkote, click here.][2]
-
-The challenge is how to inject automation into areas such as [data centers][3], [IoT][4] and granting network access to endpoints. In the past, automation and manageability were afterthoughts, he said. “The wired network world never really enabled native management monitoring and automation from the get-go.”
-
-Melkote said HPE is changing that world with its next generation of switches and apps, starting with a switching line the company introduced a little over a year ago, the Core Switch 8400 series, which puts the ability to monitor, manage and automate right at the heart of the network itself.
-
-In addition to providing the network fabric, it also provides deep visibility, deep penetrability and deep automation capabilities. "That is where we see the wired network foundation evolving," he said.
-
-In the wireless world, speeds and capacity have also increased over time, but there remains the need to improve network efficiency for high-density deployments, Melkote said. Improvements with the latest generation of wireless, [Wi-Fi 6][5], address this by focusing on efficiency, reliability and high-density connectivity, which are necessary given the explosion of wireless devices, including IoT gear, he said.
-
-**[ [Prepare to become a Certified Information Systems Security Professional with this comprehensive online course from PluralSight. Now offering a 10-day free trial!][6] ]**
-
-Artificial intelligence will also play a major role in how networks are managed, he said. “Behind the scenes, across both wired and wireless, AI and AI operations are going to be at the heart of how the vision of manageability and automation is going to be realized,” Melkote said.
-
-AI operations are fundamentally about collecting large amounts of data from network devices and gaining insights from the data to predict when and where the network is going to face capacity and congestion problems that could kill performance, and to discover security issues, he said.
-
-“Any one of those insights being able to proactively give our customers a view into what’s happening so they can solve a problem before it really becomes a big issue is a huge area of research and development for us,” Melkote said.
-
-And that includes AI in wireless networks. “Even more than Wi-Fi 6, I see the evolution of AI behind the Wi-Fi 6 network or the next-generation wired network being really the enabler of the next evolution of efficiency, the next level of insights into the operations of the network,” he said.
-
-From a security perspective, IoT poses a particular challenge that can be addressed in part via network features. “The big risk with IoT is that these devices are not secured with traditional operating systems. They don’t run Windows; they don’t run [Linux][7]; they don’t run an OS,” Melkote said. As a result, they are susceptible to attacks, "and if a hacker is able to jump onto your video camera or your IoT sensor, it can then use that to attack the rest of the internal network.”
-
-That creates a need for access control and network segmentation that isolates these devices and provides a level of visibility and control that is integrated into the network architecture itself. HPE regards this as a massive shift from what enterprise networks have been used for historically – connecting users and taking them from Point A to Point B with high quality of service, Melkote said.
-
-"The segmentation is, I think, the next big evolution for all the new use cases that are emerging,” Melkote said. “The segmentation not only happens inside a LAN context with Wi-Fi and wired technology but in a WAN context, too. You need to be able to extend it across a wide area network, which itself is changing from a traditional [MPLS][8] network to a software-defined WAN, [SD-WAN][9].”
-
-SD-WAN is one of the core technologies for enabling edge-to-cloud efficiency, an ever-more-important consideration given the migration of applications from private data centers to public cloud, Melkote said. SD-WAN also extends to branch offices that not only need to connect to data centers, but directly to the cloud using a combination of internet links and private circuits, he said.
-
-“What we are doing is basically integrating the security and the WAN functionality into the architecture so you don’t have to rely on technology from third parties to provide that additional level of security or additional segmentation on the network itself,” Melkote said.
-
-The edge of the network – or the intelligent edge – also brings with it its own challenges. HPE says the intelligent edge entails analysis of data where it is generated to reduce latency, security risk and costs. It breaks intelligent edge types into three groups: operational technology, IT and IoT edges.
-
-Part of the intelligent edge will include micro data centers that will be deployed at the point where data gets created, he said. "That’s not to say that the on-prem data center goes away or the cloud data center goes away," Melkote said. "Those two will continue to be served, and we will continue to serve those through our switching/networking products as well as our traditional compute and storage products."
-
-The biggest challenge will be bringing these technologies to customers to deploy them quickly. "We are still in the early days of the intelligent-edge explosion. I think in a decade we’ll be talking about the edge in the same way we talk about mobility and cloud today, which is in the past tense – and they’re massive trends. The edge is going to be very similar, and I think we don’t say that yet simply because I don’t think we have enough critical mass and use cases yet.”
-
-But ultimately, individual industries will glean advantages from the intelligent edge, and it will spread, Melkote said.
-
-“A lot of the early work that we’re doing is taking these building blocks of connectivity, security, manageability and analytics and packaging them in a manner that is consumable for retail use cases, for energy use cases, for healthcare use cases, for education use cases and workplace use cases," he said. "Every vertical has its own unique way to derive value out of this package. We are in the early days figuring that out."
-
-Join the Network World communities on [Facebook][10] and [LinkedIn][11] to comment on topics that are top of mind.
- --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3435790/hpes-vision-for-the-intelligent-edge.html - -作者:[Michael Cooney][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Michael-Cooney/ -[b]: https://github.com/lujun9972 -[1]: https://www.networkworld.com/article/3274654/ai-boosts-data-center-availability-efficiency.html -[2]: https://www.networkworld.com/article/3435206/hpe-s-keerti-melkote-dissects-future-of-mobility-the-role-of-the-data-center-and-data-intelligence.html -[3]: https://www.networkworld.com/article/3223692/what-is-a-data-centerhow-its-changed-and-what-you-need-to-know.html -[4]: https://www.networkworld.com/article/3207535/what-is-iot-how-the-internet-of-things-works.html -[5]: https://www.networkworld.com/article/3356838/how-to-determine-if-wi-fi-6-is-right-for-you.html -[6]: https://pluralsight.pxf.io/c/321564/424552/7490?u=https%3A%2F%2Fwww.pluralsight.com%2Fpaths%2Fcertified-information-systems-security-professional-cisspr -[7]: https://www.networkworld.com/article/3215226/what-is-linux-uses-featres-products-operating-systems.html -[8]: https://www.networkworld.com/article/2297171/network-security-mpls-explained.html -[9]: https://www.networkworld.com/article/3031279/sd-wan-what-it-is-and-why-you-ll-use-it-one-day.html -[10]: https://www.facebook.com/NetworkWorld/ -[11]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190906 Two AMD Epyc processors crush four Intel Xeons in tests.md b/sources/talk/20190906 Two AMD Epyc processors crush four Intel Xeons in tests.md deleted file mode 100644 index 00861c6d9d..0000000000 --- a/sources/talk/20190906 Two AMD Epyc processors crush four Intel Xeons in tests.md +++ /dev/null @@ -1,59 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Two AMD Epyc processors crush four Intel Xeons in tests) -[#]: via: (https://www.networkworld.com/article/3435727/two-amd-epyc-processors-crush-four-intel-xeons-in-tests.html) -[#]: author: (Andy Patrizio https://www.networkworld.com/author/Andy-Patrizio/) - -Two AMD Epyc processors crush four Intel Xeons in tests -====== -Tests show the AMD Epyc processors performed almost 25% better, and they cost about one quarter less. -AMD - -Tests by the evaluation and testing site ServeTheHome found a server with two AMD Epyc processors can outperform a four-socket Intel system that costs considerably more. - -If you don’t read [ServeTheHome][1], you should. It’s cut from the same cloth as Tom’s Hardware Guide and AnandTech but with a focus on server hardware, mostly the low end but they throw in some enterprise stuff, as well. - -[ServeTheHome ran tests][2] comparing the AMD Epyc 7742, which has 64 cores and 128 threads, and the Intel Xeon Platinum 8180M with its 28 cores and 56 threads. The dollars, though, show a real difference. Each Epyc 7742 costs $6,950, while each Xeon Platinum 8180M goes for $13,011. So, two Epyc 7742 processors cost you $13,900, and four Xeon Platinum 8180M processors cost $52,044, four times as much as the AMD chips. - -**Also read: [How AI can boost data-center availability and efficiency][3]** - -And that’s just the chips. 
The actual servers will also set you back a pretty penny, especially since four-socket servers cost much more than two-socket servers regardless of the processor you use. - -ServeTheHome used GeekBench, a Linux-based benchmark tool that measures both single- and multi-core performance. It’s purely a performance measure and looks only to see how fast a processor can go. It does not use real-world apps the way PCMark uses spreadsheets and graphics apps to test application performance. - -Nonetheless, the dual Epyc system racked up single- and multi-core scores of 4,876 and 193,554 points, respectively, while the quad Xeon Platinum 8180M system scored 4,700 and 155,050 points in the single-core and multi-core tests, respectively. - -So, the two-socket AMD system outperforms the four-socket Intel system by up to 3.74% in single-core workloads and 24.83% in multi-core workloads. And it costs one quarter the price. In terms of price/performance, it’s not even close. - -**[ [Get certified as an Apple Technical Coordinator with this seven-part online course from PluralSight.][4] ]** - -And ServeTheHome used an AMD reference server for the Epyc tests, while it used a Dell PowerEdge R840 for the Xeons. What that means is when HPE, Dell, Lenovo, Supermicro, etc. start shipping their servers, they will tune and optimize the daylights out of them vs. the AMD reference box and get even better performance. - -There are other advantages to the AMD processor, as well: support for up to 4TB per socket vs. Xeon’s 1.5TB per socket, and PCI Express 4 for AMD vs. PCI Express 3 for Intel, which is half the speed of PCIe 4. - -AMD has clearly built a better mousetrap. Now to bring in the customers. - -Join the Network World communities on [Facebook][5] and [LinkedIn][6] to comment on topics that are top of mind. - --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3435727/two-amd-epyc-processors-crush-four-intel-xeons-in-tests.html - -作者:[Andy Patrizio][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Andy-Patrizio/ -[b]: https://github.com/lujun9972 -[1]: https://www.servethehome.com/ -[2]: https://www.servethehome.com/geekbench-4-2p-amd-epyc-7742-sets-world-record/ -[3]: https://www.networkworld.com/article/3274654/ai-boosts-data-center-availability-efficiency.html -[4]: https://pluralsight.pxf.io/c/321564/424552/7490?u=https%3A%2F%2Fwww.pluralsight.com%2Fpaths%2Fapple-certified-technical-trainer-10-11 -[5]: https://www.facebook.com/NetworkWorld/ -[6]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190911 Can AMD convert its growing GPU presence into a data center play.md b/sources/talk/20190911 Can AMD convert its growing GPU presence into a data center play.md deleted file mode 100644 index 2f38c9b2a0..0000000000 --- a/sources/talk/20190911 Can AMD convert its growing GPU presence into a data center play.md +++ /dev/null @@ -1,64 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Can AMD convert its growing GPU presence into a data center play?) 
-[#]: via: (https://www.networkworld.com/article/3438098/can-amd-convert-its-growing-gpu-presence-into-a-data-center-play.html) -[#]: author: (Andy Patrizio https://www.networkworld.com/author/Andy-Patrizio/) - -Can AMD convert its growing GPU presence into a data center play? -====== -AMD has scored high-performance computing deals recently, but to truly compete with Nvidia it needs to develop an alternative to Nvidia’s CUDA language. -AMD - -AMD's $5.4 billion purchase of ATI Technologies in 2006 seemed like an odd match. Not only were the companies in separate markets, but they were on separate coasts, with ATI in the Toronto, Canada, region and AMD in Sunnyvale, California. - -They made it work, and arguably it saved AMD from extinction because it was the graphics business that kept the company afloat while the Athlon/Opteron business was going nowhere. There were many quarters where graphics brought in more revenue than CPUs and likely saved the company from bankruptcy. - -But those days are over, and AMD is once again a highly competitive CPU company, and quarterly sales are getting very close to the $2 billion mark. While the CPU business is on fire, the GPU business continues to do well. - -**Also read: [AI boosts data-center availability and efficiency][1]** - -For the second quarter of 2019, AMD's GPU shipments increased 9.8% vs. Q1, while Nvidia's were flat and Intel's shipments decreased -1.4%, according to Jon Peddie Research. An increase over the first quarter is a very good showing because Q2 typically drops from Q1. - -AMD and Nvidia don't break out market segments, nor do they say what percentage comes from enterprise/HPC/supercomputing sales. The challenge for AMD, then, is to translate its gaming popularity into enterprise sales. - -### Competing in the high-performance computing space - -In high-performance computing (HPC), which includes artificial intelligence (AI), Nvidia clearly dominates. AMD has no answer for Nvidia's RTX 270/280 or the Tesla T4, but that hasn't stopped AMD from racking up the occasional win. The Oak Ridge National Lab plans to build an exascale supercomputer called Frontier in 2021 using AMD Epyc processors and Radeon GPUs. - -AMD CEO Lisa Su talked about it at the recent Hot Chips semiconductor conference, where she said Frontier would feature "highly optimized CPU, highly optimized GPU, highly optimized coherent interconnect between CPU and GPU, [and] working together with Cray on the node to node latency characteristics really enables us to put together a leadership system.” - -AMD has also scored deals with Google to power its cloud-based Stadia game console, providing 10.7Tflops/sec., more than the Microsoft and Sony consoles combined. And AMD has had a deal with China's Baidu to provide GPU-based computing for two years. - -The problem, according to Peddie, isn't so much the hardware as it is the software. Nvidia has a special language called CUDA, first developed by Stanford professor Ian Buck, who is now head of Nvidia's AI efforts. It allows developers to write apps that fully utilize the GPU with a familiar C++ syntax. Nvidia then went to hundreds of universities and set them up to teach CUDA to students. - -"The net result is universities around the world are cranking out thousands of grads who know CUDA, and AMD has no equivalent," said Peddie. - -The result is it's much harder to code for a Radeon than a Tesla/Volta. 
AMD supports the open-standard OpenCL library and the open-source project [HIP][2], which converts CUDA to portable C++ code. - -The OpenCL standard was developed by Apple but is now maintained by the [Khronos Group][3], and if there is one way for a standard to go nowhere, it's to put it in the hands of a standards group. Look what it did for OpenGL. It had the lead decades ago, then Microsoft came out with DirectX and obliterated OpenGL. The unfortunate fact is standards always fare better when there is a company behind it with something to gain. - -For AMD to gain ground in the data center and HPC/AI against Nvidia, it needs a competitor to CUDA. Up until two years ago, that simply wasn't possible because AMD was fighting for its life. But now, with hot new silicon, the time is right for the company to push into software and give Nvidia the same fits it is giving Intel. - -Join the Network World communities on [Facebook][4] and [LinkedIn][5] to comment on topics that are top of mind. - --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3438098/can-amd-convert-its-growing-gpu-presence-into-a-data-center-play.html - -作者:[Andy Patrizio][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Andy-Patrizio/ -[b]: https://github.com/lujun9972 -[1]: https://www.networkworld.com/article/3274654/ai-boosts-data-center-availability-efficiency.html -[2]: https://github.com/ROCm-Developer-Tools/HIP -[3]: https://www.khronos.org/opencl/ -[4]: https://www.facebook.com/NetworkWorld/ -[5]: https://www.linkedin.com/company/network-world From 9e17cf49fa3fe466d146c03f121c42313f0c3ab0 Mon Sep 17 00:00:00 2001 From: Xingyu Wang Date: Mon, 16 Sep 2019 18:26:37 +0800 Subject: [PATCH 091/202] TSL&PRF --- ...Hobby Project To A Professional Project.md | 84 ------------------- ...Hobby Project To A Professional Project.md | 78 +++++++++++++++++ 2 files changed, 78 insertions(+), 84 deletions(-) delete mode 100644 sources/news/20190914 Manjaro Linux Graduates From A Hobby Project To A Professional Project.md create mode 100644 translated/news/20190914 Manjaro Linux Graduates From A Hobby Project To A Professional Project.md diff --git a/sources/news/20190914 Manjaro Linux Graduates From A Hobby Project To A Professional Project.md b/sources/news/20190914 Manjaro Linux Graduates From A Hobby Project To A Professional Project.md deleted file mode 100644 index 4b4e4f84d4..0000000000 --- a/sources/news/20190914 Manjaro Linux Graduates From A Hobby Project To A Professional Project.md +++ /dev/null @@ -1,84 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: (wxy) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Manjaro Linux Graduates From A Hobby Project To A Professional Project) -[#]: via: (https://itsfoss.com/manjaro-linux-business-formation/) -[#]: author: (Ankush Das https://itsfoss.com/author/ankush/) - -Manjaro Linux Graduates From A Hobby Project To A Professional Project -====== - -_**Brief: Manjaro is taking things professionally. 
While the Manjaro community will be responsible for the development of the project and other related activities, a company has been formed to work as its legal entity and handle the commercial agreements and professional services.**_ - -Manjaro is a quite popular Linux distribution considering that it was just a passion project by three people, Bernhard, Jonathan and Philip, which came into existence in 2011. Now that it’s one of the [best Linux distros][1] out there, this can’t really remain a hobby project, right? - -Well, here’s good news: Manjaro has established a new company “**Manjaro GmbH & Co. KG**” with [Blue Systems][2] as an advisor to enable full-time employment of maintainers and exploration of future commercial opportunities. - -![][3] - -### What is exactly the change here? - -As per the [official announcement][4], the Manjaro project will stay as-is. However, a new company has been formed to secure the project and allow them to make legal contracts, official agreements, and other potential commercial activities. So, this makes the “hobby project” a professional endeavor. - -In addition to this, the donation funds will be transferred to non-profit [fiscal hosts][5] ([CommunityBridge][6] and [OpenCollective][7]) which will then accept and administer the funds on behalf of the project. Do note, that the donations haven’t been used to create the company – so the transfer of funds to a non-profit fiscal host will ensure transparency while securing the donations. - -### How does this improve things? - -With the company formed, the new structure will help Manjaro in the following ways (as mentioned by the devlopers): - - * enable developers to commit full time to Manjaro and its related projects; - * interact with other developers in sprints and events around Linux; - * protect the independence of Manjaro as a community-driven project, as well as protect its brand; - * provide faster security updates and a more efficient reaction to the needs of users; - * provide the means to act as a company on a professional level. - - - -[][8] - -Suggested read  Linux Mint Website Hacked, ISOs Compromised With Backdoor - -The Manjaro team also shed some light on how it’s going to stay committed to the community: - -> The mission and goals of Manjaro will remain the same as before – to support the collaborative development of Manjaro and its widespread use. This effort will continue to be supported through donations and sponsorship and these will not, under any circumstances, be used by the established company. - -### More about Manjaro as a company - -Even though they mentioned that the project will remain independent of the company, not everyone is clear about the involvement of Manjaro with the “community” while having a company with commercial interests. So, the team also clarified about their plans as a company in the announcement. - -Manjaro GmbH & Co. KG has been formed to effectively engage in commercial agreements, form partnerships, and offer professional services. With this, Manjaro devs Bernhard and Philip will now be able to commit full-time to Manjaro, while Blue Systems will take a role as an advisor. - -The company will be able to sign contracts and cover duties and guarantees officially, which the community cannot take or be held responsible for. - -**Wrapping Up** - -So, with this move, along with commercial opportunities, they plan to go full-time and also hire contributors. - -Of course, now they mean – “business” (not as the bad guys, I hope). 
Most of the reactions to this announcement are positive and we all wish them good luck with this. While some might be skeptical about a “community” project having “commercial” interests (remember the [FreeOffice and Manjaro fiasco][9]?), I see this as an interesting move. - -What do you think? Feel free to let us know your thoughts in the comments below. - --------------------------------------------------------------------------------- - -via: https://itsfoss.com/manjaro-linux-business-formation/ - -作者:[Ankush Das][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://itsfoss.com/author/ankush/ -[b]: https://github.com/lujun9972 -[1]: https://itsfoss.com/best-linux-distributions/ -[2]: https://www.blue-systems.com/ -[3]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2019/09/manjaro-gmbh.jpg?ssl=1 -[4]: https://forum.manjaro.org/t/manjaro-is-taking-the-next-step/102105 -[5]: https://en.wikipedia.org/wiki/Fiscal_sponsorship -[6]: https://communitybridge.org/ -[7]: https://opencollective.com/ -[8]: https://itsfoss.com/linux-mint-hacked/ -[9]: https://itsfoss.com/libreoffice-freeoffice-manjaro-linux/ diff --git a/translated/news/20190914 Manjaro Linux Graduates From A Hobby Project To A Professional Project.md b/translated/news/20190914 Manjaro Linux Graduates From A Hobby Project To A Professional Project.md new file mode 100644 index 0000000000..f4a1e40266 --- /dev/null +++ b/translated/news/20190914 Manjaro Linux Graduates From A Hobby Project To A Professional Project.md @@ -0,0 +1,78 @@ +[#]: collector: (lujun9972) +[#]: translator: (wxy) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (Manjaro Linux Graduates From A Hobby Project To A Professional Project) +[#]: via: (https://itsfoss.com/manjaro-linux-business-formation/) +[#]: author: (Ankush Das https://itsfoss.com/author/ankush/) + +Manjaro Linux 从业余爱好项目成长为专业项目 +====== + +> Manjaro 正在走专业化路线。虽然 Manjaro 社区将负责项目的开发和其他相关活动,但该团队已成立了一家公司作为其法人实体处理商业协议和专业服务。 + +Manjaro 是一个相当流行的 Linux 发行版,而它只是由三个人(Bernhard、Jonathan 和 Phili)于 2011 年激情之下创建的项目。现在,它是目前[最好的 Linux 发行版][1]之一,所以它不能真的一直还只是个业余爱好项目了,对吧。 + +嗯,现在有个好消息:Manjaro 已经建立了一家新公司“[Manjaro GmbH & Co. KG]”,以 [Blue Systems][2] 为顾问,以便能够全职雇佣维护人员,并探索未来的商业机会。 + +![][3] + +### 具体有什么变化? + +根据[官方公告][4],Manjaro 项目将保持不变。但是,成立了一家新公司来保护该项目,以允许他们制定法律合同、官方协议和进行其他潜在的商业活动。因此,这使得这个“业余爱好项目”成为了一项专业工作。 + +除此之外,捐赠资金将转给非营利性的[财政托管][5]([CommunityBridge][6] 和 [OpenCollective][7]),让他们来代表项目接受和管理资金。请注意,这些捐赠没有被用于创建这个公司,因此,将资金转移给非营利的财务托管将在确保捐赠的同时也确保透明度。 + +### 这会有何改善? 
+ +随着这个公司的成立,(如开发者所述)新结构将以下列方式帮助 Manjaro: + +* 使开发人员能够全职投入 Manjaro 及其相关项目; +* 在 Linux 相关的比赛和活动中与其他开发人员进行互动; +* 保护 Manjaro 作为一个社区驱动项目的独立性,并保护其品牌; +* 提供更快的安全更新,更有效地响应用户需求; +* 提供在专业层面上作为公司行事的手段。 + +Manjaro 团队还阐明了它将如何继续致力于社区: + +> Manjaro 的使命和目标将与以前一样 —— 支持 Manjaro 的协作开发及其广泛使用。这项工作将继续通过捐赠和赞助来支持,这些捐赠和赞助在任何情况下都不会被这个成立的公司使用。 + +### 关于 Manjaro 公司的更多信息 + +尽管他们提到该项目将独立于公司,但并非所有人都清楚当有了一家具有商业利益的公司时 Manjaro 与“社区”的关系。因此,该团队还在公告中澄清了他们作为一家公司的计划。 + +> Manjaro GmbH & Co.KG 的成立旨在有效地参与商业协议、建立合作伙伴关系并提供专业服务。有了这个,Manjaro 开发者 Bernhard 和 Philip 现在可以全职工作投入到 Manjaro,而 Blue Systems 将担任顾问。 + +> 公司将能够正式签署合同并承担职责和保障,而社区不能承担或承担责任。 + +### 总结 + +因此,通过这一举措以及商业机会,他们计划全职工作并聘请贡献者。 + +当然,现在他们的意思是“业务”(我希望不是作为坏人)。对此公告的大多数反应都是积极的,我们都祝他们好运。虽然有些人可能对具有“商业”利益的“社区”项目持怀疑态度(还记得 [FreeOffice 和 Manjaro 的挫败][9]吗?),但我认为这是一个有趣的举措。 + +你怎么看?请在下面的评论中告诉我们你的想法。 + +-------------------------------------------------------------------------------- + +via: https://itsfoss.com/manjaro-linux-business-formation/ + +作者:[Ankush Das][a] +选题:[lujun9972][b] +译者:[wxy](https://github.com/wxy) +校对:[wxy](https://github.com/wxy) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://itsfoss.com/author/ankush/ +[b]: https://github.com/lujun9972 +[1]: https://itsfoss.com/best-linux-distributions/ +[2]: https://www.blue-systems.com/ +[3]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2019/09/manjaro-gmbh.jpg?ssl=1 +[4]: https://forum.manjaro.org/t/manjaro-is-taking-the-next-step/102105 +[5]: https://en.wikipedia.org/wiki/Fiscal_sponsorship +[6]: https://communitybridge.org/ +[7]: https://opencollective.com/ +[8]: https://itsfoss.com/linux-mint-hacked/ +[9]: https://itsfoss.com/libreoffice-freeoffice-manjaro-linux/ From ad6b7059998203f232daf5834e343de280cd1272 Mon Sep 17 00:00:00 2001 From: Xingyu Wang Date: Mon, 16 Sep 2019 18:34:09 +0800 Subject: [PATCH 092/202] PUB @wxy https://linux.cn/article-11349-1.html --- ...duates From A Hobby Project To A Professional Project.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) rename {translated/news => published}/20190914 Manjaro Linux Graduates From A Hobby Project To A Professional Project.md (97%) diff --git a/translated/news/20190914 Manjaro Linux Graduates From A Hobby Project To A Professional Project.md b/published/20190914 Manjaro Linux Graduates From A Hobby Project To A Professional Project.md similarity index 97% rename from translated/news/20190914 Manjaro Linux Graduates From A Hobby Project To A Professional Project.md rename to published/20190914 Manjaro Linux Graduates From A Hobby Project To A Professional Project.md index f4a1e40266..0f5ce59599 100644 --- a/translated/news/20190914 Manjaro Linux Graduates From A Hobby Project To A Professional Project.md +++ b/published/20190914 Manjaro Linux Graduates From A Hobby Project To A Professional Project.md @@ -1,8 +1,8 @@ [#]: collector: (lujun9972) [#]: translator: (wxy) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) +[#]: reviewer: (wxy) +[#]: publisher: (wxy) +[#]: url: (https://linux.cn/article-11349-1.html) [#]: subject: (Manjaro Linux Graduates From A Hobby Project To A Professional Project) [#]: via: (https://itsfoss.com/manjaro-linux-business-formation/) [#]: author: (Ankush Das https://itsfoss.com/author/ankush/) From f9b95510c532e7a27ae4a6ff780c5def7ad92345 Mon Sep 17 00:00:00 2001 From: DarkSun Date: Tue, 17 Sep 2019 00:53:06 +0800 Subject: [PATCH 093/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190916=20Copyin?= 
=?UTF-8?q?g=20large=20files=20with=20Rsync,=20and=20some=20misconceptions?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/tech/20190916 Copying large files with Rsync, and some misconceptions.md --- ...les with Rsync, and some misconceptions.md | 101 ++++++++++++++++++ 1 file changed, 101 insertions(+) create mode 100644 sources/tech/20190916 Copying large files with Rsync, and some misconceptions.md diff --git a/sources/tech/20190916 Copying large files with Rsync, and some misconceptions.md b/sources/tech/20190916 Copying large files with Rsync, and some misconceptions.md new file mode 100644 index 0000000000..ae314e2a2e --- /dev/null +++ b/sources/tech/20190916 Copying large files with Rsync, and some misconceptions.md @@ -0,0 +1,101 @@ +[#]: collector: (lujun9972) +[#]: translator: ( ) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (Copying large files with Rsync, and some misconceptions) +[#]: via: (https://fedoramagazine.org/copying-large-files-with-rsync-and-some-misconceptions/) +[#]: author: (Daniel Leite de Abreu https://fedoramagazine.org/author/dabreu/) + +Copying large files with Rsync, and some misconceptions +====== + +![][1] + +There is a notion that a lot of people working in the IT industry often copy and paste from internet howtos. We all do it, and the copy-and-paste itself is not a problem. The problem is when we run things without understanding them. + +Some years ago, a friend who used to work on my team needed to copy virtual machine templates from site A to site B. They could not understand why the file they copied was 10GB on site A but but it became 100GB on-site B. + +The friend believed that _rsync_ is a magic tool that should just “sync” the file as it is. However, what most of us forget is to understand what _rsync_ really is, and how is it used, and the most important in my opinion is, where it come from. This article provides some further information about rsync, and an explanation of what happened in that story. + +### About rsync + +_rsync_ is a tool was created by Andrew Tridgell and Paul Mackerras who were motivated by the following problem: + +Imagine you have two files, _file_A_ and _file_B_. You wish to update _file_B_ to be the same as _file_A_. The obvious method is to copy _file_A_ onto _file_B_. + +Now imagine that the two files are on two different servers connected by a slow communications link, for example, a dial-up IP link. If _file_A_ is large, copying it onto _file_B_ will be slow, and sometimes not even possible. To make it more efficient, you could compress _file_A_ before sending it, but that would usually only gain a factor of 2 to 4. + +Now assume that _file_A_ and _file_B_ are quite similar, and to speed things up, you take advantage of this similarity. A common method is to send just the differences between _file_A_ and _file_B_ down the link and then use such list of differences to reconstruct the file on the remote end. + +The problem is that the normal methods for creating a set of differences between two files rely on being able to read both files. Thus they require that both files are available beforehand at one end of the link. If they are not both available on the same machine, these algorithms cannot be used. (Once you had copied the file over, you don’t need the differences). This is the problem that _rsync_ addresses. + +The _rsync_ algorithm efficiently computes which parts of a source file match parts of an existing destination file. 
Matching parts then do not need to be sent across the link; all that is needed is a reference to the part of the destination file. Only parts of the source file which are not matching need to be sent over. + +The receiver can then construct a copy of the source file using the references to parts of the existing destination file and the original material. + +Additionally, the data sent to the receiver can be compressed using any of a range of common compression algorithms for further speed improvements. + +The rsync algorithm addresses this problem in a lovely way as we all might know. + +After this introduction on _rsync_, Back to the story! + +### Problem 1: Thin provisioning + +There were two things that would help the friend understand what was going on. + +The problem with the file getting significantly bigger on the other size was caused by Thin Provisioning (TP) being enabled on the source system — a method of optimizing the efficiency of available space in Storage Area Networks (SAN) or Network Attached Storages (NAS). + +The source file was only 10GB because of TP being enabled, and when transferred over using _rsync_ without any additional configuration, the target destination was receiving the full 100GB of size. _rsync_ could not do the magic automatically, it had to be configured. + +The Flag that does this work is _-S_ or _–sparse_ and it tells _rsync_ to handle sparse files efficiently. And it will do what it says! It will only send the sparse data so source and destination will have a 10GB file. + +### Problem 2: Updating files + +The second problem appeared when sending over an updated file. The destination was now receiving just the 10GB, but the whole file (containing the virtual disk) was always transferred. Even when a single configuration file was changed on that virtual disk. In other words, only a small portion of the file changed. + +The command used for this transfer was: + +``` +rsync -avS vmdk_file syncuser@host1:/destination +``` + +Again, understanding how _rsync_ works would help with this problem as well. + +The above is the biggest misconception about rsync. Many of us think _rsync_ will simply send the delta updates of the files, and that it will automatically update only what needs to be updated. But this is not the default behaviour of _rsync_. + +As the man page says, the default behaviour of _rsync_ is to create a new copy of the file in the destination and to move it into the right place when the transfer is completed. + +To change this default behaviour of _rsync_, you have to set the following flags and then rsync will send only the deltas: + +``` +--inplace update destination files in-place +--partial keep partially transferred files +--append append data onto shorter files +--progress show progress during transfer +``` + +So the full command that would do exactly what the friend wanted is: + +``` +rsync -av --partial --inplace --append --progress vmdk_file syncuser@host1:/destination +``` + +Note that the sparse flag _-S_ had to be removed, for two reasons. The first is that you can not use _–sparse_ and _–inplace_ together when sending a file over the wire. And second, when you once sent a file over with _–sparse_, you can’t updated with _–inplace_ anymore. Note that versions of rsync older than 3.1.3 will reject the combination of _–sparse_ and _–inplace_. + +So even when the friend ended up copying 100GB over the wire, that only had to happen once. 
All the following updates were only copying the difference, making the copy to be extremely efficient. + +-------------------------------------------------------------------------------- + +via: https://fedoramagazine.org/copying-large-files-with-rsync-and-some-misconceptions/ + +作者:[Daniel Leite de Abreu][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://fedoramagazine.org/author/dabreu/ +[b]: https://github.com/lujun9972 +[1]: https://fedoramagazine.org/wp-content/uploads/2019/08/rsync-816x345.jpg From bbfd7282541a881e58e6c28be2e126570c4c3d4c Mon Sep 17 00:00:00 2001 From: DarkSun Date: Tue, 17 Sep 2019 00:53:35 +0800 Subject: [PATCH 094/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190917=20How=20?= =?UTF-8?q?to=20Check=20Linux=20Mint=20Version=20Number=20&=20Codename?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/tech/20190917 How to Check Linux Mint Version Number - Codename.md --- ...ck Linux Mint Version Number - Codename.md | 145 ++++++++++++++++++ 1 file changed, 145 insertions(+) create mode 100644 sources/tech/20190917 How to Check Linux Mint Version Number - Codename.md diff --git a/sources/tech/20190917 How to Check Linux Mint Version Number - Codename.md b/sources/tech/20190917 How to Check Linux Mint Version Number - Codename.md new file mode 100644 index 0000000000..843ab133a6 --- /dev/null +++ b/sources/tech/20190917 How to Check Linux Mint Version Number - Codename.md @@ -0,0 +1,145 @@ +[#]: collector: (lujun9972) +[#]: translator: ( ) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (How to Check Linux Mint Version Number & Codename) +[#]: via: (https://itsfoss.com/check-linux-mint-version/) +[#]: author: (Sergiu https://itsfoss.com/author/sergiu/) + +How to Check Linux Mint Version Number & Codename +====== + +Linux Mint has a major release (like Mint 19) every two years and minor releases (like Mint 19.1, 19.2 etc) every six months or so. You can upgrade Linux Mint version on your own or it may get automatically update for the minor releases. + +Between all these release, you may wonder which Linux Mint version you are using. Knowing the version number is also helpful in determining whether a particular software is available for your system or if your system has reached end of life. + +There could be a number of reasons why you might require the Linux Mint version number and there are various ways you can obtain this information. Let me show you both graphical and command line ways to get the Mint release information. + + * [Check Linux Mint version using command line][1] + * [Check Linux Mint version information using GUI][2] + + + +### Ways to check Linux Mint version number using terminal + +![][3] + +I’ll go over several ways you can check your Linux Mint version number and codename using very simple commands. You can open up a **terminal** from the **Menu** or by pressing **CTRL+ALT+T** (default hotkey). + +The **last two entries** in this list also output the **Ubuntu release** your current Linux Mint version is based on. + +#### 1\. /etc/issue + +Starting out with the simplest CLI method, you can print out the contents of **/etc/issue** to check your **Version Number** and **Codename**: + +``` +[email protected]:~$ cat /etc/issue +Linux Mint 19.2 Tina \n \l +``` + +#### 2\. 
hostnamectl + +![hostnamectl][4] + +This single command (**hostnamectl**) prints almost the same information as that found in **System Info**. You can see your **Operating System** (with **version number**), as well as your **kernel version**.3. + +#### 3\. lsb_release + +**lsb_release** is a very simple Linux utility to check basic information about your distribution: + +``` +[email protected]:~$ lsb_release -a +No LSB modules are available. +Distributor ID: LinuxMint +Description: Linux Mint 19.2 Tina +Release: 19.2 +Codename: tina +``` + +**Note:** *I used the **–***_**a**_ _tag to print all parameters, but you can also use **-s** for short form, **-d** for description etc. (check **man lsb_release** for all tags)._ + +#### 4\. /etc/linuxmint/info + +![/etc/linuxmint/info][5] + +This isn’t a command, but rather a file on any Linux Mint install. Simply use cat command to print it’s contents to your terminal and see your **Release Number** and **Codename**. + +[][6] + +Suggested read  Get Rid Of Two Google Chrome Icons From Dock In Elementary OS Freya [Quick Tip] + +#### 5\. Use /etc/os-release to get Ubuntu codename as well + +![/etc/os-release][7] + +Linux Mint is based on Ubuntu. Each Linux Mint release is based on a different Ubuntu release. Knowing which Ubuntu version your Linux Mint release is based on is helpful in cases where you’ll have to use Ubuntu codename while adding a repository like when you need to [install the latest Virtual Box in Linux Mint][8]. + +**os-release** is yet another file similar to **info**, showing you the codename for the **Ubuntu** release your Linux Mint is based on. + +#### 6\. Use /etc/upstream-release/lsb-release to get only Ubuntu base info + +If you only ****want to see information about the **Ubuntu** base, output **/etc/upstream-release/lsb-release**: + +``` +[email protected]:~$ cat /etc/upstream-release/lsb-release +DISTRIB_ID=Ubuntu +DISTRIB_RELEASE=18.04 +DISTRIB_CODENAME=bionic +DISTRIB_DESCRIPTION="Ubuntu 18.04 LTS" +``` + +Bonus Tip: [You can just check Linux kernel version][9] with the **uname** command: + +``` +[email protected]:~$ uname -r +4.15.0-54-generic +``` + +**Note:** _**-r** stands for **release**, however you can check the other flags with **man uname**._ + +### Check Linux Mint version information using GUI + +If you are not comfortable with the terminal and commands, you can use the graphical method. As you would expect, this one is pretty straight-forward. + +Open up the **Menu** (bottom-left corner) and then go to **Preferences > System Info**: + +![Linux Mint Menu][10] + +Alternatively, in the Menu you can search for **System Info**: + +![Menu Search System Info][11] + +Here you can see both your operating system (including version number), your kernel and the version number of your DE: + +![System Info][12] + +**Wrapping Up** + +I have covered some different ways you can quickly check the version and name (as well as the Ubuntu base and kernel) of the Linux Mint release you are running. I hope you found this beginner tutorial helpful. Let us know in the comments which one is your favorite method! 
+ +-------------------------------------------------------------------------------- + +via: https://itsfoss.com/check-linux-mint-version/ + +作者:[Sergiu][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://itsfoss.com/author/sergiu/ +[b]: https://github.com/lujun9972 +[1]: tmp.pL5Hg3N6Qt#terminal +[2]: tmp.pL5Hg3N6Qt#GUI +[3]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2019/09/check-linux-mint-version.png?ssl=1 +[4]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2019/09/hostnamectl.jpg?ssl=1 +[5]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2019/09/linuxmint_info.jpg?ssl=1 +[6]: https://itsfoss.com/rid-google-chrome-icons-dock-elementary-os-freya/ +[7]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2019/09/os_release.jpg?ssl=1 +[8]: https://itsfoss.com/install-virtualbox-ubuntu/ +[9]: https://itsfoss.com/find-which-kernel-version-is-running-in-ubuntu/ +[10]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2019/09/linux_mint_menu.jpg?ssl=1 +[11]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2019/09/menu_search_system_info.jpg?ssl=1 +[12]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2019/09/system_info.png?ssl=1 From 131b3ab96a2746f52f13975e384c635d2cc9c3ec Mon Sep 17 00:00:00 2001 From: DarkSun Date: Tue, 17 Sep 2019 00:54:03 +0800 Subject: [PATCH 095/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190916=20Linux?= =?UTF-8?q?=20Plumbers,=20Appwrite,=20and=20more=20industry=20trends?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/tech/20190916 Linux Plumbers, Appwrite, and more industry trends.md --- ...ers, Appwrite, and more industry trends.md | 79 +++++++++++++++++++ 1 file changed, 79 insertions(+) create mode 100644 sources/tech/20190916 Linux Plumbers, Appwrite, and more industry trends.md diff --git a/sources/tech/20190916 Linux Plumbers, Appwrite, and more industry trends.md b/sources/tech/20190916 Linux Plumbers, Appwrite, and more industry trends.md new file mode 100644 index 0000000000..d3f1fd3087 --- /dev/null +++ b/sources/tech/20190916 Linux Plumbers, Appwrite, and more industry trends.md @@ -0,0 +1,79 @@ +[#]: collector: (lujun9972) +[#]: translator: ( ) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (Linux Plumbers, Appwrite, and more industry trends) +[#]: via: (https://opensource.com/article/19/9/conferences-industry-trends) +[#]: author: (Tim Hildred https://opensource.com/users/thildred) + +Linux Plumbers, Appwrite, and more industry trends +====== +A weekly look at open source community and industry trends. +![Person standing in front of a giant computer screen with numbers, data][1] + +As part of my role as a senior product marketing manager at an enterprise software company with an open source development model, I publish a regular update about open source community, market, and industry trends for product marketers, managers, and other influencers. Here are five of my and their favorite articles from that update. + +## [Working on Linux's nuts and bolts at Linux Plumbers][2] + +> The Kernel Maintainers Summit, Linux creator Linus Torvalds told me, is an invitation-only gathering of the top Linux kernel developers. But, while you might think it's about planning on the Linux kernel's future, that's not the case. "The maintainer summit is really different because it doesn't even talk about technical issues." 
Instead, "It's all about the process of creating and maintaining the Linux kernel." + +**The impact**: This is like the technical version of the Bilderberg meeting: you can have your flashy buzzword conferences, but we'll be over here making the real decisions. Or so I imagine. Probably less private jets involved though. + +## [Microsoft hosts first Windows Subsystem for Linux conference][3] + +> Hayden Barnes, founder of [Whitewater Foundry][4], a startup focusing on [Windows Subsystem for Linux (WSL)][5] [announced WSLconf 1][6], the first community conference for WSL. This event will be held on March 10-11, 2020 at Building 20 on the Microsoft HQ campus in Redmond, WA. The conference is still coming together. But we already know it will have presentations and workshops from [Pengwin, Whitewater's Linux for Windows,][7] Microsoft WSL, and [Canonical][8]'s [Ubuntu][9] on WSL developers. + +**The impact**: Microsoft is nurturing the seeds of community growing up around its increasing adoption of and contribution to open source software. It's enough to bring a tear to my eye. + +## [Introducing Appwrite: An open source backend server for mobile and web developers][10] + +> [Appwrite][11] is a new [open source][12], end to end backend server for frontend and mobile developers that allows you to build apps a lot faster. [Appwrite][13] goal is to abstract and simplify common development tasks behind REST APIs and tools, to help developers build advanced apps way faster. +> +> In this post I will shortly cover some of the main [Appwrite][14] services and explain about their main features and how they are designed to help you build your next project way faster than you would when writing all your backend APIs from scratch. + +**The impact**: Software development is getting more and more accessible as more open source middleware gets easier to use. Appwrite claims to reduce the time and cost of development by 70%. Imagine what that would mean to a small mobile development agency or citizen developer. I'm curious about how they'll monetize this. + +## ['More than just IT': Open source technologist says collaborative culture is key to government transformation][15] + +> AGL (agile government leadership) is providing a valuable support network for people who are helping government work better for the public. The organization is focused on things that I am very passionate about — DevOps, digital transformation, open source, and similar topics that are top-of-mind for many government IT leaders. AGL provides me with a community to learn about what the best and brightest are doing today, and share those learnings with my peers throughout the industry. + +**The impact**: It is easy to be cynical about the government no matter your political persuasion. I found it refreshing to have a reminder that the government is comprised of real people who are mostly doing their best to apply relevant technology to the public good. Especially when that technology is open source! + +## [How Bloomberg achieves close to 90-95% hardware utilization with Kubernetes][16] + +> In 2016, Bloomberg adopted Kubernetes—when it was still in alpha—and has seen remarkable results ever since using the project’s upstream code. “With Kubernetes, we’re able to very efficiently use our hardware to the point where we can get close to 90 to 95% utilization rates,” says Rybka. Autoscaling in Kubernetes allows the system to meet demands much faster. 
Furthermore, Kubernetes “offered us the ability to standardize our approach to how we build and manage services, which means that we can spend more time focused on actually working on the open source tools that we support,” says Steven Bower, Data and Analytics Infrastructure Lead. “If we want to stand up a new cluster in another location in the world, it’s really very straightforward to do that. Everything is all just code. Configuration is code.” + +**The impact**: Nothing cuts through the fog of marketing like utilization stats. One of the things that I've heard about Kube is that people don't know what to do with it when they have it running. Use cases like this give them (and you) something to aspire to. + +_I hope you enjoyed this list of what stood out to me from last week and come back next Monday for more open source community, market, and industry trends._ + +-------------------------------------------------------------------------------- + +via: https://opensource.com/article/19/9/conferences-industry-trends + +作者:[Tim Hildred][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensource.com/users/thildred +[b]: https://github.com/lujun9972 +[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/data_metrics_analytics_desktop_laptop.png?itok=9QXd7AUr (Person standing in front of a giant computer screen with numbers, data) +[2]: https://www.zdnet.com/article/working-on-linuxs-nuts-and-bolts-at-linux-plumbers/ +[3]: https://www.zdnet.com/article/microsoft-hosts-first-windows-subsystem-for-linux-conference/ +[4]: https://github.com/WhitewaterFoundry +[5]: https://docs.microsoft.com/en-us/windows/wsl/install-win10 +[6]: https://www.linkedin.com/feed/update/urn:li:activity:6574754435518599168/ +[7]: https://www.zdnet.com/article/pengwin-a-linux-specifically-for-windows-subsystem-for-linux/ +[8]: https://canonical.com/ +[9]: https://ubuntu.com/ +[10]: https://medium.com/@eldadfux/introducing-appwrite-an-open-source-backend-server-for-mobile-web-developers-4be70731575d +[11]: https://appwrite.io +[12]: https://github.com/appwrite/appwrite +[13]: https://medium.com/@eldadfux/introducing-appwrite-an-open-source-backend-server-for-mobile-web-developers-4be70731575d?source=friends_link&sk=b6a2be384aafd1fa5b1b6ff12906082c +[14]: https://appwrite.io/ +[15]: https://medium.com/agile-government-leadership/more-than-just-it-open-source-technologist-says-collaborative-culture-is-key-to-government-c46d1489f822 +[16]: https://www.cncf.io/blog/2019/09/12/how-bloomberg-achieves-close-to-90-95-hardware-utilization-with-kubernetes/ From bafbced87e3bcc68030228a16527a5229d3d1831 Mon Sep 17 00:00:00 2001 From: DarkSun Date: Tue, 17 Sep 2019 00:54:29 +0800 Subject: [PATCH 096/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190916=20How=20?= =?UTF-8?q?to=20start=20developing=20with=20.NET?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/tech/20190916 How to start developing with .NET.md --- ...90916 How to start developing with .NET.md | 170 ++++++++++++++++++ 1 file changed, 170 insertions(+) create mode 100644 sources/tech/20190916 How to start developing with .NET.md diff --git a/sources/tech/20190916 How to start developing with .NET.md b/sources/tech/20190916 How to start developing with .NET.md new file mode 100644 index 0000000000..8dae5addd0 --- /dev/null +++ 
b/sources/tech/20190916 How to start developing with .NET.md @@ -0,0 +1,170 @@ +[#]: collector: (lujun9972) +[#]: translator: ( ) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (How to start developing with .NET) +[#]: via: (https://opensource.com/article/19/9/getting-started-net) +[#]: author: (Seth Kenlon https://opensource.com/users/sethhttps://opensource.com/users/alex-bunardzichttps://opensource.com/users/alex-bunardzic) + +How to start developing with .NET +====== +Learn the basics to get up and running with the .NET development +platform. +![Coding on a computer][1] + +The .NET framework was released in 2000 by Microsoft. An open source implementation of the platform, [Mono][2], was the center of controversy in the early 2000s because Microsoft held several patents for .NET technology and could have used those patents to end Mono implementations. Fortunately, in 2014, Microsoft declared that the .NET development platform would be open source under the MIT license from then on, and in 2016, Microsoft purchased Xamarin, the company that produces Mono. + +Both .NET and Mono have grown into cross-platform programming environments for C#, F#, GTK#, Visual Basic, Vala, and more. Applications created with .NET and Mono have been delivered to Linux, BSD, Windows, MacOS, Android, and even some gaming consoles. You can use either .NET or Mono to develop .NET applications. Both are open source, and both have active and vibrant communities. This article focuses on getting started with Microsoft's implementation of the .NET environment. + +### How to install .NET + +The .NET downloads are divided into packages: one containing just a .NET runtime, and the other a .NET software development kit (SDK) containing the .NET Core and runtime. Depending on your platform, there may be several variants of even these packages, accounting for architecture and OS version. To start developing with .NET, you must [install the SDK][3]. This gives you the [dotnet][4] terminal or PowerShell command, which you can use to create and build projects. + +#### Linux + +To install .NET on Linux, first, add the Microsoft Linux software repository to your computer. + +On Fedora: + + +``` +$ sudo rpm --import +$ sudo wget -q -O /etc/yum.repos.d/microsoft-prod.repo +``` + +On Ubuntu: + + +``` +$ wget -q -O packages-microsoft-prod.deb +$ sudo dpkg -i packages-microsoft-prod.deb +``` + +Next, install the SDK using your package manager, replacing **<X.Y>** with the current version of the .NET release: + +On Fedora: + + +``` +`$ sudo dnf install dotnet-sdk-` +``` + +On Ubuntu: + + +``` +$ sudo apt install apt-transport-https +$ sudo apt update +$ sudo apt install dotnet-sdk-<X.Y> +``` + +Once all the packages are downloaded and installed, confirm the installation by opening a terminal and typing: + + +``` +$ dotnet --version +X.Y.Z +``` + +#### Windows + +If you're on Microsoft Windows, you probably already have the .NET runtime installed. However, to develop .NET applications, you must also install the .NET Core SDK. + +First, [download the installer][3]. To keep your options open, download .NET Core for cross-platform development (the .NET Framework is Windows-only). Once the **.exe** file is downloaded, double-click it to launch the installation wizard, and click through the two-step install process: accept the license and allow the install to proceed. + +![Installing dotnet on Windows][5] + +Afterward, open PowerShell from your Application menu in the lower-left corner. 
In PowerShell, type a test command: + + +``` +`PS C:\Users\osdc> dotnet` +``` + +If you see information about a dotnet installation, .NET has been installed correctly. + +#### MacOS + +If you're on an Apple Mac, [download the Mac installer][3], which comes in the form of a **.pkg** package. Download and double-click on the **.pkg** file and click through the installer. You may need to grant permission for the installer since the package is not from the App Store. + +Once all packages are downloaded and installed, confirm the installation by opening a terminal and typing: + + +``` +$ dotnet --version +X.Y.Z +``` + +### Hello .NET + +A sample "hello world" application written in .NET is provided with the **dotnet** command. Or, more accurately, the command provides the sample application. + +First, create a project directory and the required code infrastructure using the **dotnet** command with the **new** and **console** options to create a new console-only application. Use the **-o** option to specify a project name: + + +``` +`$ dotnet new console -o hellodotnet` +``` + +This creates a directory called **hellodotnet** in your current directory. Change into your project directory and have a look around: + + +``` +$ cd hellodotnet +$ dir +hellodotnet.csproj  obj  Program.cs +``` + +The file **Program.cs** is an empty C# file containing a simple Hello World application. Open it in a text editor to view it. Microsoft's Visual Studio Code is a cross-platform, open source application built with dotnet in mind, and while it's not a bad text editor, it also collects a lot of data about its user (and grants itself permission to do so in the license applied to its binary distribution). If you want to try out Visual Studio Code, consider using [VSCodium][6], a distribution of Visual Studio Code that's built from the MIT-licensed source code _without_ the telemetry (read the [documentation][7] for options to disable other forms of tracking in even this build). Alternatively, just use your existing favorite text editor or IDE. + +The boilerplate code in a new console application is: + + +``` +using System; + +namespace hellodotnet +{ +    class Program +    { +        static void Main(string[] args) +        { +            Console.WriteLine("Hello World!"); +        } +    } +} +``` + +To run the program, use the **dotnet run** command: + + +``` +$ dotnet run +Hello World! +``` + +That's the basic workflow of .NET and the **dotnet** command. The full [C# guide for .NET][8] is available, and everything there is relevant to .NET. For examples of .NET in action, follow [Alex Bunardzic][9]'s mutation testing articles here on opensource.com. 
+ +-------------------------------------------------------------------------------- + +via: https://opensource.com/article/19/9/getting-started-net + +作者:[Seth Kenlon][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensource.com/users/sethhttps://opensource.com/users/alex-bunardzichttps://opensource.com/users/alex-bunardzic +[b]: https://github.com/lujun9972 +[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/code_computer_laptop_hack_work.png?itok=aSpcWkcl (Coding on a computer) +[2]: https://www.monodevelop.com/ +[3]: https://dotnet.microsoft.com/download +[4]: https://docs.microsoft.com/en-us/dotnet/core/tools/dotnet?tabs=netcore21 +[5]: https://opensource.com/sites/default/files/uploads/dotnet-windows-install.jpg (Installing dotnet on Windows) +[6]: https://vscodium.com/ +[7]: https://github.com/VSCodium/vscodium/blob/master/DOCS.md +[8]: https://docs.microsoft.com/en-us/dotnet/csharp/tutorials/intro-to-csharp/ +[9]: https://opensource.com/users/alex-bunardzic (View user profile.) From 59ed6c97ba3426312974f048304f82cf666d880e Mon Sep 17 00:00:00 2001 From: DarkSun Date: Tue, 17 Sep 2019 00:54:52 +0800 Subject: [PATCH 097/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190916=20Linux?= =?UTF-8?q?=20commands=20to=20display=20your=20hardware=20information?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/tech/20190916 Linux commands to display your hardware information.md --- ...ds to display your hardware information.md | 417 ++++++++++++++++++ 1 file changed, 417 insertions(+) create mode 100644 sources/tech/20190916 Linux commands to display your hardware information.md diff --git a/sources/tech/20190916 Linux commands to display your hardware information.md b/sources/tech/20190916 Linux commands to display your hardware information.md new file mode 100644 index 0000000000..f0a13905e5 --- /dev/null +++ b/sources/tech/20190916 Linux commands to display your hardware information.md @@ -0,0 +1,417 @@ +[#]: collector: (lujun9972) +[#]: translator: ( ) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (Linux commands to display your hardware information) +[#]: via: (https://opensource.com/article/19/9/linux-commands-hardware-information) +[#]: author: (Howard Fosdick https://opensource.com/users/howtechhttps://opensource.com/users/sethhttps://opensource.com/users/sethhttps://opensource.com/users/seth) + +Linux commands to display your hardware information +====== +Get the details on what's inside your computer from the command line. +![computer screen ][1] + +There are many reasons you might need to find out details about your computer hardware. For example, if you need help fixing something and post a plea in an online forum, people will immediately ask you for specifics about your computer. Or, if you want to upgrade your computer, you'll need to know what you have and what you can have. You need to interrogate your computer to discover its specifications. + +The easiest way is to do that is with one of the standard Linux GUI programs: + + * [i-nex][2] collects hardware information and displays it in a manner similar to the popular [CPU-Z][3] under Windows. + * [HardInfo][4] displays hardware specifics and even includes a set of eight popular benchmark programs you can run to gauge your system's performance. 
+ * [KInfoCenter][5] and [Lshw][6] also display hardware details and are available in many software repositories. + + + +Alternatively, you could open up the box and read the labels on the disks, memory, and other devices. Or you could enter the boot-time panels—the so-called UEFI or BIOS panels. Just hit [the proper program function key][7] during the boot process to access them. These two methods give you hardware details but omit software information. + +Or, you could issue a Linux line command. Wait a minute… that sounds difficult. Why would you do this? + +Sometimes it's easy to find a specific bit of information through a well-targeted line command. Perhaps you don't have a GUI program available or don't want to install one. + +Probably the main reason to use line commands is for writing scripts. Whether you employ the Linux shell or another programming language, scripting typically requires coding line commands. + +Many line commands for detecting hardware must be issued under root authority. So either switch to the root user ID, or issue the command under your regular user ID preceded by **sudo**: + + +``` +`sudo ` +``` + +and respond to the prompt for the root password. + +This article introduces many of the most useful line commands for system discovery. The quick reference chart at the end summarizes them. + +### Hardware overview + +There are several line commands that will give you a comprehensive overview of your computer's hardware. + +The **inxi** command lists details about your system, CPU, graphics, audio, networking, drives, partitions, sensors, and more. Forum participants often ask for its output when they're trying to help others solve problems. It's a standard diagnostic for problem-solving: + + +``` +`inxi -Fxz` +``` + +The **-F** flag means you'll get full output, **x** adds details, and **z** masks out personally identifying information like MAC and IP addresses. + +The **hwinfo** and **lshw** commands display much of the same information in different formats: + + +``` +`hwinfo --short` +``` + +or + + +``` +`lshw -short` +``` + +The long forms of these two commands spew out exhaustive—but hard to read—output: + + +``` +`hwinfo` +``` + +or + + +``` +`lshw` +``` + +### CPU details + +You can learn everything about your CPU through line commands. View CPU details by issuing either the **lscpu** command or its close relative **lshw**: + + +``` +`lscpu` +``` + +or + + +``` +`lshw -C cpu` +``` + +In both cases, the last few lines of output list all the CPU's capabilities. Here you can find out whether your processor supports specific features. + +With all these commands, you can reduce verbiage and narrow any answer down to a single detail by parsing the command output with the **grep** command. For example, to view only the CPU make and model: + + +``` +`lshw -C cpu | grep -i product` +``` + +To view just the CPU's speed in megahertz: + + +``` +`lscpu | grep -i mhz` +``` + +or its [BogoMips][8] power rating: + + +``` +`lscpu | grep -i bogo` +``` + +The **-i** flag on the **grep** command simply ensures your search ignores whether the output it searches is upper or lower case. + +### Memory + +Linux line commands enable you to gather all possible details about your computer's memory. You can even determine whether you can add extra memory to the computer without opening up the box. 
+
+To list each memory stick and its capacity, issue the **dmidecode** command:
+
+
+```
+`dmidecode -t memory | grep -i size`
+```
+
+For more specifics on system memory, including type, size, speed, and voltage of each RAM stick, try:
+
+
+```
+`lshw -short -C memory`
+```
+
+One thing you'll surely want to know is the maximum memory you can install on your computer:
+
+
+```
+`dmidecode -t memory | grep -i max`
+```
+
+Now find out whether there are any open slots to insert additional memory sticks. You can do this without opening your computer by issuing this command:
+
+
+```
+`lshw -short -C memory | grep -i empty`
+```
+
+A null response means all the memory slots are already in use.
+
+Determining how much video memory you have requires a pair of commands. First, list all devices with the **lspci** command and limit the output displayed to the video device you're interested in:
+
+
+```
+`lspci | grep -i vga`
+```
+
+The output line that identifies the video controller will typically look something like this:
+
+
+```
+`00:02.0 VGA compatible controller: Intel Corporation 82Q35 Express Integrated Graphics Controller (rev 02)`
+```
+
+Now reissue the **lspci** command, referencing the video device number as the selected device:
+
+
+```
+`lspci -v -s 00:02.0`
+```
+
+The output line identified as _prefetchable_ is the amount of video RAM on your system:
+
+
+```
+...
+Memory at f0100000 (32-bit, non-prefetchable) [size=512K]
+I/O ports at 1230 [size=8]
+Memory at e0000000 (32-bit, prefetchable) [size=256M]
+Memory at f0000000 (32-bit, non-prefetchable) [size=1M]
+...
+```
+
+Finally, to show current memory use in megabytes, issue:
+
+
+```
+`free -m`
+```
+
+This tells how much memory is free, how much is in use, the size of the swap area, and whether it's being used. For example, the output might look like this:
+
+
+```
+              total        used        free     shared    buff/cache   available
+Mem:          11891        1326        8877      212        1687       10077
+Swap:          1999           0        1999
+```
+
+The **top** command gives you more detail on memory use. It shows current overall memory and CPU use and also breaks it down by process ID, user ID, and the commands being run. It displays full-screen text output:
+
+
+```
+`top`
+```
+
+### Disks, filesystems, and devices
+
+You can easily determine whatever you wish to know about disks, partitions, filesystems, and other devices.
+
+To display a single line describing each disk device:
+
+
+```
+`lshw -short -C disk`
+```
+
+Get details on any specific SATA disk, such as its model and serial numbers, supported modes, sector count, and more with:
+
+
+```
+`hdparm -i /dev/sda`
+```
+
+Of course, you should replace **sda** with **sdb** or another device mnemonic if necessary.
+
+To list all disks with all their defined partitions, along with the size of each, issue:
+
+
+```
+`lsblk`
+```
+
+For more detail, including the number of sectors, size, filesystem ID and type, and partition starting and ending sectors:
+
+
+```
+`fdisk -l`
+```
+
+To start up Linux, you need to identify mountable partitions to the [GRUB][9] bootloader. You can find this information with the **blkid** command.
It lists each partition's unique identifier (UUID) and its filesystem type (e.g., ext3 or ext4):
+
+
+```
+`blkid`
+```
+
+To list the mounted filesystems, their mount points, and the space used and available for each (in megabytes):
+
+
+```
+`df -m`
+```
+
+Finally, you can list details for all USB and PCI buses and devices with these commands:
+
+
+```
+`lsusb`
+```
+
+or
+
+
+```
+`lspci`
+```
+
+### Network
+
+Linux offers tons of networking line commands. Here are just a few.
+
+To see hardware details about your network card, issue:
+
+
+```
+`lshw -C network`
+```
+
+Traditionally, the command to show network interfaces was **ifconfig**:
+
+
+```
+`ifconfig -a`
+```
+
+But many people now use:
+
+
+```
+`ip link show`
+```
+
+or
+
+
+```
+`netstat -i`
+```
+
+In reading the output, it helps to know common network abbreviations:
+
+**Abbreviation** | **Meaning**
+---|---
+**lo** | Loopback interface
+**eth0** or **enp*** | Ethernet interface
+**wlan0** | Wireless interface
+**ppp0** | Point-to-Point Protocol interface (used by a dial-up modem, PPTP VPN connection, or USB modem)
+**vboxnet0** or **vmnet*** | Virtual machine interface
+
+The asterisks in this table are wildcard characters, serving as a placeholder for whatever series of characters appear from system to system.
+
+To show your default gateway and routing tables, issue either of these commands:
+
+
+```
+`ip route | column -t`
+```
+
+or
+
+
+```
+`netstat -r`
+```
+
+### Software
+
+Let's conclude with two commands that display low-level software details. For example, what if you want to know whether you have the latest firmware installed? This command shows the UEFI or BIOS date and version:
+
+
+```
+`dmidecode -t bios`
+```
+
+What is the kernel version, and is it 64-bit? And what is the network hostname? To find out, issue:
+
+
+```
+`uname -a`
+```
+
+### Quick reference chart
+
+This chart summarizes all the commands covered in this article:
+
+Display info about all hardware | **inxi -Fxz**              _\--or--_
+**hwinfo --short**     _\--or--_
+**lshw  -short**
+---|---
+Display all CPU info | **lscpu**                  _\--or--_
+**lshw -C cpu**
+Show CPU features (e.g., PAE, SSE2) | **lshw -C cpu | grep -i capabilities**
+Report whether the CPU is 32- or 64-bit | **lshw -C cpu | grep -i width**
+Show current memory size and configuration | **dmidecode -t memory | grep -i size**    _\--or--_
+**lshw -short -C memory**
+Show maximum memory for the hardware | **dmidecode -t memory | grep -i max**
+Determine whether memory slots are available | **lshw -short -C memory | grep -i empty**
+(a null answer means no slots available)
+Determine the amount of video memory | **lspci | grep -i vga**
+then reissue with the device number;
+for example:  **lspci -v -s 00:02.0**
+The VRAM is the _prefetchable_ value.
+Show current memory use | **free -m**    _\--or--_ +**top** +List the disk drives | **lshw -short -C disk** +Show detailed information about a specific disk drive | **hdparm -i /dev/sda** +(replace **sda** if necessary) +List information about disks and partitions | **lsblk **     (simple)      _\--or--_ +**fdisk -l**   (detailed) +List partition IDs (UUIDs) | **blkid** +List mounted filesystems, their mount points, +and megabytes used and available for each | **df -m** +List USB devices | **lsusb** +List PCI devices | **lspci** +Show network card details | **lshw -C network** +Show network interfaces | **ifconfig -a**       _\--or--_ +**ip link show   **_\--or--_ +**netstat -i** +Display routing tables | **ip route | column -t`  `**_\--or--_ +**netstat -r** +Display UEFI/BIOS info | **dmidecode -t bios** +Show kernel version, network hostname, more | **uname -a** + +Do you have a favorite command that I overlooked? Please add a comment and share it. + +-------------------------------------------------------------------------------- + +via: https://opensource.com/article/19/9/linux-commands-hardware-information + +作者:[Howard Fosdick][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensource.com/users/howtechhttps://opensource.com/users/sethhttps://opensource.com/users/sethhttps://opensource.com/users/seth +[b]: https://github.com/lujun9972 +[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/features_solutions_command_data.png?itok=4_VQN3RK (computer screen ) +[2]: http://sourceforge.net/projects/i-nex/ +[3]: https://www.cpuid.com/softwares/cpu-z.html +[4]: http://sourceforge.net/projects/hardinfo.berlios/ +[5]: https://userbase.kde.org/KInfoCenter +[6]: http://www.binarytides.com/linux-lshw-command/ +[7]: http://www.disk-image.com/faq-bootmenu.htm +[8]: https://en.wikipedia.org/wiki/BogoMips +[9]: https://www.dedoimedo.com/computers/grub.html From bedc6bc22202769050c2d81898edf31953301ae2 Mon Sep 17 00:00:00 2001 From: DarkSun Date: Tue, 17 Sep 2019 00:55:22 +0800 Subject: [PATCH 098/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190916=20Constr?= =?UTF-8?q?aint=20programming=20by=20example?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/tech/20190916 Constraint programming by example.md --- ...90916 Constraint programming by example.md | 163 ++++++++++++++++++ 1 file changed, 163 insertions(+) create mode 100644 sources/tech/20190916 Constraint programming by example.md diff --git a/sources/tech/20190916 Constraint programming by example.md b/sources/tech/20190916 Constraint programming by example.md new file mode 100644 index 0000000000..c434913c5e --- /dev/null +++ b/sources/tech/20190916 Constraint programming by example.md @@ -0,0 +1,163 @@ +[#]: collector: (lujun9972) +[#]: translator: ( ) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (Constraint programming by example) +[#]: via: (https://opensource.com/article/19/9/constraint-programming-example) +[#]: author: (Oleksii Tsvietnov https://opensource.com/users/oleksii-tsvietnovhttps://opensource.com/users/oleksii-tsvietnov) + +Constraint programming by example +====== +Understand constraint programming with an example application that +converts a character's case and ASCII codes. 
+![Math formulas in green writing][1]
+
+There are many different ways to solve problems in computing. You might "brute force" your way to a solution by calculating as many possibilities as you can, or you might take a procedural approach and carefully establish the known factors that influence the correct answer. In [constraint programming][2], a problem is viewed as a series of limitations on what could possibly be a valid solution. This paradigm can be applied to effectively solve a group of problems that can be translated to variables and constraints or represented as a mathematical equation. In this way, it is related to the Constraint Satisfaction Problem ([CSP][3]).
+
+Constraint programming uses a declarative style: it describes a general model with certain properties. In contrast to the imperative style, it doesn't tell _how_ to achieve something, but rather _what_ to achieve. Instead of defining a set of instructions with only one obvious way to compute values, constraint programming declares relationships between variables within constraints. A final model makes it possible to compute the values of variables regardless of direction or changes. Thus, any change in the value of one variable affects the whole system (i.e., all other variables), and to satisfy defined constraints, it leads to recomputing the other values.
+
+As an example, let's take Pythagoras' theorem: **a² + b² = c²**. The _constraint_ is represented by this equation, which has three _variables_ (a, b, and c), and each has a _domain_ (non-negative). Using the imperative programming style, to compute any of the variables if we have the other two, we would need to create three different functions (because each variable is computed by a different equation):
+
+ * c = √(a² + b²)
+ * a = √(c² - b²)
+ * b = √(c² - a²)
+
+
+
+These functions satisfy the main constraint, and to check domains, each function should validate the input. Moreover, at least one more function would be needed for choosing an appropriate function according to the provided variables. This is one of the possible solutions:
+
+
+```
+def pythagoras(*, a=None, b=None, c=None):
+    ''' Computes a side of a right triangle '''
+
+    # Validate
+    if len([i for i in (a, b, c) if i is None or i <= 0]) != 1:
+        raise SystemExit("ERROR: you need to define exactly two positive variables")
+
+    # Compute
+    if a is None:
+        return (c**2 - b**2)**0.5
+    elif b is None:
+        return (c**2 - a**2)**0.5
+    else:
+        return (a**2 + b**2)**0.5
+```
+
+To see the difference with the constraint programming approach, I'll show an example of a "problem" with four variables and a constraint that is not represented by a straightforward mathematical equation. This is a converter that can change characters' cases (lower-case to/from capital/upper-case) and return the ASCII codes for each. Hence, at any time, the converter is aware of all four values and reacts immediately to any changes. The idea of creating this example was fully inspired by John DeNero's [Fahrenheit-Celsius converter][4].
+
+Here is a diagram of a constraint system:
+
+![Constraint system model][5]
+
+The represented "problem" is translated into a constraint system that consists of nodes (constraints) and connectors (variables). Connectors provide an interface for getting and setting values. They also check the variables' domains. When one value changes, that particular connector notifies all its connected nodes about the change.
Nodes, in turn, satisfy constraints, calculate new values, and propagate them to other connectors across the system by "asking" them to set a new value. Propagation is done using the message-passing technique, which means connectors and nodes get messages (synchronously) and react accordingly. For instance, if the system gets the **A** letter on the "capital letter" connector, the other three connectors provide an appropriate result according to the defined constraint on the nodes: 97, a, and 65. Setting a lower-case letter (e.g., b) on that connector is not allowed because each connector has its own domain.
+
+When all connectors are linked to nodes, which are defined by constraints, the system is fully set and ready to get values on any of the four connectors. Once it's set, the system automatically calculates and sets values on the rest of the connectors. There is no need to check what variable was set and which functions should be called, as is required in the imperative approach—that is relatively easy to achieve with a few variables but gets interesting in case of tens or more.
+
+### How it works
+
+The full source code is available in my [GitHub repo][6]. I'll dig a little bit into the details to explain how the system is built.
+
+First, define the connectors by giving them names and setting domains as a function of one argument:
+
+
+```
+import constraint_programming as cp
+
+small_ascii = cp.connector('Small Ascii', lambda x: x >= 97 and x <= 122)
+small_letter = cp.connector('Small Letter', lambda x: x >= 'a' and x <= 'z')
+capital_ascii = cp.connector('Capital Ascii', lambda x: x >= 65 and x <= 90)
+capital_letter = cp.connector('Capital Letter', lambda x: x >= 'A' and x <= 'Z')
+```
+
+Second, link these connectors to nodes. There are two types: _code_ (translates letters back and forth to ASCII codes) and _aA_ (translates small letters to capital and back):
+
+
+```
+code(small_letter, small_ascii)
+code(capital_letter, capital_ascii)
+aA(small_letter, capital_letter)
+```
+
+These two nodes differ in which functions should be called, but they are derived from a general constraint function:
+
+
+```
+def code(conn1, conn2):
+    return cp.constraint(conn1, conn2, ord, chr)
+
+def aA(conn1, conn2):
+    return cp.constraint(conn1, conn2, str.upper, str.lower)
+```
+
+Each node has only two connectors. If there is an update on the first connector, then the first function is called to calculate the value of the other connector (variable). The same happens if the second connector's value changes. For example, if the _code_ node gets **A** on the **conn1** connector, then the function **ord** will be used to get its ASCII code. And, the other way around, if the _aA_ node gets **A** on the **conn2** connector, then it needs to use the **str.lower** function to get the correct small letter on **conn1**. Every node is responsible for computing new values and "sending" a message to another connector that there is a new value to set. The message carries the name of the node that is asking to set the new value, along with the new value itself.
+
+
+```
+def set_value(src_constr, value):
+    if (not domain is None) and (not domain(value)):
+        raise ValueOutOfDomain(link, value)
+    link['value'] = value
+    for constraint in constraints:
+        if constraint is not src_constr:
+            constraint['update'](link)
+```
+
+When a connector receives the **set** message, it runs the **set_value** function to check a domain, sets a new value, and sends the "update" message to another node. It is just a notification that the value on that connector has changed.
+
+
+```
+def update(src_conn):
+    if src_conn is conn1:
+        conn2['set'](node, constr1(conn1['value']))
+    else:
+        conn1['set'](node, constr2(conn2['value']))
+```
+
+Then, the notified node requests this new value on the connector, computes a new value for another connector, and so on until the whole system changes. That's how the propagation works.
+
+But how does the message passing happen? It is implemented as accessing keys of dictionaries. Both functions (connector and constraint) return a _dispatch dictionary_. Such a dictionary contains _messages_ as keys and _closures_ as values. By accessing a key, let's say, **set**, a dictionary returns the function **set_value** (closure) that has access to all local names of the "connector" function.
+
+
+```
+# A dispatch dictionary
+link = { 'name': name,
+         'value': None,
+         'connect': connect,
+         'set': set_value,
+         'constraints': get_constraints }
+
+return link
+```
+
+Having a dictionary as a return value makes it possible to create multiple closures (functions) with access to the same local state to operate on. Then these closures are callable by using keys as a type of message.
+
+### Why use Constraint programming?
+
+Constraint programming can give you a new perspective on difficult problems. It's not something you can use in every situation, but it may well open new opportunities for solutions in certain situations. If you find yourself up against an equation that seems difficult to reliably solve in code, try looking at it from a different angle. If the angle that seems to work best is constraint programming, you now have an example of how it can be implemented.
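+
+To tie the pieces together, here's a small, hypothetical driver (my own sketch, not taken from the linked repo). It assumes the four connectors and the _code_/_aA_ nodes were created exactly as shown above, and it passes a plain string, **'user'**, as the message source so that every attached constraint receives the update notification:
+
+
+```
+# Hypothetical usage sketch; assumes small_ascii, small_letter,
+# capital_ascii, and capital_letter are wired up with code() and aA()
+# as shown earlier in this article.
+
+# Send the 'set' message with a sentinel source ('user'); since 'user'
+# is not one of the attached constraint dictionaries, none of them are
+# skipped, and propagation reaches the whole system.
+capital_letter['set']('user', 'A')
+
+# Propagation has filled in the other three connectors:
+print(small_letter['value'])    # 'a'
+print(small_ascii['value'])     # 97
+print(capital_ascii['value'])   # 65
+```
+
+Setting, say, **small_ascii** to 98 instead would flow the other way through the same nodes and yield **b**, **B**, and 66.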
+ +* * * + +_This article was originally published on [Oleksii Tsvietnov's blog][7] and is reprinted with his permission._ + +-------------------------------------------------------------------------------- + +via: https://opensource.com/article/19/9/constraint-programming-example + +作者:[Oleksii Tsvietnov][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensource.com/users/oleksii-tsvietnovhttps://opensource.com/users/oleksii-tsvietnov +[b]: https://github.com/lujun9972 +[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/edu_math_formulas.png?itok=B59mYTG3 (Math formulas in green writing) +[2]: https://en.wikipedia.org/wiki/Constraint_programming +[3]: https://vorakl.com/articles/csp/ +[4]: https://composingprograms.com/pages/24-mutable-data.html#propagating-constraints +[5]: https://opensource.com/sites/default/files/uploads/constraint-system.png (Constraint system model) +[6]: https://github.com/vorakl/composingprograms.com/tree/master/char_converter +[7]: https://vorakl.com/articles/char-converter/ From e052f7bcfe63c3ef8e9f970cd045bc49734ae064 Mon Sep 17 00:00:00 2001 From: DarkSun Date: Tue, 17 Sep 2019 00:55:39 +0800 Subject: [PATCH 099/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190915=20Sandbo?= =?UTF-8?q?xie's=20path=20to=C2=A0open=20source,=20update=20on=20the=20Pen?= =?UTF-8?q?tagon's=20open=20source=20initiative,=20open=20source=20in=20Ho?= =?UTF-8?q?llywood,=C2=A0and=20more?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/tech/20190915 Sandboxie-s path to-open source, update on the Pentagon-s open source initiative, open source in Hollywood,-and more.md --- ...ive, open source in Hollywood,-and more.md | 103 ++++++++++++++++++ 1 file changed, 103 insertions(+) create mode 100644 sources/tech/20190915 Sandboxie-s path to-open source, update on the Pentagon-s open source initiative, open source in Hollywood,-and more.md diff --git a/sources/tech/20190915 Sandboxie-s path to-open source, update on the Pentagon-s open source initiative, open source in Hollywood,-and more.md b/sources/tech/20190915 Sandboxie-s path to-open source, update on the Pentagon-s open source initiative, open source in Hollywood,-and more.md new file mode 100644 index 0000000000..f7ea0ed99b --- /dev/null +++ b/sources/tech/20190915 Sandboxie-s path to-open source, update on the Pentagon-s open source initiative, open source in Hollywood,-and more.md @@ -0,0 +1,103 @@ +[#]: collector: (lujun9972) +[#]: translator: ( ) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (Sandboxie's path to open source, update on the Pentagon's open source initiative, open source in Hollywood, and more) +[#]: via: (https://opensource.com/article/19/9/news-september-15) +[#]: author: (Lauren Maffeo https://opensource.com/users/lmaffeo) + +Sandboxie's path to open source, update on the Pentagon's open source initiative, open source in Hollywood, and more +====== +Catch up on the biggest open source headlines from the past two weeks. +![Weekly news roundup with TV][1] + +In this edition of our open source news roundup, Sandboxie's path to open source, update on the Pentagon's adoption of open source, open source in Hollywood, and more! 
+
+### Sandboxie becomes freeware on its way to open source
+
+Sophos Group plc, a British security company, released a [free version of its popular Sandboxie tool][2], used as an isolated operating environment for Windows ([downloadable here][2]).
+
+Sophos said that since Sandboxie isn't a core aspect of its business, the easier decision would've been to shut it down. But Sandboxie has [earned a reputation][3] for letting users run unknown software in a safe environment without risking their systems, so the team is putting in the additional work to release it as open source software. This intermediate phase of free-but-not-open-source appears to be related to the current system design, which requires an activation key:
+
+> Sandboxie currently uses a license key to activate and grant access to premium features only available to paid customers (as opposed to those using a free version). We have modified the code and have released an updated free version that does not restrict any features. In other words, the new free license will have access to all the features previously only available to paid customers.
+
+Citing this tool's community impact, senior leaders at Sophos announced that Sandboxie version 5.31.4, an unrestricted version of the program, will remain free until the tool is fully open sourced.
+
+"The Sandboxie user base represents some of the most passionate, forward thinking, and knowledgeable members of the security community and we didn’t want to let you down," [Sophos' blog post read][4]. "After thoughtful consideration we decided that the best way to keep Sandboxie going was to give it back to its users -- transitioning it to an open source tool."
+
+### The Pentagon doesn't meet White House mandate for more open source software
+
+In 2016, the White House mandated that each government agency had to open source at least 20 percent of its custom software within three years. There is an [interesting article][5] about this initiative from 2017 that laid out some of the excitement and challenges.
+
+According to the Government Accountability Office, [the Pentagon's not even halfway there][6].
+
+In an article for Nextgov, Jack Corrigan wrote that as of July 2019, the Pentagon had released just 10 percent of its code as open source. They've also not yet implemented other aspects of the White House mandate, including the directive to build an open source software policy and inventories of custom code.
+
+According to the report, some government officials told the GAO that they worry about security risks of sharing code across government departments. They also admitted to not creating metrics that could measure their open source efforts' successes. The Pentagon's Chief Technology Officer cited the Pentagon's size as the reason for not implementing the White House's open source mandate. In a report published Tuesday, the GAO said, “Until [the Defense Department] fully implements its pilot program and establishes milestones for completing the OMB requirements, the department will not be positioned to take advantage of significant cost savings and efficiencies."
+
+### A team of volunteers works to find and digitize copyright-free books
+
+All books published in the U.S. before 1924 are [publicly owned and can be freely used/copied][7]. Books published in and after 1964 will stay under copyright for 95 years after their publication dates. But thanks to a copyright loophole, up to 75 percent of books published between 1923 and 1964 are free to read and copy.
The time-consuming trick is confirming which books those are.
+
+So, a group of libraries, volunteers, and archivists have united to learn which books are copyright-free, then digitize and upload them to the Internet. Since renewal records were already digitized, it's been easy to tell if books published between 1923 and 1964 had their copyrights renewed. But looking for a lack of copyright renewal is much harder since you're trying to prove a negative.
+
+Participants include the New York Public Library, [which recently explained][8] why the time-consuming project is worthwhile. To help find more books faster, the NYPL converted many records to XML format. This makes it easier to automate the process of finding which books can be added to the public domain.
+
+### Hollywood's Academy Software Foundation gains new members
+
+Microsoft and Apple announced plans to contribute at the premier membership level of the ASF. They'll join [founding board members][9] including Netflix, Google Cloud, Disney Studios, and Sony Pictures.
+
+The Academy Software Foundation launched in 2018 as a joint project of the [Academy of Motion Picture Arts and Sciences][10] and the [Linux Foundation][11].
+
+> The mission of the Academy Software Foundation (ASWF) is to increase the quality and quantity of contributions to the content creation industry’s open source software base; to provide a neutral forum to coordinate cross-project efforts; to provide a common build and test infrastructure; and to provide individuals and organizations a clear path to participation in advancing our open source ecosystem.
+
+Within its first year, the Foundation built [OpenTimelineIO][12], an open source API and interchange format that helps studio teams collaborate across departments. OpenTimelineIO was formally accepted by [the Foundation's Technical Advisory Council][13] as its fifth hosted project last July. They now maintain it alongside [OpenColorIO][14], [OpenCue][15], [OpenEXR][16], and [OpenVDB][17].
+ +#### In other news + + * [Comcast puts open source networking software into production][18] + * [SD Times open source project of the week: Ballerina][19] + * [DOD struggles to implement open source pilots][20] + * [Kong open sources universal service mesh Kuma][21] + * [Eclipse unveils Jakarta EE 8][22] + + + +_Thanks, as always, to Opensource.com staff members and moderators for their help this week._ + +-------------------------------------------------------------------------------- + +via: https://opensource.com/article/19/9/news-september-15 + +作者:[Lauren Maffeo][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensource.com/users/lmaffeo +[b]: https://github.com/lujun9972 +[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/weekly_news_roundup_tv.png?itok=B6PM4S1i (Weekly news roundup with TV) +[2]: https://www.sandboxie.com/DownloadSandboxie +[3]: https://betanews.com/2019/09/13/sandboxie-free-open-source/ +[4]: https://community.sophos.com/products/sandboxie/f/forum/115109/major-sandboxie-news-sandboxie-is-now-a-free-tool-with-plans-to-transition-it-to-an-open-source-tool/414522 +[5]: https://medium.com/@DefenseDigitalService/code-mil-an-open-source-initiative-at-the-pentagon-5ae4986b79bc +[6]: https://www.nextgov.com/analytics-data/2019/09/pentagon-needs-make-more-software-open-source-watchdog-says/159832/ +[7]: https://www.vice.com/en_us/article/a3534j/libraries-and-archivists-are-scanning-and-uploading-books-that-are-secretly-in-the-public-domain +[8]: https://www.nypl.org/blog/2019/09/01/historical-copyright-records-transparency +[9]: https://variety.com/2019/digital/news/microsoft-apple-academy-software-foundation-1203334675/ +[10]: https://www.oscars.org/ +[11]: http://www.linuxfoundation.org/ +[12]: https://github.com/PixarAnimationStudios/OpenTimelineIO +[13]: https://www.linuxfoundation.org/press-release/2019/07/opentimelineio-joins-aswf/ +[14]: https://opencolorio.org/ +[15]: https://www.opencue.io/ +[16]: https://www.openexr.com/ +[17]: https://www.openvdb.org/ +[18]: https://www.fiercetelecom.com/operators/comcast-puts-open-source-networking-software-into-production +[19]: https://sdtimes.com/os/sd-times-open-source-project-of-the-week-ballerina/ +[20]: https://www.fedscoop.com/open-source-software-dod-struggles/ +[21]: https://sdtimes.com/micro/kong-open-sources-universal-service-mesh-kuma/ +[22]: https://devclass.com/2019/09/11/hey-were-open-source-again-eclipse-unveils-jakarta-ee-8/ From 07a90f211533d8f4d799a3d8dcbc8701890f31a3 Mon Sep 17 00:00:00 2001 From: DarkSun Date: Tue, 17 Sep 2019 00:56:02 +0800 Subject: [PATCH 100/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190916=203=20st?= =?UTF-8?q?rategies=20to=20simplify=20complex=20networks?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/talk/20190916 3 strategies to simplify complex networks.md --- ...strategies to simplify complex networks.md | 75 +++++++++++++++++++ 1 file changed, 75 insertions(+) create mode 100644 sources/talk/20190916 3 strategies to simplify complex networks.md diff --git a/sources/talk/20190916 3 strategies to simplify complex networks.md b/sources/talk/20190916 3 strategies to simplify complex networks.md new file mode 100644 index 0000000000..fa82881156 --- /dev/null +++ b/sources/talk/20190916 3 strategies to simplify complex networks.md @@ -0,0 
+1,75 @@
+[#]: collector: (lujun9972)
+[#]: translator: ( )
+[#]: reviewer: ( )
+[#]: publisher: ( )
+[#]: url: ( )
+[#]: subject: (3 strategies to simplify complex networks)
+[#]: via: (https://www.networkworld.com/article/3438840/3-strategies-to-simplify-complex-networks.html)
+[#]: author: (Zeus Kerravala https://www.networkworld.com/author/Zeus-Kerravala/)
+
+3 strategies to simplify complex networks
+======
+Innovations such as SD-WAN, Wi-Fi 6 and 5G have enabled networks to do more, but they’ve also made them complex. Software, machine learning, and automation will alleviate the problem.
+Metamorworks / Getty Images
+
+As the cloud era meets the demands of digital transformation, networks must change. For enterprises, that means networks must become simpler, said Juniper CEO Rami Rahim, speaking at the company's annual industry analyst conference last week.
+
+The past five years have seen more innovation in networking than in the previous 30. Things such as [SD-WAN][1], multi-cloud, [Wi-Fi 6][2], [5G][3], 400 Gig, and [edge computing][4] are on the near-term horizon for almost every company. While all of those technologies have enabled the network to do so much more than ever before, their complexity has also risen.
+
+[Juniper CEO Rami Rahim][5] (image credit: Zeus Kerravala)
+
+Network engineers face the harsh reality that they are being tasked with working faster but also more accurately to cut down on unplanned downtime. Networks must become simpler to run, which actually requires more engineering from the vendor. Think of the iPhone. It’s so simple, my dad can use it without calling me every hour. Making it easy requires a tremendous amount of innovation from Apple to mask the underlying complexity.
+
+**[ Related: [What is 5G wireless? And how it will change networking as we know it][6] ]**
+
+### How to simplify networks
+
+Vendors can help make networks simpler by executing on the following:
+
+ * **Simplicity through software.** The pendulum has swung way too far on the “hardware doesn’t matter” theory. Of course it matters, particularly for networking where tasks such as deep-packet inspection, routing, and other functions are still best done in hardware. However, control and management of the hardware should be done in software because it can act as an abstraction layer for the underlying features in the actual boxes. For Juniper, Contrail Cloud and their software-delivered SD-WAN provide the centralized software overlay for simplified operations.
+ * **Machine learning-based operations.** Networks generate massive amounts of data that can be useful for operating the environment. The problem is that people can’t analyze the data fast enough to understand what it means – but machines can. This is where network professionals must be willing to cede some control to the computers. The purpose of machine learning isn’t to replace people, but to be a tool to let them work smarter and faster. [Juniper acquired Mist Systems earlier this year][7] to provide machine learning based operations to Wi-Fi, which is a great starting point because Wi-Fi troubleshooting is very difficult. Over time, I expect Mist’s benefit to be brought to the entire enterprise portfolio.
+ * **Vision of intent-based operations with purposeful automation.** The long-term goal of network operations is akin to a self-driving car where the network runs and secures itself. However, like with a self-driving car, the technology isn’t quite there yet.
In the auto industry, there are many automation features, such as parallel park assist and lane change alerts, that make drivers better. Similarly, network engineers can benefit by automating many of the mundane tasks associated with running a network, such as firmware upgrades, OS patching, and other things that need to be done but offer no strategic benefits.
+
+
+
+### To ASIC or not to ASIC
+
+As I mentioned, network hardware is still important. There’s currently a debate in the network industry as to whether companies like Juniper should be spinning their own silicon or leveraging merchant silicon. I believe ASICs allow vendors to bring new features to market faster than waiting for the silicon vendors to bake them into their chips. ASICs also give the network equipment manufacturer better control over product roadmaps.
+
+However, there is a wide range of silicon vendors that offer chips for a multitude of use cases that might be hard to replicate in custom chips. Also, some of the cloud providers know the specific feature set they are looking for and will dictate they want something like a Barefoot Tofino-based switch. In this case, merchant silicon would provide a time to market advantage over custom. But both approaches are viable as long as the vendor has a clear roadmap and strategy of how to take advantage of hardware and software.
+
+Historically, Juniper has done a great job using custom chips for competitive advantage, and I don’t see that changing. Buyers should not shy away from one approach or the other. Rather they should look at vendor roadmaps and choose the one that meets their needs best.
+
+There’s no shortage of innovation in networking today, but new features and functions without simplicity can wreak havoc on a network and make things worse. One of my general rules of thumb for IT projects is that the solutions must be simpler than the original issue, and that’s not often the case in networking. Simplifying the network through software, machine learning, and automation enables businesses to take advantage of the new features without the risk associated with complexity.
+
+**[ Learn more about SDN: Find out [where SDN is going][8] and learn the [difference between SDN and NFV][9]. | Get regularly scheduled insights by [signing up for Network World newsletters][10]. ]**
+
+Join the Network World communities on [Facebook][11] and [LinkedIn][12] to comment on topics that are top of mind.
+ +-------------------------------------------------------------------------------- + +via: https://www.networkworld.com/article/3438840/3-strategies-to-simplify-complex-networks.html + +作者:[Zeus Kerravala][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://www.networkworld.com/author/Zeus-Kerravala/ +[b]: https://github.com/lujun9972 +[1]: https://www.networkworld.com/article/3031279/sd-wan-what-it-is-and-why-you-ll-use-it-one-day.html +[2]: https://www.networkworld.com/article/3311921/wi-fi-6-is-coming-to-a-router-near-you.html +[3]: https://www.networkworld.com/article/3203489/what-is-5g-how-is-it-better-than-4g.html +[4]: https://www.networkworld.com/article/3224893/what-is-edge-computing-and-how-it-s-changing-the-network.html +[5]: https://images.idgesg.net/images/article/2019/09/juniper-ceo-rami-rahim-100811037-orig.jpg +[6]: https://www.networkworld.com/article/3203489/lan-wan/what-is-5g-wireless-networking-benefits-standards-availability-versus-lte.html +[7]: https://www.networkworld.com/article/3353042/juniper-grabs-mist-for-wireless-ai-cloud-service-delivery-technology.html +[8]: https://www.networkworld.com/article/3209131/lan-wan/what-sdn-is-and-where-its-going.html +[9]: https://www.networkworld.com/article/3206709/lan-wan/what-s-the-difference-between-sdn-and-nfv.html +[10]: https://www.networkworld.com/newsletters/signup.html +[11]: https://www.facebook.com/NetworkWorld/ +[12]: https://www.linkedin.com/company/network-world From facebddab15bdff51e535927d85c52c6c147b799 Mon Sep 17 00:00:00 2001 From: DarkSun Date: Tue, 17 Sep 2019 00:56:19 +0800 Subject: [PATCH 101/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190916=20How=20?= =?UTF-8?q?to=20freeze=20and=20lock=20your=20Linux=20system=20(and=20why?= =?UTF-8?q?=20you=20would=20want=20to)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/tech/20190916 How to freeze and lock your Linux system (and why you would want to).md --- ...inux system (and why you would want to).md | 103 ++++++++++++++++++ 1 file changed, 103 insertions(+) create mode 100644 sources/tech/20190916 How to freeze and lock your Linux system (and why you would want to).md diff --git a/sources/tech/20190916 How to freeze and lock your Linux system (and why you would want to).md b/sources/tech/20190916 How to freeze and lock your Linux system (and why you would want to).md new file mode 100644 index 0000000000..367113a47b --- /dev/null +++ b/sources/tech/20190916 How to freeze and lock your Linux system (and why you would want to).md @@ -0,0 +1,103 @@ +[#]: collector: (lujun9972) +[#]: translator: ( ) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (How to freeze and lock your Linux system (and why you would want to)) +[#]: via: (https://www.networkworld.com/article/3438818/how-to-freeze-and-lock-your-linux-system-and-why-you-would-want-to.html) +[#]: author: (Sandra Henry-Stocker https://www.networkworld.com/author/Sandra-Henry_Stocker/) + +How to freeze and lock your Linux system (and why you would want to) +====== +What it means to freeze a terminal window and lock a screen -- and how to manage these activities on your Linux system. +Sandra Henry-Stocker + +How you freeze and "thaw out" a screen on a Linux system depends a lot on what you mean by these terms. 
Sometimes “freezing a screen” might mean freezing a terminal window so that activity within that window comes to a halt. Sometimes it means locking your screen so that no one can walk up to your system when you're fetching another cup of coffee and type commands on your behalf.
+
+In this post, we'll examine how you can use and control these actions.
+
+**[ Two-Minute Linux Tips: [Learn how to master a host of Linux commands in these 2-minute video tutorials][1] ]**
+
+### How to freeze a terminal window on Linux
+
+You can freeze a terminal window on a Linux system by typing **Ctrl+S** (hold control key and press "s"). Think of the "s" as meaning "start the freeze". If you continue typing commands after doing this, you won't see the commands you type or the output you would expect to see. In fact, the commands will pile up in a queue and will be run only when you reverse the freeze by typing **Ctrl+Q**. Think of this as "quit the freeze".
+
+One easy way to view how this works is to use the date command and then type **Ctrl+S**. Then type the date command again and wait a few minutes before typing **Ctrl+Q**. You'll see something like this:
+
+```
+$ date
+Mon 16 Sep 2019 06:47:34 PM EDT
+$ date
+Mon 16 Sep 2019 06:49:49 PM EDT
+```
+
+The gap between the two times shown will indicate that the second date command wasn't run until you unfroze your window.
+
+Terminal windows can be frozen and unfrozen whether you're sitting at the computer screen or running remotely using a tool such as PuTTY.
+
+And here's a little trick that can come in handy. If you see that a terminal window appears to be inactive, one possibility is that you or someone else inadvertently typed **Ctrl+S**. In any case, entering **Ctrl+Q** just in case this resolves the problem is not a bad idea.
+
+### How to lock your screen
+
+To lock your screen before you leave your desk, either **Ctrl+Alt+L** or **Super+L** (i.e., holding down the Windows key and pressing L) should work. Once your screen is locked, you will have to enter your password to log back in.
+
+### Automatic screen locking on Linux systems
+
+While best practice suggests that you lock your screen whenever you are about to leave your desk, Linux systems usually automatically lock after a period of no activity. The timing for "blanking" a screen (making it go dark) and actually locking the screen (requiring a login to use it again) depends on settings that you can set to your personal preferences.
+
+To change how long it takes for your screen to go dark when using GNOME screensaver, open your settings window and select **Power** and then **Blank screen**. You can choose times between 1 and 15 minutes or never. To select how long after the blanking the screen locks, go to settings, select **Privacy** and then **Blank screen.** Settings should include 1, 2, 3, 5 and 30 minutes or one hour.
+
+### How to lock your screen from the command line
+
+If you are using Gnome screensaver, you can also lock the screen from the command line using this command:
+
+```
+gnome-screensaver-command -l
+```
+
+That's a lowercase L for "lock".
+
+### How to check your lockscreen state
+
+You can also use the gnome screensaver command to check whether your screen is locked. With the **\--query** option, the command tells you whether the screen is currently locked (i.e., active). With the **\--time** option, it tells you how long the lock has been in effect.
Here's a sample script:
+
+```
+#!/bin/bash
+
+gnome-screensaver-command --query
+gnome-screensaver-command --time
+```
+
+Running the script will show output like this:
+
+```
+$ ./check_lockscreen
+The screensaver is active
+The screensaver has been active for 1013 seconds.
+```
+
+#### Wrap-up
+
+Freezing your terminal window is easy if you remember the proper control sequences. For screen locking, how well it works depends on the controls you put in place for yourself or whether you're comfortable working with the defaults.
+
+**[ Also see: [Invaluable tips and tricks for troubleshooting Linux][2] ]**
+
+Join the Network World communities on [Facebook][3] and [LinkedIn][4] to comment on topics that are top of mind.
+
+--------------------------------------------------------------------------------
+
+via: https://www.networkworld.com/article/3438818/how-to-freeze-and-lock-your-linux-system-and-why-you-would-want-to.html
+
+作者:[Sandra Henry-Stocker][a]
+选题:[lujun9972][b]
+译者:[译者ID](https://github.com/译者ID)
+校对:[校对者ID](https://github.com/校对者ID)
+
+本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出

[a]: https://www.networkworld.com/author/Sandra-Henry_Stocker/
[b]: https://github.com/lujun9972
[1]: https://www.youtube.com/playlist?list=PL7D2RMSmRO9J8OTpjFECi8DJiTQdd4hua
[2]: https://www.networkworld.com/article/3242170/linux/invaluable-tips-and-tricks-for-troubleshooting-linux.html
[3]: https://www.facebook.com/NetworkWorld/
[4]: https://www.linkedin.com/company/network-world

From 8d2e85eba367a87f43fcd6e90d3688dfa78feda4 Mon Sep 17 00:00:00 2001
From: geekpi
Date: Tue, 17 Sep 2019 08:54:14 +0800
Subject: [PATCH 102/202] translated

---
 ... a Mail About New User Account Creation.md | 48 +++++++++----------
 1 file changed, 24 insertions(+), 24 deletions(-)
 rename {sources => translated}/tech/20190912 Bash Script to Send a Mail About New User Account Creation.md (53%)

diff --git a/sources/tech/20190912 Bash Script to Send a Mail About New User Account Creation.md b/translated/tech/20190912 Bash Script to Send a Mail About New User Account Creation.md
similarity index 53%
rename from sources/tech/20190912 Bash Script to Send a Mail About New User Account Creation.md
rename to translated/tech/20190912 Bash Script to Send a Mail About New User Account Creation.md
index e8e4d27a2c..ff0832ada1 100644
--- a/sources/tech/20190912 Bash Script to Send a Mail About New User Account Creation.md
+++ b/translated/tech/20190912 Bash Script to Send a Mail About New User Account Creation.md
@@ -7,40 +7,40 @@
 [#]: via: (https://www.2daygeek.com/linux-shell-script-to-monitor-user-creation-send-email/)
 [#]: author: (Magesh Maruthamuthu https://www.2daygeek.com/author/magesh/)
 
-Bash Script to Send a Mail About New User Account Creation
+用 Bash 脚本发送新用户帐户创建的邮件
 ======
 
-For some purposes you may need to keep track of new user creation details on Linux.
+出于某些原因,你可能需要跟踪 Linux 上的新用户创建信息。
 
-Also, you may need to send the details by mail.
+同时,你可能需要通过邮件发送详细信息。
 
-This may be part of the audit objective or the security team may wish to monitor this for the tracking purposes.
+这或许是审计目标的一部分,或者安全团队出于跟踪目的可能希望对此进行监控。
 
-We can do this in other way, as we have already described in the previous article.
+我们可以通过其他方式进行此操作,正如我们在上一篇文章中已经描述的那样。
 
- * **[Bash script to send a mail when new user account is created in system][1]**
+ * **[在系统中创建新用户帐户时发送邮件的 Bash 脚本][1]**
 
-There are many open source monitoring tools are available for Linux.
+Linux 有许多开源监控工具可以使用。 -But I don’t think they have a way to track the new user creation process and alert the administrator when that happens. +但我不认为他们有办法跟踪新用户创建过程,并在发生时提醒管理员。 -So how can we achieve this? +那么我们怎样才能做到这一点? -We can write our own Bash script to achieve this. +我们可以编写自己的 Bash 脚本来实现这一目标。 -We have added many useful shell scripts in the past. If you want to check them out, go to the link below. +我们过去写过许多有用的 shell 脚本。如果你想了解,请进入下面的链接。 - * **[How to automate day to day activities using shell scripts?][2]** + * **[如何使用 shell 脚本自动化日常活动?][2]** -### What does this script really do? +### 这个脚本做了什么? -This will take a backup of the “/etc/passwd” file twice a day (beginning of the day and end of the day), which will enable you to get new user creation details for the specified date. +这将每天两次(一天的开始和结束)备份 “/etc/passwd” 文件,这将使你能够获取指定日期的新用户创建详细信息。 -We need to add the below two cronjobs to copy the “/etc/passwd” file. +我们需要添加以下两个 cron 任务来复制 “/etc/passwd” 文件。 ``` # crontab -e @@ -49,13 +49,13 @@ We need to add the below two cronjobs to copy the “/etc/passwd” file. 59 23 * * * cp /etc/passwd /opt/scripts/passwd-end-$(date +"%Y-%m-%d") ``` -It uses the “difference” command to detect the difference between files, and if any difference is found to yesterday’s date, the script will send an email alert to the email id given with new user details. +它使用 “difference” 命令来检测文件之间的差异,如果发现与昨日有任何差异,脚本将向指定 email 发送新用户详细信息。 -We can’t run this script often because user creation is not happening frequently. However, we plan to run this script once a day. +我们不用经常运行此脚本,因为用户创建不经常发生。但是,我们计划每天运行一次此脚本。 -Therefore, you can get a consolidated report on new user creation. +这样,你可以获得有关新用户创建的综合报告。 -**Note:** We used our email id in the script for demonstration purpose. So we ask you to use your email id instead. +**注意:**我们在脚本中使用了我们的电子邮件地址进行演示。因此,我们要求你用自己的电子邮件地址。 ``` # vi /opt/scripts/new-user-detail.sh @@ -80,13 +80,13 @@ rm $MESSAGE fi ``` -Set an executable permission to "new-user-detail.sh" file. +给 “new-user-detail.sh” 文件添加可执行权限。 ``` $ chmod +x /opt/scripts/new-user-detail.sh ``` -Finally add a cronjob to automate this. It runs daily at 7AM. +最后添加一个 cron 任务来自动执行此操作。它在每天早上 7 点运行。 ``` # crontab -e @@ -94,9 +94,9 @@ Finally add a cronjob to automate this. It runs daily at 7AM. 0 7 * * * /bin/bash /opt/scripts/new-user.sh ``` -**Note:** You will receive an email alert at 7AM every day, which is for yesterday's date details. +**注意:**你会在每天早上 7 点都会收到一封关于昨日详情的邮件提醒。 -**Output:** The output will be the same as the one below. 
+**输出:**输出与下面的输出相同。 ``` # cat /tmp/new-user-logs.txt @@ -115,7 +115,7 @@ via: https://www.2daygeek.com/linux-shell-script-to-monitor-user-creation-send-e 作者:[Magesh Maruthamuthu][a] 选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) +译者:[geekpi](https://github.com/geekpi) 校对:[校对者ID](https://github.com/校对者ID) 本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 From 2196a98e32b7f1a73c26b3ad8d33321e6df60e19 Mon Sep 17 00:00:00 2001 From: geekpi Date: Tue, 17 Sep 2019 09:01:29 +0800 Subject: [PATCH 103/202] translating --- .../tech/20190913 An introduction to Virtual Machine Manager.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sources/tech/20190913 An introduction to Virtual Machine Manager.md b/sources/tech/20190913 An introduction to Virtual Machine Manager.md index 9c2ae81643..de43386f8f 100644 --- a/sources/tech/20190913 An introduction to Virtual Machine Manager.md +++ b/sources/tech/20190913 An introduction to Virtual Machine Manager.md @@ -1,5 +1,5 @@ [#]: collector: (lujun9972) -[#]: translator: ( ) +[#]: translator: (geekpi) [#]: reviewer: ( ) [#]: publisher: ( ) [#]: url: ( ) From 9fb21c16a95097c8cc0b27d63c7932a36f76c8a7 Mon Sep 17 00:00:00 2001 From: "Xingyu.Wang" Date: Tue, 17 Sep 2019 09:14:27 +0800 Subject: [PATCH 104/202] Rename sources/tech/20190915 Sandboxie-s path to-open source, update on the Pentagon-s open source initiative, open source in Hollywood,-and more.md to sources/news/20190915 Sandboxie-s path to-open source, update on the Pentagon-s open source initiative, open source in Hollywood,-and more.md --- ... open source initiative, open source in Hollywood,-and more.md | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename sources/{tech => news}/20190915 Sandboxie-s path to-open source, update on the Pentagon-s open source initiative, open source in Hollywood,-and more.md (100%) diff --git a/sources/tech/20190915 Sandboxie-s path to-open source, update on the Pentagon-s open source initiative, open source in Hollywood,-and more.md b/sources/news/20190915 Sandboxie-s path to-open source, update on the Pentagon-s open source initiative, open source in Hollywood,-and more.md similarity index 100% rename from sources/tech/20190915 Sandboxie-s path to-open source, update on the Pentagon-s open source initiative, open source in Hollywood,-and more.md rename to sources/news/20190915 Sandboxie-s path to-open source, update on the Pentagon-s open source initiative, open source in Hollywood,-and more.md From cd99570c31e071f9aa37fae9386034f84e07fbdc Mon Sep 17 00:00:00 2001 From: "Xingyu.Wang" Date: Tue, 17 Sep 2019 09:25:18 +0800 Subject: [PATCH 105/202] Rename sources/tech/20190916 Linux Plumbers, Appwrite, and more industry trends.md to sources/news/20190916 Linux Plumbers, Appwrite, and more industry trends.md --- ...20190916 Linux Plumbers, Appwrite, and more industry trends.md | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename sources/{tech => news}/20190916 Linux Plumbers, Appwrite, and more industry trends.md (100%) diff --git a/sources/tech/20190916 Linux Plumbers, Appwrite, and more industry trends.md b/sources/news/20190916 Linux Plumbers, Appwrite, and more industry trends.md similarity index 100% rename from sources/tech/20190916 Linux Plumbers, Appwrite, and more industry trends.md rename to sources/news/20190916 Linux Plumbers, Appwrite, and more industry trends.md From 8c868824c074d73c6204b50ce9baa83fc3f456d1 Mon Sep 17 00:00:00 2001 From: Xingyu Wang Date: Tue, 17 Sep 2019 10:42:48 
+0800
Subject: [PATCH 106/202] APL

---
 ...pen source initiative, open source in Hollywood,-and more.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sources/news/20190915 Sandboxie-s path to-open source, update on the Pentagon-s open source initiative, open source in Hollywood,-and more.md b/sources/news/20190915 Sandboxie-s path to-open source, update on the Pentagon-s open source initiative, open source in Hollywood,-and more.md
index f7ea0ed99b..fc01b9d200 100644
--- a/sources/news/20190915 Sandboxie-s path to-open source, update on the Pentagon-s open source initiative, open source in Hollywood,-and more.md
+++ b/sources/news/20190915 Sandboxie-s path to-open source, update on the Pentagon-s open source initiative, open source in Hollywood,-and more.md
@@ -1,5 +1,5 @@
 [#]: collector: (lujun9972)
-[#]: translator: ( )
+[#]: translator: (wxy)
 [#]: reviewer: ( )
 [#]: publisher: ( )
 [#]: url: ( )

From 9942bc662eba0e417be8ed5d53c893b6607e1b86 Mon Sep 17 00:00:00 2001
From: DarkSun
Date: Tue, 17 Sep 2019 12:17:51 +0800
Subject: [PATCH 107/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190804=20Learn?=
 =?UTF-8?q?=20how=20to=20Install=20LXD=20/=20LXC=20Containers=20in=20Ubunt?=
 =?UTF-8?q?u?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

sources/tech/20190804 Learn how to Install LXD - LXC Containers in Ubuntu.md
---
 ... Install LXD - LXC Containers in Ubuntu.md | 508 ++++++++++++++++++
 1 file changed, 508 insertions(+)
 create mode 100644 sources/tech/20190804 Learn how to Install LXD - LXC Containers in Ubuntu.md

diff --git a/sources/tech/20190804 Learn how to Install LXD - LXC Containers in Ubuntu.md b/sources/tech/20190804 Learn how to Install LXD - LXC Containers in Ubuntu.md
new file mode 100644
index 0000000000..b4e1a2667b
--- /dev/null
+++ b/sources/tech/20190804 Learn how to Install LXD - LXC Containers in Ubuntu.md
@@ -0,0 +1,508 @@
+[#]: collector: (lujun9972)
+[#]: translator: ( )
+[#]: reviewer: ( )
+[#]: publisher: ( )
+[#]: url: ( )
+[#]: subject: (Learn how to Install LXD / LXC Containers in Ubuntu)
+[#]: via: (https://www.linuxtechi.com/install-lxd-lxc-containers-from-scratch/)
+[#]: author: (Shashidhar Soppin https://www.linuxtechi.com/author/shashidhar/)
+
+Learn how to Install LXD / LXC Containers in Ubuntu
+======
+
+Let me start by explaining what a container is: it is a normal process on the host machine (any Linux-based machine) with the following characteristics,
+
+ * It feels like a VM, but it is not.
+ * Uses the host Kernel.
+ * Cannot boot a different Operating System.
+ * Can’t have its own modules.
+ * Does not need **init** running as PID (process ID) 1.
+
+
+
+[![Learn-LXD-LXC-Containers][1]][2]
+
+LXC (**LinuX Containers**) was developed long ago and is an Operating System-level virtualization technology. It has existed since the days of BSD and System-V Release 4 (popular Unix flavors during the 1980-90s). But until recently, no one knew how much it could help us save in terms of resource utilization. Because of this technology change, all enterprises are moving towards the adoption of virtualization (be it the cloud or Docker containers). This also helped in better management of **OpEx (operational expenditure)** and **CapEx (capital expenditure)** costs. Using this technique, we can create and run multiple and isolated Linux virtual environments on a single Linux host machine (called the control host).
+
+LXC mainly uses Linux’s cgroups and namespaces functionalities, which have been available since kernel version 2.6.24. In parallel, many advancements happened in hypervisors, such as **KVM**, **QEMU**, **Hyper-V**, **ESXi** etc. KVM (Kernel-based Virtual Machine), which is part of the Linux kernel itself, especially helped in this kind of advancement.
+
+The difference between LXC and LXD is that LXC is the original, older way to manage containers, and it is still supported; all LXC commands start with “**lxc-**”, like “**lxc-create**” and “**lxc-info**”. LXD is the newer way to manage containers, and its single “lxc” command is used for all container operations and management.
+
+All of us know that “**Docker**” utilizes LXC and was developed using the Go language, cgroups, namespaces and finally the Linux kernel itself. Docker was originally built and developed using LXC as its basic foundation block. Docker is completely dependent on the underlying infrastructure & hardware, using the operating system as the medium. However, Docker is a portable and easily deployable container engine; all its dependencies run inside a virtual container on most Linux-based servers. Cgroups and namespaces are the building-block concepts for both LXC and Docker containers. Following is a brief description of these concepts.
+
+### C Groups (Control Groups)
+
+With cgroups, each resource has its own hierarchy.
+
+ * CPU, memory, I/O etc. each have their own control group hierarchy. The main characteristics of cgroups are:
+ * Each process belongs to a node in each hierarchy
+ * Each hierarchy starts with one node
+ * Initially all processes start at the root node. Therefore “each node” is equivalent to “a group of processes”.
+ * Hierarchies are independent, e.g. CPU, block I/O, memory etc.
+
+
+
+As explained earlier, there are various cgroup types, as listed below:
+
+1) **Memory Cgroups**
+
+a) Keeps track of pages used by each group.
+
+b) File reads/writes/mmaps from block devices
+
+c) Anonymous memory (stack, heap etc.)
+
+d) Each memory page is charged to a group
+
+e) Pages can be shared across multiple groups
+
+2) **CPU Cgroups**
+
+a) Tracks user/system CPU time
+
+b) Tracks usage per CPU
+
+c) Allows setting weights
+
+d) Can’t set hard CPU limits
+
+3) **Block IO Cgroup**
+
+a) Keeps track of reads/writes (I/Os)
+
+b) Sets throttles (limits) for each group (per block device)
+
+c) Sets relative weights for each group (per block device)
+
+4) **Devices Cgroup**
+
+a) Controls what the group can do on device nodes
+
+b) Permissions include read/write/mknod
+
+5) **Freezer Cgroup**
+
+a) Allows freezing/thawing a group of processes
+
+b) Similar to SIGSTOP/SIGCONT
+
+c) Cannot be detected by the processes themselves
+
+### NameSpaces
+
+Namespaces provide processes with their own view of the system. Each process is in one namespace of each type.
+
+There are multiple namespace types:
+
+ * PID – Processes within a PID namespace only see processes in the same PID namespace
+ * Net – Processes within a given network namespace get their own private network stack.
+ * Mnt – Processes can have their own “root” and private “mount” points.
+ * Uts – Gives the container its own hostname
+ * IPC – Allows processes to have their own IPC semaphores, IPC message queues and shared memory
+ * USR – Allows mapping of UIDs/GIDs
+
+
+
+### Installation and configuration of LXD containers
+
+To install LXD on an Ubuntu system (18.04 LTS), start with the apt commands below:
+
+```
+root@linuxtechi:~$ sudo apt update
+root@linuxtechi:~$ sudo apt install lxd -y
+```
+
+Once LXD is installed, initialize it as below (most of the time the default options are fine):
+
+```
+root@linuxtechi:~$ sudo lxd init
+```
+
+![lxc-init-ubuntu-system][1]
+
+Once LXD is initialized successfully, run the command below to verify the setup information:
+
+```
+root@linuxtechi:~$ sudo lxc info | more
+```
+
+![lxc-info-command][1]
+
+Use the command below to check whether any container images have been downloaded to our host (on a fresh install the list is empty):
+
+```
+root@linuxtechi:~$ sudo lxc image list
++-------+-------------+--------+-------------+------+------+-------------+
+| ALIAS | FINGERPRINT | PUBLIC | DESCRIPTION | ARCH | SIZE | UPLOAD DATE |
++-------+-------------+--------+-------------+------+------+-------------+
+root@linuxtechi:~$
+```
+
+A quick and easy way to start your first container on Ubuntu 18.04 (or any supported Ubuntu flavor) is the following command; the container name we have provided is “shashi”:
+
+```
+root@linuxtechi:~$ sudo lxc launch ubuntu:18.04 shashi
+Creating shashi
+Starting shashi
+root@linuxtechi:~$
+```
+
+To list the LXC containers that are on the system:
+
+```
+root@linuxtechi:~$ sudo lxc list
++--------+---------+-----------------------+-----------------------------------------------+------------+-----------+
+| NAME | STATE | IPV4 | IPV6 | TYPE | SNAPSHOTS |
++--------+---------+-----------------------+-----------------------------------------------+------------+-----------+
+| shashi | RUNNING | 10.122.140.140 (eth0) | fd42:49da:7c44:cebe:216:3eff:fea4:ea06 (eth0) | PERSISTENT | 0 |
++--------+---------+-----------------------+-----------------------------------------------+------------+-----------+
+root@linuxtechi:~$
+```
+
+Other container management commands for LXD are listed below:
+
+**Note:** In the examples below, shashi is my container name.
+
+**How to get a bash shell in your LXD container?**
+
+```
+root@linuxtechi:~$ sudo lxc exec shashi bash
+root@linuxtechi:~#
+```
+
+**How to stop, start & restart an LXD container?**
+
+```
+root@linuxtechi:~$ sudo lxc stop shashi
+root@linuxtechi:~$ sudo lxc list
++--------+---------+------+------+------------+-----------+
+| NAME | STATE | IPV4 | IPV6 | TYPE | SNAPSHOTS |
++--------+---------+------+------+------------+-----------+
+| shashi | STOPPED | | | PERSISTENT | 0 |
++--------+---------+------+------+------------+-----------+
+root@linuxtechi:~$
+root@linuxtechi:~$ sudo lxc start shashi
+root@linuxtechi:~$ sudo lxc restart shashi
+```
+
+**How to delete an LXD container?**
+
+```
+root@linuxtechi:~$ sudo lxc stop shashi
+root@linuxtechi:~$ sudo lxc delete shashi
+root@linuxtechi:~$ sudo lxc list
++------+-------+------+------+------+-----------+
+| NAME | STATE | IPV4 | IPV6 | TYPE | SNAPSHOTS |
++------+-------+------+------+------+-----------+
+root@linuxtechi:~$
+```
+
+**How to take a snapshot of an LXD container and then restore it?**
+
+Let’s assume we have a pkumar container based on the centos7 image. To take a snapshot, use the following:
+
+```
+root@linuxtechi:~$ sudo lxc snapshot pkumar pkumar_snap0
+```
+
+Use the command below to verify the snapshot:
+
+```
+root@linuxtechi:~$ sudo lxc info pkumar | grep -i Snapshots -A2
+Snapshots:
+ pkumar_snap0 (taken at 2019/08/02 19:39 UTC) (stateless)
+root@linuxtechi:~$
+```
+
+Use the command below to restore an LXD container from a snapshot.
+
+Syntax:
+
+$ lxc restore {container_name} {snapshot_name}
+
+```
+root@linuxtechi:~$ sudo lxc restore pkumar pkumar_snap0
+root@linuxtechi:~$
+```
+
+**How to delete an LXD container snapshot?**
+
+```
+$ sudo lxc delete <container_name>/<snapshot_name>
+```
+
+**How to set memory, CPU and disk limits on an LXD container?**
+
+Syntax to set a memory limit:
+
+# lxc config set <container_name> limits.memory <Memory_Size>KB/MB/GB
+
+Syntax to set a CPU limit:
+
+# lxc config set <container_name> limits.cpu {Number_of_CPUs}
+
+Syntax to set a disk limit:
+
+# lxc config device set <container_name> root size <Size_MB/GB>
+
+**Note:** Setting a disk limit requires a btrfs or ZFS storage backend.
+
+Let’s set limits on memory and CPU for the container shashi using the following commands:
+
+```
+root@linuxtechi:~$ sudo lxc config set shashi limits.memory 256MB
+root@linuxtechi:~$ sudo lxc config set shashi limits.cpu 2
+```
+
+### Install and configure LXC container (commands and operations)
+
+To install LXC on your Ubuntu system, use the apt command below:
+
+```
+root@linuxtechi:~$ sudo apt install lxc -y
+```
+
+In earlier versions of LXC, the command “**lxc-clone**” was used for cloning; it was later deprecated, and now the “**lxc-copy**” command is widely used for cloning operations.
+
+**Note:** To get the “lxc-copy” command working, install the following package:
+
+```
+root@linuxtechi:~$ sudo apt install lxc1 -y
+```
+
+**Creating Linux Containers using the templates**
+
+LXC provides ready-made templates for easy installation of Linux containers. The templates are usually found under /usr/share/lxc/templates, but a fresh installation does not include them, so download them to your local system with the command below:
+
+```
+root@linuxtechi:~$ sudo apt install lxc-templates -y
+```
+
+Once lxc-templates is installed successfully, the templates will be available:
+
+```
+root@linuxtechi:~$ sudo ls /usr/share/lxc/templates/
+lxc-alpine lxc-centos lxc-fedora lxc-oci lxc-plamo lxc-sparclinux lxc-voidlinux
+lxc-altlinux lxc-cirros lxc-fedora-legacy lxc-openmandriva lxc-pld lxc-sshd
+lxc-archlinux lxc-debian lxc-gentoo lxc-opensuse lxc-sabayon lxc-ubuntu
+lxc-busybox lxc-download lxc-local lxc-oracle lxc-slackware lxc-ubuntu-cloud
+root@linuxtechi:~$
+```
+
+Let’s launch a container using a template.
+
+Syntax: lxc-create -n <container_name> -t <template_name>
+
+```
+root@linuxtechi:~$ sudo lxc-create -n shashi_lxc -t ubuntu
+………………………
+invoke-rc.d: could not determine current runlevel
+invoke-rc.d: policy-rc.d denied execution of start.
+Current default time zone: 'Etc/UTC'
+Local time is now: Fri Aug 2 11:46:42 UTC 2019.
+Universal Time is now: Fri Aug 2 11:46:42 UTC 2019.
+
+##
+# The default user is 'ubuntu' with password 'ubuntu'!
+# Use the 'sudo' command to run tasks as root in the container.
+##
+………………………………………
+root@linuxtechi:~$
+```
+
+Once the container has been created from the template, we can log in to its console using the following steps:
+
+```
+root@linuxtechi:~$ sudo lxc-start -n shashi_lxc -d
+root@linuxtechi:~$ sudo lxc-console -n shashi_lxc
+
+Connected to tty 1
+Type <Ctrl+a q> to exit the console, <Ctrl+a Ctrl+a> to enter Ctrl+a itself
+
+Ubuntu 18.04.2 LTS shashi_lxc pts/0
+
+shashi_lxc login: ubuntu
+Password:
+Last login: Fri Aug 2 12:00:35 UTC 2019 on pts/0
+Welcome to Ubuntu 18.04.2 LTS (GNU/Linux 4.15.0-20-generic x86_64)
+To run a command as administrator (user "root"), use "sudo <command>".
+See "man sudo_root" for details.
+
+root@linuxtechi_lxc:~$ free -h
+ total used free shared buff/cache available
+Mem: 3.9G 23M 3.8G 112K 8.7M 3.8G
+Swap: 1.9G 780K 1.9G
+root@linuxtechi_lxc:~$ grep -c processor /proc/cpuinfo
+1
+root@linuxtechi_lxc:~$ df -h /
+Filesystem Size Used Avail Use% Mounted on
+/dev/sda1 40G 7.4G 31G 20% /
+root@linuxtechi_lxc:~$
+```
+
+Now log out or exit from the container and go back to the host machine’s login window. With the lxc-ls command we can see that the shashi_lxc container has been created:
+
+```
+root@linuxtechi:~$ sudo lxc-ls
+shashi_lxc
+root@linuxtechi:~$
+```
+
+The “**lxc-ls -f**” command provides details including the IP address of the container:
+
+```
+root@linuxtechi:~$ sudo lxc-ls -f
+NAME STATE AUTOSTART GROUPS IPV4 IPV6 UNPRIVILEGED
+shashi_lxc RUNNING 0 - 10.0.3.190 - false
+root@linuxtechi:~$
+```
+
+The “**lxc-info -n <container_name>**” command provides all the required details, including state and IP address:
+
+```
+root@linuxtechi:~$ sudo lxc-info -n shashi_lxc
+Name: shashi_lxc
+State: RUNNING
+PID: 6732
+IP: 10.0.3.190
+CPU use: 2.38 seconds
+BlkIO use: 240.00 KiB
+Memory use: 27.75 MiB
+KMem use: 5.04 MiB
+Link: vethQ7BVGU
+ TX bytes: 2.01 KiB
+ RX bytes: 9.52 KiB
+ Total bytes: 11.53 KiB
+root@linuxtechi:~$
+```
+
+**How to start, stop, restart and delete LXC containers**
+
+```
+$ lxc-start -n <container_name>
+$ lxc-stop -n <container_name>
+$ lxc-destroy -n <container_name>
+```
+
+**LXC Cloning operation**
+
+Now for the main cloning operation on the LXC container. As described earlier, LXC offers a feature for cloning a new container from an existing one; we clone the existing “shashi_lxc” container to a new container “shashi_lxc_clone” by running the following commands.
+
+**Note:** Before starting the cloning operation, we have to make sure the source container is stopped, using the “**lxc-stop**” command:
+
+```
+root@linuxtechi:~$ sudo lxc-stop -n shashi_lxc
+root@linuxtechi:~$ sudo lxc-copy -n shashi_lxc -N shashi_lxc_clone
+root@linuxtechi:~$ sudo lxc-ls
+shashi_lxc shashi_lxc_clone
+root@linuxtechi:~$
+```
+
+Now start the cloned container:
+
+```
+root@linuxtechi:~$ sudo lxc-start -n shashi_lxc_clone
+root@linuxtechi:~$ sudo lxc-ls -f
+NAME STATE AUTOSTART GROUPS IPV4 IPV6 UNPRIVILEGED
+shashi_lxc STOPPED 0 - - - false
+shashi_lxc_clone RUNNING 0 - 10.0.3.201 - false
+root@linuxtechi:~$
+```
+
+With the above set of commands, the cloning operation is done and the new clone “shashi_lxc_clone” has been created.
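+
+A related aside: besides plain copies, “lxc-copy” can also create an ephemeral clone with its -e option (see the lxc-copy man page); such a clone is destroyed again when it shuts down, which is handy for quick experiments. A minimal sketch, using this article’s container names:
+
+```
+# Create and start a throwaway, ephemeral copy of shashi_lxc; it is
+# removed automatically once it is stopped.
+sudo lxc-copy -n shashi_lxc -N shashi_lxc_tmp -e
+```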
+
+We can log in to this container’s console with the steps below:
+
+```
+root@linuxtechi:~$ sudo lxc-console -n shashi_lxc_clone
+
+Connected to tty 1
+Type <Ctrl+a q> to exit the console, <Ctrl+a Ctrl+a> to enter Ctrl+a itself
+Ubuntu 18.04.2 LTS shashi_lxc pts/0
+
+shashi_lxc login:
+```
+
+**LXC Network configuration and commands**
+
+We can attach to the newly created container, but to remotely log in to it using SSH or any other means, we have to make some minimal configuration changes, as explained below:
+
+```
+root@linuxtechi:~$ sudo lxc-attach -n shashi_lxc_clone
+root@linuxtechi_lxc:/#
+root@linuxtechi_lxc:/# useradd -m shashi
+root@linuxtechi_lxc:/# passwd shashi
+Enter new UNIX password:
+Retype new UNIX password:
+passwd: password updated successfully
+root@linuxtechi_lxc:/#
+```
+
+First, install the SSH server inside the container using the following command so that a smooth “ssh” connection can be established:
+
+```
+root@linuxtechi_lxc:/# apt install openssh-server -y
+```
+
+Now get the IP address of the container using the following command:
+
+```
+root@linuxtechi_lxc:/# ip addr show eth0|grep inet
+ inet 10.0.3.201/24 brd 10.0.3.255 scope global dynamic eth0
+ inet6 fe80::216:3eff:fe82:e251/64 scope link
+root@linuxtechi_lxc:/#
+```
+
+From a new console window on the host machine, use the following command to connect to this container over SSH:
+
+```
+root@linuxtechi:~$ ssh 10.0.3.201
+root@linuxtechi's password:
+$
+```
+
+Now we have logged in to the container over an SSH session.
+
+**LXC process related commands**
+
+```
+root@linuxtechi:~$ ps aux|grep lxc|grep -v grep
+```
+
+![lxc-process-ubuntu-system][1]
+
+**LXC snapshot operation**
+
+Snapshotting takes a point-in-time snapshot of an LXC container; these snapshot images can then be restored or reused later:
+
+```
+root@linuxtechi:~$ sudo lxc-stop -n shashi_lxc
+root@linuxtechi:~$ sudo lxc-snapshot -n shashi_lxc
+root@linuxtechi:~$
+```
+
+The snapshot path can be located using the following command:
+
+```
+root@linuxtechi:~$ sudo lxc-snapshot -L -n shashi_lxc
+snap0 (/var/lib/lxc/shashi_lxc/snaps) 2019:08:02 20:28:49
+root@linuxtechi:~$
+```
+
+**Conclusion:**
+
+LXC, LinuX Containers, is one of the earliest container technologies. Understanding the concepts behind LXC will help you gain a deeper understanding of any other container technology, such as Docker containers. This article has also provided insights into cgroups and namespaces, which are essential concepts for a good understanding of containers in general. Many LXC operations, such as cloning, snapshotting and network configuration, are covered with command-line examples.
+ +-------------------------------------------------------------------------------- + +via: https://www.linuxtechi.com/install-lxd-lxc-containers-from-scratch/ + +作者:[Shashidhar Soppin][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://www.linuxtechi.com/author/shashidhar/ +[b]: https://github.com/lujun9972 +[1]: data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7 +[2]: https://www.linuxtechi.com/wp-content/uploads/2019/08/Learn-LXD-LXC-Containers.jpg From 60b3a12a01c84b1fab69adcda0c17c8c79cba788 Mon Sep 17 00:00:00 2001 From: DarkSun Date: Tue, 17 Sep 2019 12:22:35 +0800 Subject: [PATCH 108/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190913=20Taking?= =?UTF-8?q?=20a=20year=20to=20explain=20computer=20things?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/tech/20190913 Taking a year to explain computer things.md --- ...aking a year to explain computer things.md | 63 +++++++++++++++++++ 1 file changed, 63 insertions(+) create mode 100644 sources/tech/20190913 Taking a year to explain computer things.md diff --git a/sources/tech/20190913 Taking a year to explain computer things.md b/sources/tech/20190913 Taking a year to explain computer things.md new file mode 100644 index 0000000000..43dae546ad --- /dev/null +++ b/sources/tech/20190913 Taking a year to explain computer things.md @@ -0,0 +1,63 @@ +[#]: collector: (lujun9972) +[#]: translator: ( ) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (Taking a year to explain computer things) +[#]: via: (https://jvns.ca/blog/2019/09/13/a-year-explaining-computer-things/) +[#]: author: (Julia Evans https://jvns.ca/) + +Taking a year to explain computer things +====== + +I’ve been working on explaining computer things I’m learning on this blog for 6 years. I wrote one of my first posts, [what does a shell even do?][1] on Sept 30, 2013. Since then, I’ve written 11 zines, 370,000 words on this blog, and given 20 or so talks. So it seems like I like explaining things a lot. + +### tl;dr: I’m going to work on explaining computer things for a year + +Here’s the exciting news: I left my job a month ago and my plan is to spend the next year working on explaining computer things! + +As for why I’m doing this – I was talking through some reasons with my friend Mat last night and he said “well, sometimes there are things you just feel compelled to do”. I think that’s all there is to it :) + +### what does “explain computer things” mean? + +I’m planning to: + + 1. write some more zines (maybe I can write 10 zines in a year? we’ll see! I want to tackle both general-interest and slightly more niche topics, we’ll see what happens). + 2. work on some more interactive ways to learn things. I learn things best by trying things out and breaking them, so I want to see if I can facilitate that a little bit for other people. I started a project around this in May which has been on the backburner for a bit but which I’m excited about. Hopefully I’ll release it soon and then you can try it out and tell me what you think! + + + +I say “a year” because I think I have at least a year’s worth of ideas and I can’t predict how I’ll feel after doing this for a year. + +### how: run a business + +I started a corporation almost exactly a year ago, and I’m planning to keep running my explaining-things efforts as a business. 
This business has been making more than I made in my first programming job (that is, definitely enough money to live on!), which has been really surprising and great (thank you!).
+
+some parameters of the business:
+
+ * I’m not planning to hire employees or anything, it’ll just be me and some (awesome) freelancers. The biggest change I have in mind is that I’m hoping to find a freelance editor to help me with editing.
+ * I also don’t have any specific plans for world domination or to work 80-hour weeks. I’m just going to make zines & things that explain computer concepts and sell them on the internet, like I’ve been doing.
+ * No commissions or consulting work, just building ideas I have
+
+
+
+It’s been pretty interesting to learn more about running a small business and so far I like it more than I thought I would. (except for taxes, which I like exactly as much as I thought I would)
+
+### that’s all!
+
+I’m excited to keep making explanations of computer things and to have more time to do it. This blog might change a bit away from “here’s what I’m learning at work these days” and towards “here are attempts at explaining things that I mostly already know”. It’ll be different! We’ll see how it goes!
+
+--------------------------------------------------------------------------------
+
+via: https://jvns.ca/blog/2019/09/13/a-year-explaining-computer-things/
+
+作者:[Julia Evans][a]
+选题:[lujun9972][b]
+译者:[译者ID](https://github.com/译者ID)
+校对:[校对者ID](https://github.com/校对者ID)
+
+本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
+
+[a]: https://jvns.ca/
+[b]: https://github.com/lujun9972
+[1]: https://jvns.ca/blog/2013/09/30/hacker-school-day-2-what-does-a-shell-even-do/

From a1525f92f9e259faf3c29f0c25f66bdb5d84bc01 Mon Sep 17 00:00:00 2001
From: DarkSun
Date: Tue, 17 Sep 2019 12:22:48 +0800
Subject: [PATCH 109/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190912=20New=20?=
 =?UTF-8?q?zine:=20HTTP:=20Learn=20your=20browser's=20language!?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

sources/tech/20190912 New zine- HTTP- Learn your browser-s language.md
---
 ...ne- HTTP- Learn your browser-s language.md | 197 ++++++++++++++++++
 1 file changed, 197 insertions(+)
 create mode 100644 sources/tech/20190912 New zine- HTTP- Learn your browser-s language.md

diff --git a/sources/tech/20190912 New zine- HTTP- Learn your browser-s language.md b/sources/tech/20190912 New zine- HTTP- Learn your browser-s language.md
new file mode 100644
index 0000000000..85e3a6428a
--- /dev/null
+++ b/sources/tech/20190912 New zine- HTTP- Learn your browser-s language.md
@@ -0,0 +1,197 @@
+[#]: collector: (lujun9972)
+[#]: translator: ( )
+[#]: reviewer: ( )
+[#]: publisher: ( )
+[#]: url: ( )
+[#]: subject: (New zine: HTTP: Learn your browser's language!)
+[#]: via: (https://jvns.ca/blog/2019/09/12/new-zine-on-http/)
+[#]: author: (Julia Evans https://jvns.ca/)
+
+New zine: HTTP: Learn your browser's language!
+======
+
+Hello! I’ve released a new zine! It’s called “HTTP: Learn your browser’s language!”
+
+You can get it for $12 at [gum.co/http-zine][2]. If you buy it, you’ll get a PDF that you can either read on your computer or print out.
+
+Here’s the cover and table of contents:
+
+[![][1]][2]
+
+### why http?
+
+I got the idea for this zine from talking to [Marco Rogers][3] – he mentioned that he thought that new web developers / mobile developers would really benefit from understanding the fundamentals of HTTP better. I thought “OOH I LOVE TALKING ABOUT HTTP”, wrote a few pages about HTTP, saw they were helping people, and decided to write a whole zine about HTTP.
+
+HTTP is important to understand because it runs the entire web – if you understand how HTTP requests and responses work, then it makes it WAY EASIER to debug why your web application isn’t working properly. Caching, cookies, and a lot of web security are implemented using HTTP headers, so if you don’t understand HTTP headers those things seem kind of like impenetrable magic. But actually the HTTP protocol is fundamentally pretty simple – there are a lot of complicated details but the basics are pretty easy to understand.
+
+So the goal of this zine is to teach you the basics so you can easily look up and understand the details when you need them.
+
+### what it looks like printed out
+
+All of my zines are best printed out (though you get a PDF you can read on your computer too!), so here are a couple of pictures of what it looks like when printed. I always ask my illustrator to make both a black and white version and a colour version of the cover so that it looks great when printed on a black and white printer.
+
+[![][4]][2]
+
+(if you click on that “same origin policy” image, you can make it bigger)
+
+The zine comes with 4 print PDFs in addition to a PDF you can just read on your computer/phone:
+
+ * letter / colour
+ * letter / b&w
+ * a4 / colour
+ * a4 / b&w
+
+
+
+### zines for your team
+
+You can also buy this zine for your team members at work to help them learn HTTP!
+
+I’ve been trying to get the pricing right for this for a while – I used to do it based on the size of the company, but that didn’t seem quite right because sometimes people would want to buy the zine for a small team at a big company. So I’ve switched to pricing based on the number of copies you want to distribute at your company.
+
+Here’s the link: [zines for your team!][5].
+
+### the tweets
+
+When I started writing zines, I would just sit down, write down the things I thought were important, and be done with it.
+
+In the last year and a half or so I’ve taken a different approach – instead of writing everything and then releasing it, I write a page at a time, post the page to Twitter, and then improve it and decide what page to write next based on the questions/comments I get on Twitter. If someone replies to the tweet and asks a question that shows that what I wrote is unclear, I can improve it! (I love getting replies on twitter asking clarifying questions!).
+
+Here are all the initial drafts of the pages I wrote and posted on twitter, in chronological order. Some of the pages didn’t make it into the zine at all, and I needed to do a lot of editing at the end to figure out the right order and make them all work coherently together in a zine instead of being a bunch of independent tweets.
+
+ * Jun 30: [anatomy of a HTTP request][13]
+ * Jul 1: [http status codes][6]
+ * Jul 2: [anatomy of a HTTP response][7]
+ * Jul 2: [POST requests][8]
+ * Jul 2: [an example POST request][9]
+ * Jul 28: [the same origin policy][10]
+ * Jul 28: [what’s HTTP?][11]
+ * Jul 30: [the most important HTTP request headers][12]
+ * Aug 4: [content delivery networks][14]
+ * Aug 6: [caching headers][15]
+ * Aug 6: [how cookies work][16]
+ * Aug 7: [redirects][17]
+ * Aug 8: [45 seconds on the Accept-Language HTTP header][18]
+ * Aug 9: [HTTPS: HTTP + security][19]
+ * Aug 9: [today in 45 second video experiments: the Range header][20]
+ * Aug 9: [some HTTP exercises to try][21]
+ * Aug 10: [some security headers][22]
+ * Aug 12: [using HTTP APIs][23]
+ * Aug 13: [what’s with those headers that start with x-?][24]
+ * Aug 13: [important HTTP response headers][25]
+ * Aug 14: [HTTP request methods (part 1)][26]
+ * Aug 14: [HTTP request methods (part 2)][27]
+ * Aug 15: [how URLs work][28]
+ * Aug 16: [CORS][29]
+ * Aug 19: [why the same origin policy matters][30]
+ * Aug 21: [HTTP headers][31]
+ * Aug 24: [how to learn more about HTTP][32]
+ * Aug 25: [HTTP/2][33]
+ * Aug 27: [certificates][34]
+
+
+
+Writing zines one tweet at a time has been really fun. I think it improves the quality a lot, because I get a ton of feedback along the way that I can use to make the zine better. There are also some experimental 45 second tiny videos in that list, which are definitely not part of the zine, but which were fun to make and which I might expand on in the future.
+
+### examplecat.com
+
+One tiny easter egg in the zine: I have a lot of examples of HTTP requests, and I wasn’t sure for a long time what domain I should use for the examples. I used example.com a bunch, and google.com and twitter.com sometimes, but none of those felt quite right.
+
+A couple of days before publishing the zine I finally had an epiphany – my example on the cover was requesting a picture of a cat, so I registered examplecat.com, which just has a single picture of a cat. It also has an ASCII cat if you’re browsing in your terminal.
+
+```
+$ curl https://examplecat.com/cat.txt -i
+HTTP/2 200
+accept-ranges: bytes
+cache-control: public, max-age=0, must-revalidate
+content-length: 33
+content-type: text/plain; charset=UTF-8
+date: Thu, 12 Sep 2019 16:48:16 GMT
+etag: "ac5affa59f554a1440043537ae973790-ssl"
+strict-transport-security: max-age=31536000
+age: 5
+server: Netlify
+x-nf-request-id: c5060abc-0399-4b44-94bf-c481e22c2b50-1772748
+
+\ /\
+ ) ( ')
+( / )
+ \(__)|
+```
+
+### more zines at wizardzines.com
+
+If you’re interested in the idea of programming zines and haven’t seen my zines before, I have a bunch more at wizardzines.com. There are 6 free zines there:
+
+ * [so you want to be a wizard][35]
+ * [let’s learn tcpdump!][36]
+ * [spying on your programs with strace][37]
+ * [networking! ACK!][38]
+ * [linux debugging tools you’ll love][39]
+ * [profiling and tracing with perf][40]
+
+
+
+### next zine: not sure yet!
+
+Some things I’m considering for the next zine:
+
+ * debugging skills (I started writing a bunch of pages about debugging but switched gears to the HTTP zine because I got really excited about that. but debugging is my favourite thing so I’d like to get this done at some point)
+ * gdb (a short zine in the spirit of [let’s learn tcpdump][36])
+ * relational databases (what’s up with transactions?)
+ + + +-------------------------------------------------------------------------------- + +via: https://jvns.ca/blog/2019/09/12/new-zine-on-http/ + +作者:[Julia Evans][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://jvns.ca/ +[b]: https://github.com/lujun9972 +[1]: https://jvns.ca/images/http-zine-cover.png +[2]: https://gum.co/http-zine +[3]: https://twitter.com/polotek +[4]: https://jvns.ca/images/http-zine-cover.jpeg +[5]: https://wizardzines.com/zines-team/ +[6]: https://twitter.com/b0rk/status/1145824140462608387 +[7]: https://twitter.com/b0rk/status/1145896193077256197 +[8]: https://twitter.com/b0rk/status/1146054159214567424 +[9]: https://twitter.com/b0rk/status/1146065212560179202 +[10]: https://twitter.com/b0rk/status/1155493682885341184 +[11]: https://twitter.com/b0rk/status/1155318552129396736 +[12]: https://twitter.com/b0rk/status/1156048630220017665 +[13]: https://twitter.com/b0rk/status/1145362860136177664 +[14]: https://twitter.com/b0rk/status/1158012032651862017 +[15]: https://twitter.com/b0rk/status/1158726129508868097 +[16]: https://twitter.com/b0rk/status/1158848054142873603 +[17]: https://twitter.com/b0rk/status/1159163613938167808 +[18]: https://twitter.com/b0rk/status/1159492669384658944 +[19]: https://twitter.com/b0rk/status/1159812119099060224 +[20]: https://twitter.com/b0rk/status/1159829608595804160 +[21]: https://twitter.com/b0rk/status/1159839824594915335 +[22]: https://twitter.com/b0rk/status/1160185182323970050 +[23]: https://twitter.com/b0rk/status/1160933788949655552 +[24]: https://twitter.com/b0rk/status/1161283690925834241 +[25]: https://twitter.com/b0rk/status/1161262574031265793 +[26]: https://twitter.com/b0rk/status/1161679906415218690 +[27]: https://twitter.com/b0rk/status/1161680137865367553 +[28]: https://twitter.com/b0rk/status/1161997141876903936 +[29]: https://twitter.com/b0rk/status/1162392625057583104 +[30]: https://twitter.com/b0rk/status/1163460967067541504 +[31]: https://twitter.com/b0rk/status/1164181027469832196 +[32]: https://twitter.com/b0rk/status/1165277002791829510 +[33]: https://twitter.com/b0rk/status/1165623594917007362 +[34]: https://twitter.com/b0rk/status/1166466933912494081 +[35]: https://wizardzines.com/zines/wizard/ +[36]: https://wizardzines.com/zines/tcpdump/ +[37]: https://wizardzines.com/zines/strace/ +[38]: https://wizardzines.com/zines/networking/ +[39]: https://wizardzines.com/zines/debugging/ +[40]: https://wizardzines.com/zines/perf/ From 31a98f94b1889918627cd376d5aeadc1d0e6d88b Mon Sep 17 00:00:00 2001 From: DarkSun Date: Tue, 17 Sep 2019 12:22:58 +0800 Subject: [PATCH 110/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190906=20How=20?= =?UTF-8?q?to=20put=20an=20HTML=20page=20on=20the=20internet?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/tech/20190906 How to put an HTML page on the internet.md --- ...How to put an HTML page on the internet.md | 69 +++++++++++++++++++ 1 file changed, 69 insertions(+) create mode 100644 sources/tech/20190906 How to put an HTML page on the internet.md diff --git a/sources/tech/20190906 How to put an HTML page on the internet.md b/sources/tech/20190906 How to put an HTML page on the internet.md new file mode 100644 index 0000000000..4524d1c896 --- /dev/null +++ b/sources/tech/20190906 How to put an HTML page on the internet.md @@ -0,0 +1,69 @@ +[#]: collector: (lujun9972) +[#]: 
translator: ( )
+[#]: reviewer: ( )
+[#]: publisher: ( )
+[#]: url: ( )
+[#]: subject: (How to put an HTML page on the internet)
+[#]: via: (https://jvns.ca/blog/2019/09/06/how-to-put-an-html-page-on-the-internet/)
+[#]: author: (Julia Evans https://jvns.ca/)
+
+How to put an HTML page on the internet
+======
+
+One thing I love about the internet is that it’s SO EASY to put static HTML websites on the internet. Someone asked me today how to do it, so I thought I’d write down how really quickly!
+
+### just an HTML page
+
+All of my sites are just static HTML and CSS. My web design skills are relatively minimal (even the most complicated site I’ve developed on my own is fairly simple), so keeping all my internet sites relatively simple means that I have some hope of being able to make changes / fix things without spending a billion hours on it.
+
+So we’re going to take as minimal of an approach as possible in this blog post – just one HTML page.
+
+### the HTML page
+
+The website we’re going to put on the internet is just one file, called `index.html`. You can find it in a Github repository with exactly one file in it.
+
+The HTML file has some CSS in it to make it look a little less boring.
+
+### how to put the HTML page on the internet
+
+Here are the steps:
+
+ 1. sign up for a [Neocities][1] account
+ 2. copy the index.html into the index.html in your neocities site
+ 3. done
+
+
+
+The index.html page above is on the internet at [julia-example-website.neocities.org][2], if you view source you’ll see that it’s the same HTML as in the github repo.
+
+I think this is probably the simplest way to put an HTML page on the internet (and it’s a throwback to Geocities, which is how I made my first website in 2003) :). I also like that Neocities (like [glitch][3], which I also love) is about experimentation and learning and having fun.
+
+### other options
+
+This is definitely not the only easy way – Github pages and Gitlab pages and Netlify will all automatically publish a site when you push to a Git repository, and they’re all very easy to use (just connect them to your github repository and you’re done). I personally use the Git repository approach because not having things in Git makes me nervous – I like to know what changes to my website I’m actually pushing. But I think if you just want to put an HTML site on the internet for the first time and play around with HTML/CSS, Neocities is a really nice way to do it.
+
+If you want to actually use your website for a Real Thing and not just to play around you probably want to buy a domain and link it to your website so that you can change hosting providers in the future, but that is a bit less simple.
+
+### this is a good possible jumping off point for learning HTML
+
+If you are a person who is comfortable editing files in a Git repository but wants to practice HTML/CSS, I think this is a fun way to put a website on the internet and play around! I really like the simplicity of it – there’s literally just one file, so there’s no fancy extra magic to get in the way of understanding what’s going on.
+
+There are also a bunch of ways to complicate/extend this, like this blog is actually generated with [Hugo][4] which generates a bunch of HTML files which then go on the internet, but it’s always nice to start with the basics.
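+
+If you want something concrete to start from, here is a minimal sketch of what a single-file `index.html` can look like (an illustrative file, not the exact julia-example-website contents):
+
+```
+cat > index.html <<'EOF'
+<!DOCTYPE html>
+<html>
+  <head>
+    <meta charset="utf-8">
+    <title>my first website</title>
+    <style>
+      /* a tiny bit of CSS so the page is a little less boring */
+      body { max-width: 600px; margin: 40px auto; font-family: sans-serif; }
+    </style>
+  </head>
+  <body>
+    <h1>hello, internet!</h1>
+    <p>this whole website is just one HTML file.</p>
+  </body>
+</html>
+EOF
+```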
+
+--------------------------------------------------------------------------------
+
+via: https://jvns.ca/blog/2019/09/06/how-to-put-an-html-page-on-the-internet/
+
+作者:[Julia Evans][a]
+选题:[lujun9972][b]
+译者:[译者ID](https://github.com/译者ID)
+校对:[校对者ID](https://github.com/校对者ID)
+
+本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出

+[a]: https://jvns.ca/
+[b]: https://github.com/lujun9972
+[1]: https://neocities.org/
+[2]: https://julia-example-website.neocities.org/
+[3]: https://glitch.com
+[4]: https://gohugo.io/

From d7480d330d0934b7bc487170e8aff59c963abb40 Mon Sep 17 00:00:00 2001
From: DarkSun
Date: Tue, 17 Sep 2019 12:23:08 +0800
Subject: [PATCH 111/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190901=20How=20?=
 =?UTF-8?q?to=20write=20zines=20with=20simple=20tools?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

sources/tech/20190901 How to write zines with simple tools.md
---
 ...01 How to write zines with simple tools.md | 138 ++++++++++++++++++
 1 file changed, 138 insertions(+)
 create mode 100644 sources/tech/20190901 How to write zines with simple tools.md

diff --git a/sources/tech/20190901 How to write zines with simple tools.md b/sources/tech/20190901 How to write zines with simple tools.md
new file mode 100644
index 0000000000..05b21f047e
--- /dev/null
+++ b/sources/tech/20190901 How to write zines with simple tools.md
@@ -0,0 +1,138 @@
+[#]: collector: (lujun9972)
+[#]: translator: ( )
+[#]: reviewer: ( )
+[#]: publisher: ( )
+[#]: url: ( )
+[#]: subject: (How to write zines with simple tools)
+[#]: via: (https://jvns.ca/blog/2019/09/01/ways-to-write-zines-without-fancy-tools/)
+[#]: author: (Julia Evans https://jvns.ca/)
+
+How to write zines with simple tools
+======
+
+People often ask me what tools I use to write my zines ([the answer is here][1]). Answering this question as written has always felt slightly off to me, though, and I couldn’t figure out why for a long time.
+
+I finally realized last week that instead of “what tools do you use to write zines?” some people may have actually wanted to know “how can I do this myself?”! And “buy a $500 iPad” is not a terribly useful answer to that question – it’s not how I got started, iPads are kind of a weird fancy way to write zines, and most people don’t have them.
+
+So this blog post is about more traditional (and easier to get started with) ways to write zines.
+
+We’re going to start out by talking about the mechanics of how to write the zine, and then talk about how to assemble it into a booklet.
+
+### Way 1: Write it on paper
+
+This is how I made my first zine (spying on your programs with strace), which you can see on wizardzines.com.
+
+Here’s an example of a page I drew on paper this morning pretty quickly. It looks kind of bad because I scanned it with my phone, but if you use a real scanner (like I did with the strace PDF above), the scanned version comes out better.
+
+
+
+### Way 2: Use a Google doc
+
+The next option is to use a Google doc (or whatever other word processor you prefer). [Here’s the Google doc I wrote for the below image][2], and here’s what it looks like:
+
+
+
+The key thing about this Google doc approach is to apply some “less is more”. It’s intended to be printed as part of a booklet on **half** a sheet of letter paper, which means everything needs to be twice as big for it to look good.
+
+### Way 3: Use an iPad
+
+This is what I do (use the Notability app on iPad).
I’m not going to talk about this method much because this post is about using more readily available tools. + + + +### Way 4: Use a single sheet of paper + +This is a subset of “Write it on paper” – the [Wikibooks page on zine making][3] has a great guide that shows how to write out a tiny zine on 1 piece of paper and then fold it up to make a little booklet. Here are the pictures of the steps from the Wikibooks page: + + + +Sumana Harihareswara’s [Playing with python][4] zine is a nice example of a zine that’s intended to be folded up in that way. + +### Way 5: Adobe Illustrator + +I’ve never used Adobe Illustrator so I’m not going to pretend that I know anything about it or put together an example using it, but I hear it’s a way people do book layout. + +### booklets: the photocopier method + +So you’ve written a bunch of pages and want to assemble them into a booklet. One way to do this (and what I did for my first zine about strace!) is the photocopier method. There’s a great guide by Julia Gfrörer in [this tweet][5], which I’m going to reproduce here: + +![][6] +![][7] +![][8] +![][9] + +That explanation is excellent and I don’t have anything to add. I did it that way and it worked great. + +If you want to buy a print copy of that how-to-make-zines zine from Thruban Press, you can [get it here on Etsy][10]. + +### booklets: the computer method + +If you’ve made your zine in Google Docs or in another computery way, you probably want a more computery way of assembling the pages into a booklet. + +**what I use: pdflatex** + +I do this using the `pdfpages` LaTeX extension. This sounds complicated but it’s not really, you don’t need to learn latex or anything. You just need to have pdflatex on your system, which is a `sudo apt install texlive-base` away on Ubuntu. The steps are: + + 1. Get a PDF with the pages from your zine (pages need to be a multiple of 4) + 2. Get the latex file from [this gist][11] + 3. Replace `/home/bork/http-zine.pdf` with the path to your PDF and `1-28` with `1-however many pages are in your zine`. + 4. run `pdflatex formatted-zine.tex` + 5. Tweak the parameters until it looks the way you want. The [documentation for the pdfpages package is here][12] + + + +I like using this relatively complicated method because there are always small tweaks I want to make like “oh, the right margin is too big, crop it a little bit” and the pdfpages package has tons of options that let me make those tweaks. + +**other methods** + + 1. On Linux you can use the `pdfjam` bash script, which is just a wrapper around the pdfpages latex package. This is what I used to do but today I find it simpler to use the pdfpages latex package directly. + 2. There’s a program called [Booklet Creator][13] for Mac and Windows that [@mrfb uses][14]. It looks pretty simple to use. + 3. If you convert your PDF to a ps file (with `pdf2ps` for instance), `psnup` can do this. I tried `cat file.ps | psbook | psnup -2 > booklet.ps` and it worked, though the resulting PDFs are a little slow to load in my PDF viewer for some reason. + 4. there are probably a ton more ways to do this, if you know more let me know + + + +### making zines is easy and low tech + +That’s all! I mostly wanted to explain that zines are an easy low tech thing to do and if you think making them sounds fun, you definitely 100% do not need to use any fancy expensive tools to do it, you can literally use some sheets of paper, a Sharpie, a pen, and spend $3 at your local print shop to use the photocopier. 
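+
+To make the pdflatex approach from earlier a little more concrete, here’s a minimal sketch of the kind of wrapper file involved (the file name, page count, and exact pdfpages options here are illustrative assumptions – see the pdfpages documentation linked above for the full option list):
+
+```
+# Write a tiny LaTeX wrapper that imposes an existing zine PDF as a booklet.
+cat > formatted-zine.tex <<'EOF'
+\documentclass{article}
+\usepackage{pdfpages}
+\begin{document}
+% pages=1-28 assumes a 28-page zine (page count must be a multiple of 4);
+% "booklet" reorders pages into printing order, and "landscape" fits two
+% half-letter pages side by side on each sheet.
+\includepdf[pages=1-28, booklet, landscape]{zine.pdf}
+\end{document}
+EOF
+
+pdflatex formatted-zine.tex   # produces formatted-zine.pdf, ready to print
+```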
+ +### resources + +summary of the resources I linked to: + + * Guide to putting together zines with a photocopier by Julia Gfrörer: [this tweet][5], [get it on Etsy][10] + * [Wikibooks page on zine making][3] + * Notes on making zines using Google Docs: [this twitter thread][14] + * [Stolen Sharpie Revolution][15] (the first book I read about making zines). You can also get it on Amazon if you want but it’s probably better to buy directly from their site. + * [Booklet Creator][13] + + + +-------------------------------------------------------------------------------- + +via: https://jvns.ca/blog/2019/09/01/ways-to-write-zines-without-fancy-tools/ + +作者:[Julia Evans][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://jvns.ca/ +[b]: https://github.com/lujun9972 +[1]: https://twitter.com/b0rk/status/1160171769833185280 +[2]: https://docs.google.com/document/d/1byzfXC0h6hNFlWXaV9peJpX-GamJOrJ70x9nu1dZ-m0/edit?usp=sharing +[3]: https://en.m.wikibooks.org/wiki/Zine_Making/Putting_pages_together +[4]: https://www.harihareswara.net/pix/playing-with-python-zine/playing-with-python-zine.pdf +[5]: https://twitter.com/thorazos/status/1158556879485906944 +[6]: https://pbs.twimg.com/media/EBQFUC0X4AAPTU1?format=jpg&name=small +[7]: https://pbs.twimg.com/media/EBQFUC0XsAEBhHf?format=jpg&name=small +[8]: https://pbs.twimg.com/media/EBQFUC1XUAAKDIB?format=jpg&name=small +[9]: https://pbs.twimg.com/media/EBQFUDRX4AMkIAr?format=jpg&name=small +[10]: https://www.etsy.com/thorazos/listing/693692176/thuban-press-guide-to-analog-self?utm_source=Copy&utm_medium=ListingManager&utm_campaign=Share&utm_term=so.lmsm&share_time=1565113962419 +[11]: https://gist.github.com/jvns/b3de1d658e2b44aebb485c35fb1a7a0f +[12]: http://texdoc.net/texmf-dist/doc/latex/pdfpages/pdfpages.pdf +[13]: https://www.bookletcreator.com/ +[14]: https://twitter.com/mrfb/status/1159478532545888258 +[15]: http://www.stolensharpierevolution.org/ From 178cf15714850852a0dadf55e97b31f82b11e816 Mon Sep 17 00:00:00 2001 From: DarkSun Date: Tue, 17 Sep 2019 12:23:17 +0800 Subject: [PATCH 112/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190830=20git=20?= =?UTF-8?q?exercises:=20navigate=20a=20repository?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/tech/20190830 git exercises- navigate a repository.md --- ...30 git exercises- navigate a repository.md | 84 +++++++++++++++++++ 1 file changed, 84 insertions(+) create mode 100644 sources/tech/20190830 git exercises- navigate a repository.md diff --git a/sources/tech/20190830 git exercises- navigate a repository.md b/sources/tech/20190830 git exercises- navigate a repository.md new file mode 100644 index 0000000000..bfafd73d66 --- /dev/null +++ b/sources/tech/20190830 git exercises- navigate a repository.md @@ -0,0 +1,84 @@ +[#]: collector: (lujun9972) +[#]: translator: ( ) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (git exercises: navigate a repository) +[#]: via: (https://jvns.ca/blog/2019/08/30/git-exercises--navigate-a-repository/) +[#]: author: (Julia Evans https://jvns.ca/) + +git exercises: navigate a repository +====== + +I think the [curl exercises][1] the other day went well, so today I woke up and wanted to try writing some Git exercises. 
Git is a big thing to learn, probably too big to learn in a few hours, so my first idea for how to break it down was to start by **navigating** a repository.
+
+I was originally going to use a toy test repository, but then I thought – why not a real repository? That’s way more fun! So we’re going to navigate the repository for the Ruby programming language. You don’t need to know any C to do this exercise, it’s just about getting comfortable with looking at how files in a repository change over time.
+
+### clone the repository
+
+To get started, clone the repository:
+
+```
+git clone https://github.com/ruby/ruby
+```
+
+The big thing that’s different about this repository (as compared to most of the repositories you’ll work with in real life) is that it doesn’t have branches, but it DOES have lots of tags, which are similar to branches in that they’re both just pointers to a commit. So we’ll do exercises with tags instead of branches. The way you _change_ tags and branches is very different, but the way you _look at_ tags and branches is exactly the same.
+
+### a git SHA always refers to the same code
+
+The most important thing to keep in mind while doing these exercises is that a git SHA like `9e3d9a2a009d2a0281802a84e1c5cc1c887edc71` always refers to the same code, as explained in this page. This page is from a zine I wrote with Katie Sylor-Miller called [Oh shit, git!][2]. (She also has a great site called ohshitgit.com that inspired the zine).
+
+
+
+We’ll be using git SHAs really heavily in the exercises to get you used to working with them and to help understand how they correspond to tags and branches.
+
+### git subcommands we’ll be using
+
+All of these exercises only use 5 git subcommands:
+
+```
+git checkout
+git log (--oneline, --author, and -S will be useful)
+git diff (--stat will be useful)
+git show
+git status
+```
+
+### exercises
+
+ 1. Check out matz’s commit of Ruby from 1998. The commit ID is `3db12e8b236ac8f88db8eb4690d10e4a3b8dbcd4`. Find out how many lines of code Ruby was at that time.
+ 2. Check out the current master branch
+ 3. Look at the history for the file `hash.c`. What was the last commit ID that changed that file?
+ 4. Get a diff of how `hash.c` has changed in the last 20ish years: compare that file on the master branch to the file at commit `3db12e8b236ac8f88db8eb4690d10e4a3b8dbcd4`.
+ 5. Find a recent commit that changed `hash.c` and look at the diff for that commit
+ 6. This repository has a bunch of **tags** for every Ruby release. Get a list of all the tags.
+ 7. Find out how many files changed between tag `v1_8_6_187` and tag `v1_8_6_188`
+ 8. Find a commit (any commit) from 2015 and check it out, look at the files very briefly, then go back to the master branch.
+ 9. Find out what commit the tag `v1_8_6_187` corresponds to.
+ 10. List the directory `.git/refs/tags`. Run `cat .git/refs/tags/v1_8_6_187` to see the contents of one of those files.
+ 11. Find out what commit ID `HEAD` corresponds to right now.
+ 12. Find out how many commits have been made to the `test/` directory
+ 13. Get a diff of `lib/telnet.rb` between the commits `65a5162550f58047974793cdc8067a970b2435c0` and `9e3d9a2a009d2a0281802a84e1c5cc1c887edc71`. How many lines of that file were changed?
+ 14. How many commits were made between Ruby 2.5.1 and 2.5.2 (tags `v2_5_1` and `v2_5_2`)? (This one is a tiny bit tricky, there’s more than one step.)
+ 15. How many commits were authored by `matz` (Ruby’s creator)?
+ 16. What’s the most recent commit that included the word `tkutil`?
+ 17. Check out the commit `e51dca2596db9567bd4d698b18b4d300575d3881` and create a new branch that points at that commit.
+ 18. Run `git reflog` to see all the navigating of the repository you’ve done so far
+
+
+
+--------------------------------------------------------------------------------
+
+via: https://jvns.ca/blog/2019/08/30/git-exercises--navigate-a-repository/
+
+作者:[Julia Evans][a]
+选题:[lujun9972][b]
+译者:[译者ID](https://github.com/译者ID)
+校对:[校对者ID](https://github.com/校对者ID)
+
+本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出

+[a]: https://jvns.ca/
+[b]: https://github.com/lujun9972
+[1]: https://jvns.ca/blog/2019/08/27/curl-exercises/
+[2]: https://wizardzines.com/zines/oh-shit-git/

From bf8dc3f0d7f4c2d6d20506acf58c3831b94529c7 Mon Sep 17 00:00:00 2001
From: DarkSun
Date: Tue, 17 Sep 2019 12:23:27 +0800
Subject: [PATCH 113/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190827=20curl?=
 =?UTF-8?q?=20exercises?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

sources/tech/20190827 curl exercises.md
---
 sources/tech/20190827 curl exercises.md | 84 +++++++++++++++++++++
 1 file changed, 84 insertions(+)
 create mode 100644 sources/tech/20190827 curl exercises.md

diff --git a/sources/tech/20190827 curl exercises.md b/sources/tech/20190827 curl exercises.md
new file mode 100644
index 0000000000..36eae2743b
--- /dev/null
+++ b/sources/tech/20190827 curl exercises.md
@@ -0,0 +1,84 @@
+[#]: collector: (lujun9972)
+[#]: translator: ( )
+[#]: reviewer: ( )
+[#]: publisher: ( )
+[#]: url: ( )
+[#]: subject: (curl exercises)
+[#]: via: (https://jvns.ca/blog/2019/08/27/curl-exercises/)
+[#]: author: (Julia Evans https://jvns.ca/)
+
+curl exercises
+======
+
+Recently I’ve been interested in how people learn things. I was reading Kathy Sierra’s great book [Badass: Making Users Awesome][1]. It talks about the idea of _deliberate practice_.
+
+The idea is that you find a small micro-skill that can be learned in maybe 3 sessions of 45 minutes, and focus on learning that micro-skill. So, as an exercise, I was trying to think of a computer skill that I thought could be learned in 3 45-minute sessions.
+
+I thought that making HTTP requests with `curl` might be a skill like that, so here are some curl exercises as an experiment!
+
+### what’s curl?
+
+curl is a command line tool for making HTTP requests. I like it because it’s an easy way to test that servers or APIs are doing what I think, but it’s a little confusing at first!
+
+Here’s a drawing explaining curl’s most important command line arguments (which is page 6 of my [Bite Size Networking][2] zine). You can click to make it bigger.
+
+
+
+### fluency is valuable
+
+With any command line tool, I think having fluency is really helpful. It’s really nice to be able to just type in the thing you need. For example, recently I was testing out the Gumroad API and I was able to just type in:
+
+```
+curl https://api.gumroad.com/v2/sales \
+ -d "access_token=" \
+ -X GET -d "before=2016-09-03"
+```
+
+and get things working from the command line.
+
+### 21 curl exercises
+
+These exercises are about understanding how to make different kinds of HTTP requests with curl. They’re a little repetitive on purpose. They exercise basically everything I do with curl.
+
+To keep it simple, we’re going to make a lot of our requests to the same website: `https://httpbin.org`. httpbin is a service that accepts HTTP requests and then tells you what request you made.
+
+ 1. Request `https://httpbin.org`
+ 2. Request `https://httpbin.org/anything`. httpbin.org/anything will look at the request you made, parse it, and echo back to you what you requested. curl’s default is to make a GET request.
+ 3. Make a POST request to `https://httpbin.org/anything`
+ 4. Make a GET request to `https://httpbin.org/anything`, but this time add some query parameters (set `value=panda`).
+ 5. Request google’s robots.txt file ([www.google.com/robots.txt][3])
+ 6. Make a GET request to `https://httpbin.org/anything` and set the header `User-Agent: elephant`.
+ 7. Make a DELETE request to `https://httpbin.org/anything`
+ 8. Request `https://httpbin.org/anything` and also get the response headers
+ 9. Make a POST request to `https://httpbin.org/anything` with the JSON body `{"value": "panda"}`
+ 10. Make the same POST request as the previous exercise, but set the Content-Type header to `application/json` (because POST requests need to have a content type that matches their body). Look at the `json` field in the response to see the difference from the previous one.
+ 11. Make a GET request to `https://httpbin.org/anything` and set the header `Accept-Encoding: gzip` (what happens? why?)
+ 12. Put a bunch of JSON in a file and then make a POST request to `https://httpbin.org/anything` with the JSON in that file as the body
+ 13. Make a request to `https://httpbin.org/image` and set the header ‘Accept: image/png’. Save the output to a PNG file and open the file in an image viewer. Try the same thing with different `Accept:` headers.
+ 14. Make a PUT request to `https://httpbin.org/anything`
+ 15. Request `https://httpbin.org/image/jpeg`, save it to a file, and open that file in your image editor.
+ 16. Request `https://www.twitter.com`. You’ll get an empty response. Get curl to show you the response headers too, and try to figure out why the response was empty.
+ 17. Make any request to `https://httpbin.org/anything` and just set some nonsense headers (like `panda: elephant`)
+ 18. Request a couple of `https://httpbin.org/status/` URLs (like `/status/200` and `/status/404`). Request them again and get curl to show the response headers.
+ 19. Request `https://httpbin.org/anything` and set a username and password (with `-u username:password`)
+ 20. Download the Twitter homepage (`https://twitter.com`) in Spanish by setting the `Accept-Language: es-ES` header.
+ 21. Make a request to the Stripe API with curl (see the Stripe API docs for how – they give you a test API key). Try making exactly the same request to `https://httpbin.org/anything`.
+
+
+
+--------------------------------------------------------------------------------
+
+via: https://jvns.ca/blog/2019/08/27/curl-exercises/
+
+作者:[Julia Evans][a]
+选题:[lujun9972][b]
+译者:[译者ID](https://github.com/译者ID)
+校对:[校对者ID](https://github.com/校对者ID)
+
+本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出

+[a]: https://jvns.ca/
+[b]: https://github.com/lujun9972
+[1]: https://www.amazon.com/Badass-Making-Awesome-Kathy-Sierra/dp/1491919019
+[2]: https://wizardzines.com/zines/bite-size-networking
+[3]: http://www.google.com/robots.txt

From 8d281145ec631dbc66c88320c7c14ee8795b0fc3 Mon Sep 17 00:00:00 2001
From: DarkSun
Date: Tue, 17 Sep 2019 12:23:37 +0800
Subject: [PATCH 114/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190628=20Get=20?=
 =?UTF-8?q?your=20work=20recognized:=20write=20a=20brag=20document?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

sources/tech/20190628 Get your work recognized- write a brag document.md
---
 ...work recognized- write a brag document.md | 256 ++++++++++++++++++
 1 file changed, 256 insertions(+)
 create mode 100644 sources/tech/20190628 Get your work recognized- write a brag document.md

diff --git a/sources/tech/20190628 Get your work recognized- write a brag document.md b/sources/tech/20190628 Get your work recognized- write a brag document.md
new file mode 100644
index 0000000000..e13dd2a07b
--- /dev/null
+++ b/sources/tech/20190628 Get your work recognized- write a brag document.md
@@ -0,0 +1,256 @@
+[#]: collector: (lujun9972)
+[#]: translator: ( )
+[#]: reviewer: ( )
+[#]: publisher: ( )
+[#]: url: ( )
+[#]: subject: (Get your work recognized: write a brag document)
+[#]: via: (https://jvns.ca/blog/brag-documents/)
+[#]: author: (Julia Evans https://jvns.ca/)
+
+Get your work recognized: write a brag document
+======
+
+There’s this idea that, if you do great work at your job, people will (or should!) automatically recognize that work and reward you for it with promotions / increased pay. In practice, it’s often more complicated than that – some kinds of important work are more visible/memorable than others. It’s frustrating to have done something really important and later realize that you didn’t get rewarded for it just because the people making the decision didn’t understand or remember what you did. So I want to talk about a tactic that I and lots of people I work with have used!
+
+This blog post isn’t just about being promoted or getting raises though. The ideas here have actually been more useful to me to help me reflect on themes in my work, what’s important to me, what I’m learning, and what I’d like to be doing differently. But they’ve definitely helped with promotions!
+
+You can also [skip to the brag document template at the end][1].
+
+### you don’t remember everything you did
+
+One thing I’m always struck by when it comes to performance review time is a feeling of “wait, what _did_ I do in the last 6 months?”. This is a kind of demoralizing feeling and it’s usually not based in reality, more in “I forgot what cool stuff I actually did”.
+
+I invariably end up having to spend a bunch of time looking through my pull requests, tickets, launch emails, design documents, and more. I always end up finding small (and sometimes not-so-small) things that I completely forgot I did, like:
+
+ * mentored an intern 5 months ago
+ * did a small-but-important security project
+ * spent a few weeks helping get an important migration over the line
+ * helped X put together this design doc
+ * etcetera!
+
+
+
+### your manager doesn’t remember everything you did
+
+And if you don’t remember everything important you did, your manager (no matter how great they are!) probably doesn’t either. And they need to explain to other people why you should be promoted or given an evaluation like “exceeds expectations” (“X’s work is so awesome!!!!” doesn’t fly).
+
+So if your manager is going to effectively advocate for you, they need help.
+
+### here’s the tactic: write a document listing your accomplishments
+
+The tactic is pretty simple! Instead of trying to remember everything you did with your brain, maintain a “brag document” that lists everything so you can refer to it when you get to performance review season! This is a pretty common tactic – when I started doing this I mentioned it to more experienced people and they were like “oh yeah, I’ve been doing that for a long time, it really helps”.
+
+Where I work we call this a “brag document” but I’ve heard other names for the same concept like “hype document” or “list of stuff I did” :).
+
+There’s a basic template for a brag document at the end of this post.
+
+### share your brag document with your manager
+
+When I first wrote a brag document I was kind of nervous about sharing it with my manager. It felt weird to be like “hey, uh, look at all the awesome stuff I did this year, I wrote a long document listing everything”. But my manager was really thankful for it – I think his perspective was “this makes my job way easier, now I can look at the document when writing your perf review instead of trying to remember what happened”.
+
+Giving them a document that explains your accomplishments will really help your manager advocate for you in discussions about your performance and come prepared to any meetings they need to have.
+
+Brag documents also **really** help with manager transitions – if you get a new manager 3 months before an important performance review that you want to do well on, giving them a brag document outlining your most important work & its impact will help them understand what you’ve been doing even though they may not have been aware of any of your work before.
+
+### share it with your peer reviewers
+
+Similarly, if your company does peer feedback as part of the promotion/perf process – share your brag document with your peer reviewers!! Every time someone shares their doc with me I find it SO HELPFUL with writing their review for much the same reasons it’s helpful to share it with your manager – it reminds me of all the amazing things they did, and when they list their goals in their brag document it also helps me see what areas they might be most interested in feedback on.
+
+On some teams at work it’s a team norm to share a brag document with peer reviewers to make it easier for them.
+
+### explain the big picture
+
+In addition to just listing accomplishments, in your brag document you can write the narrative explaining the big picture of your work. Have you been really focused on security? On building your product skills & having really good relationships with your users? On building a strong culture of code review on the team?
+
+In my brag document, I like to do this by making a section for areas that I’ve been focused on (like “security”) and listing all the work I’ve done in that area there. This is especially good if you’re working on something fuzzy like “building a stronger culture of code review” where all the individual actions you do towards that might be relatively small and there isn’t a big shiny ship.
+
+### use your brag document to notice patterns
+
+In the past I’ve found the brag document useful not just to hype my accomplishments, but also to reflect on the work I’ve done. Some questions it’s helped me with:
+
+ * What work do I feel most proud of?
+ * Are there themes in these projects I should be thinking about? What’s the big picture of what I’m working on? (am I working a lot on security? localization?).
+ * What do I wish I was doing more / less of?
+ * Which of my projects had the effect I wanted, and which didn’t? Why might that have been?
+ * What could have gone better with project X? What might I want to do differently next time?
+
+
+
+### you can write it all at once or update it every 2 weeks
+
+Many people have told me that it works best for them if they take a few minutes to update their brag document every 2 weeks.
For me it actually works better to do a single marathon session every 6 months or every year where I look through everything I did and reflect on it all at once. Try out different approaches and see what works for you! + +### don’t forget to include the fuzzy work + +A lot of us work on fuzzy projects that can feel hard to quantify, like: + + * improving code quality on the team / making code reviews a little more in depth + * making on call easier + * building a more fair interview process / performance review system + * refactoring / driving down technical debt + + + +A lot of people will leave this kind of work out because they don’t know how to explain why it’s important. But I think this kind of work is especially important to put into your brag document because it’s the most likely to fall under the radar! One way to approach this is to, for each goal: + + 1. explain your goal for the work (why do you think it’s important to refactor X piece of code?) + 2. list some things you’ve done towards that goal + 3. list any effects you’ve seen of the work, even if they’re a little indirect + + + +If you tell your coworkers this kind of work is important to you and tell them what you’ve been doing, maybe they can also give you ideas about how to do it more effectively or make the effects of that work more obvious! + +### encourage each other to celebrate accomplishments + +One nice side effect of having a shared idea that it’s normal/good to maintain a brag document at work is that I sometimes see people encouraging each other to record & celebrate their accomplishments (“hey, you should put that in your brag doc, that was really good!”). It can be hard to see the value of your work sometimes, especially when you’re working on something hard, and an outside perspective from a friend or colleague can really help you see why what you’re doing is important. + +Brag documents are good when you use them on your own to advocate for yourself, but I think they’re better as a collaborative effort to recognize where people are excelling. + +Next, I want to talk about a couple of structures that we’ve used to help people recognize their accomplishments. + +### the brag workshop: help people list their accomplishments + +The way this “brag document” practice started in the first place is that my coworker [Karla][2] and I wanted to help other women in engineering advocate for themselves more in the performance review process. The idea is that some people undersell their accomplishments more than they should, so we wanted to encourage those people to “brag” a little bit and write down what they did that was important. + +We did this by running a “brag workshop” just before performance review season. The format of the workshop is like this: + +**Part 1: write the document: 1-2 hours**. Everybody sits down with their laptop, starts looking through their pull requests, tickets they resolved, design docs, etc, and puts together a list of important things they did in the last 6 months. + +**Part 2: pair up and make the impact of your work clearer: 1 hour**. The goal of this part is to pair up, review each other’s documents, and identify places where people haven’t bragged “enough” – maybe they worked on an extremely critical project to the company but didn’t highlight how important it was, maybe they improved test performance but didn’t say that they made the tests 3 times faster and that it improved everyone’s developer experience. 
It’s easy to accidentally write “I shipped $feature” and miss the follow up (“… which caused $thing to happen”). Another person reading through your document can help you catch the places where you need to clarify the impact. + +### biweekly brag document writing session + +Another approach to helping people remember their accomplishments: my friend Dave gets some friends together every couple of weeks or so for everyone to update their brag documents. It’s a nice way for people to talk about work that they’re happy about & celebrate it a little bit, and updating your brag document as you go can be easier than trying to remember everything you did all at once at the end of the year. + +These don’t have to be people in the same company or even in the same city – that group meets over video chat and has people from many different companies doing this together from Portland, Toronto, New York, and Montreal. + +In general, especially if you’re someone who really cares about your work, I think it’s really positive to share your goals & accomplishments (and the things that haven’t gone so well too!) with your friends and coworkers. It makes it feel less like you’re working alone and more like everyone is supporting each other in helping them accomplish what they want. + +### thanks + +Thanks to Karla Burnett who I worked with on spreading this idea at work, to Dave Vasilevsky for running brag doc writing sessions, to Will Larson who encouraged me to start one [of these][3] in the first place, to my manager Jay Shirley for always being encouraging & showing me that this is a useful way to work with a manager, and to Allie, Dan, Laura, Julian, Kamal, Stanley, and Vaibhav for reading a draft of this. + +I’d also recommend the blog post [Hype Yourself! You’re Worth It!][4] by Aashni Shah which talks about a similar approach. + +## Appendix: brag document template + +Here’s a template for a brag document! Usually I make one brag document per year. (“Julia’s 2017 brag document”). I think it’s okay to make it quite long / comprehensive – 5-10 pages or more for a year of work doesn’t seem like too much to me, especially if you’re including some graphs/charts / screenshots to show the effects of what you did. + +One thing I want to emphasize, for people who don’t like to brag, is – **you don’t have to try to make your work sound better than it is**. Just make it sound **exactly as good as it is**! For example “was the primary contributor to X new feature that’s now used by 60% of our customers and has gotten Y positive feedback”. + +### Goals for this year: + + * List your major goals here! Sharing your goals with your manager & coworkers is really nice because it helps them see how they can support you in accomplishing those goals! + + + +### Goals for next year + + * If it’s getting towards the end of the year, maybe start writing down what you think your goals for next year might be. + + + +### Projects + +For each one, go through: + + * What your contributions were (did you come up with the design? Which components did you build? Was there some useful insight like “wait, we can cut scope and do what we want by doing way less work” that you came up with?) + * The impact of the project – who was it for? Are there numbers you can attach to it? (saved X dollars? shipped new feature that has helped sell Y big deals? Improved performance by X%? Used by X internal users every day?). Did it support some important non-numeric company goal (required to pass an audit? helped retain an important user?) 
+
+
+
+Remember: don’t forget to explain what the results of your work actually were! It’s often important to go back a few months later and fill in what actually happened after you launched the project.
+
+### Collaboration & mentorship
+
+Examples of things in this category:
+
+ * Helping others in an area you’re an expert in (like “other engineers regularly ask me for one-off help solving weird bugs in their CSS” or “quoting from the C standard at just the right moment”)
+ * Mentoring interns / helping new team members get started
+ * Writing really clear emails/meeting notes
+ * Foundational code that other people built on top of
+ * Improving monitoring / dashboards / on call
+ * Any code review that you spent a particularly long time on / that you think was especially important
+ * Important questions you answered (“helped Risha from OTHER_TEAM with a lot of questions related to Y”)
+ * Mentoring someone on a project (“gave Ben advice from time to time on leading his first big project”)
+ * Giving an internal talk or workshop
+
+
+
+### Design & documentation
+
+List design docs & documentation that you worked on
+
+ * Design docs: I usually just say “wrote design for X” or “reviewed design for X”
+ * Documentation: maybe briefly explain the goal behind this documentation (for example “we were getting a lot of questions about X, so I documented it and now we can answer the questions more quickly”)
+
+
+
+### Company building
+
+This is a category we have at work – it basically means “things you did to help the company overall, not just your project / team”. Some things that go in here:
+
+ * Going above & beyond with interviewing or recruiting (doing campus recruiting, etc)
+ * Improving important processes, like the interview process or writing better onboarding materials
+
+
+
+### What you learned
+
+My friend Julian suggested this section and I think it’s a great idea – try listing important things you learned or skills you’ve acquired recently! Some examples of skills you might be learning or improving:
+
+ * how to do performance analysis & make code run faster
+ * internals of an important piece of software (like the JVM or Postgres or Linux)
+ * how to use a library (like React)
+ * how to use an important tool (like the command line or Firefox dev tools)
+ * about a specific area of programming (like localization or timezones)
+ * an area like product management / UX design
+ * how to write a clear design doc
+ * a new programming language
+
+
+
+It’s really easy to lose track of what skills you’re learning, and usually when I reflect on this I realize I learned a lot more than I thought and also notice things that I’m _not_ learning that I wish I was.
+
+### Outside of work
+
+It’s also often useful to track accomplishments outside of work, like:
+
+ * blog posts
+ * talks/panels
+ * open source work
+ * industry recognition
+
+
+
+I think this can be a nice way to highlight how you’re thinking about your career outside of strictly what you’re doing at work.
+
+This can also include other non-career-related things you’re proud of, if that feels good to you! Some people like to keep a combined personal + work brag document.
+
+### General prompts
+
+If you’re feeling stuck for things to mention, try:
+
+ * If you were trying to convince a friend to come join your company/team, what would you tell them about your work?
+ * Did anybody tell you you did something well recently?
+
+
+
+--------------------------------------------------------------------------------
+
+via: https://jvns.ca/blog/brag-documents/
+
+作者:[Julia Evans][a]
+选题:[lujun9972][b]
+译者:[译者ID](https://github.com/译者ID)
+校对:[校对者ID](https://github.com/校对者ID)
+
+本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
+
+[a]: https://jvns.ca/
+[b]: https://github.com/lujun9972
+[1]: tmp.nd0Dg3RXQE#template
+[2]: https://karla.io/
+[3]: https://lethain.com/career-narratives/
+[4]: http://blog.aashni.me/2019/01/hype-yourself-youre-worth-it/

From 8dd7ad8f695f02ba9737253fd1c1dabad3024005 Mon Sep 17 00:00:00 2001
From: DarkSun
Date: Tue, 17 Sep 2019 12:23:47 +0800
Subject: [PATCH 115/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190623=20What?=
 =?UTF-8?q?=20does=20debugging=20a=20program=20look=20like=3F?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

sources/tech/20190623 What does debugging a program look like.md
---
 ...What does debugging a program look like.md | 184 ++++++++++++++++++
 1 file changed, 184 insertions(+)
 create mode 100644 sources/tech/20190623 What does debugging a program look like.md

diff --git a/sources/tech/20190623 What does debugging a program look like.md b/sources/tech/20190623 What does debugging a program look like.md
new file mode 100644
index 0000000000..7cc7c1432e
--- /dev/null
+++ b/sources/tech/20190623 What does debugging a program look like.md
@@ -0,0 +1,184 @@
+[#]: collector: (lujun9972)
+[#]: translator: ( )
+[#]: reviewer: ( )
+[#]: publisher: ( )
+[#]: url: ( )
+[#]: subject: (What does debugging a program look like?)
+[#]: via: (https://jvns.ca/blog/2019/06/23/a-few-debugging-resources/)
+[#]: author: (Julia Evans https://jvns.ca/)
+
+What does debugging a program look like?
+======
+
+I was debugging with a friend who’s a relatively new programmer yesterday, and showed them a few debugging tips. Then I was thinking about how to teach debugging this morning, and [mentioned on Twitter][1] that I’d never seen a really good guide to debugging your code. (there are a ton of really great replies by Anne Ogborn to that tweet if you are interested in debugging tips)
+
+As usual, I got a lot of helpful answers and now I have a few ideas about how to teach debugging skills / describe the process of debugging.
+
+### a couple of debugging resources
+
+I was hoping for more links to debugging books/guides, but here are the 2 recommendations I got:
+
+**“Debugging” by David Agans**: Several people recommended the book [Debugging][2], which looks like a nice and fairly short book that explains a debugging strategy. I haven’t read it yet (though I ordered it to see if I should be recommending it) and the rules laid out in the book (“understand the system”, “make it fail”, “quit thinking and look”, “divide and conquer”, “change one thing at a time”, “keep an audit trail”, “check the plug”, “get a fresh view”, and “if you didn’t fix it, it ain’t fixed”) seem extremely reasonable :). He also has a charming [debugging poster][3].
+
+**“How to debug” by John Regehr**: [How to Debug][4] is a very good blog post based on Regehr’s experience teaching a university embedded systems course. Lots of good advice. He also has a [blog post reviewing 4 books about debugging][5], including Agans’ book.
+
+### reproduce your bug (but how do you do that?)
+
+The rest of this post is going to be an attempt to aggregate the different ideas about debugging that people tweeted at me.
+
+Somewhat obviously, everybody agrees that being able to consistently reproduce a bug is important if you want to figure out what’s going on. I have an intuitive sense for how to do this but I’m not sure how to **explain** how to go from “I saw this bug twice” to “I can consistently reproduce this bug on demand on my laptop”, and I wonder whether the techniques you use to do this depend on the domain (backend web dev, frontend, mobile, games, C++ programs, embedded etc).
+
+### reproduce your bug _quickly_
+
+Everybody also agrees that it’s extremely useful to be able to reproduce the bug quickly (if it takes you 3 minutes to check if every change helped, iterating is VERY SLOW).
+
+A few suggested approaches:
+
+ * for something that requires clicking on a bunch of things in a browser to reproduce, recording what you clicked on with [Selenium][6] and getting Selenium to replay the UI interactions (suggested [here][7])
+ * writing a unit test that reproduces the bug (if you can). bonus: you can add this to your test suite later if it makes sense
+ * writing a script / finding a command line incantation that does it (like `curl MY_APP.local/whatever`)
+
+
+
+### accept that it’s probably your code’s fault
+
+Sometimes I see a problem and I’m like “oh, library X has a bug”, “oh, it’s DNS”, “oh, SOME OTHER THING THAT IS NOT MY CODE is broken”. And sometimes it’s not my code! But in general between an established library and my code that I wrote last month, usually it’s my code that I wrote last month that’s the problem :).
+
+### start doing experiments
+
+@act_gardner gave a [nice, short explanation of what you have to do after you reproduce your bug][8]
+
+> I try to encourage people to first fully understand the bug - What’s happening? What do you expect to happen? When does it happen? When does it not happen? Then apply their mental model of the system to guess at what could be breaking and come up with experiments.
+>
+> Experiments could be changing or removing code, making API calls from a REPL, trying new inputs, poking at memory values with a debugger or print statements.
+
+I think the loop here may be:
+
+ * make a guess about one aspect of what might be happening (“this variable is set to X where it should be Y”, “the server is being sent the wrong request”, “this code is never running at all”)
+ * do an experiment to check that guess
+ * repeat until you understand what’s going on
+
+
+
+### change one thing at a time
+
+Everybody definitely agrees that it is important to change one thing at a time when doing an experiment to verify an assumption.
+
+### check your assumptions
+
+A lot of debugging is realizing that something you were **sure** was true (“wait this request is going to the new server, right, not the old one???”) is actually… not true. I made an attempt to [list some common incorrect assumptions][9].
Here are some examples:
+
+ * this variable is set to X (“that filename is definitely right”)
+ * that variable’s value can’t possibly have changed between X and Y
+ * this code was doing the right thing before
+ * this function does X
+ * I’m editing the right file
+ * there can’t be any typos in that line I wrote, it is just 1 line of code
+ * the documentation is correct
+ * the code I’m looking at is being executed at some point
+ * these two pieces of code execute sequentially and not in parallel
+ * the code does the same thing when compiled in debug / release mode (or with -O2 and without, or…)
+ * the compiler is not buggy (though this is last on purpose, the compiler is only very rarely to blame :))
+
+
+
+### weird methods to get information
+
+There are a lot of normal ways to do experiments to check your assumptions / guesses about what the code is doing (print out variable values, use a debugger, etc). Sometimes, though, you’re in a more difficult environment where you can’t print things out and don’t have access to a debugger (or it’s inconvenient to do those things, maybe because there are too many events). Some ways to cope:
+
+ * [adding sounds on mobile][10]: “In the mobile world, I live on this advice. Xcode can play a sound when you hit a breakpoint (and continue without stopping). I place them certain places in the code, and listen for buzzing Tink to indicate tight loops or Morse/Pop pairs to catch unbalanced events” (also [this tweet][11])
+ * there’s a very cool talk about [using XCode to play sound for iOS debugging here][12]
+ * [adding LEDs][13]: “When I did embedded dev ages ago on grids of transputers, we wired up an LED to an unused pin on each chip. It was surprisingly effective for diagnosing parallelism issues.”
+ * [string][14]: “My networks prof told me about a hack he saw at Xerox in the early days of Ethernet: a tap in the coax with an amp and motor and piece of string. The busier the network was, the faster the string twirled.”
+ * [peep][15] is a “network auralizer” that translates what’s happening on your system into sounds. I spent 10 minutes trying to get it to compile and failed so far but it looks very fun and I want to try it!!
+
+
+
+The point here is that information is the most important thing and you need to do whatever’s necessary to get information.
+
+### write your code so it’s easier to debug
+
+Another point a few people brought up is that you can improve your program to make it easier to debug. tef has a nice post about this: [Write code that’s easy to delete, and easy to debug too.][16] I thought this was very true:
+
+> Debuggable code isn’t necessarily clean, and code that’s littered with checks or error handling rarely makes for pleasant reading.
+
+I think one interpretation of “easy to debug” is “every single time there’s an error, the program reports to you exactly what happened in an easy to understand way”. Whenever my program has a problem and says something like “error: failure to connect to SOME_IP port 443: connection timeout” I’m like THANK YOU THAT IS THE KIND OF THING I WANTED TO KNOW and I can check if I need to fix a firewall thing or if I got the wrong IP for some reason or what.
+
+One simple example of this recently: I was making a request to a server I wrote and the response I got was “upstream connect error or disconnect/reset before headers”. This is an nginx error which basically in this case boiled down to “your program crashed before it sent anything in response to the request”.
Figuring out the cause of the crash was pretty easy, but having better error handling (returning an error instead of crashing) would have saved me a little time because instead of having to go check the cause of the crash, I could have just read the error message and figured out what was going on right away.
+
+### error messages are better than silently failing
+
+To get closer to the dream of “every single time there’s an error, the program reports to you exactly what happened in an easy to understand way” you also need to be disciplined about immediately returning an error message instead of silently writing incorrect data / passing a nonsense value to another function which will do WHO KNOWS WHAT with it and cause you a gigantic headache. This means adding code like this:
+
+```
+if UNEXPECTED_THING:
+    raise ValueError("oh no THING happened")  # raise a real exception, loudly, instead of continuing with bad data
+```
+
+This isn’t easy to get right (it’s not always obvious where you should be raising errors!) but it really helps a lot.
+
+### failure: print out a stack of errors, not just one error.
+
+Related to returning helpful errors that make it easy to debug: Rust has a really incredible error handling library [called failure][17] which basically lets you return a chain of errors instead of just one error, so you can print out a stack of errors like:
+
+```
+"error starting server process" caused by
+"error initializing logging backend" caused by
+"connection failure: timeout connecting to 1.2.3.4 port 1234".
+```
+
+This is SO MUCH MORE useful than just `connection failure: timeout connecting to 1.2.3.4 port 1234` by itself because it tells you the significance of 1.2.3.4 (it’s something to do with the logging backend!). And I think it’s also more useful than `connection failure: timeout connecting to 1.2.3.4 port 1234` with a stack trace, because it summarizes at a high level the parts that went wrong instead of making you read all the lines in the stack trace (some of which might not be relevant!).
+
+tools like this in other languages:
+
+ * Go: the idiom to do this seems to be to just concatenate your stack of errors together as a big string so you get “error: thing one: error: thing two: error: thing three” which works okay but is definitely a lot less structured than `failure`’s system
+ * Java: I hear you can give exceptions causes but haven’t used that myself
+ * Python 3: you can use `raise ... from` which sets the `__cause__` attribute on the exception and then your exceptions will be separated by `The above exception was the direct cause of the following exception:`
+
+
+
+If you know how to do this in other languages I’d be interested to hear!
+
+### understand what the error messages mean
+
+One sub debugging skill that I take for granted a lot of the time is understanding what error messages mean! I came across this nice graphic explaining [common Python errors and what they mean][18], which breaks down things like `NameError`, `IOError`, etc.
+
+I think a reason interpreting error messages is hard is that understanding a new error message might mean learning a new concept – `NameError` can mean “Your code uses a variable outside the scope where it’s defined”, but to really understand that you need to understand what variable scope is! I ran into this a lot when learning Rust – the Rust compiler would be like “you have a weird lifetime error” and I’d be like “ugh ok Rust I get it I will go actually learn about how lifetimes work now!”.
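+
+To make that scope point concrete, here’s a tiny made-up Python example (mine, not from the linked graphic): the `NameError` below only makes sense once you know that a variable defined inside a function stops existing when the function returns.
+
+```
+def setup():
+    result = 42  # result is local to setup(): the name stops existing when setup() returns
+    return result
+
+setup()        # the return value is thrown away here
+print(result)  # NameError: name 'result' is not defined
+```
+
+The fix isn’t to “define `result` harder” – it’s to actually capture the return value (`result = setup()`), which is exactly the kind of concept-behind-the-message this section is about.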
+ +And a lot of the time error messages are caused by a problem very different from the text of the message, like how “upstream connect error or disconnect/reset before headers” might mean “julia, your server crashed!“. The skill of understanding what error messages mean is often not transferable when you switch to a new area (if I started writing a lot of React or something tomorrow, I would probably have no idea what any of the error messages meant!). So this definitely isn’t just an issue for beginner programmers. + +### that’s all for now! + +I feel like the big thing I’m missing when talking about debugging skills is a stronger understanding of where people get stuck with debugging – it’s easy to say “well, you need to reproduce the problem, then make a more minimal reproduction, then start coming up with guesses and verifying them, and improve your mental model of the system, and then figure it out, then fix the problem and hopefully write a test to make it not come back”, but – where are people actually getting stuck in practice? What are the hardest parts? I have some sense of what the hardest parts usually are for me but I’m still not sure what the hardest parts usually are for someone newer to debugging their code. + +-------------------------------------------------------------------------------- + +via: https://jvns.ca/blog/2019/06/23/a-few-debugging-resources/ + +作者:[Julia Evans][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://jvns.ca/ +[b]: https://github.com/lujun9972 +[1]: https://twitter.com/b0rk/status/1142825259546140673 +[2]: http://debuggingrules.com/ +[3]: http://debuggingrules.com/?page_id=40 +[4]: https://blog.regehr.org/archives/199 +[5]: https://blog.regehr.org/archives/849 +[6]: https://www.seleniumhq.org/ +[7]: https://twitter.com/AnnieTheObscure/status/1142843984642899968 +[8]: https://twitter.com/act_gardner/status/1142838587437830144 +[9]: https://twitter.com/b0rk/status/1142812831420768257 +[10]: https://twitter.com/cocoaphony/status/1142847665690030080 +[11]: https://twitter.com/AnnieTheObscure/status/1142842421954244608 +[12]: https://qnoid.com/2013/06/08/Sound-Debugging.html +[13]: https://twitter.com/wombatnation/status/1142887843963867136 +[14]: https://twitter.com/irvingreid/status/1142887472441040896 +[15]: http://peep.sourceforge.net/intro.html +[16]: https://programmingisterrible.com/post/173883533613/code-to-debug +[17]: https://github.com/rust-lang-nursery/failure +[18]: https://pythonforbiologists.com/29-common-beginner-errors-on-one-page/ From b956f1e53ca3f88f26889aaeef20d00288251485 Mon Sep 17 00:00:00 2001 From: Xingyu Wang Date: Tue, 17 Sep 2019 12:23:48 +0800 Subject: [PATCH 116/202] TSL&PRF --- ...ive, open source in Hollywood,-and more.md | 103 ------------------ ...ive, open source in Hollywood,-and more.md | 103 ++++++++++++++++++ 2 files changed, 103 insertions(+), 103 deletions(-) delete mode 100644 sources/news/20190915 Sandboxie-s path to-open source, update on the Pentagon-s open source initiative, open source in Hollywood,-and more.md create mode 100644 translated/news/20190915 Sandboxie-s path to-open source, update on the Pentagon-s open source initiative, open source in Hollywood,-and more.md diff --git a/sources/news/20190915 Sandboxie-s path to-open source, update on the Pentagon-s open source initiative, open source in Hollywood,-and more.md b/sources/news/20190915 
Sandboxie-s path to-open source, update on the Pentagon-s open source initiative, open source in Hollywood,-and more.md deleted file mode 100644 index fc01b9d200..0000000000 --- a/sources/news/20190915 Sandboxie-s path to-open source, update on the Pentagon-s open source initiative, open source in Hollywood,-and more.md +++ /dev/null @@ -1,103 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: (wxy) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Sandboxie's path to open source, update on the Pentagon's open source initiative, open source in Hollywood, and more) -[#]: via: (https://opensource.com/article/19/9/news-september-15) -[#]: author: (Lauren Maffeo https://opensource.com/users/lmaffeo) - -Sandboxie's path to open source, update on the Pentagon's open source initiative, open source in Hollywood, and more -====== -Catch up on the biggest open source headlines from the past two weeks. -![Weekly news roundup with TV][1] - -In this edition of our open source news roundup, Sandboxie's path to open source, update on the Pentagon's adoption of open source, open source in Hollywood, and more! - -### Sandboxie becomes freeware on its way to open source - -Sophos Group plc, a British security company, released a [free version of its popular Sandboxie tool][2], used as an isolated operating environment for Windows ([downloadable here][2]). - -Sophos said that since Sandboxie isn't a core aspect of its business, the easier decision would've been to shut it down. But Sandboxie has [earned a reputation][3] for letting users run unknown software in a safe environment without risking their systems, so the team is putting in the additional work to release it as open source software. This intermediate phase of free-but-not-open-source appears to be related to the current system design, which requires an activation key: - -> Sandboxie currently uses a license key to activate and grant access to premium features only available to paid customers (as opposed to those using a free version). We have modified the code and have released an updated free version that does not restrict any features. In other words, the new free license will have access to all the features previously only available to paid customers. - -Citing this tool's community impact, senior leaders at Sophos announced the release of Sandboxie version 5.31.4–an unrestricted version of the program–will remain free until the tool is fully open sourced. - -"The Sandboxie user base represents some of the most passionate, forward thinking, and knowledgeable members of the security community and we didn’t want to let you down," [Sophos' blog post read][4]. "After thoughtful consideration we decided that the best way to keep Sandboxie going was to give it back to its users -- transitioning it to an open source tool." - -### The Pentagon doesn't meet White House mandate for more open source software - -In 2016, the White House mandated that each government agency had to open source at least 20 percent of its custom software within three years. There is an [interesting article][5] about this initiative from 2017 that laid out some of the excitement and challenges. - -According to the Government Accountability Office, [the Pentagon's not even halfway there][6]. - -In an article for Nextgov, Jack Corrigan wrote that as of July 2019, the Pentagon had released just 10 percent of its code as open source. 
They've also not yet implemented other aspects of the White House mandate, including the directive to build an open source software policy and inventories of custom code. - -According to the report, some government officials told the GAO that they worry about security risks of sharing code across government departments. They also admitted to not creating metrics that could measure their open source efforts' successes. The Pentagon's Chief Technology Officer cited the Pentagon's size as the reason for not implementing the White House's open source mandate. In a report published Tuesday, the GAO said, “Until [the Defense Department] fully implements its pilot program and establishes milestones for completing the OMB requirements, the department will not be positioned to take advantage of significant cost savings and efficiencies." - -### A team of volunteers works to find and digitize copyright-free books - -All books published in the U.S. before 1924 are [publicly owned and can be freely used/copied][7]. Books published in and after 1964 will stay under copyright for 95 years after their publication dates. But thanks to a copyright loophole, up to 75 percent of books published between 1923 and 1964 are free to read and copy. The time-consuming trick is confirming which books those are. - -So, a group of libraries, volunteers, and archivists have united to learn which books are copyright-free, then digitize and upload them to the Internet. Since renewal records were already digitized, it's been easy to tell if books published between 1923 and 1964 had their copyrights renewed. But looking for a lack of copyright renewal is much harder since you're trying to prove a negative. - -Participants include the New York Public Library, [which recently explained][8] why the time-consuming project is worthwhile. To help find more books faster, the NYPL converted many records to XML format. This makes it easier to automate the process of finding which books can be added to the public domain.  - -### Hollywood's Academy Software Foundation gains new members - -Microsoft and Apple announced plans to contribute at the premier membership level of the ASF. They'll join [founding board members][9] including Netflix, Google Cloud, Disney Studios, and Sony Pictures. - -The Academy Software Foundation launched in 2018 as a joint project of the [Academy of Motion Picture Arts and Sciences][10] and the [Linux Foundation][11]. - -> The mission of the Academy Software Foundation (ASWF) is to increase the quality and quantity of contributions to the content creation industry’s open source software base; to provide a neutral forum to coordinate cross-project efforts; to provide a common build and test infrastructure; and to provide individuals and organizations a clear path to participation in advancing our open source ecosystem. - -Within its first year, the Foundation built [OpenTimelineIO][12], an open source API and interchange format that helps studio teams collaborate across departments. OpenTImelineIO was formally accepted by [the Foundation's Technical Advisory Council][13] as its fifth hosted project last July. They now maintain it alongside [OpenColorIO][14], [OpenCue][15], [OpenEXR][16], and [OpenVDB][17]. 
- -#### In other news - - * [Comcast puts open source networking software into production][18] - * [SD Times open source project of the week: Ballerina][19] - * [DOD struggles to implement open source pilots][20] - * [Kong open sources universal service mesh Kuma][21] - * [Eclipse unveils Jakarta EE 8][22] - - - -_Thanks, as always, to Opensource.com staff members and moderators for their help this week._ - --------------------------------------------------------------------------------- - -via: https://opensource.com/article/19/9/news-september-15 - -作者:[Lauren Maffeo][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://opensource.com/users/lmaffeo -[b]: https://github.com/lujun9972 -[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/weekly_news_roundup_tv.png?itok=B6PM4S1i (Weekly news roundup with TV) -[2]: https://www.sandboxie.com/DownloadSandboxie -[3]: https://betanews.com/2019/09/13/sandboxie-free-open-source/ -[4]: https://community.sophos.com/products/sandboxie/f/forum/115109/major-sandboxie-news-sandboxie-is-now-a-free-tool-with-plans-to-transition-it-to-an-open-source-tool/414522 -[5]: https://medium.com/@DefenseDigitalService/code-mil-an-open-source-initiative-at-the-pentagon-5ae4986b79bc -[6]: https://www.nextgov.com/analytics-data/2019/09/pentagon-needs-make-more-software-open-source-watchdog-says/159832/ -[7]: https://www.vice.com/en_us/article/a3534j/libraries-and-archivists-are-scanning-and-uploading-books-that-are-secretly-in-the-public-domain -[8]: https://www.nypl.org/blog/2019/09/01/historical-copyright-records-transparency -[9]: https://variety.com/2019/digital/news/microsoft-apple-academy-software-foundation-1203334675/ -[10]: https://www.oscars.org/ -[11]: http://www.linuxfoundation.org/ -[12]: https://github.com/PixarAnimationStudios/OpenTimelineIO -[13]: https://www.linuxfoundation.org/press-release/2019/07/opentimelineio-joins-aswf/ -[14]: https://opencolorio.org/ -[15]: https://www.opencue.io/ -[16]: https://www.openexr.com/ -[17]: https://www.openvdb.org/ -[18]: https://www.fiercetelecom.com/operators/comcast-puts-open-source-networking-software-into-production -[19]: https://sdtimes.com/os/sd-times-open-source-project-of-the-week-ballerina/ -[20]: https://www.fedscoop.com/open-source-software-dod-struggles/ -[21]: https://sdtimes.com/micro/kong-open-sources-universal-service-mesh-kuma/ -[22]: https://devclass.com/2019/09/11/hey-were-open-source-again-eclipse-unveils-jakarta-ee-8/ diff --git a/translated/news/20190915 Sandboxie-s path to-open source, update on the Pentagon-s open source initiative, open source in Hollywood,-and more.md b/translated/news/20190915 Sandboxie-s path to-open source, update on the Pentagon-s open source initiative, open source in Hollywood,-and more.md new file mode 100644 index 0000000000..02597d190f --- /dev/null +++ b/translated/news/20190915 Sandboxie-s path to-open source, update on the Pentagon-s open source initiative, open source in Hollywood,-and more.md @@ -0,0 +1,103 @@ +[#]: collector: (lujun9972) +[#]: translator: (wxy) +[#]: reviewer: (wxy) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (Sandboxie's path to open source, update on the Pentagon's open source initiative, open source in Hollywood, and more) +[#]: via: (https://opensource.com/article/19/9/news-september-15) +[#]: author: (Lauren Maffeo 
https://opensource.com/users/lmaffeo) + +开源新闻综述:五角大楼、好莱坞和 Sandboxie 的开源 +====== + +> 不要错过两周以来最大的开源头条新闻。 + +![Weekly news roundup with TV][1] + +在本期我们的开源新闻综述中有 Sandboxie 的开源之路、五角大楼开源计划的进一步变化、好莱坞开源等等! + +### 五角大楼不符合白宫对开源软件的要求 + +2016 年,美国白宫要求每个美国政府机构必须在三年内开放至少 20% 的定制软件。2017 年有一篇关于这一倡议的[有趣文章][5],其中列出了一些令人激动的事情和面临的挑战。 + +根据美国政府问责局(GAO)的说法,[美国五角大楼做的还远远不足][6]。 + +在一篇关于 Nextgov 的文章中,Jack Corrigan 写道,截至 2019 年 7 月,美国五角大楼仅发布了 10% 的代码为开源代码。他们还没有实施的其它白宫任务包括要求制定开源软件政策和定制代码的清单。 + +根据该报告,一些美国政府官员告诉 GAO,他们担心美国政府部门间共享代码的安全风险。他们还承认没有创建衡量开源工作成功的指标。美国五角大楼的首席技术官将五角大楼的规模列为不执行白宫的开源任务的原因。在周二发布的一份报告中,GAO 表示,“在(美国国防部)完全实施其试点计划并确定完成行政管理和预算局(OMB)要求的里程碑之前,该部门将无法达成显著的成本节约和效率的目的。” + +### Sandboxie 在开源的过程中变成了免费软件 + +一家英国安全公司 Sophos Group plc 发布了[其流行的 Sandboxie 工具的免费版本][2],它用作Windows 的隔离操作环境([可在此下载][2])。 + +Sophos 表示,由于 Sandboxie 不是其业务的核心,因此更容易做出的决定就是关闭它。但 Sandboxie 因为无需让用户的操作系统冒风险就可以在安全的环境中运行未知软件而[广受赞誉][3],因此该团队正在投入额外的工作将其作为开源软件发布。这个免费但非开源的中间阶段似乎与当前的系统设计有关,因为它需要激活密钥: + +> Sandboxie 目前使用许可证密钥来激活和授予仅针对付费客户开放的高级功能的访问权限(与使用免费版本的用户相比)。我们修改了代码,并发布了一个不限制任何功能的免费版本的更新版。换句话说,新的免费许可证将可以访问之前仅供付费客户使用的所有功能。 + +受此工具的社区影响,Sophos 的高级领导人宣布发布 Sandboxie 版本 5.31.4,这个不受限制的程序版本将保持免费,直到该工具完全开源。 + +> “Sandboxie 用户群代表了一些最热情、前瞻性和知识渊博的安全社区成员,我们不想让你失望,”[Sophos 的博文说到][4]。“经过深思熟虑后,我们认为让 Sandboxie 走下去的最佳方式是将其交还给用户,将其转换为开源工具。” + +### 志愿者团队致力于查找和数字化无版权书籍 + +1924 年以前在美国出版的所有书籍都是[公有的、可以自由使用/复制的][7]。1964 年及之后出版的图书在出版日期后将保留 95 年的版权。但由于版权漏洞,1923 年至 1964 年间出版的书籍中有高达 75% 可以免费阅读和复制。现在只需要耗时确认那些书是什么。 + +因此,一些图书馆、志愿者和档案管理员们联合起来了解哪些图书没有版权,然后将其数字化并上传到互联网。由于版权续约记录已经数字化,因此很容易判断 1923 年至 1964 年间出版的书籍是否更新了其版权。但是,由于试图提供的是反证,因此寻找缺乏版权更新的难度要大得多。 + +参与者包括纽约公共图书馆(NYPL),它[最近解释了][8]为什么这个耗时的项目是值得的。为了帮助更快地找到更多书籍,NYPL 将许多记录转换为 XML 格式。这样可以更轻松地自动执行查找可以将哪些书籍添加到公共域的过程。 + +### 好莱坞的学院软件基金会获得新成员 + +微软和苹果公司宣布计划以学院软件基金会Academy Software Foundation(ASWF)的高级会员做出贡献。他们将加入[创始董事会成员][9],其它成员还包括 Netflix、Google Cloud、Disney Studios 和 Sony Pictures。 + +学院软件基金会于 2018 年作为[电影艺术与科学学院][10]和[Linux 基金会][11]的联合项目而启动。 + +> 学院软件基金会(ASWF)的使命是提高贡献到内容创作行业的开源软件库的质量和数量;提供一个中立的论坛来协调跨项目的工作;提供通用的构建和测试基础架构;并为个人和组织提供参与推进我们的开源生态系统的明确途径。 + +在第一年内,该基金会构建了 [OpenTimelineIO][12],这是一种开源 API 和交换格式,可帮助工作室团队跨部门协作。OpenTImelineIO 被该[基金会技术咨询委员会][13]去年 7 月正式接受为第五个托管项目。他们现在将它与 [OpenColorIO][14]、[OpenCue][15]、[OpenEXR][16] 和 [OpenVDB] [17] 并列维护。 + +### 其它新闻 + +* [Comcast 将开源网络软件投入生产环境][18] +* [SD Times 本周开源项目:Ballerina][19] +* [美国国防部努力实施开源计划][20] +* [Kong 开源通用服务网格 Kuma][21] +* [Eclipse 推出 Jakarta EE 8][22] + +一如既往地感谢 Opensource.com 的工作人员和主持人本周的帮助。 + +-------------------------------------------------------------------------------- + +via: https://opensource.com/article/19/9/news-september-15 + +作者:[Lauren Maffeo][a] +选题:[lujun9972][b] +译者:[wxy](https://github.com/wxy) +校对:[wxy](https://github.com/wxy) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensource.com/users/lmaffeo +[b]: https://github.com/lujun9972 +[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/weekly_news_roundup_tv.png?itok=B6PM4S1i (Weekly news roundup with TV) +[2]: https://www.sandboxie.com/DownloadSandboxie +[3]: https://betanews.com/2019/09/13/sandboxie-free-open-source/ +[4]: https://community.sophos.com/products/sandboxie/f/forum/115109/major-sandboxie-news-sandboxie-is-now-a-free-tool-with-plans-to-transition-it-to-an-open-source-tool/414522 +[5]: https://medium.com/@DefenseDigitalService/code-mil-an-open-source-initiative-at-the-pentagon-5ae4986b79bc +[6]: 
https://www.nextgov.com/analytics-data/2019/09/pentagon-needs-make-more-software-open-source-watchdog-says/159832/ +[7]: https://www.vice.com/en_us/article/a3534j/libraries-and-archivists-are-scanning-and-uploading-books-that-are-secretly-in-the-public-domain +[8]: https://www.nypl.org/blog/2019/09/01/historical-copyright-records-transparency +[9]: https://variety.com/2019/digital/news/microsoft-apple-academy-software-foundation-1203334675/ +[10]: https://www.oscars.org/ +[11]: http://www.linuxfoundation.org/ +[12]: https://github.com/PixarAnimationStudios/OpenTimelineIO +[13]: https://www.linuxfoundation.org/press-release/2019/07/opentimelineio-joins-aswf/ +[14]: https://opencolorio.org/ +[15]: https://www.opencue.io/ +[16]: https://www.openexr.com/ +[17]: https://www.openvdb.org/ +[18]: https://www.fiercetelecom.com/operators/comcast-puts-open-source-networking-software-into-production +[19]: https://sdtimes.com/os/sd-times-open-source-project-of-the-week-ballerina/ +[20]: https://www.fedscoop.com/open-source-software-dod-struggles/ +[21]: https://sdtimes.com/micro/kong-open-sources-universal-service-mesh-kuma/ +[22]: https://devclass.com/2019/09/11/hey-were-open-source-again-eclipse-unveils-jakarta-ee-8/ From 561d727f2edfb635aafcf2d29f8356f1e6c9ef67 Mon Sep 17 00:00:00 2001 From: DarkSun Date: Tue, 17 Sep 2019 12:23:57 +0800 Subject: [PATCH 117/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190326=20Why=20?= =?UTF-8?q?are=20monoidal=20categories=20interesting=3F?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/tech/20190326 Why are monoidal categories interesting.md --- ...Why are monoidal categories interesting.md | 134 ++++++++++++++++++ 1 file changed, 134 insertions(+) create mode 100644 sources/tech/20190326 Why are monoidal categories interesting.md diff --git a/sources/tech/20190326 Why are monoidal categories interesting.md b/sources/tech/20190326 Why are monoidal categories interesting.md new file mode 100644 index 0000000000..37aaef753a --- /dev/null +++ b/sources/tech/20190326 Why are monoidal categories interesting.md @@ -0,0 +1,134 @@ +[#]: collector: (lujun9972) +[#]: translator: ( ) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (Why are monoidal categories interesting?) +[#]: via: (https://jvns.ca/blog/2019/03/26/what-are-monoidal-categories/) +[#]: author: (Julia Evans https://jvns.ca/) + +Why are monoidal categories interesting? +====== + +Hello! Someone on Twitter asked a question about tensor categories recently and I remembered “oh, I know something about that!! These are a cool thing!“. Monoidal categories are also called “tensor categories” and I think that term feels a little more concrete: one of the biggest examples of a tensor category is the category of vector spaces with the tensor product as the way you combine vectors / functions. “Monoidal” means “has an associative binary operation with an identity”, and with vector spaces the tensor product is the “associative binary operation” it’s referring to. So I’m going to mostly use “tensor categories” in this post instead. + +So here’s a quick stab at explaining why tensor categories are cool. I’m going to make a lot of oversimplifications which I figure is better than trying to explain category theory from the ground up. I’m not a category theorist (though I spent 2 years in grad school doing a bunch of category theory) and I will almost certainly say wrong things about category theory. 
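+
+To pin down the “associative binary operation with an identity” phrasing for the vector space example (this spelling-out is mine, not from the book): with the tensor product ⊗ as the operation, the base field k plays the role of the identity, in the sense that
+
+```
+(U ⊗ V) ⊗ W  ≅  U ⊗ (V ⊗ W)   -- combining spaces is associative (up to isomorphism)
+k ⊗ V  ≅  V  ≅  V ⊗ k         -- tensoring with k changes nothing, so k is the identity
+```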
+
+In this post I’m going to try to talk about [Seven Sketches in Compositionality: An Invitation to Applied Category Theory][1] using mostly plain English.
+
+### tensor categories aren’t monads
+
+If you have been around functional programming for a bit, you might see the word “monoid” and “categories” and wonder “oh, is julia writing about monads, like in Haskell”? I am not!!
+
+There is a sentence “monads are a monoid in the category of endofunctors” which includes both the word “monoid” and “category” but that is not what I am talking about at all. We’re not going to talk about types or Haskell or monads or anything.
+
+### tensor categories are about proving (or defining) things with pictures
+
+Here’s what I think is a really nice example from this [“seven sketches in compositionality”][1] PDF (on page 47):
+
+![][2]
+
+The idea here is that you have 3 inequalities
+
+ 1. `t <= v + w`
+ 2. `w + u <= x + z`
+ 3. `v + x <= y`,
+
+
+
+and you want to prove that `t + u <= y + z`.
+
+You can do this algebraically pretty easily.
+
+But in this diagram they’ve done something really different! They’ve sort of drawn the inequalities as boxes with lines coming out of them for each variable, and then you can see that you end up with a `t` and a `u` on the left and a `y` and a `z` on the right, and so maybe that means that `t + u <= y + z`.
+
+The first time I saw something like this in a math class I felt like – what? what is happening? you can’t just draw PICTURES to prove things?!! And of course you can’t _just_ draw pictures to prove things.
+
+What’s actually happening in pictures like this is that when you put 2 things next to each other in the picture (like `t` and `u`), that actually represents the “tensor product” of `t` and `u`. In this case the “tensor product” is defined to be addition. And the tensor product (addition in this case) has some special properties –
+
+ 1. it’s associative
+ 2. if `a <= b` and `c <= d` then `a + c <= b + d`
+
+
+
+so saying that this picture proves that `t + u <= y + z` **actually** means that you can read a proof off the diagram in a straightforward way:
+
+```
+  t + u
+<= (v + w) + u
+= v + (w + u)
+<= v + (x + z)
+= (v + x) + z
+<= y + z
+```
+
+So all the things that “look like they would work” according to the picture actually do work in practice because our tensor product thing is associative and because addition works nicely with the `<=` relationship. The book explains all this in a lot more detail.
+
+### draw vector spaces with “string diagrams”
+
+Proving this simple inequality is kind of boring though! We want to do something more interesting, so let’s talk about vector spaces! Here’s a diagram that includes some vector spaces (U1, U2, V1, V2) and some functions (f,g) between them.
+
+![][3]
+
+Again, here what it means to have U1 stacked on top of U2 is that we’re taking a tensor product of U1 and U2. And the tensor product is associative, so there’s no ambiguity if we stack 3 or 4 vector spaces together!
+
+This is all explained in a lot more detail in this nice blog post called [introduction to string diagrams][4] (which I took that picture from).
+
+### define the trace of a matrix with a picture
+
+So far this is pretty boring! But in a [follow up blog post][5], they talk about something more outrageous: you can (using vector space duality) take the lines in one of these diagrams and move them **backwards** and make loops.
So that lets us define the trace of a function `f : V -> V` like this:
+
+![][6]
+
+This is a really outrageous thing! We’ve said, hey, we have a function and we want to get a number in return right? Okay, let’s just… draw a circle around it so that there are no lines left coming out of it, and then that will be a number! That seems a lot more natural and prettier than the usual way of defining the trace of a matrix (“sum up the numbers on the diagonal”)!
+
+When I first saw this I thought it was super cool that just drawing a circle is actually a legitimate way of defining a mathematical concept!
+
+### how are tensor category diagrams different from regular category theory diagrams?
+
+If you see “tensor categories let you prove things with pictures” you might think “well, the whole point of category theory is to prove things with pictures, so what?”. I think there are a few things that are different in tensor category diagrams:
+
+ 1. with string diagrams, the lines are objects and the boxes are functions, which is the opposite of how usual category theory diagrams are
+ 2. putting things next to each other in the diagram has a specific meaning (“take the tensor product of those 2 things”) whereas in usual category theory diagrams it doesn’t. being able to combine things in this way is powerful!
+ 3. half circles have a specific meaning (“take the dual”)
+ 4. you can use specific elements of an object (eg a vector space) in a diagram, which usually you wouldn’t do in a category theory diagram (the objects would be the whole vector space, not one element of that vector space)
+
+
+
+### what does this have to do with programming?
+
+Even though this is usually a programming blog I don’t know whether this particular thing really has anything to do with programming, I just remembered I thought it was cool. I wrote my [master’s thesis][7] (which I will link to even though it’s not very readable) on topological quantum computing which involves a bunch of monoidal categories.
+
+Some of the diagrams in this post are sort of why I got interested in that area in the first place – I thought it was really cool that you could formally define / prove things with pictures. And useful things, like the trace of a matrix!
+
+### edit: some ways this might be related to programming
+
+Someone pointed me to a couple of twitter threads (coincidentally from this week!!) that relate tensor categories & diagrammatic methods to programming:
+
+ 1. [this thread from @KenScambler][8] (“My best kept secret* is that string & wiring diagrams–plucked straight out of applied category theory–are _fabulous_ for software and system design.”)
+ 2. 
[this other thread by him of 31 interesting related things to this topic][9] + + + +-------------------------------------------------------------------------------- + +via: https://jvns.ca/blog/2019/03/26/what-are-monoidal-categories/ + +作者:[Julia Evans][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://jvns.ca/ +[b]: https://github.com/lujun9972 +[1]: https://arxiv.org/pdf/1803.05316.pdf +[2]: https://jvns.ca/images/monoidal-preorder.png +[3]: https://jvns.ca/images/tensor-vector.png +[4]: https://qchu.wordpress.com/2012/11/05/introduction-to-string-diagrams/ +[5]: https://qchu.wordpress.com/2012/11/06/string-diagrams-duality-and-trace/ +[6]: https://jvns.ca/images/trace.png +[7]: https://github.com/jvns/masters-thesis/raw/master/thesis.pdf +[8]: https://twitter.com/KenScambler/status/1108738366529400832 +[9]: https://twitter.com/KenScambler/status/1109474342822244353 From e5b55ea914f176311e006bd5567c960f873bf21c Mon Sep 17 00:00:00 2001 From: DarkSun Date: Tue, 17 Sep 2019 12:24:08 +0800 Subject: [PATCH 118/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190315=20New=20?= =?UTF-8?q?zine:=20Bite=20Size=20Networking!?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/tech/20190315 New zine- Bite Size Networking.md --- ...20190315 New zine- Bite Size Networking.md | 100 ++++++++++++++++++ 1 file changed, 100 insertions(+) create mode 100644 sources/tech/20190315 New zine- Bite Size Networking.md diff --git a/sources/tech/20190315 New zine- Bite Size Networking.md b/sources/tech/20190315 New zine- Bite Size Networking.md new file mode 100644 index 0000000000..cd47c5619a --- /dev/null +++ b/sources/tech/20190315 New zine- Bite Size Networking.md @@ -0,0 +1,100 @@ +[#]: collector: (lujun9972) +[#]: translator: ( ) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (New zine: Bite Size Networking!) +[#]: via: (https://jvns.ca/blog/2019/03/15/new-zine--bite-size-networking-/) +[#]: author: (Julia Evans https://jvns.ca/) + +New zine: Bite Size Networking! +====== + +Last week I released a new zine: Bite Size Networking! It’s the third zine in the “bite size” series: + + 1. [Bite Size Linux][1] + 2. [Bite Size Command Line][2] + 3. [Bite Size Networking][3] + + + +You can get it for $10 at ! (or $150/$250/$600 for the corporate rate). + +Here’s the cover and table of contents! + +[![][4]][5] + +A few people have asked for a 3-pack with all 3 “bite size” zines which is coming soon! + +### why this zine? + +In last few years I’ve been doing a lot of networking at work, and along the way I’ve gone from “uh, what even is tcpdump” to “yes I can just type in `sudo tcpdump -c 200 -n port 443 -i lo`” without even thinking twice about it. As usual this zine is the resource I wish I had 4 years ago. There are so many things it took me a long time to figure out how to do like: + + * inspect SSL certificates + * make DNS queries + * figure out what server is using that port + * find out whether the firewall is causing you problems or not + * capture / search network traffic on a machine + + + +and as often happens with computers none of them are really that hard!! 
But the man pages for the tools you need to do these things are Very Long and as usual don’t differentiate between “everybody always uses this option and you 10000% need to know it” and “you will never use this option it does not matter”. So I spent a long time staring sadly at the tcpdump man page.
+
+the pitch for this zine is:
+
+> It’s Thursday afternoon and your users are reporting SSL errors in production and you don’t know why. Or an HTTP header isn’t being set correctly and it’s breaking the site. Or you just got a notification that your site’s SSL certificate is expiring in 2 days. Or you need to update DNS to point to a new server. Or a server suddenly isn’t able to connect to a service. And networking maybe isn’t your full time job, but you still need to get the problem fixed.
+
+Kamal (my partner) proofreads all my zines and we hit an exciting milestone with this one: this is the first zine where he was like “wow, I really did not know a lot of the stuff in this zine”. This is of course because I’ve spent a lot more time than him debugging weird networking things, and when you practice something you get better at it :)
+
+### a couple of example pages
+
+Here are a couple of example pages, to give you an idea of what’s in the zine:
+
+![][6] ![][7]
+
+### next thing to get better at: getting feedback!
+
+One thing I’ve realized is that while I get a ton of help from people while writing these zines (I read probably a thousand tweets from people suggesting ideas for things to include in the zine), I don’t get as much feedback from people about the final product as I’d like!
+
+I often hear positive things (“I love them!”, “thank you so much!”, “this helped me in my job!”) but I’d really love to hear more about which bits specifically helped the most and what didn’t make as much sense or what you would have liked to see more of. So I’ll probably be asking a few questions about that to people who buy this zine!
+
+### selling zines is going well
+
+When I made the switch about a year ago from “every zine I release is free” to “the old zines are free but all the new ones are not free” it felt scary! It’s been startlingly totally fine and a very positive thing. Sales have been really good, people take the work more seriously, I can spend more time on them, and I think the quality has gone up.
+
+And I’ve been doing occasional [giveaways][8] for people who can’t afford a $10 zine, which feels like a nice way to handle “some people legitimately can’t afford $10 and I would like to get them information too”.
+
+### what’s next?
+
+I’m not sure yet! A few options:
+
+ * kubernetes
 * more about linux concepts (bite size linux part II)
 * how to do statistics using simulations
 * something else! 
+
+
+
+We’ll see what I feel most inspired by :)
+
+--------------------------------------------------------------------------------
+
+via: https://jvns.ca/blog/2019/03/15/new-zine--bite-size-networking-/
+
+作者:[Julia Evans][a]
+选题:[lujun9972][b]
+译者:[译者ID](https://github.com/译者ID)
+校对:[校对者ID](https://github.com/校对者ID)
+
+本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
+
+[a]: https://jvns.ca/
+[b]: https://github.com/lujun9972
+[1]: https://wizardzines.com/zines/bite-size-linux/
+[2]: https://wizardzines.com/zines/bite-size-command-line/
+[3]: https://wizardzines.com/zines/bite-size-networking/
+[4]: https://jvns.ca/images/bite-size-networking-cover.png
+[5]: https://gum.co/bite-size-networking
+[6]: https://jvns.ca/images/ngrep.png
+[7]: https://jvns.ca/images/ping.png
+[8]: https://twitter.com/b0rk/status/1104368319816220674

From 25e1a3b1448e60df875426078cda1e7ac0c2c3de Mon Sep 17 00:00:00 2001
From: DarkSun 
Date: Tue, 17 Sep 2019 12:24:18 +0800
Subject: [PATCH 119/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190217=20Organi?=
 =?UTF-8?q?zing=20this=20blog=20into=20categories?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

sources/tech/20190217 Organizing this blog into categories.md

---
 ...17 Organizing this blog into categories.md | 155 ++++++++++++++++++
 1 file changed, 155 insertions(+)
 create mode 100644 sources/tech/20190217 Organizing this blog into categories.md

diff --git a/sources/tech/20190217 Organizing this blog into categories.md b/sources/tech/20190217 Organizing this blog into categories.md
new file mode 100644
index 0000000000..e8a03f1bdd
--- /dev/null
+++ b/sources/tech/20190217 Organizing this blog into categories.md
@@ -0,0 +1,155 @@
+[#]: collector: (lujun9972)
+[#]: translator: ( )
+[#]: reviewer: ( )
+[#]: publisher: ( )
+[#]: url: ( )
+[#]: subject: (Organizing this blog into categories)
+[#]: via: (https://jvns.ca/blog/2019/02/17/organizing-this-blog-into-categories/)
+[#]: author: (Julia Evans https://jvns.ca/)
+
+Organizing this blog into categories
+======
+
+Today I organized the front page of this blog ([jvns.ca][1]) into CATEGORIES! Now it is actually possible to make some sense of what is on here!! There are 28 categories (computer networking! learning! “how things work”! career stuff! many more!). I am so excited about this.
+
+How it works: Every post is in only 1 category. Obviously the categories aren’t “perfect” (there is a “how things work” category and a “kubernetes” category and a “networking” category, and so for a post like “how container networking works in kubernetes” I need to just pick one) but I think it’s really nice and I’m hoping that it’ll make the blog easier for folks to navigate.
+
+If you’re interested in more of the story of how I’m thinking about this: I’ve been a little dissatisfied for a long time with how this blog is organized. Here’s where I started, in 2013, with a pretty classic blog layout (this is Octopress, which was a Jekyll Wordpress-lookalike theme that was cool back then and which served me very well for a long time):
+
+![][2]
+
+### problem with “show the 5 most recent posts”: you don’t know what the person’s writing is about!
+
+This is a super common way to organize a blog: on the homepage of your blog, you display maybe the 5 most recent posts, and then maybe have a “previous” link.
+
+The thing I find tricky about this (as a blog reader) is that
+
+ 1. it’s hard to hunt through their back catalog to find cool things they’ve written
 2. 
it’s SO HARD to get an overall sense for the body of a person’s work by reading 1 blog post at a time
+
+
+
+### next attempt: show every post in chronological order
+
+My next attempt at blog organization was to show every post on the homepage in chronological order. This was inspired by [Dan Luu’s blog][3], which takes a super minimal approach. I switched to this (according to the internet archive) sometime in early 2016. Here’s what it looked like (with some CSS issues :))
+
+![][4]
+
+The reason I like this “show every post in chronological order” approach more is that when I discover a new blog, I like to obsessively binge read through the whole thing to see all the cool stuff the person has written. [Rachel by the bay][5] also organizes her writing this way, and when I found her blog I was like OMG WOW THIS IS AMAZING I MUST READ ALL OF THIS NOW and being able to look through all the entries quickly and start reading ones that caught my eye was SO FUN.
+
+[Will Larson’s blog][6] also has a “list of all posts” page which I find useful because it’s a good blog, and sometimes I want to refer back to something he wrote months ago and can’t remember what it was called, and being able to scan through all the titles makes it easier to do that.
+
+I was pretty happy with this and that’s how it’s been for the last 3 years.
+
+### problem: a chronological list of 390 posts still kind of sucks
+
+As of today, I have 390 posts here (360,000 words! that’s, like, four 300-page books! eep!). This is objectively a lot of writing and I would like people new to the blog to be able to navigate it and actually have some idea what’s going on.
+
+And this blog is not actually just a totally disorganized group of words! I have a lot of specific interests: I’ve written probably 30 posts about computer networking, 15ish on ML/statistics, 20ish career posts, etc. And when I write a new Kubernetes post or whatever, it’s usually at least sort of related to some ongoing train of thought I have about Kubernetes. And it’s totally obvious to _me_ what other posts that post is related to, but obviously to a new person it’s not at all clear what the trains of thought are in this blog.
+
+### solution for now: assign every post 1 (just 1) category
+
+My new plan is to assign every post a single category. I got this idea from [Itamar Turner-Trauring’s site][7].
+
+Here are the initial categories:
+
+ * Cool computer tools / features / ideas
 * Computer networking
 * How a computer thing works
 * Kubernetes / containers
 * Zines / comics
 * On writing comics / zines
 * Conferences
 * Organizing conferences
 * Businesses / marketing
 * Statistics / machine learning / data analysis
 * Year in review
 * Infrastructure / operations engineering
 * Career / work
 * Working with others / communication
 * Remote work
 * Talks transcripts / podcasts
 * On blogging / speaking
 * On learning
 * Rust
 * Linux debugging / tracing tools
 * Debugging stories
 * Fan posts about awesome work by other people
 * Inclusion
 * rbspy
 * Performance
 * Open source
 * Linux systems stuff
 * Recurse Center (my daily posts during my RC batch)
+
+
+
+I guess you can tell this is a systems-y blog because there are 8 different systems-y categories (kubernetes, infrastructure, linux debugging tools, rust, debugging stories, performance, linux systems stuff, and how a computer thing works) :).
+
+But it was nice to see that I also have this huge career / work category! 
And that category is pretty meaningful to me, it includes a lot of things that I struggled with and that were hard for me to learn. And I get to put all my machine learning posts together, which is an area I worked in for 3 years and am still super interested in and every so often learn a new thing about!
+
+### How I assign the categories: a big text file
+
+I came up with a scheme for assigning the categories that I thought was really fun! I knew immediately that coming up with categories in advance would be impossible (how was I supposed to know that “fan posts about awesome work by other people” was a substantial category?)
+
+So instead, I took kind of a Marie Kondo approach: I wrote a script to just dump all the titles of every blog post into a text file (there’s a tiny sketch of what that could look like at the end of this post), and then I just used vim to organize them roughly into similar sections. Seeing everything in one place (a la marie kondo) really helped me see the patterns and figure out what some categories were.
+
+[Here’s the final result of that text file][8]. I think having a lightweight way of organizing the posts all in one file made a huge difference and that it would have been impossible for me to see the patterns otherwise.
+
+### How I implemented it: a hugo taxonomy
+
+Once I had that big text file, I wrote [a janky python script][9] to assign the categories in that text file to the actual posts.
+
+I use Hugo for this blog, and so I also needed to tell Hugo about the categories. This blog already technically has tags (though they’re woefully underused, I didn’t want to delete them), and it turns out that in Hugo you can define arbitrary taxonomies. So I defined a new taxonomy for these sections (right now it’s called, unimaginatively, `juliasections`).
+
+The details of how I did this are pretty boring but [here’s the hugo template that makes it display on the homepage][10]. I used this [Hugo documentation page on taxonomies][11] a lot.
+
+### organizing my site is cool! reverse chronology maybe isn’t the best possible thing!
+
+Amy Hoy has this interesting article called [how the blog broke the web][12] about how the rise of blog software made people adopt a site format that maybe didn’t serve what they were writing the best.
+
+I don’t personally feel that mad about the blog / reverse chronology organization: I like blogging! I think it was nice for the first 6 years or whatever to be able to just write things that I think are cool without thinking about where they “fit”. It’s worked really well for me.
+
+But today, 360,000 words in, I think it makes sense to add a little more structure :).
+
+### what it looks like now!
+
+Here’s what the new front page organization looks like! These are the blogging / learning / rust sections! I think it’s cool how you can see the evolution of some of my thinking (I sure have written a lot of posts about asking questions :)).
+
+![][13]
+
+### I ❤ the personal website
+
+This is also part of why I love having a personal website that I can organize any way I want: for both of my main sites ([jvns.ca][1] and now [wizardzines.com][14]) I have total control over how they appear! And I can evolve them over time at my own pace if I decide something a little different will work better for me. I’ve gone from a jekyll blog to octopress to a custom-designed octopress blog to Hugo and made a ton of little changes over time. It’s so nice.
+
+I think it’s fun that these 3 screenshots are each 3 years apart – what I wanted in 2013 is not the same as 2016 is not the same as 2019! This is okay! 
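+
+(as an aside, here’s the tiny sketch of what a “dump all the titles into a text file” script could look like that I mentioned earlier. It’s not the actual script from this post (that one’s linked above), it’s just an illustration: it assumes Hugo-style markdown posts in `content/post` with a `title:` line in the front matter, which might not match your setup)
+
+```
+import glob
+
+# collect the title of every post so they can all be seen in one place
+# and shuffled around into rough sections with a text editor
+titles = []
+for path in glob.glob("content/post/*.markdown"):
+    with open(path) as f:
+        for line in f:
+            if line.startswith("title:"):
+                titles.append(line.replace("title:", "", 1).strip().strip('"'))
+                break
+
+with open("titles.txt", "w") as out:
+    out.write("\n".join(titles))
+```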
+
+And I really love seeing how other people choose to organize their personal sites! Please keep making cool different personal sites.
+
+--------------------------------------------------------------------------------
+
+via: https://jvns.ca/blog/2019/02/17/organizing-this-blog-into-categories/
+
+作者:[Julia Evans][a]
+选题:[lujun9972][b]
+译者:[译者ID](https://github.com/译者ID)
+校对:[校对者ID](https://github.com/校对者ID)
+
+本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
+
+[a]: https://jvns.ca/
+[b]: https://github.com/lujun9972
+[1]: https://jvns.ca
+[2]: https://jvns.ca/images/website-2013.png
+[3]: https://danluu.com
+[4]: https://jvns.ca/images/website-2016.png
+[5]: https://rachelbythebay.com/w/
+[6]: https://lethain.com/all-posts/
+[7]: https://codewithoutrules.com/worklife/
+[8]: https://github.com/jvns/jvns.ca/blob/2f7b2723994628a5348069dd87b3df68c2f0285c/scripts/titles.txt
+[9]: https://github.com/jvns/jvns.ca/blob/2f7b2723994628a5348069dd87b3df68c2f0285c/scripts/parse_titles.py
+[10]: https://github.com/jvns/jvns.ca/blob/25d239a3ba36c1bae1d055d2b7d50a4f1d0489ef/themes/orange/layouts/index.html#L39-L59
+[11]: https://gohugo.io/templates/taxonomy-templates/
+[12]: https://stackingthebricks.com/how-blogs-broke-the-web/
+[13]: https://jvns.ca/images/website-2019.png
+[14]: https://wizardzines.com

From d07c5cf53bc7556db2a5c30ebba15d542f44edaa Mon Sep 17 00:00:00 2001
From: DarkSun 
Date: Tue, 17 Sep 2019 12:24:28 +0800
Subject: [PATCH 120/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190216=20!!Con?=
 =?UTF-8?q?=202019:=20submit=20a=20talk!?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

sources/tech/20190216 --Con 2019- submit a talk.md

---
 .../20190216 --Con 2019- submit a talk.md     | 144 ++++++++++++++++++
 1 file changed, 144 insertions(+)
 create mode 100644 sources/tech/20190216 --Con 2019- submit a talk.md

diff --git a/sources/tech/20190216 --Con 2019- submit a talk.md b/sources/tech/20190216 --Con 2019- submit a talk.md
new file mode 100644
index 0000000000..7a28651f6f
--- /dev/null
+++ b/sources/tech/20190216 --Con 2019- submit a talk.md
@@ -0,0 +1,144 @@
+[#]: collector: (lujun9972)
+[#]: translator: ( )
+[#]: reviewer: ( )
+[#]: publisher: ( )
+[#]: url: ( )
+[#]: subject: (!!Con 2019: submit a talk!)
+[#]: via: (https://jvns.ca/blog/2019/02/16/--con-2019--submit-a-talk-/)
+[#]: author: (Julia Evans https://jvns.ca/)
+
+!!Con 2019: submit a talk!
+======
+
+As some of you might know, for the last 5 years I’ve been one of the organizers of a conference called [!!Con][1]. This year it’s going to be held on **May 11-12 in NYC**.
+
+The submission deadline is **Sunday, March 3** and you can [submit a talk here][2].
+
+(we also expanded to the west coast this year: [!!Con West][3] is next week!! I’m not on the !!Con West team since I live on the east coast but they’re doing amazing work, I have a ticket, and I’m so excited for there to be more !!Con in the world)
+
+### !!Con is about the joy, excitement, and surprise of computing
+
+Computers are AMAZING. You can make programs that seem like magic, computer science has all kinds of fun and surprising tidbits, there are all kinds of ways to make really cool art with computers, the systems that we use every day (like DNS!) are often super fascinating, and sometimes our computers do REALLY STRANGE THINGS and it’s very fun to figure out why.
+
+!!Con is about getting together for 2 days to share what we all love about computing. 
The only rule of !!Con talks is that the talk has to have an exclamation mark in the title :)
+
+We originally considered calling !!Con ExclamationMarkCon but that was too unwieldy so we went with !!Con :).
+
+### !!Con is inclusive
+
+The other big thing about !!Con is that we think computing should include everyone. To make !!Con a space where everyone can participate, we
+
+ * have open captioning for all talks (so that people who can’t hear well can read the text of the talk as it’s happening). This turns out to be great for LOTS of people – if you just weren’t paying attention for a second, you can look at the live transcript to see what you missed!
 * pay our speakers & pay for speaker travel
 * have a code of conduct (of course)
 * use the RC [social rules][4]
 * make sure our washrooms work for people of all genders
 * let people specify on their badges if they don’t want photos taken of them
 * do a lot of active outreach to make sure our set of speakers is diverse
+
+
+
+### past !!Con talks
+
+I think maybe the easiest way to explain !!Con if you haven’t been is through the talk titles! Here are a few arbitrarily chosen talks from past !!Cons:
+
+ * [Four Fake Filesystems!][5]
 * [Islamic Geometry: Hankin’s Polygons in Contact Algorithm!!!][6]
 * [Don’t know about you, but I’m feeling like SHA-2!: Checksumming with Taylor Swift][7]
 * [MissingNo., my favourite Pokémon!][8]
 * [Music! Programming! Arduino! (Or: Building Electronic Musical Interfaces to Create Awesome)][9]
 * [How I Code and Use a Computer at 1,000 WPM!!][10]
 * [The emoji that Killed Chrome!!][11]
 * [We built a map to aggregate real-time flood data in under two days!][12]
 * [PUSH THE BUTTON! 🔴 Designing a fun game where the only input is a BIG RED BUTTON! 🔴 !!!][13]
 * [Serious programming with jq?! A practical and purely functional programming language!][14]
 * [I wrote to a dead address in a deleted PDF and now I know where all the airplanes are!!][15]
 * [Making Mushrooms Glow!][16]
 * [HDR Photography in Microsoft Excel?!][17]
 * [DHCP: IT’S MOSTLY YELLING!!][18]
 * [Lossy text compression, for some reason?!][19]
 * [Plants are Recursive!!: Using L-Systems to Generate Realistic Weeds][20]
+
+
+
+If you want to see more (or get an idea of what !!Con talk descriptions usually look like), here’s every past year of the conference:
+
+ * 2018: [talk descriptions][21] and [recordings][22]
 * 2017: [talk descriptions][23] and [recordings][24]
 * 2016: [talk descriptions][25] and [recordings][26]
 * 2015: [talk descriptions][27] and [recordings][28]
 * 2014: [talk descriptions][29] and [recordings][30]
+
+
+
+### this year you can also submit a play / song / performance!
+
+One difference from previous !!Cons is that if you want to submit a non-talk-talk to !!Con this year (like a play!), you can! I’m very excited to see what people come up with. For more of that see [Expanding the !!Con aesthetic][31].
+
+### all talks are reviewed anonymously
+
+One big choice that we’ve made is to review all talks anonymously. This means that we’ll review your talk the same way whether you’ve never given a talk before or if you’re an internationally recognized public speaker. I love this because many of our best talks are from first-time speakers or people who I’d never heard of before, and I think anonymous review makes it easier to find great people who aren’t well known. 
+
+### writing a good outline is important
+
+We can’t rely on someone’s reputation to determine if they’ll give a good talk, but we do need a way to see that people have a plan for how to present their material in an engaging way. So we ask everyone to give a somewhat detailed outline explaining how they’ll spend their 10 minutes. Some people do it minute-by-minute and some people just say “I’ll explain X, then Y, then Z, then W”.
+
+Lindsey Kuper wrote some good advice about writing a clear !!Con outline, with some examples of really good outlines, [which you can see here][32].
+
+### We’re looking for sponsors
+
+!!Con is pay-what-you-can (if you can’t afford a $300 conference ticket, we’re the conference for you!). Because of that, we rely on our incredible sponsors (companies who want to build an inclusive future for tech with us!) to help make up the difference so that we can pay our speakers for their amazing work, pay for speaker travel, have open captioning, and everything else that makes !!Con the amazing conference it is.
+
+If you love !!Con, a huge way you can help support the conference is to ask your company to sponsor us! Here’s our [sponsorship page][33] and you can email me at [[email protected]][34] if you’re interested.
+
+### hope to see you there ❤
+
+I’ve met so many fantastic people through !!Con, and it brings me a lot of joy every year. The thing that makes !!Con great is all the amazing people who come to share what they’re excited about every year, and I hope you’ll be one of them.
+
+--------------------------------------------------------------------------------
+
+via: https://jvns.ca/blog/2019/02/16/--con-2019--submit-a-talk-/
+
+作者:[Julia Evans][a]
+选题:[lujun9972][b]
+译者:[译者ID](https://github.com/译者ID)
+校对:[校对者ID](https://github.com/校对者ID)
+
+本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
+
+[a]: https://jvns.ca/
+[b]: https://github.com/lujun9972
+[1]: http://bangbangcon.com
+[2]: http://bangbangcon.com/give-a-talk.html
+[3]: http://bangbangcon.com/west/
+[4]: https://www.recurse.com/social-rules
+[5]: https://youtube.com/watch?v=pfHpDDXJQVg
+[6]: https://youtube.com/watch?v=ld4gpQnaziU
+[7]: https://youtube.com/watch?v=1QgamEwwPro
+[8]: https://youtube.com/watch?v=yX7tDROZUt8
+[9]: https://youtube.com/watch?v=67Y-wH0FJFg
+[10]: https://youtube.com/watch?v=G1r55efei5c
+[11]: https://youtube.com/watch?v=UE-fJjMasec
+[12]: https://youtube.com/watch?v=hfatYo2J8gY
+[13]: https://youtube.com/watch?v=KqEc2Ek4GzA
+[14]: https://youtube.com/watch?v=PS_9pyIASvQ
+[15]: https://youtube.com/watch?v=FhVob_sRqQk
+[16]: https://youtube.com/watch?v=T75FvUDirNM
+[17]: https://youtube.com/watch?v=bkQJdaGGVM8
+[18]: https://youtube.com/watch?v=enRY9jd0IJw
+[19]: https://youtube.com/watch?v=meovx9OqWJc
+[20]: https://youtube.com/watch?v=0eXg4B1feOY
+[21]: http://bangbangcon.com/2018/speakers.html
+[22]: http://bangbangcon.com/2018/recordings.html
+[23]: http://bangbangcon.com/2017/speakers.html
+[24]: http://bangbangcon.com/2017/recordings.html
+[25]: http://bangbangcon.com/2016/speakers.html
+[26]: http://bangbangcon.com/2016/recordings.html
+[27]: http://bangbangcon.com/2015/speakers.html
+[28]: http://bangbangcon.com/2015/recordings.html
+[29]: http://bangbangcon.com/2014/speakers.html
+[30]: http://bangbangcon.com/2014/recordings.html
+[31]: https://organicdonut.com/2019/01/expanding-the-con-aesthetic/
+[32]: http://composition.al/blog/2017/06/30/how-to-write-a-timeline-for-a-bangbangcon-talk-proposal/
+[33]: 
http://bangbangcon.com/sponsors +[34]: https://jvns.ca/cdn-cgi/l/email-protection From 7dd065fa8dc515888d7897a8df382f92c1fa5f43 Mon Sep 17 00:00:00 2001 From: DarkSun Date: Tue, 17 Sep 2019 12:24:59 +0800 Subject: [PATCH 121/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190129=20A=20fe?= =?UTF-8?q?w=20early=20marketing=20thoughts?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/tech/20190129 A few early marketing thoughts.md --- ...20190129 A few early marketing thoughts.md | 164 ++++++++++++++++++ 1 file changed, 164 insertions(+) create mode 100644 sources/tech/20190129 A few early marketing thoughts.md diff --git a/sources/tech/20190129 A few early marketing thoughts.md b/sources/tech/20190129 A few early marketing thoughts.md new file mode 100644 index 0000000000..79cc6b1b1d --- /dev/null +++ b/sources/tech/20190129 A few early marketing thoughts.md @@ -0,0 +1,164 @@ +[#]: collector: (lujun9972) +[#]: translator: ( ) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (A few early marketing thoughts) +[#]: via: (https://jvns.ca/blog/2019/01/29/marketing-thoughts/) +[#]: author: (Julia Evans https://jvns.ca/) + +A few early marketing thoughts +====== + +At some point last month I said I might write more about business, so here are some very early marketing thoughts for my zine business (!). The question I’m trying to make some progress on in this post is: “how to do marketing in a way that feels good?” + +### what’s the point of marketing? + +Okay! What’s marketing? What’s the point? I think the ideal way marketing works is: + + 1. you somehow tell a person about a thing + 2. you explain somehow why the thing will be useful to them / why it is good + 3. they buy it and they like the thing because it’s what they expected + + + +(or, when you explain it they see that they don’t want it and don’t buy it which is good too!!) + +So basically as far as I can tell good marketing is just explaining what the thing is and why it is good in a clear way. + +### what internet marketing techniques do people use? + +I’ve been thinking a bit about internet marketing techniques I see people using on me recently. Here are a few examples of internet marketing techniques I’ve seen: + + 1. word of mouth (“have you seen this cool new thing?!”) + 2. twitter / instagram marketing (build a twitter/instagram account) + 3. email marketing (“build a mailing list with a bajillion people on it and sell to them”) + 4. email marketing (“tell your existing users about features that they already have that they might want to use”) + 5. social proof marketing (“jane from georgia bought a sweater”), eg fomo.com + 6. cart notifications (“you left this sweater in your cart??! did you mean to buy it? maybe you should buy it!“) + 7. content marketing (which is fine but whenever people refer to my writing as ‘content’ I get grumpy :)) + + + +### you need _some_ way to tell people about your stuff + +Something that is definitely true about marketing is that you need some way to tell new people about the thing you are doing. So for me when I’m thinking about running a business it’s less about “should i do marketing” and more like “well obviously i have to do marketing, how do i do it in a way that i feel good about?” + +### what’s up with email marketing? + +I feel like every single piece of internet marketing advice I read says “you need a mailing list”. This is advice that I haven’t really taken to heart – technically I have 2 mailing lists: + + 1. 
the RSS feed for this blog, which sends out new blog posts to a mailing list for folks who don’t use RSS (which 3000 of you get)
 2. <https://wizardzines.com>’s list, for comics / new zine announcements (780 people subscribe to that! thank you!)
+
+
+
+but definitely neither of them is a Machine For Making Sales and I’ve put in almost no effort in that direction yet.
+
+here are a few things I’ve noticed about marketing mailing lists:
+
+ * most marketing mailing lists are boring but some marketing mailing lists are actually interesting! For example I kind of like [amy hoy][1]’s emails.
 * Someone told me recently that they have 200,000 people on their mailing list (?!!) which made the “a mailing list is a machine for making money” concept make a lot more sense to me. I wonder if people who make a lot of money from their mailing lists all have huge 10k+ person mailing lists like this?
+
+
+
+### what works for me: twitter
+
+Right now for my zines business I’d guess maybe 70% of my sales come from Twitter. The main thing I do is tweet pages from zines I’m working on (for example: yesterday’s [comic about ss][2]). The comics are usually good and fun so invariably they get tons of retweets, which means that I end up with lots of followers, which means that when I later put up the zine for sale lots of people will buy it.
+
+And of course people don’t _have_ to buy the zines, I post most of what ends up in my zines on twitter for free, so it feels like a nice way to do it. Everybody wins, I think.
+
+(side note: when I started getting tons of new followers from my comics I was actually super worried that it would make my experience of Twitter way worse. That hasn’t happened! the new followers all seem totally reasonable and I still get a lot of really interesting twitter replies which is wonderful ❤)
+
+I don’t try to hack/optimize this really: I just post comics when I make them and I try to make them good.
+
+### a small Twitter innovation: putting my website on the comics
+
+Here’s one small marketing change that I made that I think makes sense!
+
+In the past, I didn’t put anything about how to buy my comics on the comics I posted on Twitter, just my Twitter username. Like this:
+
+![][3]
+
+After a while, I realized people were asking me all the time “hey, can I buy a book/collection? where do these come from? how do I get more?”! I think a marketing secret is “people actually want to buy things that are good, it is useful to tell people where they can buy things that are good”.
+
+So just recently I’ve started adding my website and a note about my current project on the comics I post on Twitter. It doesn’t say much: just “❤ these comics? buy a collection! wizardzines.com” and “page 11 of my upcoming bite size networking zine”. Here’s what it looks like:
+
+![][4]
+
+I feel like this strikes a pretty good balance between “julia you need to tell people what you’re doing otherwise how are they supposed to buy things from you” and “omg too many sales pitches everywhere”? I’ve only started doing this recently so we’ll see how it goes.
+
+### should I work on a mailing list?
+
+It seems like the same thing that works on twitter would work by email if I wanted to put in the time (email people comics! when a zine comes out, email them about the zine and they can buy it if they want!).
+
+One thing I LOVE about Twitter though is that people always reply to the comics I post with their own tips and tricks that they love and I often learn something new. 
I feel like email would be nowhere near as fun :)
+
+But I still think this is a pretty good idea: keeping up with twitter can be time consuming and I bet a lot of people would like to get an occasional email with programming drawings. (would you?)
+
+One thing I’m not sure about is – a lot of marketing mailing lists seem to use somewhat aggressive techniques to get new emails (a lot of popups on a website, or adding everyone who signs up to their service / buys a thing to a marketing list) and while I’m basically fine with that (unsubscribing is easy!), I’m not sure that it’s what I’d want to do, and maybe less aggressive techniques will work just as well? We’ll see.
+
+### should I track conversion rates?
+
+A piece of marketing advice I assume people give a lot is “be data driven, figure out what things convert the best, etc”. I don’t do this almost at all – gumroad used to tell me that most of my sales came from Twitter which was good to know, but right now I have basically no idea how it works.
+
+Doing a bunch of work to track conversion rates feels bad to me: it seems like it would be really easy to go down a dumb rabbit hole of “oh, let’s try to increase conversion by 5%” instead of just focusing on making really good and cool things.
+
+My guess is that what will work best for me for a while is to have some data that tells me in broad strokes how the business works (like “about 70% of sales come from twitter”) and just leave it at that.
+
+### should I do advertising?
+
+I had a conversation with Kamal about this post that went:
+
+ * julia: “hmm, maybe I should talk about ads?”
 * julia: “wait, are ads marketing?”
 * kamal: “yes ads are marketing”
+
+
+
+So, ads! I don’t know anything about advertising except that you can advertise on Facebook or Twitter or Google. Some non-ethical questions I have about advertising:
+
+ * how do you choose what keywords to advertise on?
 * are there actually cheap keywords, like is ‘file descriptors’ cheap?
 * how much do you need to pay per click? (for some weird linux keywords, google estimated 20 cents a click?)
 * can you use ads effectively for something that costs $10?
+
+
+
+This seems nontrivial to learn about and I don’t think I’m going to try soon.
+
+### other marketing things
+
+a few other things I’ve thought about:
+
+ * I learned about “social proof marketing” sites like fomo.com yesterday which make popups on your site like “someone bought COOL THING 3 hours ago”. This seems like it has some utility (people are actually buying things from me all the time, maybe that’s useful to share somehow?) but those popups feel a bit cheap to me and I don’t really think it’s something I’d want to do right now.
 * similarly a lot of sites like to inject these popups like “HELLO PLEASE SIGN UP FOR OUR MAILING LIST”. similar thoughts. I’ve been putting an email signup link in the footer which seems like a good balance between discoverable and annoying. As an example of a popup which isn’t too intrusive, though: nate berkopec has [one on his site][5] which feels really reasonable! (scroll to the bottom to see it)
+
+
+
+Maybe marketing is all about “make your things discoverable without being annoying”? :)
+
+### that’s all!
+
+Hopefully some of this was interesting! Obviously the most important thing in all of this is to make cool things that are useful to people, but I think cool useful writing does not actually sell itself! 
+
+If you have thoughts about what kinds of marketing have worked well for you / you’ve felt good about I would love to hear them!
+
+--------------------------------------------------------------------------------
+
+via: https://jvns.ca/blog/2019/01/29/marketing-thoughts/
+
+作者:[Julia Evans][a]
+选题:[lujun9972][b]
+译者:[译者ID](https://github.com/译者ID)
+校对:[校对者ID](https://github.com/校对者ID)
+
+本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
+
+[a]: https://jvns.ca/
+[b]: https://github.com/lujun9972
+[1]: https://stackingthebricks.com/
+[2]: https://twitter.com/b0rk/status/1090058524137345025
+[3]: https://jvns.ca/images/kill.jpeg
+[4]: https://jvns.ca/images/ss.jpeg
+[5]: https://www.speedshop.co/2019/01/10/three-activerecord-mistakes.html

From ad817f9a44e3fc4f80f06fd3fa8307482370b1f2 Mon Sep 17 00:00:00 2001
From: DarkSun 
Date: Tue, 17 Sep 2019 12:25:09 +0800
Subject: [PATCH 122/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020181229=20Some?=
 =?UTF-8?q?=20nonparametric=20statistics=20math?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

sources/tech/20181229 Some nonparametric statistics math.md

---
 ...1229 Some nonparametric statistics math.md | 178 ++++++++++++++++++
 1 file changed, 178 insertions(+)
 create mode 100644 sources/tech/20181229 Some nonparametric statistics math.md

diff --git a/sources/tech/20181229 Some nonparametric statistics math.md b/sources/tech/20181229 Some nonparametric statistics math.md
new file mode 100644
index 0000000000..452c295781
--- /dev/null
+++ b/sources/tech/20181229 Some nonparametric statistics math.md
@@ -0,0 +1,178 @@
+[#]: collector: (lujun9972)
+[#]: translator: ( )
+[#]: reviewer: ( )
+[#]: publisher: ( )
+[#]: url: ( )
+[#]: subject: (Some nonparametric statistics math)
+[#]: via: (https://jvns.ca/blog/2018/12/29/some-initial-nonparametric-statistics-notes/)
+[#]: author: (Julia Evans https://jvns.ca/)
+
+Some nonparametric statistics math
+======
+
+I’m trying to understand nonparametric statistics a little more formally. This post may not be that intelligible because I’m still pretty confused about nonparametric statistics, there is a lot of math, and I make no attempt to explain any of the math notation. I’m working towards being able to explain this stuff in a much more accessible way but first I would like to understand some of the math!
+
+There’s some MathJax in this post so the math may or may not render in an RSS reader.
+
+Some questions I’m interested in:
+
+ * what is nonparametric statistics exactly?
 * what guarantees can we make? are there formulas we can use?
 * why do methods like the bootstrap method work?
+
+
+
+since these notes are from reading a math book and math books are extremely dense this is basically going to be “I read 7 pages of this math book and here are some points I’m confused about”
+
+### what’s nonparametric statistics?
+
+Today I’m looking at “all of nonparametric statistics” by Larry Wasserman. He defines nonparametric inference as:
+
+> a set of modern statistical methods that aim to keep the number of underlying assumptions as weak as possible
+
+Basically my interpretation of this is that – instead of assuming that your data comes from a specific family of distributions (like the normal distribution) and then trying to estimate the parameters of that distribution, you don’t make many assumptions about the distribution (“this is just some data!!”). Not having to make assumptions is nice! 
+
+There aren’t **no** assumptions though – he says
+
+> we assume that the distribution $F$ lies in some set $\mathfrak{F}$ called a **statistical model**. For example, when estimating a density $f$, we might assume that $$ f \in \mathfrak{F} = \left\\{ g : \int(g^{\prime\prime}(x))^2dx \leq c^2 \right\\}$$ which is the set of densities that are not “too wiggly”.
+
+I don’t have much intuition for the condition $\int(g^{\prime\prime}(x))^2dx \leq c^2$. I calculated that integral for [the normal distribution on wolfram alpha][1] and got 4, which is a good start. (4 is not infinity!)
+
+some questions I still have about this definition:
+
+ * what’s an example of a probability density function that _doesn’t_ satisfy that $\int(g^{\prime\prime}(x))^2dx \leq c^2$ condition? (probably something with an infinite number of tiny wiggles, and I don’t think any distribution I’m interested in in practice would have an infinite number of tiny wiggles?)
 * why does the density function being “too wiggly” cause problems for nonparametric inference? very unclear as yet.
+
+
+
+### we still have to assume independence
+
+One assumption we **won’t** get away from is that the samples in the data we’re dealing with are independent. Often data in the real world actually isn’t really independent, but I think what people do a lot of the time is to make a good effort at something approaching independence and then close their eyes and pretend it is?
+
+### estimating the density function
+
+Okay! Here’s a useful section! Let’s say that I have 100,000 data points from a distribution. I can draw a histogram like this of those data points:
+
+![][2]
+
+If I have 100,000 data points, it’s pretty likely that that histogram is pretty close to the actual distribution. But this is math, so we should be able to make that statement precise, right?
+
+For example suppose that 5% of the points in my sample are more than 100. Is the probability that a point is greater than 100 **actually** 0.05? The book gives a nice formula for this:
+
+$$ \mathbb{P}(|\widehat{P}_n(A) - P(A)| > \epsilon ) \leq 2e^{-2n\epsilon^2} $$
+
+(by [“Hoeffding’s inequality”][3], which I’d never heard of before). Fun aside about that inequality: here’s a nice jupyter notebook by henry wallace using it to [identify the most common Boggle words][4].
+
+here, in our example:
+
+ * n is 100,000 (the number of data points we have)
 * $A$ is the set of points more than 100
 * $\widehat{P}_n(A)$ is the empirical probability that a point is more than 100 (0.05)
 * $P(A)$ is the actual probability
 * $\epsilon$ is how certain we want to be that we’re right
+
+
+
+So, what’s the probability that the **real** probability is between 0.04 and 0.06? $\epsilon = 0.01$, so it’s $2e^{-2 \times 100,000 \times (0.01)^2} = 4e^{-9} $ ish (according to wolfram alpha)
+
+here is a table of how sure we can be:
+
+ * 100,000 data points: 4e-9 (TOTALLY CERTAIN that 4% - 6% of points are more than 100)
 * 10,000 data points: 0.27 (27% probability that we’re wrong! that’s… not bad?)
 * 1,000 data points: 1.6 (we know the probability we’re wrong is less than… 160%? that’s not good!)
 * 100 data points: lol
+
+
+
+so basically, in this case, using this formula: 100,000 data points is AMAZING, 10,000 data points is pretty good, and 1,000 is much less useful. If we have 1000 data points and we see that 5% of them are more than 100, we DEFINITELY CANNOT CONCLUDE that 4% to 6% of points are more than 100. 
But (using the same formula) we can use $\epsilon = 0.04$ and conclude that with 92% probability 1% to 9% of points are more than 100. So we can still learn some stuff from 1000 data points!
+
+This intuitively feels pretty reasonable to me – like it makes sense to me that if you have NO IDEA what your distribution is, then with 100,000 points you’d be able to make quite strong inferences, and that with 1000 you can do a lot less!
+
+### more data points are exponentially better?
+
+One thing that I think is really cool about this estimating the density function formula is that how sure you can be of your inferences scales **exponentially** with the size of your dataset (this is the $e^{-2n\epsilon^2}$ term). And also exponentially with the square of how sure you want to be (so wanting to be sure within 0.01 is VERY DIFFERENT than within 0.04). So 100,000 data points isn’t 10x better than 10,000 data points, it’s actually something like 70,000,000x better, at least according to this bound (the ratio of 0.27 to 4e-9 in the table above).
+
+Is that true in other places? If so that seems like a super useful intuition! I still feel pretty uncertain about this, but having some basic intuition about “how much more useful is 10,000 data points than 1,000 data points?” feels like a really good thing.
+
+### some math about the bootstrap
+
+The next chapter is about the bootstrap! Basically the way the bootstrap works is:
+
+ 1. you want to estimate some statistic (like the median) of your distribution
 2. the bootstrap lets you get an estimate and also the variance of that estimate
 3. you do this by repeatedly sampling with replacement from your data and then calculating the statistic you want (like the median) on your samples
+
+
+
+I’m not going to go too much into how to implement the bootstrap method because it’s explained in a lot of places on the internet (though there’s a small code sketch at the end of this post). Let’s talk about the math!
+
+I think in order to say anything meaningful about bootstrap estimates I need to learn a new term: a **consistent estimator**.
+
+### What’s a consistent estimator?
+
+Wikipedia says:
+
+> In statistics, a **consistent estimator** or **asymptotically consistent estimator** is an estimator — a rule for computing estimates of a parameter $\theta_0$ — having the property that as the number of data points used increases indefinitely, the resulting sequence of estimates converges in probability to $\theta_0$.
+
+This includes some terms where I forget what they mean (what’s “converges in probability” again?). But this seems like a very good thing! If I’m estimating some parameter (like the median), I would DEFINITELY LIKE IT TO BE TRUE that if I do it with an infinite amount of data then my estimate works. An estimator that is not consistent does not sound very useful!
+
+### why/when are bootstrap estimators consistent?
+
+spoiler: I have no idea. The book says the following:
+
+> Consistency of the bootstrap can now be expressed as follows.
+>
+> **3.19 Theorem**. Suppose that $\mathbb{E}(X_1^2) < \infty$. Let $T_n = g(\overline{X}_n)$ where $g$ is continuously differentiable at $\mu = \mathbb{E}(X_1)$ and $g^{\prime}(\mu) \neq 0$. Then,
+>
+> $$ \sup_u | \mathbb{P}_{\widehat{F}_n} \left( \sqrt{n} (T(\widehat{F}_n^*) - T(\widehat{F}_n)) \leq u \right) - \mathbb{P}_{F} \left( \sqrt{n} (T(\widehat{F}_n) - T(F)) \leq u \right) | \rightarrow^\text{a.s.} 0 $$
+>
+> **3.21 Theorem**. Suppose that $T(F)$ is Hadamard differentiable with respect to $d(F,G)= \sup_x|F(x)-G(x)|$ and that $0 < \int L^2_F(x) dF(x) < \infty$. 
Then,
+>
+> $$ \sup_u | \mathbb{P}_{\widehat{F}_n} \left( \sqrt{n} (T(\widehat{F}_n^*) - T(\widehat{F}_n)) \leq u \right) - \mathbb{P}_{F} \left( \sqrt{n} (T(\widehat{F}_n) - T(F)) \leq u \right) | \rightarrow^\text{P} 0 $$
+
+things I understand about these theorems:
+
+ * the two formulas they’re concluding are the same, except I think one is about convergence “almost surely” and one about “convergence in probability”. I don’t remember what either of those mean.
 * I think for our purposes of doing Regular Boring Things we can replace “Hadamard differentiable” with “differentiable”
 * I think they don’t actually show the consistency of the bootstrap, they’re actually about consistency of the bootstrap confidence interval estimate (which is a different thing)
+
+
+
+I don’t really understand how they’re related to consistency, and in particular the $\sup_u$ thing is weird, like if you’re looking at $\mathbb{P}(something < u)$, wouldn’t you want to minimize $u$ and not maximize it? Maybe it’s a typo and it should be $\inf_u$?
+
+it concludes:
+
+> there is a tendency to treat the bootstrap as a panacea for all problems. But the bootstrap requires regularity conditions to yield valid answers. It should not be applied blindly.
+
+### this book does not seem to explain why the bootstrap is consistent
+
+In the appendix (3.7) it gives a sketch of a proof for showing that estimating the **median** using the bootstrap is consistent. I don’t think this book actually gives a proof anywhere that bootstrap estimates in general are consistent, which was pretty surprising to me. It gives a bunch of references to papers. Though I guess bootstrap confidence intervals are the most important thing?
+
+### that’s all for now
+
+This is all extremely stream of consciousness and I only spent 2 hours trying to work through this, but some things I think I learned in the last couple hours are:
+
+ 1. maybe having more data is exponentially better? (is this true??)
 2. “consistency” of an estimator is a thing, not all estimators are consistent
 3. understanding when/why nonparametric bootstrap estimators are consistent in general might be very hard (the proof that the bootstrap median estimator is consistent already seems very complicated!)
 4. bootstrap confidence intervals are not the same thing as bootstrap estimators. Maybe I’ll learn the difference next! 
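+
+(a tiny code sketch, since running things helps me: here’s roughly what the Hoeffding bound calculation and the “resample with replacement and recompute the median” bootstrap procedure from earlier look like in Python. This assumes numpy, and the data is totally made up for illustration – it’s a sketch of the general technique, not anything from the book)
+
+```
+import numpy as np
+
+rng = np.random.default_rng(0)
+
+# made-up data: 1000 points from a distribution we're pretending
+# we know nothing about
+data = rng.lognormal(mean=3, sigma=1, size=1000)
+
+# the Hoeffding bound from earlier: with n data points, the probability
+# that the empirical P(A) is off by more than epsilon is at most
+# 2 * exp(-2 * n * epsilon^2)
+def hoeffding_bound(n, epsilon):
+    return 2 * np.exp(-2 * n * epsilon ** 2)
+
+print(hoeffding_bound(100_000, 0.01))  # ~4e-9
+print(hoeffding_bound(1_000, 0.04))    # ~0.08, ie the "92% sure" number
+
+# the bootstrap: resample with replacement, recompute the median each
+# time, and use the spread of the resampled medians to estimate the
+# variance of the median estimate
+bootstrap_medians = np.array([
+    np.median(rng.choice(data, size=len(data), replace=True))
+    for _ in range(10_000)
+])
+print(np.median(data))          # the point estimate
+print(bootstrap_medians.std())  # its estimated standard error
+```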
+
+
+
+--------------------------------------------------------------------------------
+
+via: https://jvns.ca/blog/2018/12/29/some-initial-nonparametric-statistics-notes/
+
+作者:[Julia Evans][a]
+选题:[lujun9972][b]
+译者:[译者ID](https://github.com/译者ID)
+校对:[校对者ID](https://github.com/校对者ID)
+
+本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
+
+[a]: https://jvns.ca/
+[b]: https://github.com/lujun9972
+[1]: https://www.wolframalpha.com/input/?i=integrate+(d%2Fdx(d%2Fdx(exp(-x%5E2))))%5E2++dx+from+x%3D-infinity+to+infinity
+[2]: https://jvns.ca/images/nonpar-histogram.png
+[3]: https://en.wikipedia.org/wiki/Hoeffding%27s_inequality
+[4]: https://nbviewer.jupyter.org/github/henrywallace/games/blob/master/boggle/boggle.ipynb#Estimating-Word-Probabilities

From 8541dc5cb710142373389e9312dc0e57c37a8ab7 Mon Sep 17 00:00:00 2001
From: DarkSun 
Date: Tue, 17 Sep 2019 12:25:20 +0800
Subject: [PATCH 123/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020181228=202018:?=
 =?UTF-8?q?=20Year=20in=20review?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

sources/tech/20181228 2018- Year in review.md

---
 sources/tech/20181228 2018- Year in review.md | 173 ++++++++++++++++++
 1 file changed, 173 insertions(+)
 create mode 100644 sources/tech/20181228 2018- Year in review.md

diff --git a/sources/tech/20181228 2018- Year in review.md b/sources/tech/20181228 2018- Year in review.md
new file mode 100644
index 0000000000..91099492ac
--- /dev/null
+++ b/sources/tech/20181228 2018- Year in review.md
@@ -0,0 +1,173 @@
+[#]: collector: (lujun9972)
+[#]: translator: ( )
+[#]: reviewer: ( )
+[#]: publisher: ( )
+[#]: url: ( )
+[#]: subject: (2018: Year in review)
+[#]: via: (https://jvns.ca/blog/2018/12/23/2018--year-in-review/)
+[#]: author: (Julia Evans https://jvns.ca/)
+
+2018: Year in review
+======
+
+I wrote these in [2015][1] and [2016][2] and [2017][3] and it’s always interesting to look back at them, so here’s a summary of what went on in my side projects in 2018.
+
+### ruby profiler!
+
+At the beginning of this year I wrote [rbspy][4] (docs: <https://rbspy.github.io/>). It inspired a Python version called [py-spy][5] and a PHP profiler called [phpspy][6], both of which are excellent. I think py-spy in particular is [probably _better_][7] than rbspy which makes me really happy.
+
+Writing a program that does something innovative (`top` for your Ruby program’s functions!) and inspiring other people to make amazing new tools is something I’m really proud of.
+
+### started a side business!
+
+A very surprising thing that happened in 2018 is that I started a business! This is the website: <https://wizardzines.com>, and I sell programming zines.
+
+It’s been astonishingly successful (it definitely made me enough money that I could have lived on just the revenue from the business this year), and I’m really grateful to everyone who’s supported that work. I hope the zines have helped you. I always thought that it was impossible to make anywhere near as much money teaching people useful things as I can as a software developer, and now I think that’s not true. I don’t think that I’d _want_ to make that switch (I like working as a programmer!), but now I actually think that if I was serious about it and was interested in working on my business skills, I could probably make it work.
+
+I don’t really know what’s next, but I plan to write at least one zine next year. 
I learned a few things about business this year, mainly from:
+
+ * [stephanie hurlburt’s twitter][8]
 * [amy hoy][9]
 * the book [growing a business by paul hawken][10]
 * seeing what joel hooks is doing with [egghead.io][11]
 * a little from [indie hackers][12]
+
+
+
+I used to think that sales / marketing had to be gross, but reading some of these business books made me think that it’s actually possible to run a business by being honest & just building good things.
+
+### work!
+
+this is mostly about side projects, but a few things about work:
+
+ * I still have the same manager ([jay][13]). He’s been really great to work with. The [help! i have a manager!][14] zine is secretly largely things I learned from working with him.
 * my team made some big networking infrastructure changes and it went pretty well. I learned a lot about proxies/TLS and a little bit about C++.
 * I mentored another intern, and the intern I mentored last year joined us full time!
+
+
+
+When I go back to work I’m going to switch to working on something COMPLETELY DIFFERENT (writing code that sends messages to banks!) for 3 months. It’s a lot closer to the company’s core business, and I think it’ll be neat to learn more about how financial infrastructure works.
+
+I struggled a bit with understanding/defining my job this year. I wrote [What’s a senior engineer’s job?][15] about that, but I have not yet reached enlightenment.
+
+### talks!
+
+I gave 4 talks in 2018:
+
+ * [So you want to be a wizard][16] at StarCon
 * [Building a Ruby profiler][17] at the Recurse Center’s localhost series
 * [Build Impossible Programs][18] in May at Deconstruct.
 * [High Reliability Infrastructure Migrations][19] at Kubecon. I’m pretty happy about this talk because I’ve wanted to give a good talk about what I do at work for a long time and I think I finally succeeded. Previously when I gave talks about my work I think I fell into the trap of just describing what we do (“we do X Y Z” … “okay, so what?”). With this one, I think I was able to actually say things that were useful to other people.
+
+
+
+In past years I’ve mostly given talks which can be summarized as “here are some cool tools” and “here is how to learn hard things”. This year I changed focus to giving talks about the actual work I do – there were two talks about building a Ruby profiler, and one about what I do at work (I spend a lot of time on infrastructure migrations!)
+
+I’m not sure whether I’ll give any talks in 2019. I travelled more than I wanted to in 2018, and to stay sane I ended up having to cancel, with relatively short notice, a talk I was planning to give, which wasn’t good.
+
+### podcasts!
+
+I also experimented a bit with a new format: the podcast! These were basically all really fun! They don’t take that long (about 2 hours total?).
+
+ * [Software Engineering Daily][20], on rbspy and how to use a profiler
 * [FLOSS weekly][21], again about rbspy. They told me I’m the guest that asked _them_ the most questions, which I took as a compliment :)
 * [CodeNewbie][22] on computer networking & how the Internet works
 * [Hanselminutes with Scott Hanselman][23] on writing zines / teaching / learning
 * [egghead.io][24], on making zines & running a business
+
+
+
+what I learned about doing podcasts:
+
+ * It’s really important to give the hosts a list of good questions to ask, and to be prepared to give good answers to those questions! I’m not a super polished podcast guest.
 * you need a good microphone. 
At least one of these people told me I actually couldn’t be on their podcast unless I had a good enough microphone, so I bought a [medium fancy microphone][25]. It wasn’t too expensive and it’s nice to have a better quality microphone! Maybe I will use it more to record audio/video at some point!
+
+
+
+### !!Con
+
+I co-organized [!!Con][26] for the 4th time – I ran sponsorships. It’s always such a delight and the speakers are so great.
+
+!!Con is expanding [to the west coast in 2019][27] – I’m not directly involved with that but it’s going to be amazing.
+
+### blog posts!
+
+I apparently wrote 54 blog posts in 2018. A couple of my favourites are [What’s a senior engineer’s job?][15], [How to teach yourself hard things][28], and [batch editing files with ed][29].
+
+There were basically 4 themes in blogging for 2018:
+
+ * progress on the rbspy project while I was working on it ([this category][30])
 * computer networking / infrastructure engineering (basically all I did at work this year was networking, though I didn’t write about it as much as I might have)
 * musings about zines / business / developer education, for instance [why sell zines?][31] and [who pays to educate developers?][32]
 * a few of the usual “how do you learn things” / “how do you succeed at your job” posts as I figure things out about that, for instance [working remotely, 4 years in][33]
+
+
+
+### a tiny inclusion project: a guide to performance reviews
+
+[Last year][3], in addition to my actual job, I did a couple of projects at work towards helping make sure the performance/promotion process works well for folks – I collaborated with the amazing [karla][34] on the idea of a “brag document”, and redid our engineering levels.
+
+This year, in the same vein, I wrote a document called the “Unofficial guide to the performance reviews”. A lot of folks said it helped them but probably it’s too early to celebrate. I think explaining to folks how the performance review process actually works and how to approach it is really valuable and I might try to publish a more general version here at some point.
+
+I like that I work at a place where it’s possible/encouraged to do projects like this. I spend a relatively small amount of time on them (maybe I spent 15 hours on this one?) but it feels good to be able to make tiny steps towards building a better workplace from time to time. It’s really hard to judge the results though!
+
+### conclusions? 
+ +some things that worked in 2018: + + * setting [boundaries][15] around what my job is + * doing open source work while being paid for it + * starting a side business + * doing small inclusion projects at work + * writing zines is very time consuming but I feel happy about the time I spent on that + * blogging is always great + + + +-------------------------------------------------------------------------------- + +via: https://jvns.ca/blog/2018/12/23/2018--year-in-review/ + +作者:[Julia Evans][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://jvns.ca/ +[b]: https://github.com/lujun9972 +[1]: https://jvns.ca/blog/2015/12/26/2015-year-in-review/ +[2]: https://jvns.ca/blog/2016/12/21/2016--year-in-review/ +[3]: https://jvns.ca/blog/2017/12/31/2017--year-in-review/ +[4]: https://github.com/rbspy/rbspy +[5]: https://github.com/benfred/py-spy +[6]: https://github.com/adsr/phpspy/ +[7]: https://jvns.ca/blog/2018/09/08/an-awesome-new-python-profiler--py-spy-/ +[8]: https://twitter.com/sehurlburt +[9]: https://stackingthebricks.com/ +[10]: https://www.amazon.com/Growing-Business-Paul-Hawken/dp/0671671642 +[11]: https://egghead.io/ +[12]: https://www.indiehackers.com/ +[13]: https://twitter.com/jshirley +[14]: https://wizardzines.com/zines/manager/ +[15]: https://jvns.ca/blog/senior-engineer/ +[16]: https://www.youtube.com/watch?v=FBMC9bm-KuU +[17]: https://jvns.ca/blog/2018/04/16/rbspy-talk/ +[18]: https://www.deconstructconf.com/2018/julia-evans-build-impossible-programs +[19]: https://www.youtube.com/watch?v=obB2IvCv-K0 +[20]: https://softwareengineeringdaily.com/2018/06/05/profilers-with-julia-evans/ +[21]: https://twit.tv/shows/floss-weekly/episodes/487 +[22]: https://www.codenewbie.org/podcast/how-does-the-internet-work +[23]: https://hanselminutes.com/643/learning-how-to-be-a-wizard-programmer-with-julia-evans +[24]: https://player.fm/series/eggheadio-developer-chats-1728019/exploring-concepts-and-teaching-using-focused-zines-with-julia-evans +[25]: https://www.amazon.com/gp/product/B000EOPQ7E/ref=as_li_tl?ie=UTF8&camp=1789&creative=390957&creativeASIN=B000EOPQ7E&linkCode=as2&tag=diabeticbooks&linkId=ZBZBIVR4EB7V6JFL +[26]: http://bangbangcon.com +[27]: http://bangbangcon.com/west/ +[28]: https://jvns.ca/blog/2018/09/01/learning-skills-you-can-practice/ +[29]: https://jvns.ca/blog/2018/05/11/batch-editing-files-with-ed/ +[30]: https://jvns.ca/categories/ruby-profiler/ +[31]: https://jvns.ca/blog/2018/09/23/why-sell-zines/ +[32]: https://jvns.ca/blog/2018/09/01/who-pays-to-educate-developers-/ +[33]: https://jvns.ca/blog/2018/02/18/working-remotely--4-years-in/ +[34]: https://karla.io/ From b1618a4f3e5773c3fa8e866c05099f8530add961 Mon Sep 17 00:00:00 2001 From: DarkSun Date: Tue, 17 Sep 2019 12:25:30 +0800 Subject: [PATCH 124/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020181215=20New=20?= =?UTF-8?q?talk:=20High=20Reliability=20Infrastructure=20Migrations?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/tech/20181215 New talk- High Reliability Infrastructure Migrations.md --- ...h Reliability Infrastructure Migrations.md | 78 +++++++++++++++++++ 1 file changed, 78 insertions(+) create mode 100644 sources/tech/20181215 New talk- High Reliability Infrastructure Migrations.md diff --git a/sources/tech/20181215 New talk- High Reliability Infrastructure Migrations.md b/sources/tech/20181215 New talk- 
High Reliability Infrastructure Migrations.md new file mode 100644 index 0000000000..93755329c7 --- /dev/null +++ b/sources/tech/20181215 New talk- High Reliability Infrastructure Migrations.md @@ -0,0 +1,78 @@ +[#]: collector: (lujun9972) +[#]: translator: ( ) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (New talk: High Reliability Infrastructure Migrations) +[#]: via: (https://jvns.ca/blog/2018/12/15/new-talk--high-reliability-infrastructure-migrations/) +[#]: author: (Julia Evans https://jvns.ca/) + +New talk: High Reliability Infrastructure Migrations +====== + +On Tuesday I gave a talk at KubeCon called [High Reliability Infrastructure Migrations][1]. The abstract was: + +> For companies with high availability requirements (99.99% uptime or higher), running new software in production comes with a lot of risks. But it’s possible to make significant infrastructure changes while maintaining the availability your customers expect! I’ll give you a toolbox for derisking migrations and making infrastructure changes with confidence, with examples from our Kubernetes & Envoy experience at Stripe. + +### video + +#### slides + +Here are the slides: + +since everyone always asks, I drew them in the Notability app on an iPad. I do this because it’s faster than trying to use regular slides software and I can make better slides. + +### a few notes + +Here are a few links & notes about things I mentioned in the talk + +#### skycfg: write functions, not YAML + +I talked about how my team is working on non-YAML interfaces for configuring Kubernetes. The demo is at [skycfg.fun][2], and it’s [on GitHub here][3]. It’s based on [Starlark][4], a configuration language that’s a subset of Python. + +My coworker [John][5] has promised that he’ll write a blog post about it at some point, and I’m hoping that’s coming soon :) + +#### no haunted forests + +I mentioned a deploy system rewrite we did. John has a great blog post about when rewrites are a good idea and how he approached that rewrite called [no haunted forests][6]. + +#### ignore most kubernetes ecosystem software + +One small point that I made in the talk was that on my team we ignore almost all software in the Kubernetes ecosystem so that we can focus on a few core pieces (Kubernetes & Envoy, plus some small things like kiam). I wanted to mention this because I think often in Kubernetes land it can seem like everyone is using Cool New Things (helm! istio! knative! eep!). I’m sure those projects are great but I find it much simpler to stay focused on the basics and I wanted people to know that it’s okay to do that if that’s what works for your company. + +I think the reality is that actually a lot of folks are still trying to work out how to use this new software in a reliable and secure way. + +#### other talks + +I haven’t watched other Kubecon talks yet, but here are 2 links: + +I heard good things about [this keynote from melanie cebula about kubernetes at airbnb][7], and I’m excited to see [this talk about kubernetes security][8]. The [slides from that security talk look useful][9] + +Also I’m very excited to see Kelsey Hightower’s keynote as always, but that recording isn’t up yet. If you have other Kubecon talks to recommend I’d love to know what they are. + +#### my first work talk I’m happy with + +I usually give talks about debugging tools, or side projects, or how I approach my job at a high level – not on the actual work that I do at my job. 
What I talked about in this talk is basically what I’ve been learning how to do at work for the last ~2 years. Figuring out how to make big infrastructure changes safely took me a long time (and I’m not done!), and so I hope this talk helps other folks do the same thing. + +-------------------------------------------------------------------------------- + +via: https://jvns.ca/blog/2018/12/15/new-talk--high-reliability-infrastructure-migrations/ + +作者:[Julia Evans][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://jvns.ca/ +[b]: https://github.com/lujun9972 +[1]: https://www.youtube.com/watch?v=obB2IvCv-K0 +[2]: http://skycfg.fun +[3]: https://github.com/stripe/skycfg +[4]: https://github.com/bazelbuild/starlark +[5]: https://john-millikin.com/ +[6]: https://john-millikin.com/sre-school/no-haunted-forests +[7]: https://www.youtube.com/watch?v=ytu3aUCwlSg&index=127&t=0s&list=PLj6h78yzYM2PZf9eA7bhWnIh_mK1vyOfU +[8]: https://www.youtube.com/watch?v=a03te8xEjUg&index=65&list=PLj6h78yzYM2PZf9eA7bhWnIh_mK1vyOfU&t=0s +[9]: https://schd.ws/hosted_files/kccna18/1c/KubeCon%20NA%20-%20This%20year%2C%20it%27s%20about%20security%20-%2020181211.pdf From c99b305851cf0d7b93531bff81ce48e47f52ca45 Mon Sep 17 00:00:00 2001 From: DarkSun Date: Tue, 17 Sep 2019 12:25:40 +0800 Subject: [PATCH 125/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020181209=20How=20?= =?UTF-8?q?do=20you=20document=20a=20tech=20project=20with=20comics=3F?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/tech/20181209 How do you document a tech project with comics.md --- ...you document a tech project with comics.md | 100 ++++++++++++++++++ 1 file changed, 100 insertions(+) create mode 100644 sources/tech/20181209 How do you document a tech project with comics.md diff --git a/sources/tech/20181209 How do you document a tech project with comics.md b/sources/tech/20181209 How do you document a tech project with comics.md new file mode 100644 index 0000000000..02d4981875 --- /dev/null +++ b/sources/tech/20181209 How do you document a tech project with comics.md @@ -0,0 +1,100 @@ +[#]: collector: (lujun9972) +[#]: translator: ( ) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (How do you document a tech project with comics?) +[#]: via: (https://jvns.ca/blog/2018/12/09/how-do-you-document-a-tech-project-with-comics/) +[#]: author: (Julia Evans https://jvns.ca/) + +How do you document a tech project with comics? +====== + +Every so often I get email from people saying basically “hey julia! we have an open source project! we’d like to use comics / zines / art to document our project! Can we hire you?“. + +spoiler: the answer is “no, you can’t hire me” – I don’t do commissions. But I do think this is a cool idea and I’ve often wished I had something more useful to say to people than “no”, so if you’re interested in this, here are some ideas about how to accomplish it! + +### zine != drawing + +First, a terminology distinction. One weird thing I’ve noticed is that people frequently refer to individual tech drawings as “zines”. I think this is due to me communicating poorly somehow, but – drawings are not zines! A zine is a **printed booklet**, like a small maga**zine**. You wouldn’t call a photo of a model in Vogue a magazine! The magazine has like a million pages! An individual drawing is a drawing/comic/graphic/whatever. 
Just clarifying this because I think it causes a bit of unnecessary confusion. + +### comics without good information are useless + +Usually when folks ask me “hey, could we make a comic explaining X”, it doesn’t seem like they have a clear idea of what information exactly they want to get across, they just have a vague idea that maybe it would be cool to draw some comics. This makes sense – figuring out what information would be useful to tell people is very hard!! It’s 80% of what I spend my time on when making comics. + +You should think about comics the same way as any kind of documentation – start with the information you want to convey, who your target audience is, and how you want to distribute it (twitter? on your website? in person?), and figure out how to illustrate it after :). The information is the main thing, not the art! + +Once you have a clear story about what you want to get across, you can start trying to think about how to represent it using illustrations! + +### focus on concepts that don’t change + +Drawing comics is a much bigger investment than writing documentation (it takes me like 5x longer to convey the same information in a comic than in writing). So use it wisely! Because it’s not that easy to edit, if you’re going to make something a comic you want to focus on concepts that are very unlikely to change. So talk about the core ideas in your project instead of the exact command line arguments it takes! + +Here are a couple of options for how you could use comics/illustrations to document your project! + +### option 1: a single graphic + +One format you might want to try is a single, small graphic explaining what your project is about and why folks might be interested in it. For example: [this zulip comic][1] + +This is a short thing, you could post it on Twitter or print it as a pamphlet to give out. The information content here would probably be basically what’s on your project homepage, but presented in a more fun/exciting way :) + +You can put a pretty small amount of information in a single comic. With that Zulip comic, the things I picked out were: + + * zulip is sort of like slack, but it has threads + * it’s easy to keep track of threads even if the conversation takes place over several days + * you can much more easily selectively catch up with Zulip + * zulip is open source + * there’s an open zulip server you can try out + + + +That’s not a lot of information! It’s 50 words :). So to do this effectively you need to distill your project down to 50 words in a way that’s still useful. It’s not easy! + +### option 2: many comics + +Another approach you can take is to make a more in depth comic / illustration, like [google’s guide to kubernetes][2] or [the children’s illustrated guide to kubernetes][3]. + +To do this, you need a much stronger concept than “uh, I want to explain our project” – you want to have a clear target audience in mind! For example, if I were drawing a set of Docker comics, I’d probably focus on folks who want to use Docker in production. so I’d want to discuss: + + * publishing your containers to a public/private registry + * some best practices for tagging your containers + * how to make sure your hosts don’t run out of disk space from downloading too many containers + * how to use layers to save on disk space / download less stuff + * whether it’s reasonable to run the same containers in production & in dev + + + +That’s totally different from the set of comics I’d write for folks who just want to use Docker to develop locally! 
+
+### option 3: a printed zine
+
+The main thing that differentiates this from “many comics” is that zines are printed! Because of that, for this to make sense you need to have a place to give out the printed copies! Maybe you’re going to present your project at a major conference? Maybe you give workshops about your project and want to give out the zine to folks in the workshop as notes? Maybe you want to mail it to people?
+
+### how to hire someone to help you
+
+There are basically 3 ways to hire someone:
+
+ 1. Hire someone who both understands (or can quickly learn) the technology you want to document and can illustrate well. These folks are tricky to find and probably expensive (I certainly wouldn’t do a project like this for less than $10,000 even if I did do commissions), just because programmers can usually charge a pretty high consulting rate. I’d guess that the main failure mode here is that it might be impossible/very hard to find someone, and it might be expensive.
+ 2. Collaborate with an illustrator to draw it for you. The main failure mode here is that if you don’t give the illustrator clear explanations of your tech to work with, you.. won’t end up with a clear and useful explanation. From what I’ve seen, **most folks underinvest in writing clear explanations for their illustrators** – I’ve seen a few really adorable tech comics that I don’t find useful or clear at all. I’d love to see more people do a better job of this. What’s the point of having an adorable illustration if it doesn’t teach anyone anything? :)
+ 3. Draw it yourself :). This is what I do, obviously. stick figures are okay!
+
+
+
+Most people seem to use method #2 – I’m not actually aware of any tech folks who have done commissioned comics (though I’m sure it’s happened!). I think method #2 is a great option and I’d love to see more folks do it. Paying illustrators is really fun!
+
+--------------------------------------------------------------------------------
+
+via: https://jvns.ca/blog/2018/12/09/how-do-you-document-a-tech-project-with-comics/
+
+作者:[Julia Evans][a]
+选题:[lujun9972][b]
+译者:[译者ID](https://github.com/译者ID)
+校对:[校对者ID](https://github.com/校对者ID)
+
+本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
+
+[a]: https://jvns.ca/
+[b]: https://github.com/lujun9972
+[1]: https://twitter.com/b0rk/status/986444234365521920
+[2]: https://cloud.google.com/kubernetes-engine/kubernetes-comic/
+[3]: https://thenewstack.io/kubernetes-gets-childrens-book/

From 010ca0b7c40737426a9a6a33189f93bc31d7a329 Mon Sep 17 00:00:00 2001
From: DarkSun
Date: Tue, 17 Sep 2019 12:25:54 +0800
Subject: [PATCH 126/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020181118=20An=20e?=
 =?UTF-8?q?xample=20of=20how=20C++=20destructors=20are=20useful=20in=20Env?=
 =?UTF-8?q?oy?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

sources/tech/20181118 An example of how C-- destructors are useful in Envoy.md

---
 ...how C-- destructors are useful in Envoy.md | 130 ++++++++++++++++++
 1 file changed, 130 insertions(+)
 create mode 100644 sources/tech/20181118 An example of how C-- destructors are useful in Envoy.md

diff --git a/sources/tech/20181118 An example of how C-- destructors are useful in Envoy.md b/sources/tech/20181118 An example of how C-- destructors are useful in Envoy.md
new file mode 100644
index 0000000000..f95f17db01
--- /dev/null
+++ b/sources/tech/20181118 An example of how C-- destructors are useful in Envoy.md
@@ -0,0 +1,130 @@
+[#]: collector: (lujun9972)
+[#]: translator: ( )
+[#]: reviewer: ( )
+[#]: publisher: ( )
+[#]: url: ( )
+[#]: subject: (An example of how C++ destructors are useful in Envoy)
+[#]: via: (https://jvns.ca/blog/2018/11/18/c---destructors---really-useful/)
+[#]: author: (Julia Evans https://jvns.ca/)
+
+An example of how C++ destructors are useful in Envoy
+======
+
+For a while now I’ve been working with a C++ project (Envoy), and sometimes I need to contribute to it, so my C++ skills have gone from “nonexistent” to “really minimal”. I’ve learned what an initializer list is and that a method starting with `~` is a destructor. I almost know what an lvalue and an rvalue are but not quite.
+
+But the other day when writing some C++ code I figured out something exciting about how to use destructors that I hadn’t realized! (the tl;dr of this post for people who know C++ is “julia finally understands what RAII is and that it is useful” :))
+
+### what’s a destructor?
+
+C++ has objects. When a C++ object goes out of scope, the compiler inserts a call to its destructor. So if you have some code like
+
+```
+int do_thing() {
+ Thing x{}; // this calls the Thing constructor
+ return 2;  // x's destructor runs here, just before do_thing returns
+}
+```
+
+there will be a call to x’s destructor at the end of the `do_thing` function. So the code C++ generates looks something like:
+
+ * make new thing
+ * call the new thing’s destructor
+ * return 2
+
+
+
+Obviously destructors are way more complicated than this. They need to get called when there are exceptions! And sometimes they get called manually. And for lots of other reasons too. But there are 10 million things to know about C++ and that is not what we’re doing today, we are just talking about one thing.
+
+### what happens in a destructor?
+
+A lot of the time memory gets freed, which is how you avoid having memory leaks. But that’s not what we’re talking about in this post!
We are talking about something more interesting.
+
+### the thing we’re interested in: Envoy circuit breakers
+
+So I’ve been working with Envoy a lot. 3 second Envoy refresher: it’s an HTTP proxy, your application makes requests to Envoy, which then proxies the request to the servers the application wants to talk to.
+
+One very useful feature Envoy has is this thing called “circuit breakers”. Basically the idea is that if your application makes 50 billion connections to a service, that will probably overwhelm the service. So Envoy keeps track of how many TCP connections you’ve made to a service, and will stop you from making new requests if you hit the limit. The default `max_connection` limit is 1024.
+
+### how do you track connection count?
+
+To maintain a circuit breaker on the number of TCP connections, you need to keep an accurate count of how many TCP connections are currently open! How do you do that? Well, the way it works is to maintain a `connections` counter and:
+
+ * every time a connection is opened, increment the counter
+ * every time a connection is destroyed (because of a reset / timeout / whatever), decrement the counter
+ * when creating a new connection, check that the `connections` counter is not over the limit
+
+
+
+that’s all! And incrementing the counter when creating a new connection is pretty easy. But how do you make sure that the counter gets _decremented_ when the connection is destroyed? Connections can be destroyed in a lot of ways (they can time out! they can be closed by Envoy! they can be closed by the server! maybe something else I haven’t thought of could happen!) and it seems very easy to accidentally miss a way of closing them.
+
+### destructors to the rescue
+
+The way Envoy solves this problem is to create a connection object (called `ActiveClient` in the HTTP connection pool) for every connection.
+
+Then it:
+
+ * increments the counter in the constructor ([code][1])
+ * decrements the counter in the destructor ([code][2])
+ * checks the counter when a new connection is created ([code][3])
+
+
+
+The beauty of this is that now you don’t need to make sure that the counter gets decremented in all the right places, you now just need to organize your code so that the `ActiveClient` object’s destructor gets called when the connection has closed.
+
+Where does the `ActiveClient` destructor get called in Envoy? Well, Envoy maintains 2 lists of clients (`ready_clients` and `busy_clients`), and when a connection gets closed, Envoy removes the client from those lists. And when it does that, it doesn’t need to do any extra cleanup!! In C++, anytime an object is removed from a list, its destructor is called. So `client.removeFromList(ready_clients_);` takes care of all the cleanup. And there’s no chance of forgetting to decrement the counter!! It will definitely always happen unless you accidentally leave the object on one of these lists, which would be a bug anyway because the connection is closed :)
+
+### RAII
+
+This pattern Envoy is using here is an extremely common C++ programming pattern called “resource acquisition is initialization”. I find that name very confusing but that’s what it’s called. Basically the way it works is:
+
+ * identify a resource (like “connection”) where a lot of things need to happen when the connection is initialized / finished
+ * make a class for that connection
+ * put all the initialization / finishing code in the constructor / destructor
+ * make sure the object’s destructor method gets called when appropriate! (by removing it from a vector / having it go out of scope – there’s a sketch of this just below)
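+
+To make this concrete, here’s a minimal sketch of the counter-guard idea. It’s an illustration, not Envoy’s actual implementation: the `ConnectionGuard` class and the `active_connections` counter are made-up stand-ins for Envoy’s `ActiveClient` and its real stats code.
+
+```
+#include <atomic>
+#include <stdexcept>
+
+std::atomic<int> active_connections{0}; // how many connections are open right now
+constexpr int kMaxConnections = 1024;   // the circuit breaker limit
+
+class ConnectionGuard {
+public:
+  ConnectionGuard() {
+    // increment first, then check the limit; this stays correct even if
+    // several threads are opening connections at the same time
+    if (++active_connections > kMaxConnections) {
+      --active_connections;
+      throw std::runtime_error("circuit breaker: too many connections");
+    }
+  }
+
+  // the destructor runs no matter how the connection ends (timeout, reset,
+  // exception, ...), so the counter can never be left stale
+  ~ConnectionGuard() { --active_connections; }
+
+  // copying a guard would make the count wrong, so forbid it
+  ConnectionGuard(const ConnectionGuard&) = delete;
+  ConnectionGuard& operator=(const ConnectionGuard&) = delete;
+};
+```
+
+Any object that owns one of these guards (the way Envoy’s connection objects own their counters) gets the decrement for free when it’s destroyed, which is exactly the property the circuit breaker needs.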
+
+Previously I knew about using this pattern for kind of obvious things (make sure all the memory gets freed in the destructor, or make sure file descriptors get closed). But I didn’t realize it was also useful for cases that are slightly less obviously a resource like “decrement a counter”.
+
+The reason this pattern works is because the C++ compiler/standard library does a bunch of work to make sure that destructors get called when you’re done with an object – the compiler inserts destructor calls at the end of each block of code, after exceptions, and many standard library collections make sure destructors are called when you remove an object from a collection.
+
+### RAII gives you prompt, deterministic, and hard-to-screw-up cleanup of resources
+
+The exciting thing here is that this programming pattern gives you a way to schedule cleaning up resources that’s:
+
+ * easy to ensure always happens (when the object goes away, it always happens, even if there was an exception!)
+ * prompt & deterministic (it happens right away and it’s guaranteed to happen!)
+
+
+
+### what languages have RAII?
+
+C++ and Rust have RAII. Probably other languages too. Java, Python, Go, and garbage collected languages in general do not. In a garbage collected language you can often set up destructors to be run when the object is GC’d. But often (like in this case, with the connection count) you want things to be cleaned up **right away** when the object is no longer in use, not some indeterminate period later whenever GC happens to run.
+
+Python context managers are a related idea – you could do something like:
+
+```
+with conn_pool.connection() as conn:
+    do_stuff(conn)  # cleanup happens as soon as the block exits
+```
+
+### that’s all for now!
+
+Hopefully this explanation of RAII is interesting and mostly correct. Thanks to Kamal for clarifying some RAII things for me!
+ +-------------------------------------------------------------------------------- + +via: https://jvns.ca/blog/2018/11/18/c---destructors---really-useful/ + +作者:[Julia Evans][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://jvns.ca/ +[b]: https://github.com/lujun9972 +[1]: https://github.com/envoyproxy/envoy/blob/200b0e41641be46471c2ce3d230aae395fda7ded/source/common/http/http1/conn_pool.cc#L301 +[2]: https://github.com/envoyproxy/envoy/blob/200b0e41641be46471c2ce3d230aae395fda7ded/source/common/http/http1/conn_pool.cc#L315 +[3]: https://github.com/envoyproxy/envoy/blob/200b0e41641be46471c2ce3d230aae395fda7ded/source/common/http/http1/conn_pool.cc#L97 From 7dc55d708a689cf1695cb010478caefb0e411a25 Mon Sep 17 00:00:00 2001 From: Xingyu Wang Date: Tue, 17 Sep 2019 12:26:00 +0800 Subject: [PATCH 127/202] PUB @wxy https://linux.cn/article-11351-1.html --- ...n source initiative, open source in Hollywood,-and more.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) rename {translated/news => published}/20190915 Sandboxie-s path to-open source, update on the Pentagon-s open source initiative, open source in Hollywood,-and more.md (99%) diff --git a/translated/news/20190915 Sandboxie-s path to-open source, update on the Pentagon-s open source initiative, open source in Hollywood,-and more.md b/published/20190915 Sandboxie-s path to-open source, update on the Pentagon-s open source initiative, open source in Hollywood,-and more.md similarity index 99% rename from translated/news/20190915 Sandboxie-s path to-open source, update on the Pentagon-s open source initiative, open source in Hollywood,-and more.md rename to published/20190915 Sandboxie-s path to-open source, update on the Pentagon-s open source initiative, open source in Hollywood,-and more.md index 02597d190f..e70c9f8832 100644 --- a/translated/news/20190915 Sandboxie-s path to-open source, update on the Pentagon-s open source initiative, open source in Hollywood,-and more.md +++ b/published/20190915 Sandboxie-s path to-open source, update on the Pentagon-s open source initiative, open source in Hollywood,-and more.md @@ -1,8 +1,8 @@ [#]: collector: (lujun9972) [#]: translator: (wxy) [#]: reviewer: (wxy) -[#]: publisher: ( ) -[#]: url: ( ) +[#]: publisher: (wxy) +[#]: url: (https://linux.cn/article-11351-1.html) [#]: subject: (Sandboxie's path to open source, update on the Pentagon's open source initiative, open source in Hollywood, and more) [#]: via: (https://opensource.com/article/19/9/news-september-15) [#]: author: (Lauren Maffeo https://opensource.com/users/lmaffeo) From 11f54a49c8841856daacb1d8ae8a4488d575fb92 Mon Sep 17 00:00:00 2001 From: DarkSun Date: Tue, 17 Sep 2019 12:26:08 +0800 Subject: [PATCH 128/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020181111=20Some?= =?UTF-8?q?=20notes=20on=20running=20new=20software=20in=20production?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/tech/20181111 Some notes on running new software in production.md --- ...s on running new software in production.md | 151 ++++++++++++++++++ 1 file changed, 151 insertions(+) create mode 100644 sources/tech/20181111 Some notes on running new software in production.md diff --git a/sources/tech/20181111 Some notes on running new software in production.md b/sources/tech/20181111 Some notes on running new software in production.md new file mode 100644 
index 0000000000..bfdfb66a44
--- /dev/null
+++ b/sources/tech/20181111 Some notes on running new software in production.md
@@ -0,0 +1,151 @@
+[#]: collector: (lujun9972)
+[#]: translator: ( )
+[#]: reviewer: ( )
+[#]: publisher: ( )
+[#]: url: ( )
+[#]: subject: (Some notes on running new software in production)
+[#]: via: (https://jvns.ca/blog/2018/11/11/understand-the-software-you-use-in-production/)
+[#]: author: (Julia Evans https://jvns.ca/)
+
+Some notes on running new software in production
+======
+
+I’m working on a talk for kubecon in December! One of the points I want to get across is the amount of time/investment it takes to use new software in production without causing really serious incidents, and what that’s looked like for us in our use of Kubernetes.
+
+To start out, this post isn’t blanket advice. There are lots of times when it’s totally fine to just use software and not worry about **how** it works exactly. So let’s start by talking about when it’s important to invest.
+
+### when it matters: 99.99%
+
+If you’re running a service with a low SLO like 99% I don’t think it matters that much to understand the software you run in production. You can be down for like 7 hours a month! If something goes wrong, just fix it and it’s fine.
+
+At 99.99%, it’s different. That’s about 52 minutes / year of downtime, and if you find out about a serious issue for the first time in production it could easily take you 20 minutes or more to revert the change. That’s half your uptime budget for the year!
+
+### when it matters: software that you’re using heavily
+
+Also, even if you’re running a service with a 99.99% SLO, it’s impossible to develop a super deep understanding of every single piece of software you’re using. For example, a web service might use:
+
+ * 100 library dependencies
+ * the filesystem (so there’s linux filesystem code!)
+ * the network (linux networking code!)
+ * a database (like postgres)
+ * a proxy (like nginx/haproxy)
+
+
+
+If you’re only reading like 2 files from disk, you don’t need to do a super deep dive into Linux filesystem internals, you can just read the file from disk.
+
+What I try to do in practice is identify the components which we rely on the most (or have the most unusual use cases for!), and invest time into understanding those. These are usually pretty easy to identify because they’re the ones which will cause the most problems :)
+
+### when it matters: new software
+
+Understanding your software especially matters for newer/less mature software projects, because it’s more likely to have bugs or just not have matured enough to be used by most people without having to worry. I’ve spent a bunch of time recently with Kubernetes/Envoy which are both relatively new projects, and neither of those are remotely in the category of “oh, it’ll just work, don’t worry about it”. I’ve spent many hours debugging weird surprising edge cases with both of them and learning how to configure them in the right way.
+
+### a playbook for understanding your software
+
+The playbook for understanding the software you run in production is pretty simple. Here it is:
+
+ 1. Start using it in production in a non-critical capacity (by sending a small percentage of traffic to it, on a less critical service, etc)
+ 2. Let that bake for a few weeks.
+ 3. Run into problems.
+ 4. Fix the problems. Go to step 3.
+
+
+
+Repeat until you feel like you have a good handle on this software’s failure modes and are comfortable running it in a more critical capacity.
Let’s talk about that in a little more detail, though:
+
+### what running into bugs looks like
+
+For example, I’ve been spending a lot of time with Envoy in the last year. Some of the issues we’ve seen along the way (in no particular order) are:
+
+ * One of the default settings resulted in retry & timeout headers not being respected
+ * Envoy (as a client) doesn’t support TLS session resumption, so servers with a large number of Envoy clients get DDOSed by TLS handshakes
+ * Envoy’s active healthchecking means that your services get healthchecked by every client. This is mostly okay but (again) services with many clients can get overwhelmed by it.
+ * Having every client independently healthcheck every server interacts somewhat poorly with services which are under heavy load, and can exacerbate performance issues by removing up-but-slow servers from the load balancer rotation.
+ * Envoy doesn’t retry failed connections by default
+ * it frequently segfaults when given incorrect configuration
+ * various issues with it segfaulting because of resource leaks / memory safety issues
+ * hosts running out of disk space because we didn’t rotate Envoy log files often enough
+
+
+
+A lot of these aren’t bugs – they’re just cases where we expected the default configuration to do one thing, and it did another thing. This happens all the time, and it can result in really serious incidents. Figuring out how to configure a complicated piece of software appropriately takes a lot of time, and you just have to account for that.
+
+And Envoy is great software! The maintainers are incredibly responsive, they fix bugs quickly and its performance is good. It’s overall been quite stable and it’s done well in production. But just because something is great software doesn’t mean you won’t also run into 10 or 20 relatively serious issues along the way that need to be addressed in one way or another. And it’s helpful to understand those issues **before** putting the software in a really critical place.
+
+### try to have each incident only once
+
+My view is that running new software in production inevitably results in incidents. The trick:
+
+ 1. Make sure the incidents aren’t too serious (by making ‘production’ a less critical system first)
+ 2. Whenever there’s an incident (even if it’s not that serious!!!), spend the time necessary to understand exactly why it happened and how to make sure it doesn’t happen again
+
+
+
+My experience so far has been that it’s actually relatively possible to pull off “have every incident only once”. When we investigate issues and implement remediations, usually that issue **never comes back**. The remediation can either be:
+
+ * a configuration change
+ * reporting a bug upstream and either fixing it ourselves or waiting for a fix
+ * a workaround (“this software doesn’t work with 10,000 clients? ok, we just won’t use it in cases where there are that many clients for now!”, “oh, a memory leak? let’s just restart it every hour”)
+
+
+
+Knowledge-sharing is really important here too – it’s always unfortunate when one person finds an incident in production, fixes it, but doesn’t explain the issue to the rest of the team so somebody else ends up causing the same incident again later because they didn’t hear about the original incident.
+
+### Understand what is ok to break and what isn’t
+
+Another huge part of understanding the software I run in production is understanding which parts are OK to break (aka “if this breaks, it won’t result in a production incident”) and which aren’t. This lets me **focus**: I can put big boxes around some components and decide “ok, if this breaks it doesn’t matter, so I won’t pay super close attention to it”.
+
+For example, with Kubernetes:
+
+ok to break:
+
+ * any stateless control plane component can crash or be cycled out or go down for 5 minutes at any time. If we had 95% uptime for the kubernetes control plane that would probably be fine, it just needs to be working most of the time.
+ * kubernetes networking (the system where you give every pod an IP address) can break as much as it wants because we decided not to use it to start
+
+
+
+not ok:
+
+ * for us, if etcd goes down for 10 minutes, that’s ok. If it goes down for 2 hours, it’s not
+ * containers not starting or crashing on startup (iam issues, docker not starting containers, bugs in the scheduler, bugs in other controllers) is serious and needs to be looked at immediately
+ * containers not having access to the resources they need (because of permissions issues, etc)
+ * pods being terminated unexpectedly by Kubernetes (if you configure kubernetes wrong it can terminate your pods!)
+
+
+
+with Envoy, the breakdown is pretty different:
+
+ok to break:
+
+ * if the envoy control plane goes down for 5 minutes, that’s fine (it’ll keep working with stale data)
+ * segfaults on startup due to configuration errors are sort of okay because they manifest so early and they’re unlikely to surprise us (if the segfault doesn’t happen the 1st time, it shouldn’t happen the 200th time)
+
+
+
+not ok:
+
+ * Envoy crashes / segfaults are not good – if it crashes, network connections don’t happen
+ * if the control server serves incorrect or incomplete data that’s extremely dangerous and can result in serious production incidents. (so downtime is fine, but serving incorrect data is not!)
+
+
+
+Neither of these lists are complete at all, but they’re examples of what I mean by “understand your software”.
+
+### sharing ok to break / not ok lists is useful
+
+I think these “ok to break” / “not ok” lists are really useful to share, because even if they’re not 100% the same for every user, the lessons are pretty hard won. I’d be curious to hear about your breakdown of what kinds of failures are ok / not ok for software you’re using!
+
+Figuring out all the failure modes of a new piece of software and how they apply to your situation can take months. (this is why when you ask your database team “hey can we just use NEW DATABASE” they look at you in such a pained way).
So anything we can do to help other people learn faster is amazing +-------------------------------------------------------------------------------- + +via: https://jvns.ca/blog/2018/11/11/understand-the-software-you-use-in-production/ + +作者:[Julia Evans][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://jvns.ca/ +[b]: https://github.com/lujun9972 From 501ba260d9ec3540e1ba4354fb72969070e08da5 Mon Sep 17 00:00:00 2001 From: "qfzy1233@163.com" Date: Tue, 17 Sep 2019 12:42:10 +0800 Subject: [PATCH 129/202] Update 20190905 How to Change Themes in Linux Mint.md --- ...0905 How to Change Themes in Linux Mint.md | 54 +++++++++---------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/sources/tech/20190905 How to Change Themes in Linux Mint.md b/sources/tech/20190905 How to Change Themes in Linux Mint.md index 6f1c1ce3da..54bc21cdca 100644 --- a/sources/tech/20190905 How to Change Themes in Linux Mint.md +++ b/sources/tech/20190905 How to Change Themes in Linux Mint.md @@ -7,74 +7,74 @@ [#]: via: (https://itsfoss.com/install-themes-linux-mint/) [#]: author: (It's FOSS Community https://itsfoss.com/author/itsfoss/) -How to Change Themes in Linux Mint +如何在 Linux Mint 中更换主题 ====== -Using Linux Mint is, from the start, a unique experience for its main Desktop Environment: Cinnamon. This is one of the main [features why I love Linux Mint][1]. +自始至终,使用 Cinnamon 桌面环境的 Linux Mint 都是一种卓越的体验。这也是[为何我喜爱 Linux Mint ][1]的主要原因之一。 -Since Mint’s dev team [started to take design more serious][2], “Themes” applet became an important way not only to choose new themes, icons, buttons, window borders and mouse pointers, but also to install new themes directly from it. Interested? Let’s jump into it. +自从 Mint 的开发团队 [开始更为严肃的对待设计][2], “桌面主题” 应用便成为了更换新主题,图标,按钮样式,窗口边框以及鼠标指针的重要方式,当然你也可以直接通过它安装新的主题。感兴趣么? 让我们开始吧。 -### How to change themes in Linux Mint +### 如何在 Linux Mint 中更换主题 -Search for themes in the Menu and open the Themes applet. +在菜单中搜索主题并打开主题应用。 ![Theme Applet provides an easy way of installing and changing themes][3] -At the applet there’s a “Add/Remove” button, pretty simple, huh? And, clicking on it, you and I can see Cinnamon Spices (Cinnamon’s official addons repository) themes ordered first by popularity. +在应用中中有一个“添加/删除”按钮,非常简单,不是么?而且,点击它,我们可以看到Cinnamon Spices( Cinnamon 的官方插件库)的主题按流行程度排序。 ![Installing new themes in Linux Mint Cinnamon][4] -To install one, all it’s needed to do is click on yours preferred one and wait for it to download. After that, the theme will be available at the “Desktop” option on the first page of the applet. Just double click on one of the installed themes to start using it. +要安装主题,你所要做的就是点击你喜欢的一个,然后等待它下载。之后,主题将在应用第一页的“Desktop”选项中显示可用。只需双击已安装的主题之一就可以开始使用它。 ![Changing themes in Linux Mint Cinnamon][5] -Here’s the default Linux Mint look: +下面是默认的 Linux Mint 外观: ![Linux Mint Default Theme][6] -And here’s after I change the theme: +这是在我更换主题之后: ![Linux Mint with Carta Theme][7] -All the themes are also available at the Cinnamon Spices site for more information and bigger screenshots so you can take a better look on how your system will look. 
+所有的主题都可以在 Cinnamon Spices 网站上获得更多的信息和更大的截图,这样你就可以更好地了解你的系统的外观。 -[Browse Cinnamon Themes][8] +[浏览 Cinnamon 主题][8] -### Installing third party themes in Linux Mint +### 在 Linux Mint 中安装第三方主题 -_“I saw this amazing theme on another site and it is not available at Cinnamon Spices…”_ +_“我在另一个网站上看到了这个优异的主题,但 Cinnamon Spices 网站上没有……”_ -Cinnamon Spices has a good collection of themes but you’ll still find that the theme you saw some place else is not available on the official Cinnamon website. +Cinnamon Spices 集成了许多优秀的主题,但你仍然会发现,你看到的主题并没有被 Cinnamon Spices 官方网站收录。 -Well, it would be nice if there was another way, huh? You might imagine that there is (I’m mean…obviously there is). So, first things first, there are other websites where you and I can find new cool themes. +这时你可能会想:如果有别的办法就好了,对么?你可能会认为有(我的意思是……当然啦)。首先,我们可以在其他网站上找到一些很酷的主题。 -I’ll recommend going to Cinnamon Look and browse themes there. If you like something download it. +我推荐你去 Cinnamon 浏览主题。如果你喜欢什么,就下载吧。 -[Get more themes at Cinnamon Look][9] +[在 Cinnamon 外观中获取更多主题][9] -After the preferred theme is downloaded, you will have a compressed file now with all you need for the installation. Extract it and save at ~/.themes. Confused? The “~” file path is actually your home folder: /home/{YOURUSER}/.themes. +下载了首选主题之后,你现在将得到一个压缩文件,其中包含安装所需的所有内容。提取它并保存到 ~/.themes. 迷糊么? “~” 代表了你的 home 文件夹的对应路径: /home/{YOURUSER}/.themes. [][10] -Suggested read  Fix "Failed To Start Session" At Login In Ubuntu 16.04 +建议在登录 Ubuntu 16.04 时读取“启动会话失败”的修复程序。 -So go to the your Home directory. Press Ctrl+H to [show hidden files in Linux][11]. If you don’t see a .themes folder, create a new folder and name .themes. Remember that the dot at the beginning of the folder name is important. +然后跳转到你的主目录。按Ctrl+H[显示Linux中的隐藏文件][11]。如果没有看到.themes文件夹,创建一个新文件夹并命名为.themes。记住,文件夹名称开头的点很重要。 -Copy the extracted theme folder from your Downloads directory to the .themes folder in your Home. +将提取的主题文件夹从下载目录复制到你的 home 中的.themes文件夹。 -After that, look for the installed theme at the applet above mentioned. +最后,在上面提到的应用中查找已安装的主题。 -Note +注记 -Remember that the themes must be made to work on Cinnamon, even though it is a fork from GNOME, not all themes made for GNOME works at Cinnamon. +请记住,主题必须是 Cinnamon 相对应的,即使它是一个从 GNOME 复刻的系统也不行,并不是所有的 GNOME 主题都适用于 Cinnamon 。 -Changing theme is one part of Cinnamon customization. You can also [change the looks of Linux Mint by changing the icons][12]. +改变主题是 Cinnamon 定制的一部分。你还可以[通过更改图标来更改 Linux Mint 的外观][12]。 -I hope you now you know how to change themes in Linux Mint. Which theme are you going to use? +我希望你现在已经知道如何在 Linux Mint 中更改主题了。快去选取你喜欢的主题吧? ### João Gondim -Linux enthusiast from Brasil. +来自巴西的Linux爱好者。 -------------------------------------------------------------------------------- From 9d9edef632191b28a38c6629725a2a0900011f8a Mon Sep 17 00:00:00 2001 From: Xingyu Wang Date: Tue, 17 Sep 2019 12:52:06 +0800 Subject: [PATCH 130/202] PRF @geekpi --- ... 
from SAR Reports Using the Bash Script.md | 64 ++++++------------- 1 file changed, 19 insertions(+), 45 deletions(-) diff --git a/translated/tech/20190905 How to Get Average CPU and Memory Usage from SAR Reports Using the Bash Script.md b/translated/tech/20190905 How to Get Average CPU and Memory Usage from SAR Reports Using the Bash Script.md index 52bfcd19a8..c04d5e9322 100644 --- a/translated/tech/20190905 How to Get Average CPU and Memory Usage from SAR Reports Using the Bash Script.md +++ b/translated/tech/20190905 How to Get Average CPU and Memory Usage from SAR Reports Using the Bash Script.md @@ -1,37 +1,28 @@ [#]: collector: (lujun9972) [#]: translator: (geekpi) -[#]: reviewer: ( ) +[#]: reviewer: (wxy) [#]: publisher: ( ) [#]: url: ( ) [#]: subject: (How to Get Average CPU and Memory Usage from SAR Reports Using the Bash Script) [#]: via: (https://www.2daygeek.com/linux-get-average-cpu-memory-utilization-from-sar-data-report/) [#]: author: (Magesh Maruthamuthu https://www.2daygeek.com/author/magesh/) -如何使用 Bash 脚本从 SAR 报告中获取 CPU 和内存的平均使用情况 +如何使用 Bash 脚本从 SAR 报告中获取 CPU 和内存使用情况 ====== -大多数 Linux 管理员使用 **[SAR 报告][1]**监控系统性能,因为它会收集一周的性能数据。 +大多数 Linux 管理员使用 [SAR 报告][1]监控系统性能,因为它会收集一周的性能数据。但是,你可以通过更改 `/etc/sysconfig/sysstat` 文件轻松地将其延长到四周。同样,这段时间可以延长一个月以上。如果超过 28,那么日志文件将放在多个目录中,每月一个。 +要将覆盖期延长至 28 天,请对 `/etc/sysconfig/sysstat` 文件做以下更改。 -但是,你可以通过更改 “/etc/sysconfig/sysstat” 文件轻松地将其延长到四周。 - -同样,这段时间可以延长一个月以上。如果超过 28,那么日志文件将放在多个目录中,每月一个。 - -要将覆盖期延长至 28 天,请对 “/etc/sysconfig/sysstat” 文件做以下更改。 - -编辑 sysstat 文件并将 HISTORY=7 更改为 HISTORY=28.。 +编辑 `sysstat` 文件并将 `HISTORY=7` 更改为 `HISTORY=28`。 在本文中,我们添加了三个 bash 脚本,它们可以帮助你在一个地方轻松查看每个数据文件的平均值。 我们过去加过许多有用的 shell 脚本。如果你想查看它们,请进入下面的链接。 - * **[如何使用 shell 脚本自动化日常操作][2]** +* [如何使用 shell 脚本自动化日常操作][2] - - -这些脚本简单明了。出于测试目的,我们仅包括两个性能指标,即 CPU 和内存。 - -你可以修改脚本中的其他性能指标以满足你的需求。 +这些脚本简单明了。出于测试目的,我们仅包括两个性能指标,即 CPU 和内存。你可以修改脚本中的其他性能指标以满足你的需求。 ### 脚本 1:从 SAR 报告中获取平均 CPU 利用率的 Bash 脚本 @@ -49,15 +40,10 @@ echo "|Average: CPU %user %nice %system %iowait %steal echo "+----------------------------------------------------------------------------------+" for file in `ls -tr /var/log/sa/sa* | grep -v sar` - do - -dat=`sar -f $file | head -n 1 | awk '{print $4}'` - -echo -n $dat - -sar -f $file | grep -i Average | sed "s/Average://" - + dat=`sar -f $file | head -n 1 | awk '{print $4}'` + echo -n $dat + sar -f $file | grep -i Average | sed "s/Average://" done echo "+----------------------------------------------------------------------------------+" @@ -105,15 +91,10 @@ echo "|Average: kbmemfree kbmemused %memused kbbuffers kbcached kbcommit echo "+-------------------------------------------------------------------------------------------------------------------+" for file in `ls -tr /var/log/sa/sa* | grep -v sar` - do - -dat=`sar -f $file | head -n 1 | awk '{print $4}'` - -echo -n $dat - -sar -r -f $file | grep -i Average | sed "s/Average://" - + dat=`sar -f $file | head -n 1 | awk '{print $4}'` + echo -n $dat + sar -r -f $file | grep -i Average | sed "s/Average://" done echo "+-------------------------------------------------------------------------------------------------------------------+" @@ -157,19 +138,12 @@ echo "+------------------------------------------------------------------------- #!/bin/bash for file in `ls -tr /var/log/sa/sa* | grep -v sar` - do - - sar -f $file | head -n 1 | awk '{print $4}' - - echo "-----------" - - sar -u -f $file | awk '/Average:/{printf("CPU Average: %.2f%\n"), 100 - $8}' - - sar -r -f $file | awk '/Average:/{printf("Memory Average: 
%.2f%\n"),(($3-$5-$6)/($2+$3)) * 100 }' - - printf "\n" - + sar -f $file | head -n 1 | awk '{print $4}' + echo "-----------" + sar -u -f $file | awk '/Average:/{printf("CPU Average: %.2f%\n"), 100 - $8}' + sar -r -f $file | awk '/Average:/{printf("Memory Average: %.2f%\n"),(($3-$5-$6)/($2+$3)) * 100 }' + printf "\n" done ``` @@ -223,7 +197,7 @@ via: https://www.2daygeek.com/linux-get-average-cpu-memory-utilization-from-sar- 作者:[Magesh Maruthamuthu][a] 选题:[lujun9972][b] 译者:[geekpi](https://github.com/geekpi) -校对:[校对者ID](https://github.com/校对者ID) +校对:[wxy](https://github.com/wxy) 本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 From 9180c2ef2b8b32caf9816e1d8b86410d7f98de85 Mon Sep 17 00:00:00 2001 From: Xingyu Wang Date: Tue, 17 Sep 2019 12:52:32 +0800 Subject: [PATCH 131/202] PUB @geekpi https://linux.cn/article-11352-1.html --- ...and Memory Usage from SAR Reports Using the Bash Script.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) rename {translated/tech => published}/20190905 How to Get Average CPU and Memory Usage from SAR Reports Using the Bash Script.md (99%) diff --git a/translated/tech/20190905 How to Get Average CPU and Memory Usage from SAR Reports Using the Bash Script.md b/published/20190905 How to Get Average CPU and Memory Usage from SAR Reports Using the Bash Script.md similarity index 99% rename from translated/tech/20190905 How to Get Average CPU and Memory Usage from SAR Reports Using the Bash Script.md rename to published/20190905 How to Get Average CPU and Memory Usage from SAR Reports Using the Bash Script.md index c04d5e9322..87c0308360 100644 --- a/translated/tech/20190905 How to Get Average CPU and Memory Usage from SAR Reports Using the Bash Script.md +++ b/published/20190905 How to Get Average CPU and Memory Usage from SAR Reports Using the Bash Script.md @@ -1,8 +1,8 @@ [#]: collector: (lujun9972) [#]: translator: (geekpi) [#]: reviewer: (wxy) -[#]: publisher: ( ) -[#]: url: ( ) +[#]: publisher: (wxy) +[#]: url: (https://linux.cn/article-11352-1.html) [#]: subject: (How to Get Average CPU and Memory Usage from SAR Reports Using the Bash Script) [#]: via: (https://www.2daygeek.com/linux-get-average-cpu-memory-utilization-from-sar-data-report/) [#]: author: (Magesh Maruthamuthu https://www.2daygeek.com/author/magesh/) From 6957566344c627709231c5e0ab8f4ae3702487b5 Mon Sep 17 00:00:00 2001 From: DarkSun Date: Tue, 17 Sep 2019 13:03:56 +0800 Subject: [PATCH 132/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020180916=20The=20?= =?UTF-8?q?Rise=20and=20Demise=20of=20RSS=20(Old=20Version)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/talk/20180916 The Rise and Demise of RSS (Old Version).md --- ...he Rise and Demise of RSS (Old Version).md | 278 ++++++++++++++++++ 1 file changed, 278 insertions(+) create mode 100644 sources/talk/20180916 The Rise and Demise of RSS (Old Version).md diff --git a/sources/talk/20180916 The Rise and Demise of RSS (Old Version).md b/sources/talk/20180916 The Rise and Demise of RSS (Old Version).md new file mode 100644 index 0000000000..b6e1a4fdd9 --- /dev/null +++ b/sources/talk/20180916 The Rise and Demise of RSS (Old Version).md @@ -0,0 +1,278 @@ +[#]: collector: (lujun9972) +[#]: translator: ( ) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (The Rise and Demise of RSS (Old Version)) +[#]: via: (https://twobithistory.org/2018/09/16/the-rise-and-demise-of-rss.html) +[#]: author: (Two-Bit History https://twobithistory.org) 
+ +The Rise and Demise of RSS (Old Version) +====== + +_A newer version of this post was published on [December 18th, 2018][1]._ + +There are two stories here. The first is a story about a vision of the web’s future that never quite came to fruition. The second is a story about how a collaborative effort to improve a popular standard devolved into one of the most contentious forks in the history of open-source software development. + +In the late 1990s, in the go-go years between Netscape’s IPO and the Dot-com crash, everyone could see that the web was going to be an even bigger deal than it already was, even if they didn’t know exactly how it was going to get there. One theory was that the web was about to be revolutionized by syndication. The web, originally built to enable a simple transaction between two parties—a client fetching a document from a single host server—would be broken open by new standards that could be used to repackage and redistribute entire websites through a variety of channels. Kevin Werbach, writing for _Release 1.0_, a newsletter influential among investors in the 1990s, predicted that syndication “would evolve into the core model for the Internet economy, allowing businesses and individuals to retain control over their online personae while enjoying the benefits of massive scale and scope.”[1][2] He invited his readers to imagine a future in which fencing aficionados, rather than going directly to an “online sporting goods site” or “fencing equipment retailer,” could buy a new épée directly through e-commerce widgets embedded into their favorite website about fencing.[2][3] Just like in the television world, where big networks syndicate their shows to smaller local stations, syndication on the web would allow businesses and publications to reach consumers through a multitude of intermediary sites. This would mean, as a corollary, that consumers would gain significant control over where and how they interacted with any given business or publication on the web. + +RSS was one of the standards that promised to deliver this syndicated future. To Werbach, RSS was “the leading example of a lightweight syndication protocol.”[3][4] Another contemporaneous article called RSS the first protocol to realize the potential of XML.[4][5] It was going to be a way for both users and content aggregators to create their own customized channels out of everything the web had to offer. And yet, two decades later, RSS [appears to be a dying technology][6], now used chiefly by podcasters and programmers with tech blogs. Moreover, among that latter group, RSS is perhaps used as much for its political symbolism as its actual utility. Though of course some people really do have RSS readers, stubbornly adding an RSS feed to your blog, even in 2018, is a reactionary statement. That little tangerine bubble has become a wistful symbol of defiance against a centralized web increasingly controlled by a handful of corporations, a web that hardly resembles the syndicated web of Werbach’s imagining. + +The future once looked so bright for RSS. What happened? Was its downfall inevitable, or was it precipitated by the bitter infighting that thwarted the development of a single RSS standard? + +### Muddied Water + +RSS was invented twice. This meant it never had an obvious owner, a state of affairs that spawned endless debate and acrimony. But it also suggests that RSS was an important idea whose time had come. + +In 1998, Netscape was struggling to envision a future for itself. 
Its flagship product, the Netscape Navigator web browser—once preferred by 80% of web users—was quickly losing ground to Internet Explorer. So Netscape decided to compete in a new arena. In May, a team was brought together to start work on what was known internally as “Project 60.”[5][7] Two months later, Netscape announced “My Netscape,” a web portal that would fight it out with other portals like Yahoo, MSN, and Excite. + +The following year, in March, Netscape announced an addition to the My Netscape portal called the “My Netscape Network.” My Netscape users could now customize their My Netscape page so that it contained “channels” featuring the most recent headlines from sites around the web. As long as your favorite website published a special file in a format dictated by Netscape, you could add that website to your My Netscape page, typically by clicking an “Add Channel” button that participating websites were supposed to add to their interfaces. A little box containing a list of linked headlines would then appear. + +![A My Netscape Network Channel][8] + +The special file that participating websites had to publish was an RSS file. In the My Netscape Network announcement, Netscape explained that RSS stood for “RDF Site Summary.”[6][9] This was somewhat of a misnomer. RDF, or the Resource Description Framework, is basically a grammar for describing certain properties of arbitrary resources. (See [my article about the Semantic Web][10] if that sounds really exciting to you.) In 1999, a draft specification for RDF was being considered by the W3C. Though RSS was supposed to be based on RDF, the example RSS document Netscape actually released didn’t use any RDF tags at all, even if it declared the RDF XML namespace. In a document that accompanied the Netscape RSS specification, Dan Libby, one of the specification’s authors, explained that “in this release of MNN, Netscape has intentionally limited the complexity of the RSS format.”[7][11] The specification was given the 0.90 version number, the idea being that subsequent versions would bring RSS more in line with the W3C’s XML specification and the evolving draft of the RDF specification. + +RSS had been cooked up by Libby and another Netscape employee, Ramanathan Guha. Guha previously worked for Apple, where he came up with something called the Meta Content Framework. MCF was a format for representing metadata about anything from web pages to local files. Guha demonstrated its power by developing an application called [HotSauce][12] that visualized relationships between files as a network of nodes suspended in 3D space. After leaving Apple for Netscape, Guha worked with a Netscape consultant named Tim Bray to produce an XML-based version of MCF, which in turn became the foundation for the W3C’s RDF draft.[8][13] It’s no surprise, then, that Guha and Libby were keen to incorporate RDF into RSS. But Libby later wrote that the original vision for an RDF-based RSS was pared back because of time constraints and the perception that RDF was “‘too complex’ for the ‘average user.’”[9][14] + +While Netscape was trying to win eyeballs in what became known as the “portal wars,” elsewhere on the web a new phenomenon known as “weblogging” was being pioneered.[10][15] One of these pioneers was Dave Winer, CEO of a company called UserLand Software, which developed early content management systems that made blogging accessible to people without deep technical fluency. 
Winer ran his own blog, [Scripting News][16], which today is one of the oldest blogs on the internet. More than a year before Netscape announced My Netscape Network, on December 15th, 1997, Winer published a post announcing that the blog would now be available in XML as well as HTML.[11][17] + +Dave Winer’s XML format became known as the Scripting News format. It was supposedly similar to Microsoft’s Channel Definition Format (a “push technology” standard submitted to the W3C in March, 1997), but I haven’t been able to find a file in the original format to verify that claim.[12][18] Like Netscape’s RSS, it structured the content of Winer’s blog so that it could be understood by other software applications. When Netscape released RSS 0.90, Winer and UserLand Software began to support both formats. But Winer believed that Netscape’s format was “woefully inadequate” and “missing the key thing web writers and readers need.”[13][19] It could only represent a list of links, whereas the Scripting News format could represent a series of paragraphs, each containing one or more links. + +In June, 1999, two months after Netscape’s My Netscape Network announcement, Winer introduced a new version of the Scripting News format, called ScriptingNews 2.0b1. Winer claimed that he decided to move ahead with his own format only after trying but failing to get anyone at Netscape to care about RSS 0.90’s deficiencies.[14][20] The new version of the Scripting News format added several items to the `
<header>` element that brought the Scripting News format to parity with RSS. But the two formats continued to differ in that the Scripting News format, which Winer nicknamed the “fat” syndication format, could include entire paragraphs and not just links.
+
+Netscape got around to releasing RSS 0.91 the very next month. The updated specification was a major about-face. RSS no longer stood for “RDF Site Summary”; it now stood for “Rich Site Summary.” All the RDF—and there was almost none anyway—was stripped out. Many of the Scripting News tags were incorporated. In the text of the new specification, Libby explained:
+
+> RDF references removed. RSS was originally conceived as a metadata format providing a summary of a website. Two things have become clear: the first is that providers want more of a syndication format than a metadata format. The structure of an RDF file is very precise and must conform to the RDF data model in order to be valid. This is not easily human-understandable and can make it difficult to create useful RDF files. The second is that few tools are available for RDF generation, validation and processing. For these reasons, we have decided to go with a standard XML approach.[15][21]
+
+Winer was enormously pleased with RSS 0.91, calling it “even better than I thought it would be.”[16][22] UserLand Software adopted it as a replacement for the existing ScriptingNews 2.0b1 format. For a while, it seemed that RSS finally had a single authoritative specification.
+
+### The Great Fork
+
+A year later, the RSS 0.91 specification had become woefully inadequate. There were all sorts of things people were trying to do with RSS that the specification did not address. There were other parts of the specification that seemed unnecessarily constraining—each RSS channel could only contain a maximum of 15 items, for example.
+
+By that point, RSS had been adopted by several more organizations. Other than Netscape, which seemed to have lost interest after RSS 0.91, the big players were Dave Winer’s UserLand Software; O’Reilly Net, which ran an RSS aggregator called Meerkat; and Moreover.com, which also ran an RSS aggregator focused on news.[17][23] Via mailing list, representatives from these organizations and others regularly discussed how to improve on RSS 0.91. But there were deep disagreements about what those improvements should look like.
+
+The mailing list in which most of the discussion occurred was called the Syndication mailing list. [An archive of the Syndication mailing list][24] is still available. It is an amazing historical resource. It provides a moment-by-moment account of how those deep disagreements eventually led to a political rupture of the RSS community.
+
+On one side of the coming rupture was Winer. Winer was impatient to evolve RSS, but he wanted to change it only in relatively conservative ways. In June, 2000, he published his own RSS 0.91 specification on the UserLand website, meant to be a starting point for further development of RSS. It made no significant changes to the 0.91 specification published by Netscape.
Winer claimed in a blog post that accompanied his specification that it was only a “cleanup” documenting how RSS was actually being used in the wild, which was needed because the Netscape specification was no longer being maintained.[18][25] In the same post, he argued that RSS had succeeded so far because it was simple, and that by adding namespaces or RDF back to the format—some had suggested this be done in the Syndication mailing list—it “would become vastly more complex, and IMHO, at the content provider level, would buy us almost nothing for the added complexity.” In a message to the Syndication mailing list sent around the same time, Winer suggested that these issues were important enough that they might lead him to create a fork: + +> I’m still pondering how to move RSS forward. I definitely want ICE-like stuff in RSS2, publish and subscribe is at the top of my list, but I am going to fight tooth and nail for simplicity. I love optional elements. I don’t want to go down the namespaces and schema road, or try to make it a dialect of RDF. I understand other people want to do this, and therefore I guess we’re going to get a fork. I have my own opinion about where the other fork will lead, but I’ll keep those to myself for the moment at least.[19][26] + +Arrayed against Winer were several other people, including Rael Dornfest of O’Reilly, Ian Davis (responsible for a search startup called Calaba), and a precocious, 14-year-old Aaron Swartz, who all thought that RSS needed namespaces in order to accommodate the many different things everyone wanted to do with it. On another mailing list hosted by O’Reilly, Davis proposed a namespace-based module system, writing that such a system would “make RSS as extensible as we like rather than packing in new features that over-complicate the spec.”[20][27] The “namespace camp” believed that RSS would soon be used for much more than the syndication of blog posts, so namespaces, rather than being a complication, were the only way to keep RSS from becoming unmanageable as it supported more and more use cases. + +At the root of this disagreement about namespaces was a deeper disagreement about what RSS was even for. Winer had invented his Scripting News format to syndicate the posts he wrote for his blog. Guha and Libby at Netscape had designed RSS and called it “RDF Site Summary” because in their minds it was a way of recreating a site in miniature within Netscape’s online portal. Davis, writing to the Syndication mailing list, explained his view that RSS was “originally conceived as a way of building mini sitemaps,” and that now he and others wanted to expand RSS “to encompass more types of information than simple news headlines and to cater for the new uses of RSS that have emerged over the last 12 months.”[21][28] Winer wrote a prickly reply, stating that his Scripting News format was in fact the original RSS and that it had been meant for a different purpose. Given that the people most involved in the development of RSS disagreed about why RSS had even been created, a fork seems to have been inevitable. + +The fork happened after Dornfest announced a proposed RSS 1.0 specification and formed the RSS-DEV Working Group—which would include Davis, Swartz, and several others but not Winer—to get it ready for publication. In the proposed specification, RSS once again stood for “RDF Site Summary,” because RDF had been added back in to represent metadata properties of certain RSS elements.
The specification acknowledged Winer by name, giving him credit for popularizing RSS through his “evangelism.”[22][29] But it also argued that just adding more elements to RSS without providing for extensibility with a module system—that is, what Winer was suggesting—“sacrifices scalability.” The specification went on to define a module system for RSS based on XML namespaces. + +Winer was furious that the RSS-DEV Working Group had arrogated the “RSS 1.0” name for themselves.[23][30] In another mailing list about decentralization, he described what the RSS-DEV Working Group had done as theft.[24][31] Other members of the Syndication mailing list also felt that the RSS-DEV Working Group should not have used the name “RSS” without unanimous agreement from the community on how to move RSS forward. But the Working Group stuck with the name. Dan Brickley, another member of the RSS-DEV Working Group, defended this decision by arguing that “RSS 1.0 as proposed is solidly grounded in the original RSS vision, which itself had a long heritage going back to MCF (an RDF precursor) and related specs (CDF etc).”[25][32] He essentially felt that the RSS 1.0 effort had a better claim to the RSS name than Winer did, since RDF had originally been a part of RSS. The RSS-DEV Working Group published a final version of their specification in December. That same month, Winer published his own improvement to RSS 0.91, which he called RSS 0.92, on UserLand’s website. RSS 0.92 made several small optional improvements to RSS, among which was the addition of the `<enclosure>` tag soon used by podcasters everywhere. RSS had officially forked. + +It’s not clear to me why a better effort was not made to involve Winer in the RSS-DEV Working Group. He was a prominent contributor to the Syndication mailing list and obviously responsible for much of RSS’ popularity, as the members of the Working Group themselves acknowledged. But Tim O’Reilly, founder and CEO of O’Reilly, explained in a UserLand discussion group that Winer more or less refused to participate: + +> A group of people involved in RSS got together to start thinking about its future evolution. Dave was part of the group. When the consensus of the group turned in a direction he didn’t like, Dave stopped participating, and characterized it as a plot by O’Reilly to take over RSS from him, despite the fact that Rael Dornfest of O’Reilly was only one of about a dozen authors of the proposed RSS 1.0 spec, and that many of those who were part of its development had at least as long a history with RSS as Dave had.[26][33] + +To this, Winer said: + +> I met with Dale [Dougherty] two weeks before the announcement, and he didn’t say anything about it being called RSS 1.0. I spoke on the phone with Rael the Friday before it was announced, again he didn’t say that they were calling it RSS 1.0. The first I found out about it was when it was publicly announced. +> +> Let me ask you a straight question. If it turns out that the plan to call the new spec “RSS 1.0” was done in private, without any heads-up or consultation, or for a chance for the Syndication list members to agree or disagree, not just me, what are you going to do? +> +> UserLand did a lot of work to create and popularize and support RSS. We walked away from that, and let your guys have the name. That’s the top level. If I want to do any further work in Web syndication, I have to use a different name.
Why and how did that happen Tim?[27][34] + +I have not been able to find a discussion in the Syndication mailing list about using the RSS 1.0 name prior to the announcement of the RSS 1.0 proposal. + +RSS would fork again in 2003, when several developers frustrated with the bickering in the RSS community sought to create an entirely new format. These developers created Atom, a format that did away with RDF but embraced XML namespaces. Atom would eventually be specified by [a proposed IETF standard][35]. After the introduction of Atom, there were three competing versions of RSS: Winer’s RSS 0.92 (updated to RSS 2.0 in 2002 and renamed “Really Simple Syndication”), the RSS-DEV Working Group’s RSS 1.0, and Atom. + +### Decline + +The proliferation of competing RSS specifications may have hampered RSS in other ways that I’ll discuss shortly. But it did not stop RSS from becoming enormously popular during the 2000s. By 2004, the New York Times had started offering its headlines in RSS and had written an article explaining to the layperson what RSS was and how to use it.[28][36] Google Reader, an RSS aggregator ultimately used by millions, was launched in 2005. By 2013, RSS seemed popular enough that the New York Times, in its obituary for Aaron Swartz, called the technology “ubiquitous.”[29][37] For a while, before a third of the planet had signed up for Facebook, RSS was simply how many people stayed abreast of news on the internet. + +The New York Times published Swartz’ obituary in January, 2013. By that point, though, RSS had actually turned a corner and was well on its way to becoming an obscure technology. Google Reader was shut down in July, 2013, ostensibly because user numbers had been falling “over the years.”[30][38] This prompted several articles from various outlets declaring that RSS was dead. But people had been declaring that RSS was dead for years, even before Google Reader’s shuttering. Steve Gillmor, writing for TechCrunch in May, 2009, advised that “it’s time to get completely off RSS and switch to Twitter” because “RSS just doesn’t cut it anymore.”[31][39] He pointed out that Twitter was basically a better RSS feed, since it could show you what people thought about an article in addition to the article itself. It allowed you to follow people and not just channels. Gillmor told his readers that it was time to let RSS recede into the background. He ended his article with a verse from Bob Dylan’s “Forever Young.” + +Today, RSS is not dead. But neither is it anywhere near as popular as it once was. Lots of people have offered explanations for why RSS lost its broad appeal. Perhaps the most persuasive explanation is exactly the one offered by Gillmor in 2009. Social networks, just like RSS, provide a feed featuring all the latest news on the internet. Social networks took over from RSS because they were simply better feeds. They also provide more benefits to the companies that own them. Some people have accused Google, for example, of shutting down Google Reader in order to encourage people to use Google+. Google might have been able to monetize Google+ in a way that it could never have monetized Google Reader. Marco Arment, the creator of Instapaper, wrote on his blog in 2013: + +> Google Reader is just the latest casualty of the war that Facebook started, seemingly accidentally: the battle to own everything.
While Google did technically “own” Reader and could make some use of the huge amount of news and attention data flowing through it, it conflicted with their far more important Google+ strategy: they need everyone reading and sharing everything through Google+ so they can compete with Facebook for ad-targeting data, ad dollars, growth, and relevance.[32][40] + +So both users and technology companies realized that they got more out of using social networks than they did out of RSS. + +Another theory is that RSS was always too geeky for regular people. Even the New York Times, which seems to have been eager to adopt RSS and promote it to its audience, complained in 2006 that RSS is a “not particularly user friendly” acronym coined by “computer geeks.”[33][41] Before the RSS icon was designed in 2004, websites like the New York Times linked to their RSS feeds using little orange boxes labeled “XML,” which can only have been intimidating.[34][42] The label was perfectly accurate though, because back then clicking the link would take a hapless user to a page full of XML. [This great tweet][43] captures the essence of this explanation for RSS’ demise. Regular people never felt comfortable using RSS; it hadn’t really been designed as a consumer-facing technology and involved too many hurdles; people jumped ship as soon as something better came along. + +RSS might have been able to overcome some of these limitations if it had been further developed. Maybe RSS could have been extended somehow so that friends subscribed to the same channel could syndicate their thoughts about an article to each other. But whereas a company like Facebook was able to “move fast and break things,” the RSS developer community was stuck trying to achieve consensus. The Great RSS Fork only demonstrates how difficult it was to do that. So if we are asking ourselves why RSS is no longer popular, a good first-order explanation is that social networks supplanted it. If we ask ourselves why social networks were able to supplant it, then the answer may be that the people trying to make RSS succeed faced a problem much harder than, say, building Facebook. As Dornfest wrote to the Syndication mailing list at one point, “currently it’s the politics far more than the serialization that’s far from simple.”[35][44] + +So today we are left with centralized silos of information. In a way, we _do_ have the syndicated internet that Kevin Werbach foresaw in 1999. After all, _The Onion_ is a publication that relies on syndication through Facebook and Twitter the same way that Seinfeld relied on syndication to rake in millions after the end of its original run. But syndication on the web only happens through one of a very small number of channels, meaning that none of us “retain control over our online personae” the way that Werbach thought we would. One reason this happened is garden-variety corporate rapaciousness—RSS, an open format, didn’t give technology companies the control over data and eyeballs that they needed to sell ads, so they did not support it. But the more mundane reason is that centralized silos are just easier to design than common standards. Consensus is difficult to achieve and it takes time, but without consensus spurned developers will go off and create competing standards. The lesson here may be that if we want to see a better, more open web, we have to get better at not screwing each other over. + +_If you enjoyed this post, more like it come out every four weeks! 
Follow [@TwoBitHistory][45] on Twitter or subscribe to the [RSS feed][46] to make sure you know when a new post is out._ + +_Previously on TwoBitHistory…_ + +> New post: This week we're traveling back in time in our DeLorean to see what it was like learning to program on early home computers. +> +> — TwoBitHistory (@TwoBitHistory) [September 2, 2018][47] + + 1. Kevin Werbach, “The Web Goes into Syndication,” Release 1.0, July 22, 1999, 1, accessed September 14, 2018, . [↩︎][48] + + 2. ibid. [↩︎][49] + + 3. Werbach, 8. [↩︎][50] + + 4. Peter Wiggin, “RSS Delivers the XML Promise,” Web Review, October 29, 1999, accessed September 14, 2018, . [↩︎][51] + + 5. Ben Hammersley, RSS and Atom (O’Reilly), 8, accessed September 14, 2018, . [↩︎][52] + + 6. “RSS 0.90 Specification,” RSS Advisory Board, accessed September 14, 2018, . [↩︎][53] + + 7. “My Netscape Network Future Directions,” RSS Advisory Board, accessed September 14, 2018, . [↩︎][54] + + 8. Tim Bray, “The RDF.net Challenge,” Ongoing by Tim Bray, May 21, 2003, accessed September 14, 2018, . [↩︎][55] + + 9. Dan Libby, “RSS: Introducing Myself,” August 24, 2000, RSS-DEV Mailing List, accessed September 14, 2018, . [↩︎][56] + + 10. Alexandra Krasne, “Browser Wars May Become Portal Wars,” CNN, accessed September 14, 2018, . [↩︎][57] + + 11. Dave Winer, “Scripting News in XML,” Scripting News, December 15, 1997, accessed September 14, 2018, . [↩︎][58] + + 12. Joseph Reagle, “RSS History,” 2004, accessed September 14, 2018, . [↩︎][59] + + 13. Dave Winer, “A Faceoff with Netscape,” Scripting News, June 16, 1999, accessed September 14, 2018, . [↩︎][60] + + 14. ibid. [↩︎][61] + + 15. Dan Libby, “RSS 0.91 Specification (Netscape),” RSS Advisory Board, accessed September 14, 2018, . [↩︎][62] + + 16. Dave Winer, “Scripting News: 7/28/1999,” Scripting News, July 28, 1999, accessed September 14, 2018, . [↩︎][63] + + 17. Oliver Willis, “RSS Aggregators?” June 19, 2000, Syndication Mailing List, accessed September 14, 2018, . [↩︎][64] + + 18. Dave Winer, “Scripting News: 07/07/2000,” Scripting News, July 07, 2000, accessed September 14, 2018, . [↩︎][65] + + 19. Dave Winer, “Re: RSS 0.91 Restarted,” June 9, 2000, Syndication Mailing List, accessed September 14, 2018, . [↩︎][66] + + 20. Leigh Dodds, “RSS Modularization,” XML.com, July 5, 2000, accessed September 14, 2018, . [↩︎][67] + + 21. Ian Davis, “Re: [syndication] RSS Modularization Demonstration,” June 28, 2000, Syndication Mailing List, accessed September 14, 2018, . [↩︎][68] + + 22. “RDF Site Summary (RSS) 1.0,” December 09, 2000, accessed September 14, 2018, . [↩︎][69] + + 23. Dave Winer, “Re: [syndication] Re: Thoughts, Questions, and Issues,” August 16, 2000, Syndication Mailing List, accessed September 14, 2018, . [↩︎][70] + + 24. Mark Pilgrim, “History of the RSS Fork,” Dive into Mark, September 5, 2002, accessed September 14, 2018, . [↩︎][71] + + 25. Dan Brickley, “RSS-Classic, RSS 1.0 and a Historical Debt,” November 7, 2000, Syndication Mailing List, accessed September 14, 2018, . [↩︎][72] + + 26. Tim O’Reilly, “Re: Asking Tim,” UserLand, September 20, 2000, accessed September 14, 2018, . [↩︎][73] + + 27. Dave Winer, “Re: Asking Tim,” UserLand, September 20, 2000, accessed September 14, 2018, . [↩︎][74] + + 28. John Quain, “BASICS; Fine-Tuning Your Filter for Online Information,” The New York Times, 2004, accessed September 14, 2018, . [↩︎][75] + + 29. John Schwartz, “Aaron Swartz, Internet Activist, Dies at 26,” The New York Times, January 12, 2013, accessed September 14, 2018, . 
[↩︎][76] + + 30. “A Second Spring of Cleaning,” Official Google Blog, March 13, 2013, accessed September 14, 2018, . [↩︎][77] + + 31. Steve Gillmor, “Rest in Peace, RSS,” TechCrunch, May 5, 2009, accessed September 14, 2018, . [↩︎][78] + + 32. Marco Arment, “Lockdown,” Marco.org, July 3, 2013, accessed September 14, 2018, . [↩︎][79] + + 33. Bob Tedeschi, “There’s a Popular New Code for Deals: RSS,” The New York Times, January 29, 2006, accessed September 14, 2018, . [↩︎][80] + + 34. “NYTimes.com RSS Feeds,” The New York Times, accessed September 14, 2018, . [↩︎][81] + + 35. Rael Dornfest, “RE: Re: [syndication] RE: RFC: Clearing Confusion for RSS, Agreement for Forward Motion,” May 31, 2001, Syndication Mailing List, accessed September 14, 2018, . [↩︎][82] + + + + +-------------------------------------------------------------------------------- + +via: https://twobithistory.org/2018/09/16/the-rise-and-demise-of-rss.html + +作者:[Two-Bit History][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://twobithistory.org +[b]: https://github.com/lujun9972 +[1]: https://twobithistory.org/2018/12/18/rss.html +[2]: tmp.F599d8dnXW#fn:3 +[3]: tmp.F599d8dnXW#fn:4 +[4]: tmp.F599d8dnXW#fn:5 +[5]: tmp.F599d8dnXW#fn:6 +[6]: https://trends.google.com/trends/explore?date=all&geo=US&q=rss +[7]: tmp.F599d8dnXW#fn:7 +[8]: https://twobithistory.org/images/mnn-channel.gif +[9]: tmp.F599d8dnXW#fn:8 +[10]: https://twobithistory.org/2018/05/27/semantic-web.html +[11]: tmp.F599d8dnXW#fn:9 +[12]: http://web.archive.org/web/19970703020212/http://mcf.research.apple.com:80/hs/screen_shot.html +[13]: tmp.F599d8dnXW#fn:10 +[14]: tmp.F599d8dnXW#fn:11 +[15]: tmp.F599d8dnXW#fn:12 +[16]: http://scripting.com/ +[17]: tmp.F599d8dnXW#fn:13 +[18]: tmp.F599d8dnXW#fn:14 +[19]: tmp.F599d8dnXW#fn:15 +[20]: tmp.F599d8dnXW#fn:16 +[21]: tmp.F599d8dnXW#fn:17 +[22]: tmp.F599d8dnXW#fn:18 +[23]: tmp.F599d8dnXW#fn:19 +[24]: https://groups.yahoo.com/neo/groups/syndication/info +[25]: tmp.F599d8dnXW#fn:20 +[26]: tmp.F599d8dnXW#fn:21 +[27]: tmp.F599d8dnXW#fn:22 +[28]: tmp.F599d8dnXW#fn:23 +[29]: tmp.F599d8dnXW#fn:24 +[30]: tmp.F599d8dnXW#fn:25 +[31]: tmp.F599d8dnXW#fn:26 +[32]: tmp.F599d8dnXW#fn:27 +[33]: tmp.F599d8dnXW#fn:28 +[34]: tmp.F599d8dnXW#fn:29 +[35]: https://tools.ietf.org/html/rfc4287 +[36]: tmp.F599d8dnXW#fn:30 +[37]: tmp.F599d8dnXW#fn:31 +[38]: tmp.F599d8dnXW#fn:32 +[39]: tmp.F599d8dnXW#fn:33 +[40]: tmp.F599d8dnXW#fn:34 +[41]: tmp.F599d8dnXW#fn:35 +[42]: tmp.F599d8dnXW#fn:36 +[43]: https://twitter.com/mgsiegler/status/311992206716203008 +[44]: tmp.F599d8dnXW#fn:37 +[45]: https://twitter.com/TwoBitHistory +[46]: https://twobithistory.org/feed.xml +[47]: https://twitter.com/TwoBitHistory/status/1036295112375115778?ref_src=twsrc%5Etfw +[48]: tmp.F599d8dnXW#fnref:3 +[49]: tmp.F599d8dnXW#fnref:4 +[50]: tmp.F599d8dnXW#fnref:5 +[51]: tmp.F599d8dnXW#fnref:6 +[52]: tmp.F599d8dnXW#fnref:7 +[53]: tmp.F599d8dnXW#fnref:8 +[54]: tmp.F599d8dnXW#fnref:9 +[55]: tmp.F599d8dnXW#fnref:10 +[56]: tmp.F599d8dnXW#fnref:11 +[57]: tmp.F599d8dnXW#fnref:12 +[58]: tmp.F599d8dnXW#fnref:13 +[59]: tmp.F599d8dnXW#fnref:14 +[60]: tmp.F599d8dnXW#fnref:15 +[61]: tmp.F599d8dnXW#fnref:16 +[62]: tmp.F599d8dnXW#fnref:17 +[63]: tmp.F599d8dnXW#fnref:18 +[64]: tmp.F599d8dnXW#fnref:19 +[65]: tmp.F599d8dnXW#fnref:20 +[66]: tmp.F599d8dnXW#fnref:21 +[67]: tmp.F599d8dnXW#fnref:22 +[68]: tmp.F599d8dnXW#fnref:23 +[69]: 
tmp.F599d8dnXW#fnref:24 +[70]: tmp.F599d8dnXW#fnref:25 +[71]: tmp.F599d8dnXW#fnref:26 +[72]: tmp.F599d8dnXW#fnref:27 +[73]: tmp.F599d8dnXW#fnref:28 +[74]: tmp.F599d8dnXW#fnref:29 +[75]: tmp.F599d8dnXW#fnref:30 +[76]: tmp.F599d8dnXW#fnref:31 +[77]: tmp.F599d8dnXW#fnref:32 +[78]: tmp.F599d8dnXW#fnref:33 +[79]: tmp.F599d8dnXW#fnref:34 +[80]: tmp.F599d8dnXW#fnref:35 +[81]: tmp.F599d8dnXW#fnref:36 +[82]: tmp.F599d8dnXW#fnref:37 From f889a1db3397b880d23005344180f892a09fcab1 Mon Sep 17 00:00:00 2001 From: DarkSun Date: Tue, 17 Sep 2019 13:09:53 +0800 Subject: [PATCH 133/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020180507=20Multin?= =?UTF-8?q?omial=20Logistic=20Classification?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/tech/20180507 Multinomial Logistic Classification.md --- ...507 Multinomial Logistic Classification.md | 215 ++++++++++++++++++ 1 file changed, 215 insertions(+) create mode 100644 sources/tech/20180507 Multinomial Logistic Classification.md diff --git a/sources/tech/20180507 Multinomial Logistic Classification.md b/sources/tech/20180507 Multinomial Logistic Classification.md new file mode 100644 index 0000000000..01fb7b2e90 --- /dev/null +++ b/sources/tech/20180507 Multinomial Logistic Classification.md @@ -0,0 +1,215 @@ +[#]: collector: (lujun9972) +[#]: translator: ( ) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (Multinomial Logistic Classification) +[#]: via: (https://www.jtolio.com/2018/05/multinomial-logistic-classification) +[#]: author: (jtolio.com https://www.jtolio.com/) + +Multinomial Logistic Classification +====== + +_This article was originally a problem I wrote for a coding competition I hosted, Vivint’s 2017 Game of Codes (now offline). The goal of this problem was not only to be a fun challenge but also to teach contestants almost everything they needed to know to build a neural network from scratch. I thought it might be neat to revive on my site! If machine learning is still scary sounding and foreign to you, you should feel much more at ease after working through this problem. I left out the details of [back-propagation][1], and a single-layer neural network isn’t really a neural network, but in this problem you can learn how to train and run a complete model! There’s lots of maybe scary-looking math but honestly if you can [multiply matrices][2] you should be fine._ + +In this problem, you’re going to build and train a machine learning model… from scratch! Don’t be intimidated - it will be much easier than it sounds! + +### What is machine learning? + +_Machine learning_ is a broad and growing range of topics, but essentially the idea is to teach the computer how to find patterns in large amounts of data, then use those patterns to make predictions. Surprisingly, the techniques that have been developed allow computers to translate languages, drive cars, recognize cats, synthesize voice, understand your music tastes, cure diseases, and even adjust your thermostat! + +You might be surprised to learn that since about 2010, the entire artificial intelligence and machine learning community has reorganized around a surprisingly small and common toolbox for all of these problems. So, let’s dive in to this toolbox! + +### Classification + +One of the most fundamental ways of solving problems in machine learning is by recasting problems as _classification_ problems. In other words, if you can describe a problem as data that needs labels, you can use machine learning! 
+ +Machine learning will go through a phase of _training_, where data and existing labels are provided to the system. As a motivating example, imagine you have a large collection of photos that either contain hot dogs or don’t. Some of your photos have already been labeled as containing a hot dog or not; for the rest, we want to build a system that will automatically label them “hotdog” or “nothotdog.” During training, we attempt to build a model of what exactly the essence of each label is. In this case, we will run all of our existing labeled photos through the system so it can learn what makes a hot dog a hot dog. + +After training, we run the unseen photos through the model and use the model to generate classifications. If you provide a new photo to your hotdog/nothotdog model, your model should be able to tell you if the photo contains a hot dog, assuming your model had a good training data set and was able to capture the core concept of what a hot dog is. + +Many different types of problems can be described as classification problems. As an example, perhaps you want to predict which word comes next in a sequence. Given four input words, a classifier can label those four words as “likely the fourth word follows the last three words” or “not likely.” Alternatively, the classification label for three words could be the most likely word to follow those three. + +### How I learned to stop worrying and love multinomial logistic classification + +Okay, let’s do the simplest thing we can think of to take input data and classify it. + +Let’s imagine the data we want to classify is a big list of values. If what we have is a 16 by 16 pixel picture, we’re going to just put all the pixels in one big row so we have 256 pixel values in a row. So we’ll say \\(\mathbf{x}\\) is a vector in 256 dimensions, and each dimension is the pixel value. + +We have two labels, “hotdog” and “nothotdog.” Just like any other machine learning system, our system will never be 100% confident with a classification, so we will need to output confidence probabilities. The output of our system will be a two-dimensional vector, \\(\mathbf{p}\\). \\(p_0\\) will represent the probability that the input should be labeled “hotdog” and \\(p_1\\) will represent the probability that the input should be labeled “nothotdog.” + +How do we take a vector in 256 (or \\(\dim(\mathbf{x})\\)) dimensions and make something in just 2 (or \\(\dim(\mathbf{p})\\)) dimensions? Why, [matrix multiplication][2] of course! If you have a matrix with 2 rows and 256 columns, multiplying it by a 256-dimensional vector will result in a 2-dimensional one. + +Surprisingly, this is actually really close to the final construction of our classifier, but there are two problems: + + 1. If one of the input \\(\mathbf{x}\\)s is all zeros, the output will have to be all zeros too. But we need one of the output dimensions to be nonzero! + 2. There’s nothing guaranteeing the probabilities in the output will be non-negative and all sum to 1. + + + +The first problem is easy: we add a bias vector \\(\mathbf{b}\\), turning our matrix multiplication into a standard linear equation of the form \\(\mathbf{W}\cdot\mathbf{x}+\mathbf{b}=\mathbf{y}\\). + +The second problem can be solved by using the [softmax function][3].
For a given vector \\(\mathbf{v}\\), softmax is defined as: + +\\[\text{softmax}\left(\mathbf{v}\right)_i = \frac{e^{v_i}}{\sum_{j=0}^{n-1} e^{v_j}}\\] + +In case the \\(\sum\\) scares you, \\(\sum_{j=0}^{n-1}\\) is basically a math “for loop.” All it’s saying is that we’re going to add together everything that comes after it (\\(e^{v_j}\\)) for every \\(j\\) value from 0 to \\(n-1\\). + +Softmax is a neat function! The output will be a vector where the largest dimension in the input will be the closest number to 1, no dimensions will be less than zero, and all dimensions sum to 1. Here are some examples: + +\\[\text{softmax}\left(\left(0, 0\right)\right) = \left(0.5, 0.5\right)\\] + +\\[\text{softmax}\left(\left(1, 2, 3\right)\right) \approx \left(0.09, 0.245, 0.665\right)\\] + +Unbelievably, these are all the building blocks you need for a linear model! Let’s put all the blocks together. If you already have \\(\mathbf{W}\cdot\mathbf{x}+\mathbf{b}=\mathbf{y}\\), your prediction \\(\mathbf{p}\\) can be found as \\(\text{softmax}\left(\mathbf{y}\right)\\). More fully, given an input \\(\mathbf{x}\\) and a trained model \\(\left(\mathbf{W},\mathbf{b}\right)\\), your prediction \\(\mathbf{p}\\) is: + +\\[\mathbf{p} = \text{softmax}\left(\mathbf{W}\cdot\mathbf{x}+\mathbf{b}\right)\\] + +Once again, in this context, \\(p_0\\) is the probability given the model that the input should be labeled “hotdog” and \\(p_1\\) is the probability given the model that the input should be labeled “nothotdog.” + +It’s kind of amazing that all you need for good success with things even as complex as handwriting recognition is a linear model such as this one. + +### Scoring + +How do we find \\(\mathbf{W}\\) and \\(\mathbf{b}\\)? It might surprise you, but we’re going to start off by guessing some random numbers and then changing them until we aren’t predicting things too badly (via a process known as [gradient descent][4]). But what does “too badly” mean? + +Recall that we have data that we’ve already labeled. We already have photos labeled “hotdog” and “nothotdog” in what’s called our _training set_. For each photo, we’re going to take whatever our current model is (\\(\mathbf{W}\\) and \\(\mathbf{b}\\)) and find \\(\mathbf{p}\\). Perhaps for one photo (that really is of a hot dog) our \\(\mathbf{p}\\) looks like this: + +\\[\mathbf{p} = \left(0.4, 0.6\right)\\] + +This isn’t great! Our model says that the photo should be labeled “nothotdog” with 60% probability, but it is a hot dog. + +We need a bit more terminology. So far, we’ve only talked about one sample, one label, and one prediction at a time, but obviously we have lots of samples, lots of labels, and lots of predictions, and we want to score how our model does not just on one sample, but on all of our training samples. Assume we have \\(s\\) training samples, each sample has \\(d\\) dimensions, and there are \\(l\\) labels. In the case of our 16 by 16 pixel hot dog photos, \\(d = 256\\) and \\(l = 2\\). We’ll refer to sample \\(i\\) as \\(\mathbf{x}^{(i)}\\), our prediction for sample \\(i\\) as \\(\mathbf{p}^{(i)}\\), and the correct label vector for sample \\(i\\) as \\(\mathbf{L}^{(i)}\\). \\(\mathbf{L}^{(i)}\\) is a vector that is all zeros except for the dimension corresponding to the correct label, where that dimension is a 1. In other words, we have \\(\mathbf{p}^{(i)} = \text{softmax}\left(\mathbf{W}\cdot\mathbf{x}^{(i)}+\mathbf{b}\right)\\) and we want \\(\mathbf{p}^{(i)}\\) to be as close to \\(\mathbf{L}^{(i)}\\) as possible, for all \\(s\\) samples. + +To score our model, we’re going to compute something called the _average cross entropy loss_. In general, [loss][5] is used to mean how off the mark a machine learning model is. While there are many ways of calculating loss, we’re going to use average [cross entropy][6] because it has some nice properties.
+ +Here’s the definition of the average cross entropy loss across all samples: + +\\[\text{loss} = -\frac{1}{s}\sum_{i=0}^{s-1}\sum_{m=0}^{l-1} L_m^{(i)} \cdot \ln\left(p_m^{(i)}\right)\\] + +All we need to do is find \\(\mathbf{W}\\) and \\(\mathbf{b}\\) that make this loss smallest. How do we do that? + +### Training + +As we said before, we will start \\(\mathbf{W}\\) and \\(\mathbf{b}\\) off with random values. For each value, choose a floating-point random number between -1 and 1. + +Of course, we’ll need to correct these values given the training data, and we now have enough information to describe how we will back-propagate corrections. + +The plan is to process all of the training data enough times that the loss drops to an “acceptable level.” Each time through the training data we’ll collect all of the predictions, and at the end we’ll update \\(\mathbf{W}\\) and \\(\mathbf{b}\\) with the information we’ve found. + +One problem that can occur is that your model might overcorrect after each run. A simple way to limit overcorrection somewhat is to add a “learning rate,” usually designated \\(\alpha\\), which is some small fraction. You get to choose the learning rate! A good default choice for \\(\alpha\\) is 0.1. + +At the end of each run through all of the training data, here’s how you update \\(\mathbf{W}\\) and \\(\mathbf{b}\\): + +\\[W_{m,n} \leftarrow W_{m,n} - \frac{\alpha}{s}\sum_{i=0}^{s-1}\left(p_m^{(i)} - L_m^{(i)}\right) \cdot x_n^{(i)}\\] + +\\[b_m \leftarrow b_m - \frac{\alpha}{s}\sum_{i=0}^{s-1}\left(p_m^{(i)} - L_m^{(i)}\right)\\] + +Just because this syntax is starting to get out of hand, let’s refresh what each symbol means. + + * \\(W_{m,n}\\) is the cell in weight matrix \\(\mathbf{W}\\) at row \\(m\\) and column \\(n\\). + * \\(b_m\\) is the \\(m\\)-th dimension in the “bias” vector \\(\mathbf{b}\\). + * \\(\alpha\\) is again your learning rate, 0.1, and \\(s\\) is how many training samples you have. + * \\(x_n^{(i)}\\) is the \\(n\\)-th dimension of sample \\(i\\). + * Likewise, \\(p_m^{(i)}\\) and \\(L_m^{(i)}\\) are the \\(m\\)-th dimensions of our prediction and true labels for sample \\(i\\), respectively. Remember that for each sample \\(i\\), \\(L_m^{(i)}\\) is zero for all but the dimension corresponding to the correct label, where it is 1. + + + +If you’re curious how we got these equations, we applied the [chain rule][7] to calculate partial derivatives of the total loss. It’s hairy, and this problem description is already too long! + +Anyway, once you’ve updated your \\(\mathbf{W}\\) and \\(\mathbf{b}\\), you start the whole process over! + +### When do we stop? + +Knowing when to stop is a hard problem. How low your loss goes is a function of your learning rate, how many iterations you run over your training data, and a huge number of other factors. On the flip side, if you train your model so your loss is too low, you run the risk of overfitting your model to your training data, so it won’t work well on data it hasn’t seen before. + +One of the more common ways of deciding when to [stop training][8] is to have a separate validation set of samples we check our success on and stop when we stop improving. But for this problem, to keep things simple, what we’re going to do is just keep track of how our loss changes and stop when the loss stops changing as much. + +After the first 10 iterations, your loss will have changed 9 times (the first iteration produces a loss but no change, since there is no previous loss to compare against). Take the average of those 9 changes and stop training when your loss change is less than a hundredth of the average loss change. + +### Tie it all together + +Alright! If you’ve stuck with me this far, you’ve learned to implement a multinomial logistic classifier using gradient descent, [back-propagation][1], and [one-hot encoding][9]. Good job!
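+ +To make the whole recipe concrete, here is a rough sketch of this classifier in Python with numpy. This sketch is mine, not part of the original problem (the problem expects you to do the matrix math yourself, and to use the loss-change stopping rule described above rather than the fixed iteration count used here), and the names `softmax`, `train`, and `predict` are just illustrative: + +``` +import numpy as np + +def softmax(v): +    # Exponentiate, then normalize so the result is non-negative and sums to 1. +    e = np.exp(v - v.max())  # subtracting the max avoids overflow +    return e / e.sum() + +def train(xs, labels, l, alpha=0.1, iterations=1000): +    # xs: (s, d) array of samples; labels: length-s array of correct label +    # indices; l: the number of distinct labels. Returns the model (W, b). +    s, d = xs.shape +    rng = np.random.default_rng() +    W = rng.uniform(-1, 1, (l, d))  # start off with random values in [-1, 1] +    b = rng.uniform(-1, 1, l) +    L = np.zeros((s, l)) +    L[np.arange(s), labels] = 1.0  # one-hot encode the correct labels +    for _ in range(iterations): +        ps = np.apply_along_axis(softmax, 1, xs @ W.T + b)  # all predictions +        grad = ps - L  # the (p - L) factor from the update equations above +        W -= (alpha / s) * (grad.T @ xs) +        b -= (alpha / s) * grad.sum(axis=0) +    return W, b + +def predict(W, b, x): +    # the label with the largest probability is the prediction +    return int(np.argmax(softmax(W @ x + b))) +``` + +All that remains for a full solution is parsing the input vectors, mapping label strings to indices and back, and deciding when to stop training.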
+ +You should now be able to write a program that takes labeled training samples, trains a model, then takes unlabeled test samples and predicts labels for them! + +### Your program + +As input your program should take vectors of floating-point values, followed by a label. Some of the labels will be question marks. Your program should output the correct label for all of the question marks it sees. The label your program should output will always be one it has seen training examples of. + +Your program will pass the tests if it labels 75% or more of the unlabeled data correctly. + +### Where to learn more + +If you want to learn more or dive deeper into optimizing your solution, you may be interested in the first section of [Udacity’s free course on Deep Learning][10], or [Dom Luma’s tutorial on building a mini-TensorFlow][11]. + +### Example + +#### Input + +``` + 0.93 -1.52 1.32 0.05 1.72 horse + 1.57 -1.74 0.92 -1.33 -0.68 staple + 0.18 1.24 -1.53 1.53 0.78 other + 1.96 -1.29 -1.50 -0.19 1.47 staple + 1.24 0.15 0.73 -0.22 1.15 battery + 1.41 -1.56 1.04 1.09 0.66 horse +-0.70 -0.93 -0.18 0.75 0.88 horse + 1.12 -1.45 -1.26 -0.43 -0.05 staple + 1.89 0.21 -1.45 0.47 0.62 other +-0.60 -1.87 0.82 -0.66 1.86 staple +-0.80 -1.99 1.74 0.65 1.46 horse +-0.03 1.35 0.11 -0.92 -0.04 battery +-0.24 -0.03 0.58 1.32 -1.51 horse +-0.60 -0.70 1.61 0.56 -0.66 horse + 1.29 -0.39 -1.57 -0.45 1.63 staple + 0.87 1.59 -1.61 -1.79 1.47 battery + 1.86 1.92 0.83 -0.34 1.06 battery +-1.09 -0.81 1.47 1.82 0.06 horse +-0.99 -1.00 -1.45 -1.02 -1.06 staple +-0.82 -0.56 0.82 0.79 -1.02 horse +-1.86 0.77 -0.58 0.82 -1.94 other + 0.15 1.18 -0.87 0.78 2.00 other + 1.18 0.79 1.08 -1.65 -0.73 battery + 0.37 1.78 0.01 0.06 -0.50 other +-0.35 0.31 1.18 -1.83 -0.57 battery + 0.91 1.14 -1.85 0.39 0.07 other +-1.61 0.28 -0.31 0.93 0.77 other +-0.11 -1.75 -1.66 -1.55 -0.79 staple + 0.05 1.03 -0.23 1.49 1.66 other +-1.99 0.43 -0.99 1.72 0.52 other +-0.30 0.40 -0.70 0.51 0.07 other +-0.54 1.92 -1.13 -1.53 1.73 battery +-0.52 0.44 -0.84 -0.11 0.10 battery +-1.00 -1.82 -1.19 -0.67 -1.18 staple +-1.81 0.10 -1.64 -1.47 -1.86 battery +-1.77 0.53 -1.28 0.55 -1.15 other + 0.29 -0.28 -0.41 0.70 1.80 horse +-0.91 0.02 1.60 -1.44 -1.89 battery + 1.24 -0.42 -1.30 -0.80 -0.54 staple +-1.98 -1.15 0.54 -0.14 -1.24 staple + 1.26 -1.02 -1.08 -1.27 1.65 ? + 1.97 1.14 0.51 0.96 -0.36 ? + 0.99 0.14 -0.97 -1.90 -0.87 ? + 1.54 -1.83 1.59 1.98 -0.41 ? +-1.81 0.34 -0.83 0.90 -1.60 ? 
+``` + +#### Output + +``` +staple +other +battery +horse +other +``` + +-------------------------------------------------------------------------------- + +via: https://www.jtolio.com/2018/05/multinomial-logistic-classification + +作者:[jtolio.com][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://www.jtolio.com/ +[b]: https://github.com/lujun9972 +[1]: https://en.wikipedia.org/wiki/Backpropagation +[2]: https://en.wikipedia.org/wiki/Matrix_multiplication +[3]: https://en.wikipedia.org/wiki/Softmax_function +[4]: https://en.wikipedia.org/wiki/Gradient_descent +[5]: https://en.wikipedia.org/wiki/Loss_function +[6]: https://en.wikipedia.org/wiki/Cross_entropy +[7]: https://en.wikipedia.org/wiki/Chain_rule +[8]: https://en.wikipedia.org/wiki/Early_stopping +[9]: https://en.wikipedia.org/wiki/One-hot +[10]: https://classroom.udacity.com/courses/ud730 +[11]: https://nbviewer.jupyter.org/github/domluna/labs/blob/master/Build%20Your%20Own%20TensorFlow.ipynb From 53895af1e0d5123ddbdfd347b39652431fed2f2f Mon Sep 17 00:00:00 2001 From: DarkSun Date: Tue, 17 Sep 2019 13:10:10 +0800 Subject: [PATCH 134/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020180319=20How=20?= =?UTF-8?q?to=20not=20be=20a=20white=20male=20asshole,=20by=20a=20former?= =?UTF-8?q?=20offender?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/tech/20180319 How to not be a white male asshole, by a former offender.md --- ...hite male asshole, by a former offender.md | 153 ++++++++++++++++++ 1 file changed, 153 insertions(+) create mode 100644 sources/tech/20180319 How to not be a white male asshole, by a former offender.md diff --git a/sources/tech/20180319 How to not be a white male asshole, by a former offender.md b/sources/tech/20180319 How to not be a white male asshole, by a former offender.md new file mode 100644 index 0000000000..3478787ea1 --- /dev/null +++ b/sources/tech/20180319 How to not be a white male asshole, by a former offender.md @@ -0,0 +1,153 @@ +[#]: collector: (lujun9972) +[#]: translator: ( ) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (How to not be a white male asshole, by a former offender) +[#]: via: (https://www.jtolio.com/2018/03/how-to-not-be-a-white-male-asshole-by-a-former-offender) +[#]: author: (jtolio.com https://www.jtolio.com/) + +How to not be a white male asshole, by a former offender +====== + +_Huge thanks to Caitlin Jarvis for editing, contributing to, and proofreading to this post._ + +First off, let’s start off with some assumptions. You, dear reader, don’t intend to cause anyone harm. You have good intentions, see yourself as a good person, and are interested in self improvement. That’s great! + +Second, I don’t actually know for sure if I’m not still a current offender. I might be! It’s certainly something I’ll never be done working on. + +### 1\. You don’t know what others are going through + +Unfortunately, your good intentions are not enough to make sure the experiences of others are, in fact, good because we live in a world of asymmetric information. If another person’s dog just died unbeknownst to you and you start talking excitedly about how great dogs are to try and cheer a sad person up, you may end up causing them to be even sadder. You know things other people don’t, and others know things you don’t. 
+ +So when I say that if you are a white man, there is an invisible world of experiences happening all around you that you are inherently blind to, it’s because of asymmetric information. You can’t know what others are going through because you are not an impartial observer of a system. _You exist within the system._ + +![][1] + +Let me show you what I mean: did you know a recent survey found that _[81 percent of women have experienced sexual harassment of some kind][2]_? Fully 1 out of every 2 women you know has had to deal specifically with _unwanted sexual touching_. + +What should have been most amazing about the [#MeToo movement][3] was not how many women reported harassment, but how many men were surprised. + +### 2\. You can inadvertently contribute to a racist, sexist, or prejudiced society + +I [previously wrote a lot about how small little interactions can add up][4], illustrating that even if you don’t intend to subject someone to racism, sexism, or some other prejudice, you might be doing it anyway. Intentions are meaningless when your actions amplify the negative experience of someone else. + +An example from [Maisha Johnson in Everyday Feminism][5]: + +> Black women deal with people touching our hair a lot. Now you know. Okay, there’s more to it than that: Black women deal with people touching our hair a _hell_ of a lot. + +> If you approach a Black woman saying “I just have to feel your hair,” it’s pretty safe to assume this isn’t the first time she’s heard that. + +> Everyone who asks me if they can touch follows a long line of people othering me – including strangers who touch my hair without asking. The psychological impact of having people constantly feel entitled to my personal space has worn me down. + +Another example is that men frequently demand proof. Even though it makes sense in general to check your sources for something, the predominant response of men when confronted with claims of sexist treatment is to [ask for evidence][6]. Because this happens so frequently, this action _itself_ contributes to the sexist subjugation of women. The parallel universe women live in is so distinct from the experiences of men that men can’t believe their ears, and treat the report of a victim with skepticism. + +As you might imagine, this sort of effect is not limited to asking women for evidence or hair touching. Microaggressions are real and everywhere; the accumulation of lots of small things can be enormous. + +If you’re someone in charge of building things, this can be even more important and an even greater responsibility. If you build an app that is blind to the experiences of people who don’t look or act like you, you can significantly amplify negative experiences for others by causing systemic and system-wide issues. + +### 3\. The only way to stop contributing is to continually listen to others + +If you don’t already know what others are going through, and by not knowing what others are going through you may be subjecting them to prejudice even if you don’t mean to, what can you do to help others avoid prejudice? You can listen to them! People who are experiencing prejudice _don’t want to be experiencing prejudice_ and tend to be vocal about the experience. It is your job to really listen and then turn around and change the way you approach these situations in the future. + +### 4\. How do I listen? + +To listen to someone, you need to have empathy. You need to actually care about them. You need to process what they’re saying and not treat them with suspicion.
+ +Listening is very different from interjecting and arguing. Listening to others is different from making them do the work to educate you. It is your job to find the experiences of others you haven’t had and learn from them without demanding a curriculum. + +When people say you should just believe marginalized people, [no one is asking you to check your critical thinking at the door][7]. What you’re being asked to do is to be aware that your incredulity is a further reminder that you are not experiencing the same thing. Worse - white men acting incredulous is _so unbelievably common_ that it itself is a microaggression. Don’t be a sea lion: + +![][8] + +#### Aside about diversity of experience vs. diversity of thought. + +When trying to find others to listen to, who should you find? Recently, a growing number of people have echoed that all that’s really required of diversity is different viewpoints, and having diversity of thought is the ultimate goal. + +I want to point out that this is not the kind of diversity that will be useful to you. It’s easy to have a bunch of different opinions and then reject them when they complicate your life. What you want to be listening to is diversity of _experience_. Some experiences can’t be chosen. You can choose to be contrarian, but you can’t choose the color of your skin. + +### 5\. Where do I listen? + +What you need is a way to be a fly on the wall and observe the life experiences of others through their words and perspectives. Being friends and hanging out with people who are different from you is great. Getting out of monocultures is fantastic. Holding your company to diversity and inclusion initiatives is wonderful. + +But if you still need more or you live somewhere like Utah? + +What if there was a website where people from all walks of life opted in to talking about their day and what they’re feeling and experiencing from their viewpoint in a way you could read? It’d be almost like seeing the world through their eyes. + +Yep, this blog post is an unsolicited Twitter ad. Twitter definitely has its share of problems, but after [writing about how I finally figured out Twitter][9], in 2014 I decided to embark on a year-long effort to use Twitter (I wasn’t really using it before) to follow mostly women or people of color in my field and just see what the field is like for them on a day to day basis. + +Listening to others in this way blew my mind clean open. Suddenly I was aware of this invisible world around me, much of which is still invisible. Now, I’m looking for it, and I catch glimpses. I would challenge anyone and everyone to do this. Make sure the content you’re consuming is predominantly viewpoints from life experiences you haven’t had. + +If you need a start, here are some links to accounts to fill your Twitter feed up with: + + * [200 Women of Color in Tech on Twitter][10] + * [Women Engineers on Twitter][11] + + + +You can also check out [who I follow][12], though I should warn I also follow a lot of political accounts, joke accounts, and my following of someone is not an endorsement. + +It’s also worth pointing out that no individual can possibly speak for an entire class of people, but if 38 out of 50 women are saying they’re dealing with something, you should listen. + +### 6\. Does this work? + +Listening to others works, but you don’t have to just take my word for it. 
Here are two specific and recent experience reports of people turning their worldview for the better by listening to others: + + * [A professor at the University of New Brunswick][13] + * [A senior design developer at Microsoft][14] + + + +You can see how much of a profound and fast impact this had on me because by early 2015, only a few months into my Twitter experiment, I was worked up enough to write [my unicycle post][4] in response to what I was reading on Twitter. + +Having diverse perspectives in a workplace has even been shown to [increase productivity][15] and [increase creativity][16]. + +### 7\. Don’t stop there! + +Not everyone is as growth-oriented as you. Just because you’re listening now doesn’t mean others are hearing the same distribution of experiences. + +If this is new to you, it’s not new to marginalized people. Imagine how tired they must be in trying to convince everyone their experiences are real, valid, and ongoing. Help get the word out! Repeat and retweet what women and minorities say. Give them credit. In meetings at your work, give credit to others for their ideas and amplify their voices. + +Did you know that [non-white or female bosses who push diversity are judged negatively by their peers and managers][17] but white male bosses are not? If you’re a white male, use your position where others can’t. + +If you need an example list of things your company can do, [here’s a list Susan Fowler wrote after her experience at Uber][18]. + +Speak up, use your experiences to help others. + +### 8\. Am I not prejudiced now? + +The asymmetry of experiences we all have means we’re all inherently prejudiced to some degree and will likely continue to contribute to a prejudiced society. That said, the first step to fixing it is admitting it! + +There will always be work to do. You will always need to keep listening, keep learning, and work to improve every day. 
+ +-------------------------------------------------------------------------------- + +via: https://www.jtolio.com/2018/03/how-to-not-be-a-white-male-asshole-by-a-former-offender + +作者:[jtolio.com][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://www.jtolio.com/ +[b]: https://github.com/lujun9972 +[1]: https://www.jtolio.com/images/mrmouse.jpg +[2]: https://www.npr.org/sections/thetwo-way/2018/02/21/587671849/a-new-survey-finds-eighty-percent-of-women-have-experienced-sexual-harassment +[3]: https://en.wikipedia.org/wiki/Me_Too_movement +[4]: https://www.jtolio.com/2015/03/what-riding-a-unicycle-can-teach-us-about-microaggressions/ +[5]: https://everydayfeminism.com/2015/09/dont-touch-black-womens-hair/ +[6]: https://twitter.com/ArielDumas/status/970692180766490630 +[7]: https://www.elle.com/culture/career-politics/a13977980/me-too-movement-false-accusations-believe-women/ +[8]: https://www.jtolio.com/images/sealion.png +[9]: https://www.jtolio.com/2009/03/i-finally-figured-out-twitter/ +[10]: http://peopleofcolorintech.com/articles/a-list-of-200-women-of-color-on-twitter/ +[11]: https://github.com/ryanburgess/female-engineers-twitter +[12]: https://twitter.com/jtolds/following +[13]: https://www.theglobeandmail.com/opinion/ill-start-2018-by-recognizing-my-white-privilege/article37472875/ +[14]: https://micahgodbolt.com/blog/changing-your-worldview/ +[15]: http://edis.ifas.ufl.edu/hr022 +[16]: https://faculty.insead.edu/william-maddux/documents/PSPB-learning-paper.pdf +[17]: https://digest.bps.org.uk/2017/07/12/non-white-or-female-bosses-who-push-diversity-are-judged-negatively-by-their-peers-and-managers/ +[18]: https://www.susanjfowler.com/blog/2017/5/20/five-things-tech-companies-can-do-better From 1934f43476df31ef2ab8c4eab424a0d184963a69 Mon Sep 17 00:00:00 2001 From: DarkSun Date: Tue, 17 Sep 2019 13:10:58 +0800 Subject: [PATCH 135/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020170320=20Whiteb?= =?UTF-8?q?oard=20problems=20in=20pure=20Lambda=20Calculus?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/tech/20170320 Whiteboard problems in pure Lambda Calculus.md --- ...eboard problems in pure Lambda Calculus.md | 836 ++++++++++++++++++ 1 file changed, 836 insertions(+) create mode 100644 sources/tech/20170320 Whiteboard problems in pure Lambda Calculus.md diff --git a/sources/tech/20170320 Whiteboard problems in pure Lambda Calculus.md b/sources/tech/20170320 Whiteboard problems in pure Lambda Calculus.md new file mode 100644 index 0000000000..02200befe7 --- /dev/null +++ b/sources/tech/20170320 Whiteboard problems in pure Lambda Calculus.md @@ -0,0 +1,836 @@ +[#]: collector: (lujun9972) +[#]: translator: ( ) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (Whiteboard problems in pure Lambda Calculus) +[#]: via: (https://www.jtolio.com/2017/03/whiteboard-problems-in-pure-lambda-calculus) +[#]: author: (jtolio.com https://www.jtolio.com/) + +Whiteboard problems in pure Lambda Calculus +====== + +My team at [Vivint][1], the [Space Monkey][2] group, stopped doing whiteboard interviews a while ago. We certainly used to do them, but we’ve transitioned to homework problems or actually just hiring a candidate as a short term contractor for a day or two to solve real work problems and see how that goes. 
Whiteboard interviews are kind of like [Festivus][3] but in a bad way: you get the feats of strength and then the airing of grievances. Unfortunately, modern programming is nothing like writing code in front of a roomful of strangers with only a whiteboard and a marker, so it’s probably not best to optimize for that. + +Nonetheless, [Kyle][4]’s recent (wonderful, amazing) post titled [acing the technical interview][5] got me thinking about fun ways to approach whiteboard problems as an interviewee. Kyle’s [Church-encodings][6] made me wonder how many “standard” whiteboard problems you could solve in pure lambda calculus. If this isn’t seen as a feat of strength by your interviewers, there will certainly be some airing of grievances. + +➡️️ **Update**: I’ve made a lambda calculus web playground so you can run lambda calculus right in your browser! I’ve gone through and made links to examples in this post with it. Check it out at + +### Lambda calculus + +Wait, what is lambda calculus? Did I learn that in high school? + +Big-C “Calculus” of course usually refers to derivatives, integrals, Taylor series, etc. You might have learned about Calculus in high school, but this isn’t that. + +More generally, a little-c “calculus” is really just any system of calculation. The [lambda calculus][7] is essentially a formalization of the smallest set of primitives needed to make a completely [Turing-complete][8] programming language. Expressions in the language can only be one of three things. + + * An expression can define a function that takes exactly one argument (no more, no less) and then has another expression as the body. + * An expression can call a function by applying two subexpressions. + * An expression can reference a variable. + + + +Here is the entire grammar: + +``` +<expr> ::= <variable> +       | `λ` <variable> `.` <expr> +       | `(` <expr> <expr> `)` +``` + +That’s it. There’s nothing else you can do. There are no numbers, strings, booleans, pairs, structs, anything. Every value is a function that takes one argument. All variables refer to these functions, and all functions can do is return another function, either directly, or by calling yet another function. There’s nothing else to help you. + +To be honest, it’s a little surprising that this is even Turing-complete. How do you do branches or loops or recursion? This seems too simple to work, right? + +A common whiteboard problem is the [fizz buzz problem][9]. The goal is to write a function that prints out all the numbers from 0 to 100, but instead of printing numbers divisible by 3 it prints “fizz”, and instead of printing numbers divisible by 5 it prints “buzz”, and in the case of both it prints “fizzbuzz”. It’s a simple toy problem but it’s touted as a good whiteboard problem because evidently many self-proclaimed programmers can’t solve it. Maybe part of that is cause whiteboard problems suck? I dunno. + +Anyway, here’s fizz buzz in pure lambda calculus: + +``` +(λU.(λY.(λvoid.(λ0.(λsucc.(λ+.(λ*.(λ1.(λ2.(λ3.(λ4.(λ5.(λ6.(λ7.(λ8.(λ9.(λ10.(λnum.(λtrue.(λfalse.(λif.(λnot.(λand.(λor.(λmake-pair.(λpair-first.(λpair-second.(λzero?.(λpred.(λ-.(λeq?.(λ/.(λ%.(λnil.(λnil?.(λcons.(λcar.(λcdr.(λdo2.(λdo3.(λdo4.(λfor.(λprint-byte.(λprint-list.(λprint-newline.(λzero-byte.(λitoa.(λfizzmsg.(λbuzzmsg.(λfizzbuzzmsg.(λfizzbuzz.(fizzbuzz (((num 1) 0) 1)) λn.((for n) λi.((do2 (((if (zero? ((% i) 3))) λ_.(((if (zero? ((% i) 5))) λ_.(print-list fizzbuzzmsg)) λ_.(print-list fizzmsg))) λ_.(((if (zero?
((% i) 5))) λ_.(print-list buzzmsg)) λ_.(print-list (itoa i))))) (print-newline nil)))) ((cons (((num 0) 7) 0)) ((cons (((num 1) 0) 5)) ((cons (((num 1) 2) 2)) ((cons (((num 1) 2) 2)) ((cons (((num 0) 9) 8)) ((cons (((num 1) 1) 7)) ((cons (((num 1) 2) 2)) ((cons (((num 1) 2) 2)) nil))))))))) ((cons (((num 0) 6) 6)) ((cons (((num 1) 1) 7)) ((cons (((num 1) 2) 2)) ((cons (((num 1) 2) 2)) nil))))) ((cons (((num 0) 7) 0)) ((cons (((num 1) 0) 5)) ((cons (((num 1) 2) 2)) ((cons (((num 1) 2) 2)) nil))))) λn.(((Y λrecurse.λn.λresult.(((if (zero? n)) λ_.(((if (nil? result)) λ_.((cons zero-byte) nil)) λ_.result)) λ_.((recurse ((/ n) 10)) ((cons ((+ zero-byte) ((% n) 10))) result)))) n) nil)) (((num 0) 4) 8)) λ_.(print-byte (((num 0) 1) 0))) (Y λrecurse.λl.(((if (nil? l)) λ_.void) λ_.((do2 (print-byte (car l))) (recurse (cdr l)))))) PRINT_BYTE) λn.λf.((((Y λrecurse.λremaining.λcurrent.λf.(((if (zero? remaining)) λ_.void) λ_.((do2 (f current)) (((recurse (pred remaining)) (succ current)) f)))) n) 0) f)) λa.do3) λa.do2) λa.λb.b) λl.(pair-second (pair-second l))) λl.(pair-first (pair-second l))) λe.λl.((make-pair true) ((make-pair e) l))) λl.(not (pair-first l))) ((make-pair false) void)) λm.λn.((- m) ((* ((/ m) n)) n))) (Y λ/.λm.λn.(((if ((eq? m) n)) λ_.1) λ_.(((if (zero? ((- m) n))) λ_.0) λ_.((+ 1) ((/ ((- m) n)) n)))))) λm.λn.((and (zero? ((- m) n))) (zero? ((- n) m)))) λm.λn.((n pred) m)) λn.(((λn.λf.λx.(pair-second ((n λp.((make-pair (f (pair-first p))) (pair-first p))) ((make-pair x) x))) n) succ) 0)) λn.((n λ_.false) true)) λp.(p false)) λp.(p true)) λx.λy.λt.((t x) y)) λa.λb.((a true) b)) λa.λb.((a b) false)) λp.λt.λf.((p f) t)) λp.λa.λb.(((p a) b) void)) λt.λf.f) λt.λf.t) λa.λb.λc.((+ ((+ ((* ((* 10) 10)) a)) ((* 10) b))) c)) (succ 9)) (succ 8)) (succ 7)) (succ 6)) (succ 5)) (succ 4)) (succ 3)) (succ 2)) (succ 1)) (succ 0)) λm.λn.λx.(m (n x))) λm.λn.λf.λx.((((m succ) n) f) x)) λn.λf.λx.(f ((n f) x))) λf.λx.x) λx.(U U)) (U λh.λf.(f λx.(((h h) f) x)))) λf.(f f)) +``` + +➡️️ [Try it out in your browser!][10] + +(This program expects a function to be defined called `PRINT_BYTE` which takes a Church-encoded numeral, turns it into a byte, writes it to `stdout`, and then returns the same Church-encoded numeral. Expecting a function that has side-effects might arguably disqualify this from being pure, but it’s definitely arguable.) + +Don’t be deceived! I said there were no native numbers or lists or control structures in lambda calculus and I meant it. `0`, `7`, `if`, and `+` are all _variables_ that represent _functions_ and have to be constructed before they can be used in the code block above. + +### What? What’s happening here? + +Okay let’s start over and build up to fizz buzz. We’re going to need a lot. We’re going to need to build up concepts of numbers, logic, and lists all from scratch. Ask your interviewers if they’re comfortable cause this might be a while. + +Here is a basic lambda calculus function: + +``` +λx.x +``` + +This is the identity function and it is equivalent to the following Javascript: + +``` +function(x) { return x; } +``` + +It takes an argument and returns it! We can call the identity function with another value. Function calling in many languages looks like `f(x)`, but in lambda calculus, it looks like `(f x)`. + +``` +(λx.x y) +``` + +This will return `y`. 
Once again, here’s equivalent Javascript: + +``` +function(x) { return x; }(y) +``` + +Aside: If you’re already familiar with lambda calculus, my formulation of precedence is such that `(λx.x y)` is not the same as `λx.(x y)`. `(λx.x y)` applies `y` to the identity function `λx.x`, and `λx.(x y)` is a function that applies `y` to its argument `x`. Perhaps not what you’re used to, but the parser was way more straightforward, and programming with it this way seems a bit more natural, believe it or not. + +Okay, great. We can call functions. What if we want to pass more than one argument? + +### Currying + +Imagine the following Javascript function: + +``` +let s1 = function(f, x) { return f(x); } +``` + +We want to call it with two arguments, another function and a value, and we want the function to then be called on the value, and have its result returned. Can we do this while using only one argument? + +[Currying][11] is a technique for dealing with this. Instead of taking two arguments, take the first argument and return another function that takes the second argument. Here’s the Javascript: + +``` +let s2 = function(f) { + return function(x) { + return f(x); + } +}; +``` + +Now, `s1(f, x)` is the same as `s2(f)(x)`. So the equivalent lambda calculus for `s2` is then + +``` +λf.λx.(f x) +``` + +Calling this function with `g` for `f` and `y` for `x` is like so: + +``` +((s2 g) y) +``` + +or + +``` +((λf.λx.(f x) g) y) +``` + +The equivalent Javascript here is: + +``` +function(f) { + return function(x) { + return f(x); + } +}(g)(y) +``` + +### Numbers + +Since everything is a function, we might feel a little stuck with what to do about numbers. Luckily, [Alonzo Church][12] already figured it out for us! When you have a number, often what you want to do is represent how many times you might do something. + +So let’s represent a number as how many times we’ll apply a function to a value. This is called a [Church numeral][13]. If we have `f` and `x`, `0` will mean we don’t call `f` at all, and just return `x`. `1` will mean we call `f` one time, `2` will mean we call `f` twice, and so on. + +Here are some definitions! (N.B.: assignment isn’t actually part of lambda calculus, but it makes writing down definitions easier) + +``` +0 = λf.λx.x +``` + +Here, `0` takes a function `f`, a value `x`, and never calls `f`. It just returns `x`. `f` is called 0 times. + +``` +1 = λf.λx.(f x) +``` + +Like `0`, `1` takes `f` and `x`, but here it calls `f` exactly once. Let’s see how this continues for other numbers. + +``` +2 = λf.λx.(f (f x)) +3 = λf.λx.(f (f (f x))) +4 = λf.λx.(f (f (f (f x)))) +5 = λf.λx.(f (f (f (f (f x))))) +``` + +`5` is a function that takes `f`, `x`, and calls `f` 5 times! + +Okay, this is convenient, but how are we going to do math on these numbers? + +### Successor + +Let’s make a _successor_ function that takes a number and returns a new number that calls `f` just one more time. + +``` +succ = λn. λf.λx.(f ((n f) x)) +``` + +`succ` is a function that takes a Church-encoded number, `n`. The spaces after `λn.` are ignored. I put them there to indicate that we expect to usually call `succ` with one argument, curried or no. `succ` then returns another Church-encoded number, `λf.λx.(f ((n f) x))`. What is it doing? Let’s break it down. + + * `((n f) x)` looks like that time we needed to call a function that took two “curried” arguments. So we’re calling `n`, which is a Church numeral, with two arguments, `f` and `x`. This is going to call `f` `n` times!
+ * `(f ((n f) x))` This is calling `f` again, one more time, on the result of the previous value. + + + +So does `succ` work? Let’s see what happens when we call `(succ 1)`. We should get the `2` we defined earlier! + +``` + (succ 1) +-> (succ λf.λx.(f x)) # resolve the variable 1 +-> (λn.λf.λx.(f ((n f) x)) λf.λx.(f x)) # resolve the variable succ +-> λf.λx.(f ((λf.λx.(f x) f) x)) # call the outside function. replace n + # with the argument + +let's sidebar and simplify the subexpression + (λf.λx.(f x) f) +-> λx.(f x) # call the function, replace f with f! + +now we should be able to simplify the larger subexpression + ((λf.λx.(f x) f) x) +-> (λx.(f x) x) # sidebar above +-> (f x) # call the function, replace x with x! + +let's go back to the original now + λf.λx.(f ((λf.λx.(f x) f) x)) +-> λf.λx.(f (f x)) # subexpression simplification above +``` + +and done! That last line is identical to the `2` we defined originally! It calls `f` twice. + +### Math + +Now that we have the successor function, if your interviewers haven’t checked out, tell them that fizz buzz isn’t too far away now; we have [Peano Arithmetic][14]! They can then check their interview bingo cards and see if they’ve increased their winnings. + +No but for real, since we have the successor function, we can now easily do addition and multiplication, which we will need for fizz buzz. + +First, recall that a number `n` is a function that takes another function `f` and an initial value `x` and applies `f` _n_ times. So if you have two numbers _m_ and _n_, what you want to do is apply `succ` to `m` _n_ times! + +``` ++ = λm.λn.((n succ) m) +``` + +Here, `+` is a variable. If it’s not a lambda expression or a function call, it’s a variable! + +Multiplication is similar, but instead of applying `succ` to `m` _n_ times, we’re going to add `m` to `0` `n` times. + +First, note that if `((+ m) n)` is adding `m` and `n`, then that means that `(+ m)` is a _function_ that adds `m` to its argument. So we want to apply the function `(+ m)` to `0` `n` times. + +``` +* = λm.λn.((n (+ m)) 0) +``` + +Yay! We have multiplication and addition now. + +### Logic + +We’re going to need booleans and if statements and logic tests and so on. So, let’s talk about booleans. Recall how with numbers, what we kind of wanted with a number `n` is to do something _n_ times. Similarly, what we want with booleans is to do one of two things, either/or, but not both. Alonzo Church to the rescue again. + +Let’s have booleans be functions that take two arguments (curried of course), where the `true` boolean will return the first option, and the `false` boolean will return the second. + +``` +true = λt.λf.t +false = λt.λf.f +``` + +So that we can demonstrate booleans, we’re going to define a simple sample function called `zero?` that returns `true` if a number `n` is zero, and `false` otherwise: + +``` +zero? = λn.((n λ_.false) true) +``` + +To explain: if we have a Church numeral for 0, it will call the first argument it gets called with 0 times and just return the second argument. In other words, 0 will just return the second argument and that’s it. Otherwise, any other number will call the first argument at least once. So, `zero?` will take `n` and give it a function that throws away its argument and always returns `false` whenever it’s called, and start it off with `true`. Only zero values will return `true`. + +➡️️ [Try it out in your browser!][15] + +We can now write an `if'` function to make use of these boolean values. 
`if'` will take a predicate value `p` (the boolean) and two options `a` and `b`. + +``` +if' = λp.λa.λb.((p a) b) +``` + +You can use it like this: + +``` +((if' (zero? n) + (something-when-zero x)) + (something-when-not-zero y)) +``` + +One thing that’s weird about this construction is that the interpreter is going to evaluate both branches (my lambda calculus interpreter is [eager][16] instead of [lazy][17]). Both `something-when-zero` and `something-when-not-zero` are going to be called to determine what to pass in to `if'`. To make it so that we don’t actually call the function in the branch we don’t want to run, let’s protect the logic in another function. We’ll name the argument to the function `_` to indicate that we want to just throw it away. + +``` +((if (zero? n) + λ_. (something-when-zero x)) + λ_. (something-when-not-zero y)) +``` + +This means we’re going to have to make a new `if` function that calls the correct branch with a throwaway argument, like `0` or something. + +``` +if = λp.λa.λb.(((p a) b) 0) +``` + +Okay, now we have booleans and `if`! + +### Currying part deux + +At this point, you might be getting sick of how calling something with multiple curried arguments involves all these extra parentheses. `((f a) b)` is annoying, can’t we just do `(f a b)`? + +It’s not part of the strict grammar, but my interpreter makes this small concession. `(a b c)` will be expanded to `((a b) c)` by the parser. `(a b c d)` will be expanded to `(((a b) c) d)` by the parser, and so on. + +So, for the rest of the post, for ease of explanation, I’m going to use this [syntax sugar][18]. Observe how using `if` changes: + +``` +(if (zero? n) + λ_. (something-when-zero x) + λ_. (something-when-not-zero y)) +``` + +It’s a little better. + +### More logic + +Let’s talk about `and`, `or`, and `not`! + +`and` returns true if and only if both `a` and `b` are true. Let’s define it! + +``` +and = λa.λb. + (if (a) + λ_. b + λ_. false) +``` + +`or` returns true if `a` is true or if `b` is true: + +``` +or = λa.λb. + (if (a) + λ_. true + λ_. b) +``` + +`not` just returns the opposite of whatever it was given: + +``` +not = λa. + (if (a) + λ_. false + λ_. true) +``` + +It turns out these can be written a bit more simply, but they’re basically doing the same thing: + +``` +and = λa.λb.(a b false) +or = λa.λb.(a true b) +not = λp.λt.λf.(p f t) +``` + +➡️️ [Try it out in your browser!][19] + +### Pairs! + +Sometimes it’s nice to keep data together. Let’s make a little 2-tuple type! We want three functions. We want a function called `make-pair` that will take two arguments and return a “pair”, we want a function called `pair-first` that will return the first element of the pair, and we want a function called `pair-second` that will return the second element. How can we achieve this? You’re almost certainly in the interview room alone, but now’s the time to yell “Alonzo Church”! + +``` +make-pair = λx.λy. λa.(a x y) +``` + +`make-pair` is going to take two arguments, `x` and `y`, and they will be the elements of the pair. The pair itself is a function that takes an “accessor” `a` that will be given `x` and `y`. All `a` has to do is take the two arguments and return the one it wants. + +Here is someone making a pair with variables `1` and `2`: + +``` +(make-pair 1 2) +``` + +This returns: + +``` +λa.(a 1 2) +``` + +There’s a pair! Now we just need to access the values inside. + +Remember how `true` takes two arguments and returns the first one and `false` takes two arguments and returns the second one? 
+ +``` +pair-first = λp.(p true) +pair-second = λp.(p false) +``` + +`pair-first` is going to take a pair `p` and give it `true` as the accessor `a`. `pair-second` is going to give the pair `false` as the accessor. + +Voilà, you can now store 2-tuples of values and recover the data from them. + +➡️️ [Try it out in your browser!][20] + +### Lists! + +We’re going to construct [linked lists][21]. Each list item needs two things: the value at the current position in the list and a reference to the rest of the list. + +One additional caveat is we want to be able to identify an empty list, so we’re going to store whether or not the current value is the end of a list as well. In [LISP][22]-based programming languages, the end of the list is the special value `nil`, and checking if we’ve hit the end of the list is accomplished with the `nil?` predicate. + +Because we want to distinguish `nil` from a list with a value, we’re going to store three things in each linked list item. Whether or not the list is empty, and if not, the value and the rest of the list. So we need a 3-tuple. + +Once we have pairs, other-sized tuples are easy. For instance, a 3-tuple is just one pair with another pair inside for one of the slots. + +For each list element, we’ll store: + +``` +[not-empty [value rest-of-list]] +``` + +As an example, a list element with a value of `1` would look like: + +``` +[true [1 remainder]] +``` + +whereas `nil` will look like + +``` +[false whatever] +``` + +That second part of `nil` just doesn’t matter. + +First, let’s define `nil` and `nil?`: + +``` +nil = (make-pair false false) +nil? = λl. (not (pair-first l)) +``` + +The important thing about `nil` is that the first element in the pair is `false`. + +Now that we have an empty list, let’s define how to add something to the front of it. In LISP-based languages, the operation to _construct_ a new list element is called `cons`, so we’ll call this `cons`, too. + +`cons` will take a value and an existing list and return a new list with the given value at the front of the list. + +``` +cons = λvalue.λlist. + (make-pair true (make-pair value list)) +``` + +`cons` is returning a pair where, unlike `nil`, the first element of the pair is `true`. This represents that there’s something in the list here. The second pair element is what we wanted in our linked list: the value at the current position, and a reference to the rest of the list. + +So how do we access things in the list? Let’s define two functions called `head` and `tail`. `head` is going to return the value at the front of the list, and `tail` is going to return everything but the front of the list. In LISP-based languages, these functions are sometimes called `car` and `cdr` for surprisingly [esoteric reasons][23]. `head` and `tail` have undefined behavior here when called on `nil`, so let’s just assume `nil?` is false for the list and keep going. + +``` +head = λlist. (pair-first (pair-second list)) +tail = λlist. (pair-second (pair-second list)) +``` + +Both `head` and `tail` first get `(pair-second list)`, which returns the tuple that has the value and reference to the remainder. Then, they use either `pair-first` or `pair-second` to get the current value or the rest of the list. + +Great, we have lists! + +➡️️ [Try it out in your browser!][24] + +### Recursion and loops + +Let’s make a simple function that sums up a list of numbers. + +``` +sum = λlist. + (if (nil? list) + λ_. 0 + λ_. (+ (head list) (sum (tail list)))) +``` + +If the list is empty, let’s return 0. 
If the list has an element, let’s add that element to the sum of the rest of the list. [Recursion][25] is a cornerstone tool of computer science, and being able to assume a solution to a subproblem to solve a problem is super neat! + +Okay, except, this doesn’t work like this in lambda calculus. Remember how I said assignment wasn’t something that exists in lambda calculus? If you have: + +``` +x = y + +``` + +This really means you have: + +``` +(λx. y) +``` + +In the case of our sum definition, we have: + +``` +(λsum. + + + λlist. + (if (nil? list) + λ_. 0 + λ_. (+ (head list) (sum (tail list))))) +``` + +What that means is `sum` doesn’t have any access to itself. It can’t call itself like we’ve written, because when it tries to call `sum`, it’s undefined! + +This is a pretty crushing blow, but it turns out there’s a mind-bending and completely unexpected trick the universe has up its sleeve. + +Assume we wrote `sum` so that it takes two arguments. A reference to something like `sum` we’ll call `helper` and then the list. If we could figure out how to solve the recursion problem, then we could use this `sum`. Let’s do that. + +``` +sum = λhelper.λlist. + (if (nil? list) + λ_. 0 + λ_. (+ (head list) (helper (tail list)))) +``` + +But hey! When we call `sum`, we have a reference to `sum` then! Let’s just give `sum` itself before the list. + +``` +(sum sum list) +``` + +This seems promising, but unfortunately now the `helper` invocation inside of `sum` is broken. `helper` is just `sum` and `sum` expects a reference to itself. Let’s try again, changing the `helper` call: + +``` +sum = λhelper.λlist. + (if (nil? list) + λ_. 0 + λ_. (+ (head list) (helper helper (tail list)))) + +(sum sum list) +``` + +We did it! This actually works! We engineered recursion out of math! At no point does `sum` refer to itself inside of itself, and yet we managed to make a recursive function anyways! + +➡️️ [Try it out in your browser!][26] + +Despite the minor miracle we’ve just performed, we’ve now ruined how we program recursion to involve calling recursive functions with themselves. This isn’t the end of the world, but it’s a little annoying. Luckily for us, there’s a function that cleans this all right up called the [Y combinator][27]. + +The _Y combinator_ is probably now more famously known as [a startup incubator][28], or perhaps even more so as the domain name for one of the most popular sites that has a different name than its URL, [Hacker News][29], but fixed point combinators such as the Y combinator have had a longer history. + +The Y combinator can be defined in different ways, but the definition I’m using is: + +``` +Y = λf.(λx.(x x) λx.(f λy.((x x) y))) +``` + +You might consider reading more about how the Y combinator can be derived in an excellent tutorial such as [this one][30] or [this one][31]. + +Anyway, `Y` will make our original `sum` work as expected. + +``` +sum = (Y λhelper.λlist. + (if (nil? list) + λ_. 0 + λ_. (+ (head list) (helper (tail list))))) +``` + +We can now call `(sum list)` without any wacky doubling of the function name, either inside or outside of the function. Hooray! + +➡️️ [Try it out in your browser!][32] + +### More math + +“Get ready to do more math! We now have enough building blocks to do subtraction, division, and modulo, which we’ll need for fizz buzz,” you tell the security guards that are approaching you. + +Just like addition, before we define subtraction we’ll define a predecessor function.
Unlike addition, the predecessor function `pred` is much more complicated than the successor function `succ`. + +The basic idea is we’re going to create a pair to keep track of the previous value. We’ll start from zero and build up `n` but also drag the previous value such that at `n` we also have `n - 1`. Notably, this solution does not figure out how to deal with negative numbers. The predecessor of 0 will be 0, and negatives will have to be dealt with some other time and some other way. + +First, we’ll make a helper function that takes a pair of numbers and returns a new pair where the first number in the old pair is the second number in the new pair, and the new first number is the successor of the old first number. + +``` +pred-helper = λpair. + (make-pair (succ (pair-first pair)) (pair-first pair)) +``` + +Make sense? If we call `pred-helper` on a pair `[0 0]`, the result will be `[1 0]`. If we call it on `[1 0]`, the result will be `[2 1]`. Essentially this helper slides older numbers off to the right. + +Okay, so now we’re going to call `pred-helper` _n_ times, with a starting pair of `[0 0]`, and then get the _second_ value, which should be `n - 1` when we’re done, from the pair. + +``` +pred = λn. + (pair-second (n pred-helper (make-pair 0 0))) +``` + +We can combine these two functions now for the full effect: + +``` +pred = λn. + (pair-second + (n + λpair.(make-pair (succ (pair-first pair)) (pair-first pair)) + (make-pair 0 0))) +``` + +➡️️ [Try it out in your browser!][33] + +Now that we have `pred`, subtraction is easy! To subtract `n` from `m`, we’re going to apply `pred` to `m` _n_ times. + +``` +- = λm.λn.(n pred m) +``` + +Keep in mind that if `n` is equal to _or greater than_ `m`, the result of `(- m n)` will be zero, since there are no negative numbers and the predecessor of `0` is `0`. This fact means we can implement some new logic tests. Let’s make `(ge? m n)` return `true` if `m` is greater than or equal to `n` and make `(le? m n)` return `true` if `m` is less than or equal to `n`. + +``` +ge? = λm.λn.(zero? (- n m)) +le? = λm.λn.(zero? (- m n)) +``` + +If we have greater-than-or-equal-to and less-than-or-equal-to, then we can make equal! + +``` +eq? = λm.λn.(and (ge? m n) (le? m n)) +``` + +Now we have enough for integer division! The idea for integer division of `m` by `n` is we will keep count of the times we can subtract `n` from `m` without going past zero. + +``` +/ = (Y λ/.λm.λn. + (if (eq? m n) + λ_. 1 + λ_. (if (le? m n) + λ_. 0 + λ_. (+ 1 (/ (- m n) n))))) +``` + +Once we have subtraction, multiplication, and integer division, we can create modulo. + +``` +% = λm.λn. (- m (* (/ m n) n)) +``` + +➡️️ [Try it out in your browser!][34] + +### Aside about performance + +You might be wondering about performance at this point. Every time we subtract one from 100, we count up from 0 to 100 to generate 99. This effect compounds itself for division and modulo. The truth is that Church numerals and other encodings aren’t very performant! Just like how tapes in Turing machines aren’t a particularly efficient way to deal with data, Church encodings are most interesting from a theoretical perspective for proving facts about computation. + +That doesn’t mean we can’t make things faster though! + +Lambda calculus is purely functional and side-effect free, which means that all sorts of optimizations can be applied. Functions can be aggressively memoized. In other words, once a specific function and its arguments have been computed, there’s no need to compute them ever again.
The result of that function will always be the same anyways. Further, functions can be computed lazily and only if needed. What this means is if a branch of your program’s execution renders a result that’s never used, the compiler can decide to just not run that part of the program and end up with the exact same result. + +[My interpreter][35] does have side effects, since programs written in it can cause the system to write output to the user via the special built-in function `PRINT_BYTE`. As a result, I didn’t choose lazy evaluation. The only optimization I chose was aggressive memoization for all functions that are side-effect free. The memoization still has room for improvement, but the result is much faster than a naive implementation. + +### Output + +“We’re rounding the corner on fizz buzz!” you shout at the receptionist as security drags you around the corner on the way to the door. “We just need to figure out how to communicate results to the user!” + +Unfortunately, lambda calculus can’t communicate with your operating system kernel without some help, but a small concession is all we need. [Sheepda][35] provides a single built-in function `PRINT_BYTE`. `PRINT_BYTE` takes a number as its argument (a Church encoded numeral) and prints the corresponding byte to the configured output stream (usually `stdout`). + +With `PRINT_BYTE`, we’re going to need to reference a number of different [ASCII bytes][36], so we should make writing numbers in code easier. Earlier we defined numbers 0 - 5, so let’s go ahead and define numbers 6 - 10. + +``` +6 = (succ 5) +7 = (succ 6) +8 = (succ 7) +9 = (succ 8) +10 = (succ 9) +``` + +Now let’s define a helper to create three digit decimal numbers. + +``` +num = λa.λb.λc.(+ (+ (* (* 10 10) a) (* 10 b)) c) +``` + +The newline byte is decimal 10. Here’s a function to print newlines! + +``` +print-newline = λ_.(PRINT_BYTE (num 0 1 0)) +``` + +### Doing multiple things + +Now that we have this `PRINT_BYTE` function, we have functions that can cause side-effects. We want to call `PRINT_BYTE` but we don’t care about its return value. We need a way to call multiple functions in sequence. + +What if we make a function that takes two arguments and throws away the first one again? + +``` +do2 = λ_.λx.x +``` + +Here’s a function to print every value in a list: + +``` +print-list = (Y λrecurse.λlist. + (if (nil? list) + λ_. 0 + λ_. (do2 (PRINT_BYTE (head list)) + (recurse (tail list))))) +``` + +And here’s a function that works like a for loop. It calls `f` with every number from `0` up to, but not including, `n`. It uses a small helper function that continues to call itself until `i` is equal to `n`, and starts `i` off at `0`. + +``` +for = λn.λf.( + (Y λrecurse.λi. + (if (eq? i n) + λ_. void + λ_. (do2 (f i) + (recurse (succ i))))) + 0) +``` + +### Converting an integer to a string + +The last thing we need to complete fizz buzz is a function that turns a number into a string of bytes to print. You might have noticed the `print-num` calls in some of the web-based examples above. We’re going to see how to make it! Writing this function is sometimes a whiteboard problem in its own right. In C, this function is called `itoa`, for integer to ASCII. + +Here’s an example of how it works. Imagine the number we’re converting to bytes is `123`. We can get the `3` out by doing `(% 123 10)`, which will be `3`. Then we can divide by `10` to get `12`, and then start over. `(% 12 10)` is `2`. We’ll loop down until we hit zero.
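+ +For comparison, and in the spirit of the Javascript equivalents earlier in this post, here’s a rough sketch of this digit-peeling loop in Javascript (`digits` is a made-up name for illustration only; the real lambda calculus `itoa` follows below): + +``` +function digits(n) { + let result = []; + do { + result.unshift(n % 10); // peel off the least significant digit + n = Math.floor(n / 10); // integer-divide to move to the next digit + } while (n > 0); + return result; // digits(123) -> [1, 2, 3]; digits(0) -> [0] +} +```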
+ +Once we have a number, we can convert it to ASCII by adding the value of the `'0'` ASCII byte. Then we can make a list of ASCII bytes for use with `print-list`. + +``` +zero-char = (num 0 4 8) # the ascii code for the byte that represents 0. + +itoa = λn.( + (Y λrecurse.λn.λresult. + (if (zero? n) + λ_. (if (nil? result) + λ_. (cons zero-char nil) + λ_. result) + λ_. (recurse (/ n 10) (cons (+ zero-char (% n 10)) result)))) + n nil) + +print-num = λn.(print-list (itoa n)) +``` + +### Fizz buzz + +“Here we go,” you shout at the building you just got kicked out of, “here’s how you do fizz buzz.” + +First, we need to define three strings: “Fizz”, “Buzz”, and “Fizzbuzz”. + +``` +fizzmsg = (cons (num 0 7 0) # F + (cons (num 1 0 5) # i + (cons (num 1 2 2) # z + (cons (num 1 2 2) # z + nil)))) +buzzmsg = (cons (num 0 6 6) # B + (cons (num 1 1 7) # u + (cons (num 1 2 2) # z + (cons (num 1 2 2) # z + nil)))) +fizzbuzzmsg = (cons (num 0 7 0) # F + (cons (num 1 0 5) # i + (cons (num 1 2 2) # z + (cons (num 1 2 2) # z + (cons (num 0 9 8) # b + (cons (num 1 1 7) # u + (cons (num 1 2 2) # z + (cons (num 1 2 2) # z + nil)))))))) +``` + +Okay, now let’s define a function that will run from 0 up to, but not including, `n`, and output numbers, fizzes, and buzzes: + +``` +fizzbuzz = λn. + (for n λi. + (do2 + (if (zero? (% i 3)) + λ_. (if (zero? (% i 5)) + λ_. (print-list fizzbuzzmsg) + λ_. (print-list fizzmsg)) + λ_. (if (zero? (% i 5)) + λ_. (print-list buzzmsg) + λ_. (print-list (itoa i)))) + (print-newline 0))) +``` + +Let’s do the first 20! + +``` +(fizzbuzz (num 0 2 0)) +``` + +➡️️ [Try it out in your browser!][37] + +### Reverse a string + +“ENCORE!” you shout to no one as the last cars pull out of the company parking lot. Everyone’s gone home but this is your last night before the restraining order goes through. + +``` +reverse-list = λlist.( + (Y λrecurse.λold.λnew. + (if (nil? old) + λ_.new + λ_.(recurse (tail old) (cons (head old) new)))) + list nil) +``` + +➡️️ [Try it out in your browser!][38] + +### Sheepda + +As I mentioned, I wrote a lambda calculus interpreter called [Sheepda][35] for playing around. By itself it’s pretty interesting if you’re interested in learning more about how to write programming language interpreters. Lambda calculus is as simple of a language as you can make, so the interpreter is very simple itself! + +It’s written in Go and thanks to [GopherJS][39] it’s what powers the [web playground][40]. + +There are some fun projects if someone’s interested in getting more involved. Using the library to prune lambda expression trees and simplify expressions if possible would be a start! I’m sure my fizz buzz implementation isn’t as minimal as it could be, and playing [code golf][41] with it would be pretty neat! + +Feel free to fork [Sheepda][35], star it, bop it, twist it, or even pull it!
+ +-------------------------------------------------------------------------------- + +via: https://www.jtolio.com/2017/03/whiteboard-problems-in-pure-lambda-calculus + +作者:[jtolio.com][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://www.jtolio.com/ +[b]: https://github.com/lujun9972 +[1]: https://www.vivint.com/ +[2]: https://www.spacemonkey.com/ +[3]: https://en.wikipedia.org/wiki/Festivus +[4]: https://twitter.com/aphyr +[5]: https://aphyr.com/posts/340-acing-the-technical-interview +[6]: https://en.wikipedia.org/wiki/Church_encoding +[7]: https://en.wikipedia.org/wiki/Lambda_calculus +[8]: https://en.wikipedia.org/wiki/Turing_completeness +[9]: https://imranontech.com/2007/01/24/using-fizzbuzz-to-find-developers-who-grok-coding/ +[10]: https://jtolds.github.io/sheepda/#JTdCJTIyc3RkbGliJTIyJTNBZmFsc2UlMkMlMjJvdXRwdXQlMjIlM0ElMjJvdXRwdXQlMjIlMkMlMjJjb2RlJTIyJTNBJTIyKCVDRSVCQlUuKCVDRSVCQlkuKCVDRSVCQnZvaWQuKCVDRSVCQjAuKCVDRSVCQnN1Y2MuKCVDRSVCQiUyQi4oJUNFJUJCKi4oJUNFJUJCMS4oJUNFJUJCMi4oJUNFJUJCMy4oJUNFJUJCNC4oJUNFJUJCNS4oJUNFJUJCNi4oJUNFJUJCNy4oJUNFJUJCOC4oJUNFJUJCOS4oJUNFJUJCMTAuKCVDRSVCQm51bS4oJUNFJUJCdHJ1ZS4oJUNFJUJCZmFsc2UuKCVDRSVCQmlmLiglQ0UlQkJub3QuKCVDRSVCQmFuZC4oJUNFJUJCb3IuKCVDRSVCQm1ha2UtcGFpci4oJUNFJUJCcGFpci1maXJzdC4oJUNFJUJCcGFpci1zZWNvbmQuKCVDRSVCQnplcm8lM0YuKCVDRSVCQnByZWQuKCVDRSVCQi0uKCVDRSVCQmVxJTNGLiglQ0UlQkIlMkYuKCVDRSVCQiUyNS4oJUNFJUJCbmlsLiglQ0UlQkJuaWwlM0YuKCVDRSVCQmNvbnMuKCVDRSVCQmNhci4oJUNFJUJCY2RyLiglQ0UlQkJkbzIuKCVDRSVCQmRvMy4oJUNFJUJCZG80LiglQ0UlQkJmb3IuKCVDRSVCQnByaW50LWJ5dGUuKCVDRSVCQnByaW50LWxpc3QuKCVDRSVCQnByaW50LW5ld2xpbmUuKCVDRSVCQnplcm8tYnl0ZS4oJUNFJUJCaXRvYS4oJUNFJUJCZml6em1zZy4oJUNFJUJCYnV6em1zZy4oJUNFJUJCZml6emJ1enptc2cuKCVDRSVCQmZpenpidXp6LihmaXp6YnV6eiUyMCgoKG51bSUyMDEpJTIwMCklMjAxKSklMjAlQ0UlQkJuLigoZm9yJTIwbiklMjAlQ0UlQkJpLigoZG8yJTIwKCgoaWYlMjAoemVybyUzRiUyMCgoJTI1JTIwaSklMjAzKSkpJTIwJUNFJUJCXy4oKChpZiUyMCh6ZXJvJTNGJTIwKCglMjUlMjBpKSUyMDUpKSklMjAlQ0UlQkJfLihwcmludC1saXN0JTIwZml6emJ1enptc2cpKSUyMCVDRSVCQl8uKHByaW50LWxpc3QlMjBmaXp6bXNnKSkpJTIwJUNFJUJCXy4oKChpZiUyMCh6ZXJvJTNGJTIwKCglMjUlMjBpKSUyMDUpKSklMjAlQ0UlQkJfLihwcmludC1saXN0JTIwYnV6em1zZykpJTIwJUNFJUJCXy4ocHJpbnQtbGlzdCUyMChpdG9hJTIwaSkpKSkpJTIwKHByaW50LW5ld2xpbmUlMjBuaWwpKSkpJTIwKChjb25zJTIwKCgobnVtJTIwMCklMjA3KSUyMDApKSUyMCgoY29ucyUyMCgoKG51bSUyMDEpJTIwMCklMjA1KSklMjAoKGNvbnMlMjAoKChudW0lMjAxKSUyMDIpJTIwMikpJTIwKChjb25zJTIwKCgobnVtJTIwMSklMjAyKSUyMDIpKSUyMCgoY29ucyUyMCgoKG51bSUyMDApJTIwOSklMjA4KSklMjAoKGNvbnMlMjAoKChudW0lMjAxKSUyMDEpJTIwNykpJTIwKChjb25zJTIwKCgobnVtJTIwMSklMjAyKSUyMDIpKSUyMCgoY29ucyUyMCgoKG51bSUyMDEpJTIwMiklMjAyKSklMjBuaWwpKSkpKSkpKSklMjAoKGNvbnMlMjAoKChudW0lMjAwKSUyMDYpJTIwNikpJTIwKChjb25zJTIwKCgobnVtJTIwMSklMjAxKSUyMDcpKSUyMCgoY29ucyUyMCgoKG51bSUyMDEpJTIwMiklMjAyKSklMjAoKGNvbnMlMjAoKChudW0lMjAxKSUyMDIpJTIwMikpJTIwbmlsKSkpKSklMjAoKGNvbnMlMjAoKChudW0lMjAwKSUyMDcpJTIwMCkpJTIwKChjb25zJTIwKCgobnVtJTIwMSklMjAwKSUyMDUpKSUyMCgoY29ucyUyMCgoKG51bSUyMDEpJTIwMiklMjAyKSklMjAoKGNvbnMlMjAoKChudW0lMjAxKSUyMDIpJTIwMikpJTIwbmlsKSkpKSklMjAlQ0UlQkJuLigoKFklMjAlQ0UlQkJyZWN1cnNlLiVDRSVCQm4uJUNFJUJCcmVzdWx0LigoKGlmJTIwKHplcm8lM0YlMjBuKSklMjAlQ0UlQkJfLigoKGlmJTIwKG5pbCUzRiUyMHJlc3VsdCkpJTIwJUNFJUJCXy4oKGNvbnMlMjB6ZXJvLWJ5dGUpJTIwbmlsKSklMjAlQ0UlQkJfLnJlc3VsdCkpJTIwJUNFJUJCXy4oKHJlY3Vyc2UlMjAoKCUyRiUyMG4pJTIwMTApKSUyMCgoY29ucyUyMCgoJTJCJTIwemVyby1ieXRlKSUyMCgoJTI1JTIwbiklMjAxMCkpKSUyMHJlc3VsdCkpKSklMjBuKSUyMG5pbCkpJTIwKCgobnVtJTIwMCklMjA0KSUyMDgp
KSUyMCVDRSVCQl8uKHByaW50LWJ5dGUlMjAoKChudW0lMjAwKSUyMDEpJTIwMCkpKSUyMChZJTIwJUNFJUJCcmVjdXJzZS4lQ0UlQkJsLigoKGlmJTIwKG5pbCUzRiUyMGwpKSUyMCVDRSVCQl8udm9pZCklMjAlQ0UlQkJfLigoZG8yJTIwKHByaW50LWJ5dGUlMjAoY2FyJTIwbCkpKSUyMChyZWN1cnNlJTIwKGNkciUyMGwpKSkpKSklMjBQUklOVF9CWVRFKSUyMCVDRSVCQm4uJUNFJUJCZi4oKCgoWSUyMCVDRSVCQnJlY3Vyc2UuJUNFJUJCcmVtYWluaW5nLiVDRSVCQmN1cnJlbnQuJUNFJUJCZi4oKChpZiUyMCh6ZXJvJTNGJTIwcmVtYWluaW5nKSklMjAlQ0UlQkJfLnZvaWQpJTIwJUNFJUJCXy4oKGRvMiUyMChmJTIwY3VycmVudCkpJTIwKCgocmVjdXJzZSUyMChwcmVkJTIwcmVtYWluaW5nKSklMjAoc3VjYyUyMGN1cnJlbnQpKSUyMGYpKSkpJTIwbiklMjAwKSUyMGYpKSUyMCVDRSVCQmEuZG8zKSUyMCVDRSVCQmEuZG8yKSUyMCVDRSVCQmEuJUNFJUJCYi5iKSUyMCVDRSVCQmwuKHBhaXItc2Vjb25kJTIwKHBhaXItc2Vjb25kJTIwbCkpKSUyMCVDRSVCQmwuKHBhaXItZmlyc3QlMjAocGFpci1zZWNvbmQlMjBsKSkpJTIwJUNFJUJCZS4lQ0UlQkJsLigobWFrZS1wYWlyJTIwdHJ1ZSklMjAoKG1ha2UtcGFpciUyMGUpJTIwbCkpKSUyMCVDRSVCQmwuKG5vdCUyMChwYWlyLWZpcnN0JTIwbCkpKSUyMCgobWFrZS1wYWlyJTIwZmFsc2UpJTIwdm9pZCkpJTIwJUNFJUJCbS4lQ0UlQkJuLigoLSUyMG0pJTIwKCgqJTIwKCglMkYlMjBtKSUyMG4pKSUyMG4pKSklMjAoWSUyMCVDRSVCQiUyRi4lQ0UlQkJtLiVDRSVCQm4uKCgoaWYlMjAoKGVxJTNGJTIwbSklMjBuKSklMjAlQ0UlQkJfLjEpJTIwJUNFJUJCXy4oKChpZiUyMCh6ZXJvJTNGJTIwKCgtJTIwbSklMjBuKSkpJTIwJUNFJUJCXy4wKSUyMCVDRSVCQl8uKCglMkIlMjAxKSUyMCgoJTJGJTIwKCgtJTIwbSklMjBuKSklMjBuKSkpKSkpJTIwJUNFJUJCbS4lQ0UlQkJuLigoYW5kJTIwKHplcm8lM0YlMjAoKC0lMjBtKSUyMG4pKSklMjAoemVybyUzRiUyMCgoLSUyMG4pJTIwbSkpKSklMjAlQ0UlQkJtLiVDRSVCQm4uKChuJTIwcHJlZCklMjBtKSklMjAlQ0UlQkJuLigoKCVDRSVCQm4uJUNFJUJCZi4lQ0UlQkJ4LihwYWlyLXNlY29uZCUyMCgobiUyMCVDRSVCQnAuKChtYWtlLXBhaXIlMjAoZiUyMChwYWlyLWZpcnN0JTIwcCkpKSUyMChwYWlyLWZpcnN0JTIwcCkpKSUyMCgobWFrZS1wYWlyJTIweCklMjB4KSkpJTIwbiklMjBzdWNjKSUyMDApKSUyMCVDRSVCQm4uKChuJTIwJUNFJUJCXy5mYWxzZSklMjB0cnVlKSklMjAlQ0UlQkJwLihwJTIwZmFsc2UpKSUyMCVDRSVCQnAuKHAlMjB0cnVlKSklMjAlQ0UlQkJ4LiVDRSVCQnkuJUNFJUJCdC4oKHQlMjB4KSUyMHkpKSUyMCVDRSVCQmEuJUNFJUJCYi4oKGElMjB0cnVlKSUyMGIpKSUyMCVDRSVCQmEuJUNFJUJCYi4oKGElMjBiKSUyMGZhbHNlKSklMjAlQ0UlQkJwLiVDRSVCQnQuJUNFJUJCZi4oKHAlMjBmKSUyMHQpKSUyMCVDRSVCQnAuJUNFJUJCYS4lQ0UlQkJiLigoKHAlMjBhKSUyMGIpJTIwdm9pZCkpJTIwJUNFJUJCdC4lQ0UlQkJmLmYpJTIwJUNFJUJCdC4lQ0UlQkJmLnQpJTIwJUNFJUJCYS4lQ0UlQkJiLiVDRSVCQmMuKCglMkIlMjAoKCUyQiUyMCgoKiUyMCgoKiUyMDEwKSUyMDEwKSklMjBhKSklMjAoKColMjAxMCklMjBiKSkpJTIwYykpJTIwKHN1Y2MlMjA5KSklMjAoc3VjYyUyMDgpKSUyMChzdWNjJTIwNykpJTIwKHN1Y2MlMjA2KSklMjAoc3VjYyUyMDUpKSUyMChzdWNjJTIwNCkpJTIwKHN1Y2MlMjAzKSklMjAoc3VjYyUyMDIpKSUyMChzdWNjJTIwMSkpJTIwKHN1Y2MlMjAwKSklMjAlQ0UlQkJtLiVDRSVCQm4uJUNFJUJCeC4obSUyMChuJTIweCkpKSUyMCVDRSVCQm0uJUNFJUJCbi4lQ0UlQkJmLiVDRSVCQnguKCgoKG0lMjBzdWNjKSUyMG4pJTIwZiklMjB4KSklMjAlQ0UlQkJuLiVDRSVCQmYuJUNFJUJCeC4oZiUyMCgobiUyMGYpJTIweCkpKSUyMCVDRSVCQmYuJUNFJUJCeC54KSUyMCVDRSVCQnguKFUlMjBVKSklMjAoVSUyMCVDRSVCQmguJUNFJUJCZi4oZiUyMCVDRSVCQnguKCgoaCUyMGgpJTIwZiklMjB4KSkpKSUyMCVDRSVCQmYuKGYlMjBmKSklNUNuJTIyJTdE +[11]: https://en.wikipedia.org/wiki/Currying +[12]: https://en.wikipedia.org/wiki/Alonzo_Church +[13]: https://en.wikipedia.org/wiki/Church_encoding#Church_numerals +[14]: https://en.wikipedia.org/wiki/Peano_axioms#Arithmetic +[15]: 
https://jtolds.github.io/sheepda/#JTdCJTIyc3RkbGliJTIyJTNBZmFsc2UlMkMlMjJvdXRwdXQlMjIlM0ElMjJyZXN1bHQlMjIlMkMlMjJjb2RlJTIyJTNBJTIyMCUyMCUzRCUyMCVDRSVCQmYuJUNFJUJCeC54JTVDbjElMjAlM0QlMjAlQ0UlQkJmLiVDRSVCQnguKGYlMjB4KSU1Q24yJTIwJTNEJTIwJUNFJUJCZi4lQ0UlQkJ4LihmJTIwKGYlMjB4KSklNUNuc3VjYyUyMCUzRCUyMCVDRSVCQm4uJUNFJUJCZi4lQ0UlQkJ4LihmJTIwKChuJTIwZiklMjB4KSklNUNuJTVDbnRydWUlMjAlMjAlM0QlMjAlQ0UlQkJ0LiVDRSVCQmYudCU1Q25mYWxzZSUyMCUzRCUyMCVDRSVCQnQuJUNFJUJCZi5mJTVDbiU1Q256ZXJvJTNGJTIwJTNEJTIwJUNFJUJCbi4oKG4lMjAlQ0UlQkJfLmZhbHNlKSUyMHRydWUpJTVDbiU1Q24lMjMlMjB0cnklMjBjaGFuZ2luZyUyMHRoZSUyMG51bWJlciUyMHplcm8lM0YlMjBpcyUyMGNhbGxlZCUyMHdpdGglNUNuKHplcm8lM0YlMjAwKSU1Q24lNUNuJTIzJTIwdGhlJTIwb3V0cHV0JTIwd2lsbCUyMGJlJTIwJTVDJTIyJUNFJUJCdC4lQ0UlQkJmLnQlNUMlMjIlMjBmb3IlMjB0cnVlJTIwYW5kJTIwJTVDJTIyJUNFJUJCdC4lQ0UlQkJmLmYlNUMlMjIlMjBmb3IlMjBmYWxzZS4lMjIlN0Q= +[16]: https://en.wikipedia.org/wiki/Eager_evaluation +[17]: https://en.wikipedia.org/wiki/Lazy_evaluation +[18]: https://en.wikipedia.org/wiki/Syntactic_sugar +[19]: https://jtolds.github.io/sheepda/#JTdCJTIyc3RkbGliJTIyJTNBZmFsc2UlMkMlMjJvdXRwdXQlMjIlM0ElMjJyZXN1bHQlMjIlMkMlMjJjb2RlJTIyJTNBJTIyMCUyMCUzRCUyMCVDRSVCQmYuJUNFJUJCeC54JTVDbjElMjAlM0QlMjAlQ0UlQkJmLiVDRSVCQnguKGYlMjB4KSU1Q24yJTIwJTNEJTIwJUNFJUJCZi4lQ0UlQkJ4LihmJTIwKGYlMjB4KSklNUNuMyUyMCUzRCUyMCVDRSVCQmYuJUNFJUJCeC4oZiUyMChmJTIwKGYlMjB4KSkpJTVDbnN1Y2MlMjAlM0QlMjAlQ0UlQkJuLiVDRSVCQmYuJUNFJUJCeC4oZiUyMCgobiUyMGYpJTIweCkpJTVDbiU1Q250cnVlJTIwJTIwJTNEJTIwJUNFJUJCdC4lQ0UlQkJmLnQlNUNuZmFsc2UlMjAlM0QlMjAlQ0UlQkJ0LiVDRSVCQmYuZiU1Q24lNUNuemVybyUzRiUyMCUzRCUyMCVDRSVCQm4uKChuJTIwJUNFJUJCXy5mYWxzZSklMjB0cnVlKSU1Q24lNUNuaWYlMjAlM0QlMjAlQ0UlQkJwLiVDRSVCQmEuJUNFJUJCYi4oKChwJTIwYSklMjBiKSUyMDApJTVDbmFuZCUyMCUzRCUyMCVDRSVCQmEuJUNFJUJCYi4oYSUyMGIlMjBmYWxzZSklNUNub3IlMjAlM0QlMjAlQ0UlQkJhLiVDRSVCQmIuKGElMjB0cnVlJTIwYiklNUNubm90JTIwJTNEJTIwJUNFJUJCcC4lQ0UlQkJ0LiVDRSVCQmYuKHAlMjBmJTIwdCklNUNuJTVDbiUyMyUyMHRyeSUyMGNoYW5naW5nJTIwdGhpcyUyMHVwISU1Q24oaWYlMjAob3IlMjAoemVybyUzRiUyMDEpJTIwKHplcm8lM0YlMjAwKSklNUNuJTIwJTIwJTIwJTIwJUNFJUJCXy4lMjAyJTVDbiUyMCUyMCUyMCUyMCVDRSVCQl8uJTIwMyklMjIlN0Q= +[20]: https://jtolds.github.io/sheepda/#JTdCJTIyc3RkbGliJTIyJTNBZmFsc2UlMkMlMjJvdXRwdXQlMjIlM0ElMjJyZXN1bHQlMjIlMkMlMjJjb2RlJTIyJTNBJTIyMCUyMCUzRCUyMCVDRSVCQmYuJUNFJUJCeC54JTVDbjElMjAlM0QlMjAlQ0UlQkJmLiVDRSVCQnguKGYlMjB4KSU1Q24yJTIwJTNEJTIwJUNFJUJCZi4lQ0UlQkJ4LihmJTIwKGYlMjB4KSklNUNuMyUyMCUzRCUyMCVDRSVCQmYuJUNFJUJCeC4oZiUyMChmJTIwKGYlMjB4KSkpJTVDbiU1Q250cnVlJTIwJTIwJTNEJTIwJUNFJUJCdC4lQ0UlQkJmLnQlNUNuZmFsc2UlMjAlM0QlMjAlQ0UlQkJ0LiVDRSVCQmYuZiU1Q24lNUNubWFrZS1wYWlyJTIwJTNEJTIwJUNFJUJCeC4lQ0UlQkJ5LiUyMCVDRSVCQmEuKGElMjB4JTIweSklNUNucGFpci1maXJzdCUyMCUzRCUyMCVDRSVCQnAuKHAlMjB0cnVlKSU1Q25wYWlyLXNlY29uZCUyMCUzRCUyMCVDRSVCQnAuKHAlMjBmYWxzZSklNUNuJTVDbiUyMyUyMHRyeSUyMGNoYW5naW5nJTIwdGhpcyUyMHVwISU1Q25wJTIwJTNEJTIwKG1ha2UtcGFpciUyMDIlMjAzKSU1Q24ocGFpci1zZWNvbmQlMjBwKSUyMiU3RA== +[21]: https://en.wikipedia.org/wiki/Linked_list +[22]: https://en.wikipedia.org/wiki/Lisp_%28programming_language%29 +[23]: https://en.wikipedia.org/wiki/CAR_and_CDR#Etymology +[24]: 
https://jtolds.github.io/sheepda/#JTdCJTIyc3RkbGliJTIyJTNBZmFsc2UlMkMlMjJvdXRwdXQlMjIlM0ElMjJyZXN1bHQlMjIlMkMlMjJjb2RlJTIyJTNBJTIyMCUyMCUzRCUyMCVDRSVCQmYuJUNFJUJCeC54JTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwMSUyMCUzRCUyMCVDRSVCQmYuJUNFJUJCeC4oZiUyMHgpJTIwJTIwJTIwJTIwJTIwMiUyMCUzRCUyMCVDRSVCQmYuJUNFJUJCeC4oZiUyMChmJTIweCkpJTIwJTIwJTIwJTIwMyUyMCUzRCUyMCVDRSVCQmYuJUNFJUJCeC4oZiUyMChmJTIwKGYlMjB4KSkpJTVDbnRydWUlMjAlMjAlM0QlMjAlQ0UlQkJ0LiVDRSVCQmYudCUyMCUyMCUyMCUyMGZhbHNlJTIwJTNEJTIwJUNFJUJCdC4lQ0UlQkJmLmYlNUNuJTVDbm1ha2UtcGFpciUyMCUzRCUyMCVDRSVCQnguJUNFJUJCeS4lMjAlQ0UlQkJhLihhJTIweCUyMHkpJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwcGFpci1maXJzdCUyMCUzRCUyMCVDRSVCQnAuKHAlMjB0cnVlKSUyMCUyMCUyMCUyMCUyMHBhaXItc2Vjb25kJTIwJTNEJTIwJUNFJUJCcC4ocCUyMGZhbHNlKSU1Q24lNUNubmlsJTIwJTNEJTIwKG1ha2UtcGFpciUyMGZhbHNlJTIwZmFsc2UpJTIwJTIwJTIwJTIwJTIwbmlsJTNGJTIwJTNEJTIwJUNFJUJCbC4lMjAobm90JTIwKHBhaXItZmlyc3QlMjBsKSklNUNuY29ucyUyMCUzRCUyMCVDRSVCQnZhbHVlLiVDRSVCQmxpc3QuKG1ha2UtcGFpciUyMHRydWUlMjAobWFrZS1wYWlyJTIwdmFsdWUlMjBsaXN0KSklNUNuJTVDbmhlYWQlMjAlM0QlMjAlQ0UlQkJsaXN0LiUyMChwYWlyLWZpcnN0JTIwKHBhaXItc2Vjb25kJTIwbGlzdCkpJTVDbnRhaWwlMjAlM0QlMjAlQ0UlQkJsaXN0LiUyMChwYWlyLXNlY29uZCUyMChwYWlyLXNlY29uZCUyMGxpc3QpKSU1Q24lNUNuJTIzJTIwdHJ5JTIwY2hhbmdpbmclMjB0aGlzJTIwdXAhJTVDbmwlMjAlM0QlMjAoY29ucyUyMDElMjAoY29ucyUyMDIlMjAoY29ucyUyMDMlMjBuaWwpKSklNUNuKGhlYWQlMjAodGFpbCUyMGwpKSUyMiU3RA== +[25]: https://en.wikipedia.org/wiki/Recursion +[26]: https://jtolds.github.io/sheepda/#JTdCJTIyc3RkbGliJTIyJTNBdHJ1ZSUyQyUyMm91dHB1dCUyMiUzQSUyMm91dHB1dCUyMiUyQyUyMmNvZGUlMjIlM0ElMjJzdW0lMjAlM0QlMjAlQ0UlQkJoZWxwZXIuJUNFJUJCbGlzdC4lNUNuJTIwJTIwKGlmJTIwKG5pbCUzRiUyMGxpc3QpJTVDbiUyMCUyMCUyMCUyMCUyMCUyMCVDRSVCQl8uJTIwMCU1Q24lMjAlMjAlMjAlMjAlMjAlMjAlQ0UlQkJfLiUyMCglMkIlMjAoaGVhZCUyMGxpc3QpJTIwKGhlbHBlciUyMGhlbHBlciUyMCh0YWlsJTIwbGlzdCkpKSklNUNuJTVDbnJlc3VsdCUyMCUzRCUyMChzdW0lMjBzdW0lMjAoY29ucyUyMDElMjAoY29ucyUyMDIlMjAoY29ucyUyMDMlMjBuaWwpKSkpJTVDbiU1Q24lMjMlMjB3ZSdsbCUyMGV4cGxhaW4lMjBob3clMjBwcmludC1udW0lMjB3b3JrcyUyMGxhdGVyJTJDJTIwYnV0JTIwd2UlMjBuZWVkJTIwaXQlMjB0byUyMHNob3clMjB0aGF0JTIwc3VtJTIwaXMlMjB3b3JraW5nJTVDbihwcmludC1udW0lMjByZXN1bHQpJTIyJTdE +[27]: https://en.wikipedia.org/wiki/Fixed-point_combinator#Fixed_point_combinators_in_lambda_calculus +[28]: https://www.ycombinator.com/ +[29]: https://news.ycombinator.com/ +[30]: http://matt.might.net/articles/implementation-of-recursive-fixed-point-y-combinator-in-javascript-for-memoization/ +[31]: http://kestas.kuliukas.com/YCombinatorExplained/ +[32]: https://jtolds.github.io/sheepda/#JTdCJTIyc3RkbGliJTIyJTNBdHJ1ZSUyQyUyMm91dHB1dCUyMiUzQSUyMm91dHB1dCUyMiUyQyUyMmNvZGUlMjIlM0ElMjJZJTIwJTNEJTIwJUNFJUJCZi4oJUNFJUJCeC4oeCUyMHgpJTIwJUNFJUJCeC4oZiUyMCVDRSVCQnkuKCh4JTIweCklMjB5KSkpJTVDbiU1Q25zdW0lMjAlM0QlMjAoWSUyMCVDRSVCQmhlbHBlci4lQ0UlQkJsaXN0LiU1Q24lMjAlMjAoaWYlMjAobmlsJTNGJTIwbGlzdCklNUNuJTIwJTIwJTIwJTIwJTIwJTIwJUNFJUJCXy4lMjAwJTVDbiUyMCUyMCUyMCUyMCUyMCUyMCVDRSVCQl8uJTIwKCUyQiUyMChoZWFkJTIwbGlzdCklMjAoaGVscGVyJTIwKHRhaWwlMjBsaXN0KSkpKSklNUNuJTVDbiUyMyUyMHdlJ2xsJTIwZXhwbGFpbiUyMGhvdyUyMHRoaXMlMjB3b3JrcyUyMGxhdGVyJTJDJTIwYnV0JTIwd2UlMjBuZWVkJTIwaXQlMjB0byUyMHNob3clMjB0aGF0JTIwc3VtJTIwaXMlMjB3b3JraW5nJTVDbnByaW50LW51bSUyMCUzRCUyMCVDRSVCQm4uKHByaW50LWxpc3QlMjAoaXRvYSUyMG4pKSU1Q24lNUNuKHByaW50LW51bSUyMChzdW0lMjAoY29ucyUyMDElMjAoY29ucyUyMDIlMjAoY29ucyUyMDMlMjBuaWwpKSkpKSUyMiU3RA +[33]: 
https://jtolds.github.io/sheepda/#JTdCJTIyc3RkbGliJTIyJTNBdHJ1ZSUyQyUyMm91dHB1dCUyMiUzQSUyMm91dHB1dCUyMiUyQyUyMmNvZGUlMjIlM0ElMjIwJTIwJTNEJTIwJUNFJUJCZi4lQ0UlQkJ4LnglNUNuMSUyMCUzRCUyMCVDRSVCQmYuJUNFJUJCeC4oZiUyMHgpJTVDbjIlMjAlM0QlMjAlQ0UlQkJmLiVDRSVCQnguKGYlMjAoZiUyMHgpKSU1Q24zJTIwJTNEJTIwJUNFJUJCZi4lQ0UlQkJ4LihmJTIwKGYlMjAoZiUyMHgpKSklNUNuJTVDbnByZWQlMjAlM0QlMjAlQ0UlQkJuLiU1Q24lMjAlMjAocGFpci1zZWNvbmQlNUNuJTIwJTIwJTIwJTIwKG4lNUNuJTIwJTIwJTIwJTIwJTIwJUNFJUJCcGFpci4obWFrZS1wYWlyJTIwKHN1Y2MlMjAocGFpci1maXJzdCUyMHBhaXIpKSUyMChwYWlyLWZpcnN0JTIwcGFpcikpJTVDbiUyMCUyMCUyMCUyMCUyMChtYWtlLXBhaXIlMjAwJTIwMCkpKSU1Q24lNUNuJTIzJTIwd2UnbGwlMjBleHBsYWluJTIwaG93JTIwcHJpbnQtbnVtJTIwd29ya3MlMjBsYXRlciElNUNuKHByaW50LW51bSUyMChwcmVkJTIwMykpJTVDbiUyMiU3RA== +[34]: https://jtolds.github.io/sheepda/#JTdCJTIyc3RkbGliJTIyJTNBdHJ1ZSUyQyUyMm91dHB1dCUyMiUzQSUyMm91dHB1dCUyMiUyQyUyMmNvZGUlMjIlM0ElMjIlMkIlMjAlM0QlMjAlQ0UlQkJtLiVDRSVCQm4uKG0lMjBzdWNjJTIwbiklNUNuKiUyMCUzRCUyMCVDRSVCQm0uJUNFJUJCbi4obiUyMCglMkIlMjBtKSUyMDApJTVDbi0lMjAlM0QlMjAlQ0UlQkJtLiVDRSVCQm4uKG4lMjBwcmVkJTIwbSklNUNuJTJGJTIwJTNEJTIwKFklMjAlQ0UlQkIlMkYuJUNFJUJCbS4lQ0UlQkJuLiU1Q24lMjAlMjAoaWYlMjAoZXElM0YlMjBtJTIwbiklNUNuJTIwJTIwJTIwJTIwJTIwJTIwJUNFJUJCXy4lMjAxJTVDbiUyMCUyMCUyMCUyMCUyMCUyMCVDRSVCQl8uJTIwKGlmJTIwKGxlJTNGJTIwbSUyMG4pJTVDbiUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCVDRSVCQl8uJTIwMCU1Q24lMjAlMjAlMjAlMjAlMjAlMjAlMjAlMjAlMjAlMjAlMjAlMjAlMjAlMjAlQ0UlQkJfLiUyMCglMkIlMjAxJTIwKCUyRiUyMCgtJTIwbSUyMG4pJTIwbikpKSkpJTVDbiUyNSUyMCUzRCUyMCVDRSVCQm0uJUNFJUJCbi4lMjAoLSUyMG0lMjAoKiUyMCglMkYlMjBtJTIwbiklMjBuKSklNUNuJTVDbihwcmludC1udW0lMjAoJTI1JTIwNyUyMDMpKSUyMiU3RA== +[35]: https://github.com/jtolds/sheepda/ +[36]: https://en.wikipedia.org/wiki/ASCII#Code_chart +[37]: https://jtolds.github.io/sheepda/#JTdCJTIyc3RkbGliJTIyJTNBdHJ1ZSUyQyUyMm91dHB1dCUyMiUzQSUyMm91dHB1dCUyMiUyQyUyMmNvZGUlMjIlM0ElMjIlMjMlMjBkZWZpbmUlMjB0aGUlMjBtZXNzYWdlcyU1Q25maXp6bXNnJTIwJTNEJTIwKGNvbnMlMjAobnVtJTIwMCUyMDclMjAwKSUyMChjb25zJTIwKG51bSUyMDElMjAwJTIwNSklMjAoY29ucyUyMChudW0lMjAxJTIwMiUyMDIpJTIwKGNvbnMlMjAobnVtJTIwMSUyMDIlMjAyKSUyMG5pbCkpKSklNUNuYnV6em1zZyUyMCUzRCUyMChjb25zJTIwKG51bSUyMDAlMjA2JTIwNiklMjAoY29ucyUyMChudW0lMjAxJTIwMSUyMDcpJTIwKGNvbnMlMjAobnVtJTIwMSUyMDIlMjAyKSUyMChjb25zJTIwKG51bSUyMDElMjAyJTIwMiklMjBuaWwpKSkpJTVDbmZpenpidXp6bXNnJTIwJTNEJTIwKGNvbnMlMjAobnVtJTIwMCUyMDclMjAwKSUyMChjb25zJTIwKG51bSUyMDElMjAwJTIwNSklMjAoY29ucyUyMChudW0lMjAxJTIwMiUyMDIpJTIwKGNvbnMlMjAobnVtJTIwMSUyMDIlMjAyKSU1Q24lMjAlMjAlMjAlMjAoY29ucyUyMChudW0lMjAwJTIwOSUyMDgpJTIwKGNvbnMlMjAobnVtJTIwMSUyMDElMjA3KSUyMChjb25zJTIwKG51bSUyMDElMjAyJTIwMiklMjAoY29ucyUyMChudW0lMjAxJTIwMiUyMDIpJTIwbmlsKSkpKSkpKSklNUNuJTVDbiUyMyUyMGZpenpidXp6JTVDbmZpenpidXp6JTIwJTNEJTIwJUNFJUJCbi4lNUNuJTIwJTIwKGZvciUyMG4lMjAlQ0UlQkJpLiU1Q24lMjAlMjAlMjAlMjAoZG8yJTVDbiUyMCUyMCUyMCUyMCUyMCUyMChpZiUyMCh6ZXJvJTNGJTIwKCUyNSUyMGklMjAzKSklNUNuJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwJUNFJUJCXy4lMjAoaWYlMjAoemVybyUzRiUyMCglMjUlMjBpJTIwNSkpJTVDbiUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCVDRSVCQl8uJTIwKHByaW50LWxpc3QlMjBmaXp6YnV6em1zZyklNUNuJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwJUNFJUJCXy4lMjAocHJpbnQtbGlzdCUyMGZpenptc2cpKSU1Q24lMjAlMjAlMjAlMjAlMjAlMjAlMjAlMjAlMjAlMjAlQ0UlQkJfLiUyMChpZiUyMCh6ZXJvJTNGJTIwKCUyNSUyMGklMjA1KSklNUNuJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwJUNFJUJCXy4lMjAocHJpbnQtbGlzdCUyMGJ1enptc2cpJTVDbiUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCVDRSVCQl8uJTIwKHByaW50LWxpc3QlMjAoaXRvYSUyMGkpKSkpJTVDbiUyM
CUyMCUyMCUyMCUyMCUyMChwcmludC1uZXdsaW5lJTIwbmlsKSkpJTVDbiU1Q24lMjMlMjBydW4lMjBmaXp6YnV6eiUyMDIwJTIwdGltZXMlNUNuKGZpenpidXp6JTIwKG51bSUyMDAlMjAyJTIwMCkpJTIyJTdE +[38]: https://jtolds.github.io/sheepda/#JTdCJTIyc3RkbGliJTIyJTNBdHJ1ZSUyQyUyMm91dHB1dCUyMiUzQSUyMm91dHB1dCUyMiUyQyUyMmNvZGUlMjIlM0ElMjJoZWxsby13b3JsZCUyMCUzRCUyMChjb25zJTIwKG51bSUyMDAlMjA3JTIwMiklMjAoY29ucyUyMChudW0lMjAxJTIwMCUyMDEpJTIwKGNvbnMlMjAobnVtJTIwMSUyMDAlMjA4KSUyMChjb25zJTIwKG51bSUyMDElMjAwJTIwOCklMjAoY29ucyUyMChudW0lMjAxJTIwMSUyMDEpJTVDbiUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMChjb25zJTIwKG51bSUyMDAlMjA0JTIwNCklMjAoY29ucyUyMChudW0lMjAwJTIwMyUyMDIpJTIwKGNvbnMlMjAobnVtJTIwMSUyMDElMjA5KSUyMChjb25zJTIwKG51bSUyMDElMjAxJTIwMSklMjAoY29ucyUyMChudW0lMjAxJTIwMSUyMDQpJTVDbiUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMCUyMChjb25zJTIwKG51bSUyMDElMjAwJTIwOCklMjAoY29ucyUyMChudW0lMjAxJTIwMCUyMDApJTIwKGNvbnMlMjAobnVtJTIwMCUyMDMlMjAzKSUyMG5pbCkpKSkpKSkpKSkpKSklNUNuJTVDbnJldmVyc2UtbGlzdCUyMCUzRCUyMCVDRSVCQmxpc3QuKCU1Q24lMjAlMjAoWSUyMCVDRSVCQnJlY3Vyc2UuJUNFJUJCb2xkLiVDRSVCQm5ldy4lNUNuJTIwJTIwJTIwJTIwKGlmJTIwKG5pbCUzRiUyMG9sZCklNUNuJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwJUNFJUJCXy5uZXclNUNuJTIwJTIwJTIwJTIwJTIwJTIwJTIwJTIwJUNFJUJCXy4ocmVjdXJzZSUyMCh0YWlsJTIwb2xkKSUyMChjb25zJTIwKGhlYWQlMjBvbGQpJTIwbmV3KSkpKSU1Q24lMjAlMjBsaXN0JTIwbmlsKSU1Q24lNUNuKGRvNCU1Q24lMjAlMjAocHJpbnQtbGlzdCUyMGhlbGxvLXdvcmxkKSU1Q24lMjAlMjAocHJpbnQtbmV3bGluZSUyMHZvaWQpJTVDbiUyMCUyMChwcmludC1saXN0JTIwKHJldmVyc2UtbGlzdCUyMGhlbGxvLXdvcmxkKSklNUNuJTIwJTIwKHByaW50LW5ld2xpbmUlMjB2b2lkKSklMjIlN0Q= +[39]: https://github.com/gopherjs/gopherjs +[40]: https://jtolds.github.io/sheepda/ +[41]: https://en.wikipedia.org/wiki/Code_golf From f753d6d7c3c9c92b4e900d493ef0930264d89fd2 Mon Sep 17 00:00:00 2001 From: DarkSun Date: Tue, 17 Sep 2019 13:11:11 +0800 Subject: [PATCH 136/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020170115=20Magic?= =?UTF-8?q?=20GOPATH?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/tech/20170115 Magic GOPATH.md --- sources/tech/20170115 Magic GOPATH.md | 119 ++++++++++++++++++++++++++ 1 file changed, 119 insertions(+) create mode 100644 sources/tech/20170115 Magic GOPATH.md diff --git a/sources/tech/20170115 Magic GOPATH.md b/sources/tech/20170115 Magic GOPATH.md new file mode 100644 index 0000000000..1d4cd16e24 --- /dev/null +++ b/sources/tech/20170115 Magic GOPATH.md @@ -0,0 +1,119 @@ +[#]: collector: (lujun9972) +[#]: translator: ( ) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (Magic GOPATH) +[#]: via: (https://www.jtolio.com/2017/01/magic-gopath) +[#]: author: (jtolio.com https://www.jtolio.com/) + +Magic GOPATH +====== + +_**Update:** With the advent of Go 1.11 and [Go modules][1], this whole post is now useless. Unset your GOPATH entirely and switch to Go modules today!_ + +Maybe someday I’ll start writing about things besides Go again. + +Go requires that you set an environment variable for your workspace called your `GOPATH`. The `GOPATH` is one of the most confusing aspects of Go to newcomers and even relatively seasoned developers alike. It’s not immediately clear what would be better, but finding a good `GOPATH` value has implications for your source code repository layout, how many separate projects you have on your computer, how default project installation instructions work (via `go get`), and even how you interoperate with other projects and libraries. 
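+ +For readers who haven’t set one up before, here’s a rough sketch of the conventional single-workspace arrangement this post is reacting to (the paths are illustrative, not a recommendation): + +``` +# one permanent workspace for everything: +export GOPATH="$HOME/go" + +# `go get github.com/the/thing` then checks sources out inside that +# workspace at their full import path: +# $HOME/go/src/github.com/the/thing +# with binaries in $HOME/go/bin and build artifacts in $HOME/go/pkg +```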
+ +It’s taken until Go 1.8 to decide to [set a default][2] and that small change was one of [the most talked about code reviews][3] for the 1.8 release cycle. + +After [writing about GOPATH himself][4], [Dave Cheney][5] [asked me][6] to write a blog post about what I do. + +### My proposal + +I set my `GOPATH` to always be the current working directory, unless a parent directory is clearly the `GOPATH`. + +Here’s the relevant part of my `.bashrc`: + +``` +# bash command to output calculated GOPATH. +calc_gopath() { + local dir="$PWD" + + # we're going to walk up from the current directory to the root + while true; do + + # if there's a '.gopath' file, use its contents as the GOPATH relative to + # the directory containing it. + if [ -f "$dir/.gopath" ]; then + ( cd "$dir"; + # allow us to squash this behavior for cases we want to use vgo + if [ "$(cat .gopath)" != "" ]; then + cd "$(cat .gopath)"; + echo "$PWD"; + fi; ) + return + fi + + # if there's a 'src' directory, the parent of that directory is now the + # GOPATH + if [ -d "$dir/src" ]; then + echo "$dir" + return + fi + + # we can't go further, so bail. we'll make the original PWD the GOPATH. + if [ "$dir" == "/" ]; then + echo "$PWD" + return + fi + + # now we'll consider the parent directory + dir="$(dirname "$dir")" + done +} + +my_prompt_command() { + export GOPATH="$(calc_gopath)" + + # you can have other neat things in here. I also set my PS1 based on git + # state +} + +case "$TERM" in +xterm*|rxvt*) + # Bash provides an environment variable called PROMPT_COMMAND. The contents + # of this variable are executed as a regular Bash command just before Bash + # displays a prompt. Let's only set it if we're in some kind of graphical + # terminal I guess. + PROMPT_COMMAND=my_prompt_command + ;; +*) + ;; +esac +``` + +The benefits are fantastic. If you want to quickly `go get` something and not have it clutter up your workspace, you can do something like: + +``` +cd $(mktemp -d) && go get github.com/the/thing +``` + +On the other hand, if you’re jumping between multiple projects (whether or not they have the full workspace checked in or are just library packages), the `GOPATH` is set accurately. + +More flexibly, if you have a tree where some parent directory is outside of the `GOPATH` but you want to set the `GOPATH` anyways, you can create a `.gopath` file and it will automatically set your `GOPATH` correctly any time your shell is inside that directory. + +The whole thing is super nice. I kinda can’t imagine doing something else anymore. + +### Fin. 
+ +-------------------------------------------------------------------------------- + +via: https://www.jtolio.com/2017/01/magic-gopath + +作者:[jtolio.com][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://www.jtolio.com/ +[b]: https://github.com/lujun9972 +[1]: https://golang.org/cmd/go/#hdr-Modules__module_versions__and_more +[2]: https://rakyll.org/default-gopath/ +[3]: https://go-review.googlesource.com/32019/ +[4]: https://dave.cheney.net/2016/12/20/thinking-about-gopath +[5]: https://dave.cheney.net/ +[6]: https://twitter.com/davecheney/status/811334240247812097 From 8d2d32327e4b5315bcad1a36ce2a382d370859c3 Mon Sep 17 00:00:00 2001 From: DarkSun Date: Tue, 17 Sep 2019 13:14:17 +0800 Subject: [PATCH 137/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020160302=20Go=20c?= =?UTF-8?q?hannels=20are=20bad=20and=20you=20should=20feel=20bad?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/tech/20160302 Go channels are bad and you should feel bad.md --- ...hannels are bad and you should feel bad.md | 443 ++++++++++++++++++ 1 file changed, 443 insertions(+) create mode 100644 sources/tech/20160302 Go channels are bad and you should feel bad.md diff --git a/sources/tech/20160302 Go channels are bad and you should feel bad.md b/sources/tech/20160302 Go channels are bad and you should feel bad.md new file mode 100644 index 0000000000..0ad2a5ed97 --- /dev/null +++ b/sources/tech/20160302 Go channels are bad and you should feel bad.md @@ -0,0 +1,443 @@ +[#]: collector: (lujun9972) +[#]: translator: ( ) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (Go channels are bad and you should feel bad) +[#]: via: (https://www.jtolio.com/2016/03/go-channels-are-bad-and-you-should-feel-bad) +[#]: author: (jtolio.com https://www.jtolio.com/) + +Go channels are bad and you should feel bad +====== + +_Update: If you’re coming to this blog post from a compendium titled “Go is not good,” I want to make it clear that I am ashamed to be on such a list. Go is absolutely the least worst programming language I’ve ever used. At the time I wrote this, I wanted to curb a trend I was seeing, namely, overuse of one of the more warty parts of Go. I still think channels could be much better, but overall, Go is wonderful. It’s like if your favorite toolbox had [this][1] in it; the tool can have uses (even if it could have had more uses), and it can still be your favorite toolbox!_ + +_Update 2: I would be remiss if I didn’t point out this excellent survey of real issues: [Understanding Real-World Concurrency Bugs In Go][2]. A significant finding of this survey is that… Go channels cause lots of bugs._ + +I’ve been using Google’s [Go programming language][3] on and off since mid-to-late 2010, and I’ve had legitimate product code written in Go for [Space Monkey][4] since January 2012 (before Go 1.0!). My initial experience with Go was back when I was researching Hoare’s [Communicating Sequential Processes][5] model of concurrency and the [π-calculus][6] under [Matt Might][7]’s [UCombinator research group][8] as part of my ([now redirected][9]) PhD work to better enable multicore development. Go was announced right then (how serendipitous!) and I immediately started kicking tires. + +It quickly became a core part of Space Monkey development. 
Our production systems at Space Monkey currently account for over 425k lines of pure Go (_not_ counting all of our vendored libraries, which would make it just shy of 1.5 million lines), so not the most Go you’ll ever see, but for the relatively young language we’re heavy users. We’ve [written about our Go usage][10] before. We’ve open-sourced some fairly heavily used libraries; many people seem to be fans of our [OpenSSL bindings][11] (which are faster than [crypto/tls][12], but please keep openssl itself up-to-date!), our [error handling library][13], [logging library][14], and [metric collection library/zipkin client][15]. We use Go, we love Go, we think it’s the least bad programming language for our needs we’ve used so far. + +Although I don’t think I can talk myself out of mentioning my widely avoided [goroutine-local-storage library][16] here either (which even though it’s a hack that you shouldn’t use, it’s a beautiful hack), hopefully my other experience will suffice as valid credentials that I kind of know what I’m talking about before I explain my deliberately inflammatory post title. + +![][17] + +### Wait, what? + +If you ask the proverbial programmer on the street what’s so special about Go, she’ll most likely tell you that Go is most known for channels and goroutines. Go’s theoretical underpinnings are heavily based in Hoare’s CSP model, which is itself incredibly fascinating and interesting and I firmly believe has much more to yield than we’ve appropriated so far. + +CSP (and the π-calculus) both use communication as the core synchronization primitive, so it makes sense Go would have channels. Rob Pike has been fascinated with CSP (with good reason) for a [considerable][18] [while][19] [now][20]. + +But from a pragmatic perspective (which Go prides itself on), Go got channels wrong. Channels as implemented are pretty much a solid anti-pattern in my book at this point. Why? Dear reader, let me count the ways. + +#### You probably won’t end up using just channels. + +Hoare’s Communicating Sequential Processes is a computational model where essentially the only synchronization primitive is sending or receiving on a channel. As soon as you use a mutex, semaphore, or condition variable, bam, you’re no longer in pure CSP land. Go programmers often tout this model and philosophy through the chanting of the [cached thought][21] “[share memory by communicating][22].” + +So let’s try and write a small program using just CSP in Go! Let’s make a high score receiver. All we will do is keep track of the largest high score value we’ve seen. That’s it. + +First, we’ll make a `Game` struct. + +``` +type Game struct { + bestScore int + scores chan int +} +``` + +`bestScore` isn’t going to be protected by a mutex! That’s fine, because we’ll simply have one goroutine manage its state and receive new scores over a channel. + +``` +func (g *Game) run() { + for score := range g.scores { + if g.bestScore < score { + g.bestScore = score + } + } +} +``` + +Okay, now we’ll make a helpful constructor to start a game. + +``` +func NewGame() (g *Game) { + g = &Game{ + bestScore: 0, + scores: make(chan int), + } + go g.run() + return g +} +``` + +Next, let’s assume someone has given us a `Player` that can return scores. It might also return an error, cause hey maybe the incoming TCP stream can die or something, or the player quits.
+ +``` +type Player interface { + NextScore() (score int, err error) +} +``` + +To handle the player, we’ll assume all errors are fatal and pass received scores down the channel. + +``` +func (g *Game) HandlePlayer(p Player) error { + for { + score, err := p.NextScore() + if err != nil { + return err + } + g.scores <- score + } +} +``` + +Yay! Okay, we have a `Game` type that can keep track of the highest score a `Player` receives in a thread-safe way. + +You wrap up your development and you’re on your way to having customers. You make this game server public and you’re incredibly successful! Lots of games are being created with your game server. + +Soon, you discover people sometimes leave your game. Lots of games no longer have any players playing, but nothing stopped the game loop. You are getting overwhelmed by dead `(*Game).run` goroutines. + +**Challenge:** fix the goroutine leak above without mutexes or panics. For real, scroll up to the above code and come up with a plan for fixing this problem using just channels. + +I’ll wait. + +For what it’s worth, it totally can be done with channels only, but observe the simplicity of the following solution which doesn’t even have this problem: + +``` +type Game struct { + mtx sync.Mutex + bestScore int +} + +func NewGame() *Game { + return &Game{} +} + +func (g *Game) HandlePlayer(p Player) error { + for { + score, err := p.NextScore() + if err != nil { + return err + } + g.mtx.Lock() + if g.bestScore < score { + g.bestScore = score + } + g.mtx.Unlock() + } +} +``` + +Which one would you rather work on? Don’t be deceived into thinking that the channel solution somehow makes this more readable and understandable in more complex cases. Teardown is very hard. This sort of teardown is just a piece of cake with a mutex, but the hardest thing to work out with Go-specific channels only. Also, if anyone replies that channels sending channels is easier to reason about here it will cause me an immediate head-to-desk motion. + +Importantly, this particular case might actually be _easily_ solved _with channels_ with some runtime assistance Go doesn’t provide! Unfortunately, as it stands, there are simply a surprising amount of problems that are solved better with traditional synchronization primitives than with Go’s version of CSP. We’ll talk about what Go could have done to make this case easier later. + +**Exercise:** Still skeptical? Try making both solutions above (channel-only vs. mutex-only) stop asking for scores from `Players` once `bestScore` is 100 or greater. Go ahead and open your text editor. This is a small, toy problem. + +The summary here is that you will be using traditional synchronization primitives in addition to channels if you want to do anything real. + +#### Channels are slower than implementing it yourself + +One of the things I assumed about Go being so heavily based in CSP theory is that there should be some pretty killer scheduler optimizations the runtime can make with channels. Perhaps channels aren’t always the most straightforward primitive, but surely they’re efficient and fast, right? + +![][23] + +As [Dustin Hiatt][24] points out on [Tyler Treat’s post about Go][25], + +> Behind the scenes, channels are using locks to serialize access and provide threadsafety. So by using channels to synchronize access to memory, you are, in fact, using locks; locks wrapped in a threadsafe queue. So how do Go’s fancy locks compare to just using mutex’s from their standard library `sync` package? 
Importantly, this particular case might actually be _easily_ solved _with channels_ with some runtime assistance Go doesn’t provide! Unfortunately, as it stands, there are simply a surprising number of problems that are solved better with traditional synchronization primitives than with Go’s version of CSP. We’ll talk about what Go could have done to make this case easier later.

**Exercise:** Still skeptical? Try making both solutions above (channel-only vs. mutex-only) stop asking for scores from `Players` once `bestScore` is 100 or greater. Go ahead and open your text editor. This is a small, toy problem.

The summary here is that you will be using traditional synchronization primitives in addition to channels if you want to do anything real.

#### Channels are slower than implementing it yourself

Given how heavily Go is based on CSP theory, one thing I assumed is that there should be some pretty killer scheduler optimizations the runtime can make with channels. Perhaps channels aren’t always the most straightforward primitive, but surely they’re efficient and fast, right?

![][23]

As [Dustin Hiatt][24] points out on [Tyler Treat’s post about Go][25],

> Behind the scenes, channels are using locks to serialize access and provide threadsafety. So by using channels to synchronize access to memory, you are, in fact, using locks; locks wrapped in a threadsafe queue. So how do Go’s fancy locks compare to just using mutex’s from their standard library `sync` package? The following numbers were obtained by using Go’s built-in benchmarking functionality to serially call Put on a single set of their respective types.

```
> BenchmarkSimpleSet-8 3000000 391 ns/op
> BenchmarkSimpleChannelSet-8 1000000 1699 ns/op
>
```
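For context, Go’s built-in benchmarks are `BenchmarkXxx` functions in a `_test.go` file, run via `go test -bench=.`. A harness of roughly this shape would produce `ns/op` numbers like those above; the two set types here are my hypothetical reconstructions, not Hiatt’s actual code:

```
package set

import (
    "sync"
    "testing"
)

// SimpleSet: a map guarded by a mutex.
type SimpleSet struct {
    mtx sync.Mutex
    m   map[int]struct{}
}

func NewSimpleSet() *SimpleSet {
    return &SimpleSet{m: make(map[int]struct{})}
}

func (s *SimpleSet) Put(v int) {
    s.mtx.Lock()
    s.m[v] = struct{}{}
    s.mtx.Unlock()
}

// SimpleChannelSet: a single goroutine owns the map; Put sends to it.
type SimpleChannelSet struct {
    ch chan int
}

func NewSimpleChannelSet() *SimpleChannelSet {
    c := &SimpleChannelSet{ch: make(chan int, 16)}
    go func() {
        m := make(map[int]struct{})
        for v := range c.ch {
            m[v] = struct{}{}
        }
    }()
    return c
}

func (c *SimpleChannelSet) Put(v int) { c.ch <- v }

func BenchmarkSimpleSet(b *testing.B) {
    s := NewSimpleSet()
    for i := 0; i < b.N; i++ {
        s.Put(i)
    }
}

func BenchmarkSimpleChannelSet(b *testing.B) {
    c := NewSimpleChannelSet()
    for i := 0; i < b.N; i++ {
        c.Put(i)
    }
}
```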
It’s a similar story with unbuffered channels, or even the same test run under contention instead of serially.

Perhaps the Go scheduler will improve, but in the meantime, good old mutexes and condition variables are very good, efficient, and fast. If you want performance, you use the tried and true methods.

#### Channels don’t compose well with other concurrency primitives

Alright, so hopefully I have convinced you that you’ll at least be interacting with primitives besides channels sometimes. The standard library certainly seems to prefer traditional synchronization primitives over channels.

Well, guess what? It’s actually somewhat challenging to use channels alongside mutexes and condition variables correctly!

One of the interesting things about channels that makes a lot of sense coming from CSP is that channel sends are synchronous. A channel send and channel receive are intended to be synchronization barriers, and the send and receive should happen at the same virtual time. That’s wonderful if you’re in well-executed CSP-land.

![][26]

Pragmatically, Go channels also come in a buffered variety. You can allocate a fixed amount of space to account for possible buffering so that sends and receives are disparate events, but the buffer size is capped. Go doesn’t provide a way to have arbitrarily sized buffers: you have to allocate the buffer size in advance. _This is fine_, I’ve seen people argue on the mailing list, _because memory is bounded anyway._

Wat.

This is a bad answer. There are all sorts of reasons to use an arbitrarily buffered channel. If we knew everything up front, why even have `malloc`?

Not having arbitrarily buffered channels means that a naive send on _any_ channel could block at any time. You want to send on a channel and update some other bookkeeping under a mutex? Careful! Your channel send might block!

```
// ...
s.mtx.Lock()
// ...
s.ch <- val // might block!
s.mtx.Unlock()
// ...
```

This is a recipe for dining philosopher dinner fights. If you take a lock, you should quickly update state and release it, and not do anything blocking under the lock if possible.

There is a way to do a non-blocking send on a channel in Go, but it’s not the default behavior. Assume we have a channel `ch := make(chan int)` and we want to send the value `1` on it without blocking. Here is the minimum amount of typing you have to do to send without blocking:

```
select {
case ch <- 1: // it sent
default: // it didn't
}
```

This isn’t what naturally leaps to mind for beginning Go programmers.

The summary is that because many operations on channels block, it takes careful reasoning about philosophers and their dining to successfully use channel operations alongside and under mutex protection, without causing deadlocks.

#### Callbacks are strictly more powerful and don’t require unnecessary goroutines.

![][27]

Whenever an API uses a channel, or whenever I point out that a channel makes something hard, someone invariably points out that I should just spin up a goroutine to read off the channel and make whatever translation or fix I need as it reads off the channel.

Um, no. What if my code is in a hotpath? There are very few instances that require a channel, and if your API could have been designed with mutexes, semaphores, and callbacks and no additional goroutines (because all event edges are triggered by API events), then using a channel forces me to add another stack of memory allocation to my resource usage. Goroutines are much lighter weight than threads, yes, but lighter weight doesn’t mean the lightest weight possible.

As I’ve formerly [argued in the comments on an article about using channels][28] (lol the internet), your API can _always_ be more general, _always_ be more flexible, and use drastically fewer resources if you use callbacks instead of channels. “Always” is a scary word, but I mean it here. There’s proof-level stuff going on.

If someone provides a callback-based API to you and you need a channel, you can provide a callback that sends on a channel with little overhead and full flexibility.

If, on the other hand, someone provides a channel-based API to you and you need a callback, you have to spin up a goroutine to read off the channel, _and_ you have to hope that no one tries to send more on the channel after you’re done reading, or you’ll cause blocked goroutine leaks.

For a super simple real-world example, check out the [context interface][29] (which incidentally is an incredibly useful package and what you should be using instead of [goroutine-local storage][16]):

```
type Context interface {
    ...
    // Done returns a channel that closes when this work unit should be canceled.
    Done() <-chan struct{}

    // Err returns a non-nil error when the Done channel is closed
    Err() error
    ...
}
```

Imagine all you want to do is log the corresponding error when the `Done()` channel fires. What do you have to do? If you don’t have a good place where you’re already selecting on a channel, you have to spin up a goroutine to deal with it:

```
go func() {
    <-ctx.Done()
    logger.Errorf("canceled: %v", ctx.Err())
}()
```

What if `ctx` gets garbage collected without closing the channel `Done()` returned? Whoops! Just leaked a goroutine!

Now imagine we changed `Done`’s signature:

```
// Done calls cb when this work unit should be canceled.
Done(cb func())
```

First off, logging is so easy now. Check it out: `ctx.Done(func() { log.Errorf("canceled: %v", ctx.Err()) })`. But let’s say you really do need some select behavior. You can just call it like this:

```
ch := make(chan struct{})
ctx.Done(func() { close(ch) })
```

Voila! No expressiveness lost by using a callback instead. `ch` works like the channel `Done()` used to return, and in the logging case we didn’t need to spin up a whole new stack. I got to keep my stack traces (if our log package is inclined to use them); I got to avoid another stack allocation and another goroutine to give to the scheduler.

Next time you use a channel, ask yourself if there are goroutines you could eliminate if you used mutexes and condition variables instead. If the answer is yes, your code will be more efficient if you change it. And if you’re trying to use channels just to be able to use the `range` keyword over a collection, I’m going to have to ask you to put your keyboard away, or just go back to writing Python books.

![more like Zooey De-channel, amirite][30]

#### The channel API is inconsistent and just cray-cray

Closing or sending on a closed channel panics! Why?
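A minimal demonstration of my own; both operations blow up at runtime unless you `recover`:

```
package main

import "fmt"

func try(f func()) {
    defer func() { fmt.Println("recovered:", recover()) }()
    f()
}

func main() {
    ch := make(chan int)
    close(ch)
    try(func() { ch <- 1 })   // recovered: send on closed channel
    try(func() { close(ch) }) // recovered: close of closed channel
}
```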
If you want to close a channel, you need to either synchronize its closed state externally (with mutexes and so forth that don’t compose well!) so that other writers don’t write to or close a closed channel, or just charge forward, close or write to closed channels anyway, and expect you’ll have to recover any raised panics.

This is such bizarre behavior. Almost every other operation in Go has a way to avoid a panic (type assertions have the `, ok =` pattern, for example), but with channels you just get to deal with it.

Okay, so when a send will fail, channels panic. I guess that makes some kind of sense. But unlike almost everything else with nil values, sending to a nil channel won’t panic. Instead, it will block forever! That’s pretty counter-intuitive. That might be useful behavior, just like having a can-opener attached to your weed-whacker might be useful (and found in Skymall), but it’s certainly unexpected. Unlike interacting with nil maps (which do implicit pointer dereferences), nil interfaces (implicit pointer dereferences), unchecked type assertions, and all sorts of other things, nil channels exhibit actual channel behavior, as if a brand new channel was just instantiated for this operation.

Receives are slightly nicer. What happens when you receive on a closed channel? Well, that works: you get a zero value. Okay, that makes sense, I guess. Bonus! Receives allow you to do a `, ok =`-style check to see whether the channel was open when you received your value. Thank heavens we get `, ok =` here.

But what happens if you receive from a nil channel? _It also blocks forever!_ Yay! Don’t try to use the fact that your channel is nil to keep track of whether you closed it!
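Here’s a little sketch of mine showing the nil-channel behavior; the two goroutines park silently, and only the commented-out `close` would actually panic:

```
package main

import (
    "fmt"
    "time"
)

func main() {
    var ch chan int // nil: never initialized

    go func() { ch <- 1 }() // blocks forever, no panic
    go func() { <-ch }()    // also blocks forever

    time.Sleep(100 * time.Millisecond)
    fmt.Println("both goroutines are still parked")
    // close(ch) // this one, however, panics: close of nil channel
}
```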
### What are channels good for?

Of course channels are good for some things (they are a generic container, after all), and there are certain things you can only do with them (`select`).

#### They are another special-cased generic datastructure

Go programmers are so used to arguments about generics that I can feel the PTSD coming on just by bringing up the word. I’m not here to talk about it, so wipe the sweat off your brow and let’s keep moving.

Whatever your opinion of generics is, Go’s maps, slices, and channels are data structures that support generic element types, because they’ve been special-cased into the language.

In a language that doesn’t allow you to write your own generic containers, _anything_ that allows you to better manage collections of things is valuable. Here, channels are a thread-safe datastructure that supports arbitrary value types.

So that’s useful! That can save some boilerplate, I suppose.

I’m having trouble counting this as a win for channels.

#### Select

The main thing you can do with channels is the `select` statement. Here you can wait on a fixed number of inputs for events. It’s kind of like epoll, but you have to know upfront how many sockets you’re going to be waiting on.

This is truly a useful language feature. Channels would be a complete wash if not for `select`. But holy smokes, let me tell you about the first time you decide you might need to select on multiple things but you don’t know how many, and you have to use `reflect.Select`.
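For the uninitiated, selecting over a slice of channels whose length you only know at runtime looks something like this (an illustrative sketch of mine):

```
import "reflect"

// receiveAny blocks until one of the channels yields a value.
func receiveAny(chans []chan int) (int, bool) {
    cases := make([]reflect.SelectCase, len(chans))
    for i, ch := range chans {
        cases[i] = reflect.SelectCase{
            Dir:  reflect.SelectRecv,
            Chan: reflect.ValueOf(ch),
        }
    }
    _, v, ok := reflect.Select(cases)
    if !ok {
        return 0, false // the chosen channel was closed
    }
    return int(v.Int()), true
}
```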
### How could channels be better?

It’s really tough to say what the most tactical thing the Go language team could do for Go 2.0 is (the Go 1.0 compatibility guarantee is good but hand-tying), but that won’t stop me from making some suggestions.

#### Select on condition variables!

We could just obviate the need for channels! This is where I propose we get rid of some sacred cows, but let me ask you this: how great would it be if you could select on any custom synchronization primitive? (A: So great.) If we had that, we wouldn’t need channels at all.

#### GC could help us?

In the very first example, we could easily solve the high score server cleanup with channels if we were able to use directionally-typed channel garbage collection to help us clean up.

![][31]

As you know, Go has directionally-typed channels. You can have a channel type that only supports reading (`<-chan`) and a channel type that only supports writing (`chan<-`). Great!

Go also has garbage collection. It’s clear that certain kinds of bookkeeping are just too onerous and we shouldn’t make the programmer deal with them. We clean up unused memory! Garbage collection is useful and neat.

So why not help clean up unused or deadlocked channel reads? Instead of having `make(chan Whatever)` return one bidirectional channel, have it return two single-direction channels (`chanReader, chanWriter := make(chan Type)`).

Let’s reconsider the original example:

```
type Game struct {
    bestScore int
    scores    chan<- int
}

func run(bestScore *int, scores <-chan int) {
    // we don't keep a reference to a *Game directly, because then
    // we'd be holding onto the send side of the channel.
    for score := range scores {
        if *bestScore < score {
            *bestScore = score
        }
    }
}

func NewGame() (g *Game) {
    // this make(chan) return style is a proposal!
    scoreReader, scoreWriter := make(chan int)
    g = &Game{
        bestScore: 0,
        scores:    scoreWriter,
    }
    go run(&g.bestScore, scoreReader)
    return g
}

func (g *Game) HandlePlayer(p Player) error {
    for {
        score, err := p.NextScore()
        if err != nil {
            return err
        }
        g.scores <- score
    }
}
```

If garbage collection closed a channel when we could prove no more values are ever coming down it, this solution would be completely fixed. Yes, yes, the comment in `run` is indicative of the existence of a rather large gun aimed at your foot, but at least the problem is easily solvable now, whereas it really wasn’t before. Furthermore, a smart compiler could probably make appropriate proofs to reduce the damage from said foot-gun.

#### Other smaller issues

  * **Dup channels?** - If we could use an equivalent of the `dup` syscall on channels, then we could also solve the multiple producer problem quite easily. Each producer could close their own `dup`-ed channel without ruining the other producers.
  * **Fix the channel API!** - Close isn’t idempotent? Send on closed channel panics with no way to avoid it? Ugh!
  * **Arbitrarily buffered channels** - If we could make buffered channels with no fixed buffer size limit, then we could make channels that don’t block.

### What do we tell people about Go then?

If you haven’t yet, please go take a look at my current favorite programming post: [What Color is Your Function][32]. Without being about Go specifically, this blog post lays out, much more eloquently than I could, exactly why goroutines are Go’s best feature (and incidentally one of the ways Go is better than Rust for some applications).

If you’re still writing code in a programming language that forces keywords like `yield` on you to get high performance, concurrency, or an event-driven model, you are living in the past, whether or not you or anyone else knows it. Go is so far one of the best entrants I’ve seen among languages that implement an M:N threading model that isn’t 1:1, and dang, that’s powerful.

So, tell folks about goroutines.

If I had to pick one other leading feature of Go, it’s interfaces. Statically-typed [duck typing][33] makes extending and working with your own or someone else’s project so fun and amazing that it’s probably worth an entirely different set of words some other time.

### So…

I keep seeing people charge into Go, eager to use channels to their full potential. Here’s my advice to you.

**JUST STAHP IT**

When you’re writing APIs and interfaces, as bad as the advice “never” can be, I’m pretty sure there’s never a time when channels are better, and every Go API I’ve used that relies on channels is one I’ve ended up having to fight. I’ve never thought “oh good, there’s a channel here;” it’s always instead been some variant of _**WHAT FRESH HELL IS THIS?**_

So, _please, please use channels where appropriate and only where appropriate._

In all of the Go code I work with, I can count on one hand the number of times channels were really the best choice. Sometimes they are. That’s great! Use them then. But otherwise just stop.

![][34]

_Special thanks for the valuable feedback provided by my proofreaders Jeff Wendling, [Andrew Harding][35], [George Shank][36], and [Tyler Treat][37]._

If you want to work on Go with us at Space Monkey, please [hit me up][38]!

--------------------------------------------------------------------------------

via: https://www.jtolio.com/2016/03/go-channels-are-bad-and-you-should-feel-bad

作者:[jtolio.com][a]
选题:[lujun9972][b]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)

本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出

[a]: https://www.jtolio.com/
[b]: https://github.com/lujun9972
[1]: https://blog.codinghorror.com/content/images/uploads/2012/06/6a0120a85dcdae970b017742d249d5970d-800wi.jpg
[2]: https://songlh.github.io/paper/go-study.pdf
[3]: https://golang.org/
[4]: http://www.spacemonkey.com/
[5]: https://en.wikipedia.org/wiki/Communicating_sequential_processes
[6]: https://en.wikipedia.org/wiki/%CE%A0-calculus
[7]: http://matt.might.net
[8]: http://www.ucombinator.org/
[9]: https://www.jtolio.com/writing/2015/11/research-log-cell-states-and-microarrays/
[10]: https://www.jtolio.com/writing/2014/04/go-space-monkey/
[11]: https://godoc.org/github.com/spacemonkeygo/openssl
[12]: https://golang.org/pkg/crypto/tls/
[13]: https://godoc.org/github.com/spacemonkeygo/errors
[14]: https://godoc.org/github.com/spacemonkeygo/spacelog
[15]: https://godoc.org/gopkg.in/spacemonkeygo/monitor.v1
[16]: https://github.com/jtolds/gls
[17]: https://www.jtolio.com/images/wat/darth-helmet.jpg
[18]: https://en.wikipedia.org/wiki/Newsqueak
[19]: https://en.wikipedia.org/wiki/Alef_%28programming_language%29
[20]: https://en.wikipedia.org/wiki/Limbo_%28programming_language%29
[21]: https://lesswrong.com/lw/k5/cached_thoughts/
[22]: https://blog.golang.org/share-memory-by-communicating
[23]: https://www.jtolio.com/images/wat/jon-stewart.jpg
[24]: https://twitter.com/HiattDustin
[25]: http://bravenewgeek.com/go-is-unapologetically-flawed-heres-why-we-use-it/
[26]: https://www.jtolio.com/images/wat/obama.jpg
[27]: https://www.jtolio.com/images/wat/yael-grobglas.jpg
[28]: http://www.informit.com/articles/article.aspx?p=2359758#comment-2061767464
[29]: https://godoc.org/golang.org/x/net/context
[30]: https://www.jtolio.com/images/wat/zooey-deschanel.jpg
[31]: https://www.jtolio.com/images/wat/joel-mchale.jpg
[32]: http://journal.stuffwithstuff.com/2015/02/01/what-color-is-your-function/
[33]: https://en.wikipedia.org/wiki/Duck_typing
[34]: https://www.jtolio.com/images/wat/michael-cera.jpg
[35]: https://github.com/azdagron
[36]: https://twitter.com/taterbase
[37]: http://bravenewgeek.com
[38]: https://www.jtolio.com/contact/

From 0dcefcd383869005cff4b9261860cccff45c1eb6 Mon Sep 17 00:00:00 2001
From: DarkSun
Date: Tue, 17 Sep 2019 13:14:31 +0800
Subject: [PATCH 138/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020151127=20Resear?=
 =?UTF-8?q?ch=20log:=20gene=20signatures=20and=20connectivity=20map?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

sources/tech/20151127 Research log- gene signatures and connectivity map.md
---
 ...g- gene signatures and connectivity map.md | 133 ++++++++++++++++++
 1 file changed, 133 insertions(+)
 create mode 100644 sources/tech/20151127 Research log- gene signatures and connectivity map.md

diff --git a/sources/tech/20151127 Research log- gene signatures and connectivity map.md b/sources/tech/20151127 Research log- gene signatures and connectivity map.md
new file mode 100644
index 0000000000..f4e7faa4bc
--- /dev/null
+++ b/sources/tech/20151127 Research log- gene signatures and connectivity map.md
@@ -0,0 +1,133 @@
[#]: collector: (lujun9972)
[#]: translator: ( )
[#]: reviewer: ( )
[#]: publisher: ( )
[#]: url: ( )
[#]: subject: (Research log: gene signatures and connectivity map)
[#]: via: (https://www.jtolio.com/2015/11/research-log-gene-signatures-and-connectivity-map)
[#]: author: (jtolio.com https://www.jtolio.com/)

Research log: gene signatures and connectivity map
======

Happy Thanksgiving everyone!

### Context

This is the third post in my continuing series on my attempts at research. Previously we talked about:

  * [what I’m doing, cell states, and microarrays][1]
  * and then [more about microarrays and R][2].

By the end of last week we had discussed how to get a table of normalized gene expression intensities that looks like this:

```
ENSG00000280099_at 0.15484421
ENSG00000280109_at 0.16881395
ENSG00000280178_at -0.19621641
ENSG00000280316_at 0.08622216
ENSG00000280401_at 0.15966256
ENSG00000281205_at -0.02085352
...
```

The reason for doing this is to figure out which genes are related, and perhaps more importantly, what a cell is even doing.

_Summary:_ new post; also, I’m bringing back the short section summaries.

### Cell lines

The first thing to do when trying to figure out what cells are doing is to choose a cell. There are all sorts of cells: healthy brain cells, cancerous blood cells, bruised skin cells, etc.

For any experiment, you’ll need a control to eliminate noise and apply statistical tests for validity. If you don’t use a control, the effect you’re seeing may not even exist, and so for any experiment with cells, you will need a control cell.

Cells often divide, which means that a cell, once chosen, will duplicate itself for you in the presence of the appropriate resources. Not all cells divide ad nauseam, which provides some challenges, but many cells under study luckily do.

So, a _cell line_ is simply a set of cells that have all replicated from a specific chosen initial cell. Any set of cells from a cell line will be as identical as possible (unless you screwed up! geez).
They will be the same type of cell with the same traits and behaviors, at least as much as possible.

_Summary:_ a cell line is a large number of cells that are as close to being the same as possible.

### Perturbagens

There are many things that might affect what a cell is doing. Drugs, agitation, temperature, disease, cancer, gene splicing, small molecules (maybe you give a cell more iron or calcium or something), hormones, light, Jello, ennui, etc. Given any particular cell line, applying one of these _perturbagens_ to a cell from that line (that is, perturbing the cell in a specific way) and comparing it to a control will show what that cell does differently in the face of that perturbagen.

If you’d like to find out what exactly a certain type of cell does when you give it lemon-lime soda, then you choose the right cell line, leave out some control cells, and give the rest of the cells soda.

Then, you measure gene expression intensities for both the control cells and the perturbed cells. The _differential expression_ of genes between the perturbed cells and the control cells is likely due to the introduction of the lemon-lime soda.

Genes that end up getting expressed _more_ in the presence of the soda are considered _up-regulated_, whereas genes that end up getting expressed _less_ are considered _down-regulated_. The degree to which a gene is up- or down-regulated constitutes how much of an effect the soda may have had on that gene.

Of course, all of this has such a significant amount of experimental noise that you could find pretty much anything. You’ll need to replicate your experiment independently a few times before you publish that lemon-lime soda causes increased expression in the [Sonic hedgehog gene][3].

_Summary:_ a perturbagen is something you introduce/do to a cell to change its behavior, such as drugs or throwing it at a wall or something. The wall perturbagen.

### Gene signature

For a given change or perturbagen to a cell, we now have enough to compute lists of up-regulated and down-regulated genes and the magnitude change in expression for each gene.

This gene expression pattern for some subset of important genes (perhaps the most changed in expression) is called a _gene signature_, and gene signatures are very useful. By comparing signatures, you can:

  * identify or compare cell states
  * find sets of positively or negatively correlated genes
  * find similar disease signatures
  * find similar drug signatures
  * find drug signatures that might counteract opposite disease signatures.

(That last bullet point is essentially where I’m headed with my research.)

_Summary:_ a gene signature is a short summary of the most important gene expression differences a perturbagen causes in a cell.

### Drugs!

The pharmaceutical industry is constantly on the lookout for new breakthrough drugs that might represent huge windfalls in cash, and drugs don’t always work as planned. Many drugs spend years in research and development, only to ultimately find poor efficacy or adoption. Sometimes drugs even become known [much more for their side-effects than their originally intended therapy][4].

The practical upshot is that there are countless FDA-approved drugs, representing decades of work, that are simply underused or even unused entirely. These drugs have already cleared many challenging regulatory hurdles, but are quite literally cures looking for a disease.
If even just one of these drugs can be given a new lease on life for some yet-to-be-cured disease, then perhaps we can give some people new leases on life!

_Summary:_ instead of developing new drugs, there are already lots of drugs that aren’t being used. Maybe we can find matching diseases!

### The Connectivity Map project

The [Broad Institute’s Connectivity Map project][5] isn’t particularly new anymore, but it represents a groundbreaking and promising idea: we can dump a bunch of signatures into a database and construct all sorts of new hypotheses we might not even have thought to check before.

To prove out the usefulness of this idea, the Connectivity Map (or cmap) project chose 5 different cell lines (all cancer cells, which are easy to get to replicate!) and a library of FDA-approved drugs, and then gave some cells these drugs.

They then constructed a database of all of the signatures they computed for each possible perturbagen they measured. Finally, they constructed a web interface where a user can upload a gene signature and get back a list of all of the signatures they collected, ordered from most to least similar. You can totally go sign up and [try it out][5].

This simple tool is surprisingly powerful. It allows you to find drugs similar to a drug you know, but it also allows you to find drugs that might counteract a disease you’ve created a signature for.

Ultimately, the project led to [a number of successful applications][6]. So useful was it that the Broad Institute has doubled down and created the much larger and more comprehensive [LINCS Project][7], which targets an order of magnitude more cell lines (77) and more perturbagens (42,532, compared to cmap’s 6100). You can sign up and use that one too!

_Summary:_ building a system that supports querying signature connections has already proved to be super useful.

### Whew

Alright, I wrote most of this on a plane yesterday, but since I should now be spending time with family, I’m going to cut it short here.

Stay tuned for next week!
+ +-------------------------------------------------------------------------------- + +via: https://www.jtolio.com/2015/11/research-log-gene-signatures-and-connectivity-map + +作者:[jtolio.com][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://www.jtolio.com/ +[b]: https://github.com/lujun9972 +[1]: https://www.jtolio.com/writing/2015/11/research-log-cell-states-and-microarrays/ +[2]: https://www.jtolio.com/writing/2015/11/research-log-r-and-more-microarrays/ +[3]: https://en.wikipedia.org/wiki/Sonic_hedgehog +[4]: https://en.wikipedia.org/wiki/Sildenafil#History +[5]: https://www.broadinstitute.org/cmap/ +[6]: https://www.broadinstitute.org/cmap/publications.jsp +[7]: http://www.lincscloud.org/ From 5ac28fdda6741baf1c1466c1009b44fb76475156 Mon Sep 17 00:00:00 2001 From: geekpi Date: Wed, 18 Sep 2019 09:00:38 +0800 Subject: [PATCH 139/202] translated --- ...20190909 Firefox 69 available in Fedora.md | 63 ------------------- ...20190909 Firefox 69 available in Fedora.md | 63 +++++++++++++++++++ 2 files changed, 63 insertions(+), 63 deletions(-) delete mode 100644 sources/news/20190909 Firefox 69 available in Fedora.md create mode 100644 translated/news/20190909 Firefox 69 available in Fedora.md diff --git a/sources/news/20190909 Firefox 69 available in Fedora.md b/sources/news/20190909 Firefox 69 available in Fedora.md deleted file mode 100644 index 256c9c9f5e..0000000000 --- a/sources/news/20190909 Firefox 69 available in Fedora.md +++ /dev/null @@ -1,63 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: (geekpi) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Firefox 69 available in Fedora) -[#]: via: (https://fedoramagazine.org/firefox-69-available-in-fedora/) -[#]: author: (Paul W. Frields https://fedoramagazine.org/author/pfrields/) - -Firefox 69 available in Fedora -====== - -![][1] - -When you install the Fedora Workstation, you’ll find the world-renowned Firefox browser included. The Mozilla Foundation underwrites work on Firefox, as well as other projects that promote an open, safe, and privacy respecting Internet. Firefox already features a fast browsing engine and numerous privacy features. - -A community of developers continues to improve and enhance Firefox. The latest version, Firefox 69, was released recently and you can get it for your stable Fedora system (30 and later). Read on for more details. - -### New features in Firefox 69 - -The newest version of Firefox includes [Enhanced Tracking Protection][2] (or ETP). When you use Firefox 69 with a new (or reset) settings profile, the browser makes it harder for sites to track your information or misuse your computer resources. - -For instance, less scrupulous websites use scripts that cause your system to do lots of intense calculations to produce cryptocurrency results, called _[cryptomining][3]_. Cryptomining happens without your knowledge or permission and is therefore a misuse of your system. The new standard setting in Firefox 69 prevents sites from this kind of abuse. - -Firefox 69 has additional settings to prevent sites from identifying or fingerprinting your browser for later use. These improvements give you additional protection from having your activities tracked online. - -Another common annoyance is videos that start in your browser without warning. 
Video playback also uses extra CPU power and you may not want this happening on your laptop without permission. Firefox already stops this from happening using the [Block Autoplay][4] feature. But Firefox 69 also lets you stop videos from playing even if they start without sound. This feature prevents unwanted sudden noise. It also solves more of the real problem — having your computer’s power used without permission. - -There are numerous other new features in the new release. Read more about them in the [Firefox release notes][5]. - -### How to get the update - -Firefox 69 is available in the stable Fedora 30 and pre-release Fedora 31 repositories, as well as Rawhide. The update is provided by Fedora’s maintainers of the Firefox package. The maintainers also ensured an update to Mozilla’s Network Security Services (the nss package). We appreciate the hard work of the Mozilla project and Firefox community in providing this new release. - -If you’re using Fedora 30 or later, use the _Software_ tool on Fedora Workstation, or run the following command on any Fedora system: - -``` -$ sudo dnf --refresh upgrade firefox -``` - -If you’re on Fedora 29, [help test the update][6] for that release so it can become stable and easily available for all users. - -Firefox may prompt you to upgrade your profile to use the new settings. To take advantage of new features, you should do this. - --------------------------------------------------------------------------------- - -via: https://fedoramagazine.org/firefox-69-available-in-fedora/ - -作者:[Paul W. Frields][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://fedoramagazine.org/author/pfrields/ -[b]: https://github.com/lujun9972 -[1]: https://fedoramagazine.org/wp-content/uploads/2019/09/firefox-v69-816x345.jpg -[2]: https://blog.mozilla.org/blog/2019/09/03/todays-firefox-blocks-third-party-tracking-cookies-and-cryptomining-by-default/ -[3]: https://www.webopedia.com/TERM/C/cryptocurrency-mining.html -[4]: https://support.mozilla.org/kb/block-autoplay -[5]: https://www.mozilla.org/en-US/firefox/69.0/releasenotes/ -[6]: https://bodhi.fedoraproject.org/updates/FEDORA-2019-89ae5bb576 diff --git a/translated/news/20190909 Firefox 69 available in Fedora.md b/translated/news/20190909 Firefox 69 available in Fedora.md new file mode 100644 index 0000000000..4507fe10d5 --- /dev/null +++ b/translated/news/20190909 Firefox 69 available in Fedora.md @@ -0,0 +1,63 @@ +[#]: collector: (lujun9972) +[#]: translator: (geekpi) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (Firefox 69 available in Fedora) +[#]: via: (https://fedoramagazine.org/firefox-69-available-in-fedora/) +[#]: author: (Paul W. 
Frields https://fedoramagazine.org/author/pfrields/) + +Firefox 69 已可在 Fedora 中获取 +====== + +![][1] + +当你安装 Fedora Workstation 时,你会发现它包括了世界知名的 Firefox 浏览器。 Mozilla 基金会负责开发 Firefox 以及其他促进开放、安全和隐私的互联网项目。Firefox 有快速的浏览引擎和大量的隐私功能。 + +开发者社区不断改进和增强 Firefox。最新版本 Firefox 69 最近发布,你可在稳定版 Fedora 系统(30 及更高版本)中获取它。继续阅读以获得更多详情。 + +### Firefox 69 中的新功能 + +最新版本的 Firefox 包括[增强跟踪保护][2](或称 ETP)。当你使用带有新(或重置)配置文件的 Firefox 69 时,浏览器会使网站更难以跟踪你的信息或滥用你的计算机资源。 + +例如,不太谨慎的网站使用脚本让你的系统进行大量计算来产生加密货币结果,称为 _[cryptomining][3]_。Cryptomining 在你不知情或未经许可的情况下发生,因此是对你的系统的滥用。Firefox 69 中的新标准设置可防止网站遭受此类滥用。 + +Firefox 69 还有其他设置,可防止识别或记录你的浏览器指纹,以供日后使用。这些改进为你提供了额外的保护,免于你的活动被在线追踪。 + +另一个常见的烦恼是在没有提示的情况下播放视频。视频播放也会使用额外的 CPU,你可能不希望未经许可在你的笔记本上发生这种情况。Firefox 使用 [Block Autoplay][4] 这个功能阻止了这种情况的发生。但是 Firefox 69 还允许你停止播放视频,即使它们没有声音也开始播放。此功能可防止不必要的突然的噪音。它还解决了更多真正的问题 - 未经许可使用计算机资源。 + +新版本中还有许多其他新功能。在 [Firefox 发行说明][5]中阅读有关它们的更多信息。 + +### 如何获得更新 + +Firefox 69 存在于稳定版 Fedora 30、预发布版 Fedora 31 和 Rawhide 仓库中。该更新由 Fedora 的 Firefox 包维护者提供。维护人员还确保更新了 Mozilla 的网络安全服务(nss 包)。我们感谢 Mozilla 项目和 Firefox 社区在提供此新版本方面的辛勤工作。 + +如果你使用的是 Fedora 30 或更高版本,请在 Fedora Workstation 上使用_软件中心_,或在任何 Fedora 系统上运行以下命令: + +``` +$ sudo dnf --refresh upgrade firefox +``` + +如果你使用的是 Fedora 29,请[帮助测试更新] [6],这样它可以变得稳定,让所有用户可以轻松使用。 + +Firefox 可能会提示你升级个人设置以使用新设置。要使用新功能,你应该这样做。 + +-------------------------------------------------------------------------------- + +via: https://fedoramagazine.org/firefox-69-available-in-fedora/ + +作者:[Paul W. Frields][a] +选题:[lujun9972][b] +译者:[geekpi](https://github.com/geekpi) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://fedoramagazine.org/author/pfrields/ +[b]: https://github.com/lujun9972 +[1]: https://fedoramagazine.org/wp-content/uploads/2019/09/firefox-v69-816x345.jpg +[2]: https://blog.mozilla.org/blog/2019/09/03/todays-firefox-blocks-third-party-tracking-cookies-and-cryptomining-by-default/ +[3]: https://www.webopedia.com/TERM/C/cryptocurrency-mining.html +[4]: https://support.mozilla.org/kb/block-autoplay +[5]: https://www.mozilla.org/en-US/firefox/69.0/releasenotes/ +[6]: https://bodhi.fedoraproject.org/updates/FEDORA-2019-89ae5bb576 From 5a565294ca503f75131b6c9c2065a54deeeab601 Mon Sep 17 00:00:00 2001 From: geekpi Date: Wed, 18 Sep 2019 09:06:50 +0800 Subject: [PATCH 140/202] translating --- .../tech/20190906 How to put an HTML page on the internet.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sources/tech/20190906 How to put an HTML page on the internet.md b/sources/tech/20190906 How to put an HTML page on the internet.md index 4524d1c896..55c63aa2d4 100644 --- a/sources/tech/20190906 How to put an HTML page on the internet.md +++ b/sources/tech/20190906 How to put an HTML page on the internet.md @@ -1,5 +1,5 @@ [#]: collector: (lujun9972) -[#]: translator: ( ) +[#]: translator: (geekpi) [#]: reviewer: ( ) [#]: publisher: ( ) [#]: url: ( ) From 4d5071a3ea96104910dafb7fa823501c0d9a7a8e Mon Sep 17 00:00:00 2001 From: Xingyu Wang Date: Wed, 18 Sep 2019 09:54:22 +0800 Subject: [PATCH 141/202] PRF @geekpi --- .../20190909 Firefox 69 available in Fedora.md | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/translated/news/20190909 Firefox 69 available in Fedora.md b/translated/news/20190909 Firefox 69 available in Fedora.md index 4507fe10d5..fa580e6697 100644 --- a/translated/news/20190909 Firefox 69 available in Fedora.md +++ b/translated/news/20190909 Firefox 69 available in Fedora.md 
@@ -1,6 +1,6 @@ [#]: collector: (lujun9972) [#]: translator: (geekpi) -[#]: reviewer: ( ) +[#]: reviewer: (wxy) [#]: publisher: ( ) [#]: url: ( ) [#]: subject: (Firefox 69 available in Fedora) @@ -12,19 +12,19 @@ Firefox 69 已可在 Fedora 中获取 ![][1] -当你安装 Fedora Workstation 时,你会发现它包括了世界知名的 Firefox 浏览器。 Mozilla 基金会负责开发 Firefox 以及其他促进开放、安全和隐私的互联网项目。Firefox 有快速的浏览引擎和大量的隐私功能。 +当你安装 Fedora Workstation 时,你会发现它包括了世界知名的 Firefox 浏览器。 Mozilla 基金会以开发 Firefox 以及其他促进开放、安全和隐私的互联网项目为己任。Firefox 有快速的浏览引擎和大量的隐私功能。 -开发者社区不断改进和增强 Firefox。最新版本 Firefox 69 最近发布,你可在稳定版 Fedora 系统(30 及更高版本)中获取它。继续阅读以获得更多详情。 +开发者社区不断改进和增强 Firefox。最新版本 Firefox 69 于最近发布,你可在稳定版 Fedora 系统(30 及更高版本)中获取它。继续阅读以获得更多详情。 ### Firefox 69 中的新功能 -最新版本的 Firefox 包括[增强跟踪保护][2](或称 ETP)。当你使用带有新(或重置)配置文件的 Firefox 69 时,浏览器会使网站更难以跟踪你的信息或滥用你的计算机资源。 +最新版本的 Firefox 包括[增强跟踪保护][2]Enhanced Tracking Protection(ETP)。当你使用带有新(或重置)配置文件的 Firefox 69 时,浏览器会使网站更难以跟踪你的信息或滥用你的计算机资源。 -例如,不太谨慎的网站使用脚本让你的系统进行大量计算来产生加密货币结果,称为 _[cryptomining][3]_。Cryptomining 在你不知情或未经许可的情况下发生,因此是对你的系统的滥用。Firefox 69 中的新标准设置可防止网站遭受此类滥用。 +例如,不太正直的网站使用脚本让你的系统进行大量计算来产生加密货币,这称为[加密挖矿][3]cryptomining。加密挖矿在你不知情或未经许可的情况下发生,因此是对你的系统的滥用。Firefox 69 中的新标准设置可防止网站遭受此类滥用。 Firefox 69 还有其他设置,可防止识别或记录你的浏览器指纹,以供日后使用。这些改进为你提供了额外的保护,免于你的活动被在线追踪。 -另一个常见的烦恼是在没有提示的情况下播放视频。视频播放也会使用额外的 CPU,你可能不希望未经许可在你的笔记本上发生这种情况。Firefox 使用 [Block Autoplay][4] 这个功能阻止了这种情况的发生。但是 Firefox 69 还允许你停止播放视频,即使它们没有声音也开始播放。此功能可防止不必要的突然的噪音。它还解决了更多真正的问题 - 未经许可使用计算机资源。 +另一个常见的烦恼是在没有提示的情况下播放视频。视频播放也会占用更多的 CPU,你可能不希望未经许可就在你的笔记本上发生这种情况。Firefox 使用[阻止自动播放][4]Block Autoplay这个功能阻止了这种情况的发生。而 Firefox 69 还允许你停止静默开始播放的视频。此功能可防止不必要的突然的噪音。它还解决了更多真正的问题 —— 未经许可使用计算机资源。 新版本中还有许多其他新功能。在 [Firefox 发行说明][5]中阅读有关它们的更多信息。 @@ -32,13 +32,13 @@ Firefox 69 还有其他设置,可防止识别或记录你的浏览器指纹, Firefox 69 存在于稳定版 Fedora 30、预发布版 Fedora 31 和 Rawhide 仓库中。该更新由 Fedora 的 Firefox 包维护者提供。维护人员还确保更新了 Mozilla 的网络安全服务(nss 包)。我们感谢 Mozilla 项目和 Firefox 社区在提供此新版本方面的辛勤工作。 -如果你使用的是 Fedora 30 或更高版本,请在 Fedora Workstation 上使用_软件中心_,或在任何 Fedora 系统上运行以下命令: +如果你使用的是 Fedora 30 或更高版本,请在 Fedora Workstation 上使用*软件中心*,或在任何 Fedora 系统上运行以下命令: ``` $ sudo dnf --refresh upgrade firefox ``` -如果你使用的是 Fedora 29,请[帮助测试更新] [6],这样它可以变得稳定,让所有用户可以轻松使用。 +如果你使用的是 Fedora 29,请[帮助测试更新][6],这样它可以变得稳定,让所有用户可以轻松使用。 Firefox 可能会提示你升级个人设置以使用新设置。要使用新功能,你应该这样做。 @@ -49,7 +49,7 @@ via: https://fedoramagazine.org/firefox-69-available-in-fedora/ 作者:[Paul W. 
Frields][a] 选题:[lujun9972][b] 译者:[geekpi](https://github.com/geekpi) -校对:[校对者ID](https://github.com/校对者ID) +校对:[wxy](https://github.com/wxy) 本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 From 1ad011663d134d646af108338ed90366bc54ad74 Mon Sep 17 00:00:00 2001 From: Xingyu Wang Date: Wed, 18 Sep 2019 09:54:53 +0800 Subject: [PATCH 142/202] PUB @geekpi https://linux.cn/article-11354-1.html --- .../201807}/20190909 Firefox 69 available in Fedora.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) rename {translated/news => published/201807}/20190909 Firefox 69 available in Fedora.md (98%) diff --git a/translated/news/20190909 Firefox 69 available in Fedora.md b/published/201807/20190909 Firefox 69 available in Fedora.md similarity index 98% rename from translated/news/20190909 Firefox 69 available in Fedora.md rename to published/201807/20190909 Firefox 69 available in Fedora.md index fa580e6697..79249a373f 100644 --- a/translated/news/20190909 Firefox 69 available in Fedora.md +++ b/published/201807/20190909 Firefox 69 available in Fedora.md @@ -1,8 +1,8 @@ [#]: collector: (lujun9972) [#]: translator: (geekpi) [#]: reviewer: (wxy) -[#]: publisher: ( ) -[#]: url: ( ) +[#]: publisher: (wxy) +[#]: url: (https://linux.cn/article-11354-1.html) [#]: subject: (Firefox 69 available in Fedora) [#]: via: (https://fedoramagazine.org/firefox-69-available-in-fedora/) [#]: author: (Paul W. Frields https://fedoramagazine.org/author/pfrields/) From bc8c1ba53db112f23671417d73f447f12550679a Mon Sep 17 00:00:00 2001 From: Xingyu Wang Date: Wed, 18 Sep 2019 10:13:19 +0800 Subject: [PATCH 143/202] APL --- ...190916 Linux Plumbers, Appwrite, and more industry trends.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sources/news/20190916 Linux Plumbers, Appwrite, and more industry trends.md b/sources/news/20190916 Linux Plumbers, Appwrite, and more industry trends.md index d3f1fd3087..60e673a007 100644 --- a/sources/news/20190916 Linux Plumbers, Appwrite, and more industry trends.md +++ b/sources/news/20190916 Linux Plumbers, Appwrite, and more industry trends.md @@ -1,5 +1,5 @@ [#]: collector: (lujun9972) -[#]: translator: ( ) +[#]: translator: (wxy) [#]: reviewer: ( ) [#]: publisher: ( ) [#]: url: ( ) From 57f6ce6351495ef77394e7c88f2ccdbe4f075694 Mon Sep 17 00:00:00 2001 From: qfzy1233 Date: Wed, 18 Sep 2019 10:21:17 +0800 Subject: [PATCH 144/202] =?UTF-8?q?=E6=8F=90=E4=BA=A4=E8=AF=91=E6=96=87?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../tech/20190905 How to Change Themes in Linux Mint.md | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename {sources => translated}/tech/20190905 How to Change Themes in Linux Mint.md (100%) diff --git a/sources/tech/20190905 How to Change Themes in Linux Mint.md b/translated/tech/20190905 How to Change Themes in Linux Mint.md similarity index 100% rename from sources/tech/20190905 How to Change Themes in Linux Mint.md rename to translated/tech/20190905 How to Change Themes in Linux Mint.md From e1597c471b997c6c42d830ec6ba38c93d593e709 Mon Sep 17 00:00:00 2001 From: Xingyu Wang Date: Wed, 18 Sep 2019 11:34:46 +0800 Subject: [PATCH 145/202] TSL&PRF --- ...ers, Appwrite, and more industry trends.md | 79 ---------------- ...ers, Appwrite, and more industry trends.md | 92 +++++++++++++++++++ 2 files changed, 92 insertions(+), 79 deletions(-) delete mode 100644 sources/news/20190916 Linux Plumbers, Appwrite, and more industry trends.md create mode 100644 
translated/news/20190916 Linux Plumbers, Appwrite, and more industry trends.md diff --git a/sources/news/20190916 Linux Plumbers, Appwrite, and more industry trends.md b/sources/news/20190916 Linux Plumbers, Appwrite, and more industry trends.md deleted file mode 100644 index 60e673a007..0000000000 --- a/sources/news/20190916 Linux Plumbers, Appwrite, and more industry trends.md +++ /dev/null @@ -1,79 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: (wxy) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Linux Plumbers, Appwrite, and more industry trends) -[#]: via: (https://opensource.com/article/19/9/conferences-industry-trends) -[#]: author: (Tim Hildred https://opensource.com/users/thildred) - -Linux Plumbers, Appwrite, and more industry trends -====== -A weekly look at open source community and industry trends. -![Person standing in front of a giant computer screen with numbers, data][1] - -As part of my role as a senior product marketing manager at an enterprise software company with an open source development model, I publish a regular update about open source community, market, and industry trends for product marketers, managers, and other influencers. Here are five of my and their favorite articles from that update. - -## [Working on Linux's nuts and bolts at Linux Plumbers][2] - -> The Kernel Maintainers Summit, Linux creator Linus Torvalds told me, is an invitation-only gathering of the top Linux kernel developers. But, while you might think it's about planning on the Linux kernel's future, that's not the case. "The maintainer summit is really different because it doesn't even talk about technical issues." Instead, "It's all about the process of creating and maintaining the Linux kernel." - -**The impact**: This is like the technical version of the Bilderberg meeting: you can have your flashy buzzword conferences, but we'll be over here making the real decisions. Or so I imagine. Probably less private jets involved though. - -## [Microsoft hosts first Windows Subsystem for Linux conference][3] - -> Hayden Barnes, founder of [Whitewater Foundry][4], a startup focusing on [Windows Subsystem for Linux (WSL)][5] [announced WSLconf 1][6], the first community conference for WSL. This event will be held on March 10-11, 2020 at Building 20 on the Microsoft HQ campus in Redmond, WA. The conference is still coming together. But we already know it will have presentations and workshops from [Pengwin, Whitewater's Linux for Windows,][7] Microsoft WSL, and [Canonical][8]'s [Ubuntu][9] on WSL developers. - -**The impact**: Microsoft is nurturing the seeds of community growing up around its increasing adoption of and contribution to open source software. It's enough to bring a tear to my eye. - -## [Introducing Appwrite: An open source backend server for mobile and web developers][10] - -> [Appwrite][11] is a new [open source][12], end to end backend server for frontend and mobile developers that allows you to build apps a lot faster. [Appwrite][13] goal is to abstract and simplify common development tasks behind REST APIs and tools, to help developers build advanced apps way faster. -> -> In this post I will shortly cover some of the main [Appwrite][14] services and explain about their main features and how they are designed to help you build your next project way faster than you would when writing all your backend APIs from scratch. - -**The impact**: Software development is getting more and more accessible as more open source middleware gets easier to use. 
Appwrite claims to reduce the time and cost of development by 70%. Imagine what that would mean to a small mobile development agency or citizen developer. I'm curious about how they'll monetize this. - -## ['More than just IT': Open source technologist says collaborative culture is key to government transformation][15] - -> AGL (agile government leadership) is providing a valuable support network for people who are helping government work better for the public. The organization is focused on things that I am very passionate about — DevOps, digital transformation, open source, and similar topics that are top-of-mind for many government IT leaders. AGL provides me with a community to learn about what the best and brightest are doing today, and share those learnings with my peers throughout the industry. - -**The impact**: It is easy to be cynical about the government no matter your political persuasion. I found it refreshing to have a reminder that the government is comprised of real people who are mostly doing their best to apply relevant technology to the public good. Especially when that technology is open source! - -## [How Bloomberg achieves close to 90-95% hardware utilization with Kubernetes][16] - -> In 2016, Bloomberg adopted Kubernetes—when it was still in alpha—and has seen remarkable results ever since using the project’s upstream code. “With Kubernetes, we’re able to very efficiently use our hardware to the point where we can get close to 90 to 95% utilization rates,” says Rybka. Autoscaling in Kubernetes allows the system to meet demands much faster. Furthermore, Kubernetes “offered us the ability to standardize our approach to how we build and manage services, which means that we can spend more time focused on actually working on the open source tools that we support,” says Steven Bower, Data and Analytics Infrastructure Lead. “If we want to stand up a new cluster in another location in the world, it’s really very straightforward to do that. Everything is all just code. Configuration is code.” - -**The impact**: Nothing cuts through the fog of marketing like utilization stats. One of the things that I've heard about Kube is that people don't know what to do with it when they have it running. Use cases like this give them (and you) something to aspire to. 
- -_I hope you enjoyed this list of what stood out to me from last week and come back next Monday for more open source community, market, and industry trends._ - --------------------------------------------------------------------------------- - -via: https://opensource.com/article/19/9/conferences-industry-trends - -作者:[Tim Hildred][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://opensource.com/users/thildred -[b]: https://github.com/lujun9972 -[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/data_metrics_analytics_desktop_laptop.png?itok=9QXd7AUr (Person standing in front of a giant computer screen with numbers, data) -[2]: https://www.zdnet.com/article/working-on-linuxs-nuts-and-bolts-at-linux-plumbers/ -[3]: https://www.zdnet.com/article/microsoft-hosts-first-windows-subsystem-for-linux-conference/ -[4]: https://github.com/WhitewaterFoundry -[5]: https://docs.microsoft.com/en-us/windows/wsl/install-win10 -[6]: https://www.linkedin.com/feed/update/urn:li:activity:6574754435518599168/ -[7]: https://www.zdnet.com/article/pengwin-a-linux-specifically-for-windows-subsystem-for-linux/ -[8]: https://canonical.com/ -[9]: https://ubuntu.com/ -[10]: https://medium.com/@eldadfux/introducing-appwrite-an-open-source-backend-server-for-mobile-web-developers-4be70731575d -[11]: https://appwrite.io -[12]: https://github.com/appwrite/appwrite -[13]: https://medium.com/@eldadfux/introducing-appwrite-an-open-source-backend-server-for-mobile-web-developers-4be70731575d?source=friends_link&sk=b6a2be384aafd1fa5b1b6ff12906082c -[14]: https://appwrite.io/ -[15]: https://medium.com/agile-government-leadership/more-than-just-it-open-source-technologist-says-collaborative-culture-is-key-to-government-c46d1489f822 -[16]: https://www.cncf.io/blog/2019/09/12/how-bloomberg-achieves-close-to-90-95-hardware-utilization-with-kubernetes/ diff --git a/translated/news/20190916 Linux Plumbers, Appwrite, and more industry trends.md b/translated/news/20190916 Linux Plumbers, Appwrite, and more industry trends.md new file mode 100644 index 0000000000..5df30c9621 --- /dev/null +++ b/translated/news/20190916 Linux Plumbers, Appwrite, and more industry trends.md @@ -0,0 +1,92 @@ +[#]: collector: (lujun9972) +[#]: translator: (wxy) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (Linux Plumbers, Appwrite, and more industry trends) +[#]: via: (https://opensource.com/article/19/9/conferences-industry-trends) +[#]: author: (Tim Hildred https://opensource.com/users/thildred) + + +每周开源点评:Linux Plumbers、Appwrite +====== + +> 了解每周的开源社区和行业趋势。 + +![Person standing in front of a giant computer screen with numbers, data][1] + +作为采用开源开发模式的企业软件公司的高级产品营销经理,这是我为产品营销人员、经理和其他相关人员发布的有关开源社区、市场和行业趋势的定期更新。以下是本次更新中我最喜欢的五篇文章。 + +### 《在 Linux Plumbers 会议上解决 Linux 具体细节》 + +- [文章地址][2] + +> Linux 的创建者 Linus Torvalds 告诉我,内核维护者峰会Kernel Maintainers Summit是顶级 Linux 内核开发人员的邀请制聚会。但是,虽然你可能认为这是关于规划 Linux 内核的未来的会议,但事实并非如此。“这个维护者峰会真的与众不同,因为它甚至不谈论技术问题。”相反,“全都谈的是关于创建和维护 Linux 内核的过程。” + +**影响**:这就像技术版的 Bilderberg 会议:你们举办的都是各种华丽的流行语会议,而在这里我们做出的才是真正的决定。不过我觉得,可能不太会涉及到私人飞机吧。(LCTT 译注:有关 Bilderberg 请自行搜索) + +### 《微软主办第一个 WSL 会议》 + +- [文章地址][3] + +> [Whitewater Foundry][4] 是一家专注于 [Windows 的 Linux 子系统(WSL)][5]的创业公司,它的创始人 Hayden Barnes [宣布举办 WSLconf 1][6],这是 WSL 的第一次社区会议。该活动将于 2020 年 3 月 10 日至 11 日在华盛顿州雷德蒙市的微软总部 20 号楼举行。会议是合办的。我们已经知道将有来自[Pengwin(Whitewater 的 
Linux for Windows)][7]、微软 WSL 和 Canonical 的 Ubuntu on WSL 开发人员的演讲和研讨会。 + +**影响**:微软正在培育社区成长的种子,围绕它越来越多地采用开源软件并作出贡献。这足以让我眼前一亮。 + +### 《Appwrite 简介:面向移动和 Web 开发人员的开源后端服务器》 + +- [文章链接][10] + +> [Appwrite][11] 是一个新的[开源软件][12],用于前端和移动开发人员的端到端的后端服务器,可以让你更快地构建应用程序。[Appwrite][13] 的目标是抽象和简化 REST API 和工具背后的常见开发任务,以帮助开发人员更快地构建高级应用程序。 +> +> 在这篇文章中,我将简要介绍一些主要的 [Appwrite][14] 服务,并解释它们的主要功能以及它们的设计方式,相比从头开始编写所有后端 API,这可以帮助你更快地构建下一个项目。 + +**影响**:随着更多开源中间件变得更易于使用,软件开发越来越容易。Appwrite 声称可将开发时间和成本降低 70%。想象一下这对小型移动开发机构或个人开发者意味着什么。我很好奇他们将如何通过这种方式赚钱。 + +### 《“不只是 IT”:开源技术专家说协作文化是政府转型的关键》 + +- [文章链接][15] + +> AGL(敏捷的政府领导agile government leadership)正在为那些帮助政府更好地为公众工作的人们提供价值支持网络。该组织专注于我非常热衷的事情:DevOps、数字化转型、开源以及许多政府 IT 领导者首选的类似主题。AGL 为我提供了一个社区,可以了解当今最优秀和最聪明的人所做的事情,并与整个行业的同行分享这些知识。 + +**影响**:不管你的政治信仰如何,对政府都很容易愤世嫉俗。我发现令人耳目一新的是,政府也是由一个个实际的人组成的,他们大多在尽力将相关技术应用于公益事业。特别是当该技术是开源的! + +### 《彭博社如何通过 Kubernetes 实现接近 90-95% 的硬件利用率》 + +- [文章链接][16] + +> 2016 年,彭博社采用了 Kubernetes(当时仍处于 alpha 阶段中),自使用该项目的上游代码以来,取得了显著成果。Rybka 说:“借助 Kubernetes,我们能够非常高效地使用我们的硬件,使利用率接近 90% 到 95%。”Kubernetes 中的自动缩放使系统能够更快地满足需求。此外,Kubernetes “为我们提供了标准化我们构建和管理服务的方法的能力,这意味着我们可以花费更多时间专注于实际使用我们支持的开源工具,”数据和分析基础架构主管 Steven Bower 说,“如果我们想要在世界的另一个位置建立一个新的集群,那么这样做真的非常简单。一切都只是代码。配置就是代码。” + +**影响**:没有什么能像利用率统计那样穿过营销的迷雾。我听说过关于 Kube 的一件事是,当人们运行它时,他们不知道用它做什么。像这样的用例可以给他们(和你)一些想要的东西。 + +*我希望你喜欢这个上周重要内容的清单,请下周回来了解有关开源社区、市场和行业趋势的更多信息。* + +-------------------------------------------------------------------------------- + +via: https://opensource.com/article/19/9/conferences-industry-trends + +作者:[Tim Hildred][a] +选题:[lujun9972][b] +译者:[wxy](https://github.com/wxy) +校对:[wxy](https://github.com/wxy) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensource.com/users/thildred +[b]: https://github.com/lujun9972 +[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/data_metrics_analytics_desktop_laptop.png?itok=9QXd7AUr (Person standing in front of a giant computer screen with numbers, data) +[2]: https://www.zdnet.com/article/working-on-linuxs-nuts-and-bolts-at-linux-plumbers/ +[3]: https://www.zdnet.com/article/microsoft-hosts-first-windows-subsystem-for-linux-conference/ +[4]: https://github.com/WhitewaterFoundry +[5]: https://docs.microsoft.com/en-us/windows/wsl/install-win10 +[6]: https://www.linkedin.com/feed/update/urn:li:activity:6574754435518599168/ +[7]: https://www.zdnet.com/article/pengwin-a-linux-specifically-for-windows-subsystem-for-linux/ +[8]: https://canonical.com/ +[9]: https://ubuntu.com/ +[10]: https://medium.com/@eldadfux/introducing-appwrite-an-open-source-backend-server-for-mobile-web-developers-4be70731575d +[11]: https://appwrite.io +[12]: https://github.com/appwrite/appwrite +[13]: https://medium.com/@eldadfux/introducing-appwrite-an-open-source-backend-server-for-mobile-web-developers-4be70731575d?source=friends_link&sk=b6a2be384aafd1fa5b1b6ff12906082c +[14]: https://appwrite.io/ +[15]: https://medium.com/agile-government-leadership/more-than-just-it-open-source-technologist-says-collaborative-culture-is-key-to-government-c46d1489f822 +[16]: https://www.cncf.io/blog/2019/09/12/how-bloomberg-achieves-close-to-90-95-hardware-utilization-with-kubernetes/ From 3d7c7b6eea3401ca129a5e8420e1559e12b3b5f6 Mon Sep 17 00:00:00 2001 From: Xingyu Wang Date: Wed, 18 Sep 2019 11:36:45 +0800 Subject: [PATCH 146/202] PUB @wxy https://linux.cn/article-11355-1.html --- ...16 Linux Plumbers, Appwrite, and more industry trends.md | 6 +++--- 1 file changed, 3 insertions(+), 3 
deletions(-)
 rename {translated/news => published}/20190916 Linux Plumbers, Appwrite, and more industry trends.md (98%)

diff --git a/translated/news/20190916 Linux Plumbers, Appwrite, and more industry trends.md b/published/20190916 Linux Plumbers, Appwrite, and more industry trends.md
similarity index 98%
rename from translated/news/20190916 Linux Plumbers, Appwrite, and more industry trends.md
rename to published/20190916 Linux Plumbers, Appwrite, and more industry trends.md
index 5df30c9621..8ca1e16da6 100644
--- a/translated/news/20190916 Linux Plumbers, Appwrite, and more industry trends.md
+++ b/published/20190916 Linux Plumbers, Appwrite, and more industry trends.md
@@ -1,8 +1,8 @@
 [#]: collector: (lujun9972)
 [#]: translator: (wxy)
-[#]: reviewer: ( )
-[#]: publisher: ( )
-[#]: url: ( )
+[#]: reviewer: (wxy)
+[#]: publisher: (wxy)
+[#]: url: (https://linux.cn/article-11355-1.html)
 [#]: subject: (Linux Plumbers, Appwrite, and more industry trends)
 [#]: via: (https://opensource.com/article/19/9/conferences-industry-trends)
 [#]: author: (Tim Hildred https://opensource.com/users/thildred)

From 4927783e939666937230ac43ccf5e4bba9bddf29 Mon Sep 17 00:00:00 2001
From: DarkSun
Date: Wed, 18 Sep 2019 12:00:05 +0800
Subject: =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190918=20Amid?=
 =?UTF-8?q?=20Epstein=20Controversy,=20Richard=20Stallman=20is=20Forced=20?=
 =?UTF-8?q?to=20Resign=20as=20FSF=20President?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 ...an is Forced to Resign as FSF President.md | 145 ++++++++++++++++++
 1 file changed, 145 insertions(+)
 create mode 100644 sources/tech/20190918 Amid Epstein Controversy, Richard Stallman is Forced to Resign as FSF President.md

diff --git a/sources/tech/20190918 Amid Epstein Controversy, Richard Stallman is Forced to Resign as FSF President.md b/sources/tech/20190918 Amid Epstein Controversy, Richard Stallman is Forced to Resign as FSF President.md
new file mode 100644
index 0000000000..8d3c853ca3
--- /dev/null
+++ b/sources/tech/20190918 Amid Epstein Controversy, Richard Stallman is Forced to Resign as FSF President.md
@@ -0,0 +1,145 @@
+[#]: collector: (lujun9972)
+[#]: translator: ( )
+[#]: reviewer: ( )
+[#]: publisher: ( )
+[#]: url: ( )
+[#]: subject: (Amid Epstein Controversy, Richard Stallman is Forced to Resign as FSF President)
+[#]: via: (https://itsfoss.com/richard-stallman-controversy/)
+[#]: author: (Abhishek Prakash https://itsfoss.com/author/abhishek/)
+
+Amid Epstein Controversy, Richard Stallman is Forced to Resign as FSF President
+======
+
+_**Richard Stallman, founder and president of the Free Software Foundation, has resigned as its president and from its board of directors. The announcement came after a relentless campaign by a few activists and media people to remove Stallman over his remarks about the Epstein victims. Read on for the details.**_
+
+![][1]
+
+### A little background to the Stallman controversy
+
+If you are not aware of the context, let me provide some details.
+
+[Richard Stallman][2], a 66-year-old computer scientist at [MIT][3], is best known for founding the [free software movement][4] in 1983. He also developed several pieces of software, such as GCC and Emacs, under the GNU project. The free software movement inspired a number of projects to choose the open source GPL license. Linux is one of those projects.
+
+[Jeffrey Epstein][5] was a billionaire American financier. He was convicted as a sex offender for running an escort service (which included underage girls) for the rich and elite in his social circle. He committed suicide in his prison cell while still on trial for sex trafficking charges.
+
+[Marvin Lee Minsky][6] was an eminent computer scientist at MIT. He founded the Artificial Intelligence lab at MIT. He died at the age of 88 in 2016. After his death, an Epstein victim named Minsky as one of the people she was “directed to have sex” with on Jeffrey Epstein’s private island while she was a minor.
+
+So what does all this have to do with Richard Stallman? It all started with an email Stallman sent to the MIT Computer Science and Artificial Intelligence Laboratory (CSAIL) mailing list over a proposed protest by MIT students and affiliates regarding Jeffrey Epstein’s donation (to MIT’s AI lab). He wrote:
+
+The announcement of the Friday event does an injustice to Marvin Minsky:
+
+“deceased AI ‘pioneer’ Marvin Minsky (who is accused of assaulting
+one of Epstein’s victims [2])”
+
+The injustice is in the word “assaulting”. The term “sexual assault” is so vague and slippery that it facilitates accusation inflation: taking claims that someone did X and leading people to think of it as Y, which is much worse than X.
+
+The accusation quoted is a clear example of inflation. The reference reports the claim that Minsky had sex with one of Epstein’s harem. (See .)
+Let’s presume that was true (I see no reason to disbelieve it).
+
+The word “assaulting” presumes that he applied force or violence, in some unspecified way, but the article itself says no such thing.
+Only that they had sex.
+
+We can imagine many scenarios, but the most plausible scenario is that she presented herself to him as entirely willing. Assuming she was being coerced by Epstein, he would have had every reason to tell her to conceal that from most of his associates.
+
+I’ve concluded from various examples of accusation inflation that it is absolutely wrong to use the term “sexual assault” in an accusation.
+
+Whatever conduct you want to criticize, you should describe it with a specific term that avoids moral vagueness about the nature of the criticism.
+
+### The call for removing Stallman
+
+‘Epstein’ is an extremely controversial ‘topic’ in the USA. Stallman’s reckless ‘intellectual discourse’ on a sensitive matter like this was never going to go down well, and it didn’t.
+
+A robotics engineer received this forwarded email from her friend and started a [campaign to remove Stallman][7]. She didn’t want a clarification or an apology. All she wanted was to remove Stallman, even if it meant ‘burning MIT to the ground’.
+
+> At least Richard Stallman is not accused of raping anyone. But is that our highest standard? The standard that this prestigious institution holds itself to? If this is what MIT wants to defend; if this is what MIT wants to stand for, then, yes, burn it to the ground…
+>
+> …Remove everyone, if we must, and let something much better be built from the ashes.
+>
+> Salem, Robotics student who started the Remove Stallman campaign
+
+Salem’s rant was initially ignored by the mainstream digital media. But it was picked up by activists who fight against meritocracy and gender bias in the software industry.
+
+> [#epstein][8] [#MIT][9] Hi I'm angry and reporters didn't respond to me so i wrote this story myself. its such a fun time to be an mit alumn right now🙃
+>
+> — SZJG (@selamjie) [September 12, 2019][10]
+
+> are we done with "brilliant jerks" defending child sexual exploitation with "maybe it was consensual"
+>
+> — Tracy Chou 👩🏻‍💻 (@triketora) [September 13, 2019][11]
+
+> I've tweeted for many years about how awful Richard "RMS" Stallman is – the pedophilia, the ableism, the misogyny.
+>
+> Inevitably, each time I do, dudes examine my receipts & then say "all those incidents are from years ago! he's changed now!"
+>
+> NOPE.
+>
+> — Sarah Mei (@sarahmei) [September 12, 2019][12]
+
+A Twitter thread by Sage Sharp on how Stallman’s behavior negatively impacts people in tech:
+
+> 👇Thread about the impact of Richard Stallman on marginalized groups in tech, especially women. [CW: rape, incest, ableism, sex trafficking]
+>
+> The [@fsf][13] needs to permanently remove Richard Stallman from being President and Chair of the Free Software Foundation Board of Directors.
+>
+> — Sage Sharp (@_sagesharp_) [September 16, 2019][14]
+
+It’s not that Stallman is a saint. His crude, insensitive and sexist jokes have been doing the rounds for years. You can read about them [here][15] and [here][16].
+
+Soon the news was picked up by big media houses like [Vice][17], [The Daily Beast][18], [Futurism][19] etc. They painted Stallman as a defender of Jeffrey Epstein. Amidst the outcry, the [executive director of GNOME threatened to end the relationship between GNOME and FSF][20].
+
+Eventually, Stallman resigned first from MIT and now [from the Free Software Foundation][21].
+
+![][22]
+
+### A dangerous precedent?
+
+It took just five days of activism to remove a person from the organization he founded and served for more than thirty years. And this was when Stallman wasn’t even remotely involved in the sex trafficking scandal.
+
+Some of these ‘activists’ have also targeted [Linux creator Linus Torvalds in the past][23]. The management behind the Linux Foundation foresaw the growing trend of activism in the tech industry, and hence they put a [code of conduct for Linux kernel development][24] in place and forced [Torvalds to undergo training to improve his behavior][25]. If they had not taken that corrective step, Torvalds would probably have been a goner by now.
+
+Ignoring the reckless and sexist behavior of tech stalwarts is not acceptable, but neither is the mob mentality of lynching anyone who disagrees with a certain popular view. I don’t agree with Stallman and his past remarks, but I am also not happy that he has been (forced to?) resign in this manner.
+
+Techrights has some interesting takes on it that you can read [here][26] and [here][27].
+
+_**What do you think of the entire episode? Please share your views and opinion but in a civilized manner. Abusive comments will not be published.
Arguments and discussion must be civil.**_ + +-------------------------------------------------------------------------------- + +via: https://itsfoss.com/richard-stallman-controversy/ + +作者:[Abhishek Prakash][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://itsfoss.com/author/abhishek/ +[b]: https://github.com/lujun9972 +[1]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2019/09/stallman-conroversy.png?ssl=1 +[2]: https://en.wikipedia.org/wiki/Richard_Stallman +[3]: https://en.wikipedia.org/wiki/Massachusetts_Institute_of_Technology +[4]: https://en.wikipedia.org/wiki/Free_software_movement +[5]: https://en.wikipedia.org/wiki/Jeffrey_Epstein +[6]: https://en.wikipedia.org/wiki/Marvin_Minsky +[7]: https://medium.com/@selamie/remove-richard-stallman-fec6ec210794 +[8]: https://twitter.com/hashtag/epstein?src=hash&ref_src=twsrc%5Etfw +[9]: https://twitter.com/hashtag/MIT?src=hash&ref_src=twsrc%5Etfw +[10]: https://twitter.com/selamjie/status/1172244207978897408?ref_src=twsrc%5Etfw +[11]: https://twitter.com/triketora/status/1172443389536555009?ref_src=twsrc%5Etfw +[12]: https://twitter.com/sarahmei/status/1172283772428906496?ref_src=twsrc%5Etfw +[13]: https://twitter.com/fsf?ref_src=twsrc%5Etfw +[14]: https://twitter.com/_sagesharp_/status/1173637138413318144?ref_src=twsrc%5Etfw +[15]: https://geekfeminism.wikia.org/wiki/Richard_Stallman +[16]: https://medium.com/@selamie/remove-richard-stallman-appendix-a-a7e41e784f88 +[17]: https://www.vice.com/en_us/article/9ke3ke/famed-computer-scientist-richard-stallman-described-epstein-victims-as-entirely-willing +[18]: https://www.thedailybeast.com/famed-mit-computer-scientist-richard-stallman-defends-epstein-victims-were-entirely-willing +[19]: https://futurism.com/richard-stallman-epstein-scandal +[20]: https://blog.halon.org.uk/2019/09/gnome-foundation-relationship-gnu-fsf/ +[21]: https://www.fsf.org/news/richard-m-stallman-resigns +[22]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2019/09/richard-stallman.png?resize=800%2C94&ssl=1 +[23]: https://www.newyorker.com/science/elements/after-years-of-abusive-e-mails-the-creator-of-linux-steps-aside +[24]: https://itsfoss.com/linux-code-of-conduct/ +[25]: https://itsfoss.com/torvalds-takes-a-break-from-linux/ +[26]: http://techrights.org/2019/09/15/media-attention-has-been-shifted/ +[27]: http://techrights.org/2019/09/16/stallman-removed/ From da12b794211a31c81caa21d153cbeacf8ad0af4e Mon Sep 17 00:00:00 2001 From: DarkSun Date: Wed, 18 Sep 2019 12:00:32 +0800 Subject: [PATCH 148/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190917=20How=20?= =?UTF-8?q?Ansible=20brought=20peace=20to=20my=20home?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/tech/20190917 How Ansible brought peace to my home.md --- ...17 How Ansible brought peace to my home.md | 138 ++++++++++++++++++ 1 file changed, 138 insertions(+) create mode 100644 sources/tech/20190917 How Ansible brought peace to my home.md diff --git a/sources/tech/20190917 How Ansible brought peace to my home.md b/sources/tech/20190917 How Ansible brought peace to my home.md new file mode 100644 index 0000000000..f569ba5f3d --- /dev/null +++ b/sources/tech/20190917 How Ansible brought peace to my home.md @@ -0,0 +1,138 @@ +[#]: collector: (lujun9972) +[#]: translator: ( ) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (How Ansible 
brought peace to my home)
+[#]: via: (https://opensource.com/article/19/9/ansible-documentation-kids-laptops)
+[#]: author: (James Farrell https://opensource.com/users/jamesf)
+
+How Ansible brought peace to my home
+======
+Configuring his young daughters' computers with Ansible made it simple for this dad to manage the family's computers.
+![Coffee and laptop][1]
+
+A few months ago, I read Marco Bravo's article [_How to use Ansible to document procedures_][2] on Opensource.com. I will admit, I didn't quite get it at the time. I was not actively using [Ansible][3], and I remember thinking it looked like more work than it was worth. But I had an open mind and decided to spend time looking deeper into Ansible.
+
+I soon found an excuse to embark on my first real Ansible adventure: repurposing old laptops like in [_How to make an old computer useful again_][4]. I've always liked playing with old computers, and the prospect of automating something with modern methods piqued my interest.
+
+### The task
+
+Earlier this year, I gave my seven-year-old daughter a repurposed Dell Mini 9 running some flavor of Ubuntu. At first, my six-year-old daughter didn't care much about it, but as the music played and she discovered the fun programs, her interest set in.
+
+I realized I would need to build another one for her soon. And any parent with small children close in age can likely identify with my dilemma. If both children don't get identical things, conflicts will arise. Similar toys, similar clothes, similar shoes … sometimes the color, shape, and blinking lights must be identical. I am sure they would notice any difference in laptop configuration, and it would become a point of contention. Therefore, I needed these laptops to have identical functionality.
+
+Also, with small children in the mix, I suspected I would be rebuilding these things a few times. Failures, accidents, upgrades, corruptions … this threatened to become a time sink.
+
+Since two young girls sharing one Dell Mini 9 was not really a workable solution, I grabbed a Dell D620 from my pile of old hardware, upgraded the RAM, put in an inexpensive SSD, and started to cook up a repeatable process to build the children's computer configuration.
+
+If you think about it, this task seems ideal for a configuration management system. I needed something to document what I was doing so it could be easily repeatable.
+
+### Ansible to the rescue
+
+I didn't try to set up a full-on pre-boot execution environment (PXE) to support an occasional laptop install. I wanted to teach my children to do some of the installation work for me (a different kind of automation, ha!).
+
+I decided to start from a minimal OS install and eventually broke down my Ansible approach into three parts: bootstrap, account setup, and software installation. I could have put everything into one giant script, but separating these functions allowed me to mix and match them for other projects and refine them individually over time. Ansible's YAML file readability helped keep things clear as I refined my systems.
+
+For this laptop experiment, I decided to use Debian 32-bit as my starting point, as it seemed to work best on my older hardware. The bootstrap YAML script is intended to take a bare-minimal OS install and bring it up to some standard. It relies on a non-root account to be available over SSH and little else. Since a minimal OS install usually contains very little that is useful to Ansible, I use the following to hit one host and prompt me to log in with privilege escalation:
+
+```
+$ ansible-playbook bootstrap.yml -i '192.168.0.100,' -u jfarrell -Kk
+```
+
+The script makes use of Ansible's [raw][5] module to set some base requirements. It ensures Python is available, upgrades the OS, sets up an Ansible control account, transfers SSH keys, and configures sudo privilege escalation. When bootstrap completes, everything should be in place to have this node fully participate in my larger Ansible inventory. I've found that bootstrapping bare-minimum OS installs is nuanced (if there is interest, I'll write another article on this topic); a rough sketch of one possible bootstrap playbook appears at the end of this article.
+
+The account setup YAML script is used to set up (or reset) user accounts for each family member. This keeps user IDs (UIDs) and group IDs (GIDs) consistent across the small number of machines we have, and it can be used to fix locked accounts when needed. Yes, I know I could have set up Network Information Service or LDAP authentication, but the number of accounts I have is very small, and I prefer to keep these systems very simple. Here is an excerpt I found especially useful for this:
+
+```
+---
+- name: Set user accounts
+  hosts: all
+  gather_facts: false
+  become: yes
+  vars_prompt:
+    - name: passwd
+      prompt: "Enter the desired ansible password:"
+      private: yes
+
+  tasks:
+  - name: Add child 1 account
+    user:
+      state: present
+      name: child1
+      password: "{{ passwd | password_hash('sha512') }}"
+      comment: Child One
+      uid: 888
+      group: users
+      shell: /bin/bash
+      generate_ssh_key: yes
+      ssh_key_bits: 2048
+      update_password: always
+      create_home: yes
+```
+
+The **vars_prompt** section prompts me for a password, which is put through a Jinja2 transformation to produce the desired password hash. This means I don't need to hardcode passwords into the YAML file and can run it to change passwords as needed.
+
+The software installation YAML file is still evolving. It includes a base set of utilities for the sysadmin and then the stuff my users need. This mostly consists of ensuring that the same graphical user interface (GUI) and all the same programs, games, and media files are installed on each machine. Here is a small excerpt of the software for my young children:
+
+```
+  - name: Install kids software
+    apt:
+      name: "{{ packages }}"
+      state: present
+    vars:
+      packages:
+      - lxde
+      - childsplay
+      - tuxpaint
+      - tuxtype
+      - pysycache
+      - pysiogame
+      - lmemory
+      - bouncy
+```
+
+I created these three Ansible scripts using a virtual machine. When they were perfect, I tested them on the D620. Then converting the Mini 9 was a snap; I simply loaded the same minimal Debian install, then ran the bootstrap, accounts, and software configurations. Both systems then functioned identically.
+
+For a while, both sisters enjoyed their respective computers, comparing usage and exploring software features.
+
+### The moment of truth
+
+A few weeks later came the inevitable. My older daughter finally came to the conclusion that her pink Dell Mini 9 was underpowered. Her sister's D620 had superior power and screen real estate. YouTube was the new rage, and the Mini 9 could not keep up. As you can guess, the poor Mini 9 fell into disuse; she wanted a new machine, and sharing her younger sister's would not do.
+
+I had another D620 in my pile. I replaced the BIOS battery, gave it a new SSD, and upgraded the RAM. Another perfect example of breathing new life into old hardware.
+
+I pulled my Ansible scripts from source control, and everything I needed was right there: bootstrap, account setup, and software. By this time, I had forgotten a lot of the specific software installation information. But details like account UIDs and all the packages to install were all clearly documented and ready for use. While I surely could have figured it out by looking at my other machines, there was no need to spend the time! Ansible had it all clearly laid out in YAML.
+
+Not only was the YAML documentation valuable, but Ansible's automation made short work of the new install. The minimal Debian OS install from a USB stick took about 15 minutes. The subsequent shaping up of the system using Ansible for end-user deployment took only another nine minutes. End-user acceptance testing was successful, and a new era of computing calmness was brought to my family (other parents will understand!).
+
+### Conclusion
+
+Taking the time to learn and practice Ansible with this exercise showed me the true value of its automation and documentation abilities. Spending a few hours figuring out the specifics for the first example saves time whenever I need to provision or fix a machine. The YAML is clear, easy to read, and—thanks to Ansible's idempotency—easy to test and refine over time. When I have new ideas or my children have new requests, using Ansible to control a local virtual machine for testing is a valuable time-saving tool.
+
+Doing sysadmin tasks in your free time can be fun. Spending the time to automate and document your work pays rewards in the future; instead of needing to investigate and relearn a bunch of things you've already solved, Ansible keeps your work documented and ready to apply so you can move onto other, newer fun things!
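+
+As promised above, here is a rough sketch of what the bootstrap step could look like. This is illustrative only, not my exact playbook: the control account name (`ansible`), the key path, and the Debian-specific commands are assumptions you would adapt to your own environment:
+
+```
+---
+- name: Bootstrap a bare-minimum Debian install
+  hosts: all
+  gather_facts: false
+  become: yes
+  # note: you may also need ansible_python_interpreter=/usr/bin/python3 in the inventory
+
+  tasks:
+  # raw runs over plain SSH, so it works before Python exists on the target
+  - name: Ensure Python is present for regular Ansible modules
+    raw: test -e /usr/bin/python3 || (apt-get update && apt-get install -y python3)
+    changed_when: false
+
+  - name: Upgrade the OS
+    apt:
+      update_cache: yes
+      upgrade: dist
+
+  - name: Create the Ansible control account
+    user:
+      name: ansible
+      state: present
+      shell: /bin/bash
+
+  - name: Transfer an SSH public key to the control account (placeholder path)
+    authorized_key:
+      user: ansible
+      key: "{{ lookup('file', '/home/jfarrell/.ssh/id_rsa.pub') }}"
+
+  - name: Configure passwordless sudo for the control account
+    copy:
+      dest: /etc/sudoers.d/ansible
+      content: "ansible ALL=(ALL) NOPASSWD: ALL\n"
+      mode: "0440"
+      validate: "visudo -cf %s"
+```
+
+Run it against a fresh host with the same kind of invocation shown earlier (`ansible-playbook bootstrap.yml -i '192.168.0.100,' -u jfarrell -Kk`); after that, the node can join the regular inventory.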
+
+--------------------------------------------------------------------------------
+
+via: https://opensource.com/article/19/9/ansible-documentation-kids-laptops
+
+作者:[James Farrell][a]
+选题:[lujun9972][b]
+译者:[译者ID](https://github.com/译者ID)
+校对:[校对者ID](https://github.com/校对者ID)
+
+本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
+
+[a]: https://opensource.com/users/jamesf
+[b]: https://github.com/lujun9972
+[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/coffee_cafe_brew_laptop_desktop.jpg?itok=G-n1o1-o (Coffee and laptop)
+[2]: https://opensource.com/article/19/4/ansible-procedures
+[3]: https://www.ansible.com/
+[4]: https://opensource.com/article/19/7/how-make-old-computer-useful-again
+[5]: https://docs.ansible.com/ansible/2.3/raw_module.html

From 555e59a70d2ad9c4501a460fedde7ec9beaa86b6 Mon Sep 17 00:00:00 2001
From: DarkSun
Date: Wed, 18 Sep 2019 12:00:49 +0800
Subject: =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190917=20Gettin?=
 =?UTF-8?q?g=20started=20with=20Zsh?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../tech/20190917 Getting started with Zsh.md | 232 ++++++++++++++++++
 1 file changed, 232 insertions(+)
 create mode 100644 sources/tech/20190917 Getting started with Zsh.md

diff --git a/sources/tech/20190917 Getting started with Zsh.md b/sources/tech/20190917 Getting started with Zsh.md
new file mode 100644
index 0000000000..d48391eab7
--- /dev/null
+++ b/sources/tech/20190917 Getting started with Zsh.md
@@ -0,0 +1,232 @@
+[#]: collector: (lujun9972)
+[#]: translator: ( )
+[#]: reviewer: ( )
+[#]: publisher: ( )
+[#]: url: ( )
+[#]: subject: (Getting started with Zsh)
+[#]: via: (https://opensource.com/article/19/9/getting-started-zsh)
+[#]: author: (Seth Kenlon https://opensource.com/users/seth)
+
+Getting started with Zsh
+======
+Improve your shell game by upgrading from Bash to Z-shell.
+![bash logo on green background][1]
+
+Z-shell (or Zsh) is an interactive Bourne-like POSIX shell known for its abundance of innovative features. Z-Shell users often cite its many conveniences and credit it for increased efficiency and extensive customization.
+
+If you're relatively new to Linux or Unix but experienced enough to have opened a terminal and run a few commands, you have probably used the Bash shell. Bash is arguably the definitive free software shell, partly because of its progressive features and partly because it ships as the default shell on most of the popular Linux and Unix operating systems. However, the more you use a shell, the more you start to find small things that might be better for the way you want to use it. If there's one thing open source is famous for, it's _choice_. Many people choose to "graduate" from Bash to Z.
+
+### What is Zsh?
+
+A shell is just an interface to your operating system. An interactive shell allows you to type in commands through what is called _standard input_, or **stdin**, and get output through _standard output_ and _standard error_, or **stdout** and **stderr**. There are many shells, including Bash, Csh, Ksh, Tcsh, Dash, and Zsh. Each has features based on what its programmers thought would be best for a shell. Whether those features are good or bad is up to you, the end user.
+
+Zsh has features like interactive Tab completion, automated file searching, regex integration, advanced shorthand for defining command scope, and a rich theme engine. These features are included in an otherwise familiar Bourne-like shell environment, meaning that if you already know and love Bash, you'll find Zsh familiar—except with more features. You might think of it as a kind of Bash++.
+
+### Installing Zsh
+
+Install Zsh with your package manager.
+
+On Fedora, RHEL, and CentOS:
+
+```
+$ sudo dnf install zsh
+```
+
+On Ubuntu and Debian:
+
+```
+$ sudo apt install zsh
+```
+
+On MacOS, you can install it using MacPorts:
+
+```
+$ sudo port install zsh
+```
+
+Or with Homebrew:
+
+```
+$ brew install zsh
+```
+
+It's possible to run Zsh on Windows, but only on top of a Linux or Linux-like layer such as [Windows Subsystem for Linux][2] (WSL) or [Cygwin][3]. That installation is out of scope for this article, so refer to Microsoft documentation.
+
+### Setting up Zsh
+
+Zsh is not a terminal emulator; it's a shell that runs inside a terminal emulator. So, to launch Zsh, you must first launch a terminal window such as GNOME Terminal, Konsole, Terminal, iTerm2, rxvt, or another terminal of your preference. Then you can launch Zsh by typing:
+
+```
+$ zsh
+```
+
+The first time you launch Zsh, you're asked to choose some configuration options. These can all be changed later, so press **1** to continue.
+
+```
+This is the Z Shell configuration function for new users, zsh-newuser-install.
+
+(q)  Quit and do nothing.
+
+(0)  Exit, creating the file ~/.zshrc
+
+(1)  Continue to the main menu.
+```
+
+There are four categories of preferences, so just start at the top.
+
+ 1. The first category lets you choose how many commands are retained in your shell history file. By default, it's set to 1,000 lines.
+ 2. Zsh completion is one of its most exciting features. To keep things simple, consider activating it with its default options until you get used to how it works. Press **1** for default options, **2** to set options manually.
+ 3. Choose Emacs or Vi key bindings. Bash uses Emacs bindings, so you may be used to that already.
+ 4. Finally, you can learn about (and set or unset) some of Zsh's subtle features. For instance, you can stop using the **cd** command by allowing Zsh to initiate a directory change when you provide a non-executable path with no command. To activate one of these extra options, type the option number and enter **s** to _set_ it. Try turning on all options to get the full Zsh experience. You can unset them later by editing **~/.zshrc**.
+
+To complete configuration, press **0**.
+
+### Using Zsh
+
+At first, Zsh feels a lot like using Bash, which is unmistakably one of its many features. There are serious differences between, for instance, Bash and Tcsh, so being able to switch between Bash and Zsh is a convenience that makes Zsh easy to try and easy to use at home if you have to use Bash at work or on your server.
+
+#### Change directory with Zsh
+
+It's the small differences that make Zsh nice. First, try changing the directory to your Documents folder _without the **cd** command_. It seems too good to be true; but if you enter a directory path with no further instruction, Zsh changes to that directory:
+
+```
+% Documents
+% pwd
+/home/seth/Documents
+```
+
+That renders an error in Bash or any other normal shell. But Zsh is far from normal, and this is just the beginning.
+
+#### Search with Zsh
+
+When you want to find a file using a normal shell, you probably resort to the **find** or **locate** command. At the very least, you may have used **ls -R** for a recursive listing of a set of directories. Zsh has a built-in feature allowing it to find a file in the current or any other subdirectory.
+
+For instance, assume you have two files called **foo.txt**. One is located in your current directory, and the other is in a subdirectory called **foo**. In a Bash shell, you can list the file in the current directory with:
+
+```
+$ ls
+foo.txt
+```
+
+and you can list the other one by stating the subdirectory's path explicitly:
+
+```
+$ ls foo
+foo.txt
+```
+
+To list both, you must use the **-R** switch, maybe combined with **grep**:
+
+```
+$ ls -R | grep foo.txt
+foo.txt
+foo.txt
+```
+
+But in Zsh, you can use the `**` shorthand:
+
+```
+% ls **/foo.txt
+foo.txt
+foo.txt
+```
+
+And you can use this syntax with any command, not just with **ls**. Imagine your increased efficiency when moving specific file types from one collection of directories to a single location, or concatenating snippets of text into a file, or grepping through logs.
+
+### Using Zsh Tab completion
+
+Tab completion is a power-user feature in Bash and some other shells, and it took the Unix world by storm when it became commonplace. No longer did Unix users have to resort to wildcards when typing long and tedious paths (such as `/h*/s*h/V*/SCS/sc*/comp*/t*/a*/*9/04/LS*boat*v*`, which is a lot easier than typing `/home/seth/Videos/SCS/scenes/composite/takes/approved/109/04/LS_boat-port-cargo-mover.mkv`). Instead, they could just press the Tab key when they entered enough of a unique string. For example, if you know there's only one directory starting with an **h** at the root level of your system, you might type **/h** and then hit Tab. It's fast, it's simple, it's efficient. It also confirms a path exists; if Tab doesn't complete anything, you know you're looking in the wrong place or you mistyped part of the path.
+
+However, if you have many directories that share five or more of the same first letters, Tab staunchly refuses to complete. While in most modern terminals it will (at least) reveal the files blocking it from guessing what you mean, it usually takes two Tab presses to reveal them; therefore, Tab completion often becomes such an interplay of letters and Tabs across your keyboard that you feel like you're training for a piano recital.
+
+Zsh solves this minor annoyance by cycling through possible completions. If you type **ls ~/D** and press Tab, Zsh completes your command with **Documents** first; if you press Tab again, it offers **Downloads**, and so on until you find the one you want.
+
+### Wildcards in Zsh
+
+Wildcards behave differently in Zsh than what Bash users are used to. First of all, they can be modified. For example, if you want to list all folders in your current directory, you can use a modified wildcard:
+
+```
+% ls
+dir0   dir1   dir2   file0   file1
+% ls *(/)
+dir0   dir1   dir2
+```
+
+In this example, the **(/)** qualifies the results of the wildcard so Zsh will display only directories. To list just the files, use **(.)**. To list symlinks, use **(@)**. To list executable files, use **(*)**.
+
+```
+% ls ~/bin/*(*)
+fop  exify  tt
+```
+
+Zsh isn't only aware of file types. It can also list according to modification time, using the same wildcard modifier convention. For example, if you want to find a file that was modified within the past eight hours, use the **mh** modifier (for **modified** and **hours**) and the negative integer of hours:
+
+```
+% ls ~/Documents/*(mh-8)
+cal.org   game.org   home.org
+```
+
+To find a file modified more than (for instance) two days ago, the modifiers change to **md** (for **modified** and **day**) with a positive integer:
+
+```
+% ls ~/Documents/*(md+2)
+holiday.org
+```
+
+There's a lot more you can do with wildcard modifiers and qualifiers, so read the [Zsh man page][4] for full details.
+
+#### The wildcard side effect
+
+To use wildcards the way you would use them in Bash, sometimes they must be escaped in Zsh. For instance, if you're copying some files to your server in Bash, you might use a wildcard like this:
+
+```
+$ scp IMG_*.JPG seth@example.com:~/www/ph*/*19/09/14
+```
+
+That works in Bash, but Zsh returns an error because it tries to expand the wildcards in the remote path before issuing the **scp** command. To avoid this, you must escape the remote wildcards:
+
+```
+% scp IMG_*.JPG seth@example.com:~/www/ph\*/\*19/09/14
+```
+
+It's these types of little exceptions that can frustrate you when you're switching to a new shell. There aren't many when using Zsh (there are probably more when switching back to Bash after experiencing Zsh) but when they happen, remain calm and be explicit. Rarely will you go wrong to adhere strictly to POSIX—but if that fails, look up the problem to solve it and move on. [Hyperpolyglot.org][5] has proven invaluable to many users stuck on one shell at work and another at home.
+
+In my next Zsh article, I'll show you how to install themes and plugins to make your Z-Shell even Z-ier.
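+
+Until then, here's one more sketch showing how the qualifiers above combine (assuming a reasonably recent Zsh; the file names are whatever you have on hand). The **om** qualifier orders matches by modification time, newest first, and a subscript like **[1,5]** keeps only the first five matches, so the following lists the five most recently modified regular files anywhere beneath the current directory:
+
+```
+% ls -ld **/*(.om[1,5])
+```
+
+Swap **om** for **oL** to order by file size instead; the [Zsh man page][4] lists the full set of qualifiers and sort orders.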
+
+--------------------------------------------------------------------------------
+
+via: https://opensource.com/article/19/9/getting-started-zsh
+
+作者:[Seth Kenlon][a]
+选题:[lujun9972][b]
+译者:[译者ID](https://github.com/译者ID)
+校对:[校对者ID](https://github.com/校对者ID)
+
+本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
+
+[a]: https://opensource.com/users/seth
+[b]: https://github.com/lujun9972
+[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/bash_command_line.png?itok=k4z94W2U (bash logo on green background)
+[2]: https://devblogs.microsoft.com/commandline/category/bash-on-ubuntu-on-windows/
+[3]: https://www.cygwin.com/
+[4]: https://linux.die.net/man/1/zsh
+[5]: http://hyperpolyglot.org/unix-shells

From acea082abe8d48f8e8e3eb7767d81d6febff817c Mon Sep 17 00:00:00 2001
From: DarkSun
Date: Wed, 18 Sep 2019 12:01:12 +0800
Subject: =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190917=203=20st?=
 =?UTF-8?q?eps=20to=20developing=20psychological=20safety?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 ...teps to developing psychological safety.md | 120 ++++++++++++++++++
 1 file changed, 120 insertions(+)
 create mode 100644 sources/tech/20190917 3 steps to developing psychological safety.md

diff --git a/sources/tech/20190917 3 steps to developing psychological safety.md b/sources/tech/20190917 3 steps to developing psychological safety.md
new file mode 100644
index 0000000000..ee77789bc4
--- /dev/null
+++ b/sources/tech/20190917 3 steps to developing psychological safety.md
@@ -0,0 +1,120 @@
+[#]: collector: (lujun9972)
+[#]: translator: ( )
+[#]: reviewer: ( )
+[#]: publisher: ( )
+[#]: url: ( )
+[#]: subject: (3 steps to developing psychological safety)
+[#]: via: (https://opensource.com/open-organization/19/9/psychological-safety-leadership-behaviors)
+[#]: author: (Kathleen Hayes https://opensource.com/users/khayes4days)
+
+3 steps to developing psychological safety
+======
+The mindsets, behaviors, and communication patterns necessary for establishing psychological safety in our organizations may not be our defaults, but they are teachable and observable.
+![Brain map][1]
+
+Psychological safety is a belief that one will not be punished or humiliated for speaking up with ideas, questions, concerns, or mistakes. And it's critical for high-performing teams in open organizations.
+
+Part one of this series introduced the concept of [psychological safety][2]. In this companion article, I'll recount my personal journey toward understanding psychological safety—and explain the fundamental shifts in mindset, behavior, and communication that anyone hoping to create psychologically safe teams and environments will need to make.
+
+### Mindset: Become the learner
+
+I have participated in a number of corporate cultures that fostered a "gotcha" mindset—one in which people were poised to pounce when something didn't go according to plan. Dropping this deeply ingrained mindset was a requirement for achieving psychological safety, and doing _that_ required a fundamental shift in the way I lived my life.
+ +_Guiding value: Become the learner, not the knower._ + +Life is a process; achieving perfection will never happen. Similarly, building an organization is a process; there is no "we've got it now" state. In most cases, we're traversing unchartered territories. Enormous uncertainty lies ahead. [We can't know what will happen][3], which is why it was important for me to become the learner in work, just as in life. + +On my first day as a new marketing leader, a team member was describing their collaboration with engineering and told me about "the F-You board." If a new program was rolled out and engineers complained that it missed the mark, was wrong, or was downright ridiculous, someone placed a tally on the F-You whiteboard. When they'd accumulated ten, the team would go out drinking. + +There is a lot to unpack in this dynamic. For our purposes, however, let's focus on a few actionable steps that helped me reframe the "gotcha" mentality into a learning mindset. + +First, I shaped marketing programs and campaigns as experiments, using the word "experiment" with intention not just _within_ the marketing department but _across_ the entire organization. Corporate-wide communications about upcoming rollouts concluded with, "If you see any glaring omissions or areas for refinement, please let me know," inviting engineers to surface blind spots and bring forward solutions—rather than point fingers after the work had concluded. + +Only after shifting my perspective from that of the knower to that of the learner did I open a genuine desire to understand another's perspective. + +Next, to stimulate the learning process in myself and others, I began fostering a "[try, learn, modify][4]" mindset, in which setbacks are not viewed as doomsday failures but as opportunities for clarification and improvement. To recover quickly when something doesn't go according to plan, I would ask four key questions: + + * What did we set out to do? + * What happened? + * What did we learn? + * How quickly can we improve upon it? + + + +It's nearly impossible for every project to be a home run. Setbacks will occur. As the ground-breaking research on psychological safety revealed, [learning happens through vulnerable conversations][5]. When engaged in psychologically safe environments, we can use these moments to cultivate more learning and less defensiveness. + +### Behavior: Model curiosity + +One way we can help our team drop their defensiveness is by _modeling curiosity_. + +As the "knower," I was adept at command and control. Quite often this meant playing the devil's advocate and shaming others into submitting to my point of view. + +In a meeting early in my career as a vice president, a colleague was rolling out a new program and asked each executive to share feedback. I was surprised as each person around the table gave a thumbs up. I had reservations about the direction and was concerned that no one else could see the issues that seemed so readily apparent to me. Rather than asking for clarification and [stimulating a productive dialog][6], I simply quipped, "Am I the only one that hasn't [sipped the purple Kool-Aid][7]?" Not my finest moment. + +As I look back, this behavior was fueled by a mixture of fear and overconfidence, a hostile combination resulting in a hostile psychological attitude. I wasn't curious because I was too busy being right, being the knower. By becoming the learner, I let a genuine interest in understanding others' perspectives come to the forefront. 
This helped me more deeply understand a fundamental fact about the human condition. + +_Guiding value: Situations are rarely, if ever, crystal clear._ + +The process of understanding is dynamic. We are constantly moving along a continuum from clear to unclear and back again. For large teams, this swing is more pronounced as each member brings a unique perspective to bear on an issue. And rightly so. There are seven billion people on this planet; it's nearly impossible for everyone to see a situation the same way. + +Recalibrating this attitude—the devil's advocate attitude of "I disagree" to the learner's space and behavior of "help me see what you see"—took practice. One thing that worked for me was intentionally using the phrase "I have a different perspective to share" when offering my opinion and, when clarifying, saying, "That is not consistent with my understanding. Can you tell me more about your perspective?" These tactics helped me move from my default of knowing and convincing. I also asked a trusted team member to privately point out when something or someone had triggered my old default. Over time, my self-awareness matured and, with practice, the intentional tactics evolved into a learned behavior. + +As I look back, this behavior was fueled by a mixture of fear and overconfidence, a hostile combination resulting in a hostile psychological attitude. I wasn't curious because I was too busy being right, being the knower. + +I feel compelled to share that without the right mindset these tactics would have been cheap communication gimmicks. Only after shifting my perspective from that of the knower to that of the learner did I open a genuine desire to understand another's perspective. This allowed me to develop the capacity to model curiosity and open the space for my team members—and me—to explore ideas with safety, vulnerability, and respect. + +### Communication: Deliver productive feedback + +Psychological safety does not imply a cozy situation in which people are necessarily close friends, nor does it suggest an absence of pressure or problems. When problems inevitably arise, we must hold ourselves and others accountable and deliver feedback without tiptoeing around the truth, or playing the blame game. However, giving productive feedback is [a skill most leaders have never learned][8]. + +_Guiding value: Clear is kind; unclear is unkind._ + +When problems arise during experiments in marketing, I am finding team communication to be incredibly productive when using that _try, learn, modify_ approach and modeling curiosity. One-on-one conversations about real deficits, however, have proven more difficult. + +I found so many creative reasons to delay or avoid these conversations. In a fast-paced startup, one of my favorites was, "They've only been in this role for a short while. Give them more time to get up to speed." That was an unfortunate approach, especially when coupled later with vague direction, like, "I need you to deliver more, more quickly." Because I was unable to clearly communicate what was expected, team members were not clear on what needed to be improved. This stall tactic and belittling innuendo masquerading as feedback was leading to more shame and blame than learning and growth. + +In becoming the learner, the guiding value of "clear is kind, unclear is unkind," crystalized for me when studying _Dare to Lead_. 
In her work, [Brené Brown explains][9]:
+
+ * Feeding people half-truths to make them feel better, which is almost always about making ourselves feel more comfortable, is unkind.
+ * Not getting clear with people about your expectations because it feels too hard, yet holding them accountable or blaming them for not delivering, is unkind.
+
+Below are three actionable tips that are helping me deliver clearer, more productive feedback.
+
+**Get specific.** Point to the specific lack of proficiency that needs to be addressed. Tie your comments to a shared vision of how this impacts career progression. When giving feedback on behavior, stay away from character and separate person from process.
+
+**Allow people to have feelings.** Rather than rushing in, give them space to feel. Learn how to hold the discomfort.
+
+**Think carefully about how you want to show up.** Work through how your conversation may unfold and where you might trigger blaming behaviors. Knowing puts you in a safer mindset for having difficult conversations.
+
+Teaching team members, reassessing skill gaps, reassigning them (or even letting them go) can become more empathetic processes when "clear is kind" is top of mind for leaders.
+
+### Closing
+
+The mindsets, behaviors, and communication patterns necessary for establishing psychological safety in our organizations may not be our defaults, but they are teachable and observable. Stay curious, ask questions, and deepen your understanding of others' perspectives. Do the difficult work of holding yourself and others accountable for showing up in a way that's aligned with cultivating a culture where your creativity—and your team members—thrive.
+
+--------------------------------------------------------------------------------
+
+via: https://opensource.com/open-organization/19/9/psychological-safety-leadership-behaviors
+
+作者:[Kathleen Hayes][a]
+选题:[lujun9972][b]
+译者:[译者ID](https://github.com/译者ID)
+校对:[校对者ID](https://github.com/校对者ID)
+
+本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
+
+[a]: https://opensource.com/users/khayes4days
+[b]: https://github.com/lujun9972
+[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/open_art-mindmap-520.png?itok=qQVBAoVw (Brain map)
+[2]: https://opensource.com/open-organization/19/3/introduction-psychological-safety
+[3]: https://opensource.com/open-organization/19/5/planning-future-unknowable
+[4]: https://opensource.com/open-organization/18/3/try-learn-modify
+[5]: https://www.youtube.com/watch?v=LhoLuui9gX8
+[6]: https://opensource.com/open-organization/19/5/productive-arguments
+[7]: https://opensource.com/open-organization/15/7/open-organizations-kool-aid
+[8]: https://opensource.com/open-organization/19/4/be-open-with-difficult-feedback
+[9]: https://brenebrown.com/articles/2018/10/15/clear-is-kind-unclear-is-unkind/

From 72ed90b85b5bc6a71c3a9f24855a3112cbab2fc4 Mon Sep 17 00:00:00 2001
From: DarkSun
Date: Wed, 18 Sep 2019 12:01:32 +0800
Subject: =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190917=20Talkin?=
 =?UTF-8?q?g=20to=20machines:=20Lisp=20and=20the=20origins=20of=20AI?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 ...to machines- Lisp and the origins of AI.md | 115 ++++++++++++++++++
 1 file changed, 115 insertions(+)
 create mode 100644 sources/tech/20190917 Talking to machines- Lisp and the origins of AI.md

diff --git a/sources/tech/20190917 Talking to machines- Lisp and the origins of AI.md b/sources/tech/20190917 Talking to machines- Lisp and the origins of AI.md
new file mode 100644
index 0000000000..795f4c731b
--- /dev/null
+++ b/sources/tech/20190917 Talking to machines- Lisp and the origins of AI.md
@@ -0,0 +1,115 @@
+[#]: collector: (lujun9972)
+[#]: translator: ( )
+[#]: reviewer: ( )
+[#]: publisher: ( )
+[#]: url: ( )
+[#]: subject: (Talking to machines: Lisp and the origins of AI)
+[#]: via: (https://opensource.com/article/19/9/command-line-heroes-lisp)
+[#]: author: (Matthew Broberg https://opensource.com/users/mbbroberg)
+
+Talking to machines: Lisp and the origins of AI
+======
+The Command Line Heroes podcast explores the invention of Lisp and the rise of thinking computers powered by open source software.
+![Listen to the Command Line Heroes Podcast][1]
+
+Artificial intelligence (AI) is all the rage today, and its massive impact on the world is still to come, says the [Association for the Advancement of Artificial Intelligence][2] (AAAI). According to an article on [Nanalyze][3]:
+
+> "The vast majority of nearly 2,000 experts polled by the Pew Research Center in 2014 said they anticipate robotics and artificial intelligence will permeate wide segments of daily life by 2025. A 2015 study covering 17 countries found that artificial intelligence and related technologies added an estimated 0.4 percentage point on average to those countries' annual GDP growth between 1993 and 2007, accounting for just over one-tenth of those countries' overall GDP growth during that time."
+
+However, this is the second time AI has garnered so much attention. When was AI first popular, and what does that have to do with the obscure-but-often-loved programming language Lisp?
+
+The second-to-last podcast of [Command Line Heroes][4]' third season dives into these topics and leaves us thinking about open source at the core of AI.
+
+### Before the term AI
+
+Thinking machines have been a curiosity for centuries, long before they could be realized. In the 1800s, computer science pioneers Charles Babbage and Ada Lovelace imagined an analytical engine capable of predictions far beyond human skills, such as correctly selecting the winning horse in a race.
+
+In the 1940s and '50s, Alan Turing defined what it would look like for intelligent machines to emulate human intelligence; that's what we now call the Turing Test. In his 1950 [research paper][5], Turing's "imitation game" set out to convince someone they were communicating with a human in another room when, in reality, it was a machine.
+
+While these theories inspired imaginative debate, they became less theoretical as computer hardware began providing enough power to begin experimenting.
+
+### Why Lisp is at the heart of AI theory
+
+John McCarthy, the person who coined the term "artificial intelligence," is also the person who reinvented how we program to create thinking machines. His reimagined approach was codified into the Lisp programming language. As [Paul Graham][6] wrote:
+
+> "In 1960, [John McCarthy][7] published a remarkable paper in which he did for programming something like what Euclid did for geometry. He showed how, given a handful of simple operators and a notation for functions, you can build a whole programming language. He called this language Lisp, for 'List Processing,' because one of his key ideas was to use a simple data structure called a list for both code and data.
+>
+> "It's worth understanding what McCarthy discovered, not just as a landmark in the history of computers, but as a model for what programming is tending to become in our own time. It seems to me that there have been two really clean, consistent models of programming so far: the C model and the Lisp model. These two seem points of high ground, with swampy lowlands between them. As computers have grown more powerful, the new languages being developed have been [moving steadily][8] toward the Lisp model. A popular recipe for new programming languages in the past 20 years has been to take the C model of computing and add to it, piecemeal, parts taken from the Lisp model, like runtime typing and garbage collection."
+
+I remember when I first wrote Lisp for a computer science class. After wrapping my head around its seemingly infinite number of parentheses, I uncovered a beautiful pattern of thought: Can I think through what I want this software to do?
+
+![The elegance of Lisp programming is timeless][9]
+
+That sounds silly: computers process what we code them to do, but there's something about recursion that made me think in a wildly different light. It's exciting to learn that 15 years ago, I may have been tapping into the big-picture changes McCarthy was describing.
+
+### Why the slowdown in AI?
+
+By the mid-to-late 1960s, McCarthy's work made way for a new field of research, where AI, machine learning (ML), and deep learning all became possibilities. And Lisp became the accepted standard in this emerging field. It's said that in 1968, McCarthy made a wager with David Levy, a Scottish chess master, that in 10 years a computer would be able to beat Levy in a chess match. Why did it take nearly 30 years to get to the famous [Deep Blue vs. Garry Kasparov][10] match?
+
+Command Line Heroes explores one theory: that for-profit investment in AI pulled essential talent from academia, where they were advancing the science, and pushed them onto a different path. Whether or not this was the reason, the world of AI fell into a "winter," where the people pursuing it were considered unrealistic.
+
+This AI winter lasted for quite some time. In 2005, The [_New York Times_ reported][11] that AI had become so stigmatized that "some computer scientists and software engineers avoided the term artificial intelligence for fear of being viewed as wild-eyed dreamers."
+
+### Where is AI now?
+
+Fast forward to today, when talking about AI or ML is a fast pass to getting people's attention—but that attention isn't always positive. Many are concerned that AI will remove millions of jobs from the world. Others say it will [create][12] millions more jobs than are lost.
+
+The verdict is still out. [McKinsey's research][13] on the job loss vs. job gain debate is fascinating. When you take into account growing world consumption, aging populations, "marketization" of previously unpaid domestic work, and other factors, you find that the answer depends on your outlook.
+
+One thing is for sure: AI will be a significant part of our lives, and it will have much wider implications than other areas of tech. For this reason (among others), examining the [misconceptions around ethics and bias in AI][14] is essential.
+
+### Open source and AI
+
+McCarthy had a dream that machines could have common sense. His AI goals included open source from the very beginning; this is visualized on Red Hat's beautifully animated webpage on the [origins of AI and its open source roots][15].
+
+[![Origins of AI and open source screenshot][16]][15]
+
+If we are to achieve the goals of McCarthy, Turing, or other AI pioneers, I believe it will be because of the open source community behind the technology. Part of the reason AI's popularity bounced back is open source: languages, frameworks, and the datasets we analyze are increasingly open. Here are a handful of things to explore:
+
+ * [Learn enough Python and R][17] to be part of this future
+ * [Explore Python libraries][18] that will bulk up your skills
+ * Understand how [AI and ML are related][19]
+ * Explore [free and open datasets][20]
+ * Use modern implementations of Lisp, [available under open source licenses][21]
+
+It's possible that early AI explored the right ideas in the wrong decade. World-class computers back then weren't even as powerful as today's cellphones, and each one was shared by dozens of individuals. Today, many of us own multiple supercomputers and carry them with us all the time. For this reason, among others, the future of AI is strong and its highest achievements are yet to come.
+
+_Command Line Heroes has covered programming languages for all of Season 3. [Subscribe so that you don't miss the last episode of the season][4], and I would love to hear your thoughts in the comments below._
+
+--------------------------------------------------------------------------------
+
+via: https://opensource.com/article/19/9/command-line-heroes-lisp
+
+作者:[Matthew Broberg][a]
+选题:[lujun9972][b]
+译者:[译者ID](https://github.com/译者ID)
+校对:[校对者ID](https://github.com/校对者ID)
+
+本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
+
+[a]: https://opensource.com/users/mbbroberg
+[b]: https://github.com/lujun9972
+[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/command_line_hereoes_ep7_blog-header-292x521.png?itok=lI4DXvq2 (Listen to the Command Line Heroes Podcast)
+[2]: http://aaai.org/
+[3]: https://www.nanalyze.com/2016/11/artificial-intelligence-definition/
+[4]: https://www.redhat.com/en/command-line-heroes
+[5]: https://www.csee.umbc.edu/courses/471/papers/turing.pdf
+[6]: http://www.paulgraham.com/rootsoflisp.html
+[7]: http://www-formal.stanford.edu/jmc/index.html
+[8]: http://www.paulgraham.com/diff.html
+[9]: https://opensource.com/sites/default/files/uploads/lisp_cycles.png (The elegance of Lisp programming is timeless)
+[10]: https://en.wikipedia.org/wiki/Deep_Blue_versus_Garry_Kasparov
+[11]: https://www.nytimes.com/2005/10/14/technology/behind-artificial-intelligence-a-squadron-of-bright-real-people.html
+[12]: https://singularityhub.com/2019/01/01/ai-will-create-millions-more-jobs-than-it-will-destroy-heres-how/
+[13]: https://www.mckinsey.com/featured-insights/future-of-work/jobs-lost-jobs-gained-what-the-future-of-work-will-mean-for-jobs-skills-and-wages
+[14]: https://opensource.com/article/19/8/4-misconceptions-ethics-and-bias-ai
+[15]: https://www.redhat.com/en/open-source-stories/ai-revolutionaries/origins-ai-open-source
+[16]: https://opensource.com/sites/default/files/uploads/origins_aiopensource.png (Origins of AI and open source screenshot)
+[17]: https://opensource.com/article/19/5/learn-python-r-data-science
+[18]: https://opensource.com/article/18/5/top-8-open-source-ai-technologies-machine-learning
https://opensource.com/article/18/5/top-8-open-source-ai-technologies-machine-learning +[19]: https://opensource.com/tags/ai-and-machine-learning +[20]: https://opensource.com/article/19/2/learn-data-science-ai +[21]: https://www.cliki.net/Common+Lisp+implementation From 398467b1ff46e963378969165af7d55b746f558e Mon Sep 17 00:00:00 2001 From: DarkSun Date: Wed, 18 Sep 2019 12:02:12 +0800 Subject: [PATCH 152/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190918=20Micros?= =?UTF-8?q?oft=20brings=20IBM=20iron=20to=20Azure=20for=20on-premises=20mi?= =?UTF-8?q?grations?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/talk/20190918 Microsoft brings IBM iron to Azure for on-premises migrations.md --- ...ron to Azure for on-premises migrations.md | 57 +++++++++++++++++++ 1 file changed, 57 insertions(+) create mode 100644 sources/talk/20190918 Microsoft brings IBM iron to Azure for on-premises migrations.md diff --git a/sources/talk/20190918 Microsoft brings IBM iron to Azure for on-premises migrations.md b/sources/talk/20190918 Microsoft brings IBM iron to Azure for on-premises migrations.md new file mode 100644 index 0000000000..196af8153f --- /dev/null +++ b/sources/talk/20190918 Microsoft brings IBM iron to Azure for on-premises migrations.md @@ -0,0 +1,57 @@ +[#]: collector: (lujun9972) +[#]: translator: ( ) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (Microsoft brings IBM iron to Azure for on-premises migrations) +[#]: via: (https://www.networkworld.com/article/3438904/microsoft-brings-ibm-iron-to-azure-for-on-premises-migrations.html) +[#]: author: (Andy Patrizio https://www.networkworld.com/author/Andy-Patrizio/) + +Microsoft brings IBM iron to Azure for on-premises migrations +====== +Once again Microsoft shows it has shed its not-invented here attitude to support customers. +Microsoft / Just_Super / Getty Images + +When Microsoft launched Azure as a cloud-based version of its Windows Server operating system, it didn't make it exclusively Windows. It also included Linux support, and in just a few years, the [number of Linux instances now outnumbers Windows instances][1]. + +It's nice to see Microsoft finally shed that not-invented-here attitude that was so toxic for so long, but the company's latest move is really surprising. + +Microsoft has partnered with a company called Skytap to offer IBM Power9 instances on its Azure cloud service to run Power-based systems inside of the Azure cloud, which will be offered as Azure virtual machines (VM) along with the Xeon and Epyc server instances that it already offers. + +**Also read: [How to make hybrid cloud work][2]** + +Skytap is an interesting company. Founded by three University of Washington professors, it specializes in cloud migrations of older on-premises hardware, such as IBM System I or Sparc. It has a data center in its home town of Seattle, with IBM hardware running IBM's PowerVM hypervisor, plus some co-locations in IBM data centers in the U.S. and England. + +Its motto is to migrate fast, then modernize at your own pace. So, its focus is on helping legacy systems migrate to the cloud and then modernize the apps, which is what the alliance with Microsoft appears to be aimed at. Azure will provide enterprises with a platform to enhance the value of traditional applications without the major expense of rewriting for a new platform. 
+
+Skytap is providing a preview of what's possible when lifting and extending a legacy IBM i application using DB2 on Skytap and augmenting it with Azure IoT Hub. The application seamlessly spans old and new architectures, demonstrating there is no need to completely rewrite rock-solid IBM i applications to benefit from modern cloud capabilities.
+
+### Migrating to Azure cloud
+
+Under the deal, Microsoft will take Power S922 servers from IBM and deploy them in an undisclosed Azure region. These machines can run the PowerVM hypervisor, which supports legacy IBM operating systems, as well as Linux.
+
+"Migrating to the cloud by first replacing older technologies is time consuming and risky," said Brad Schick, CEO of Skytap, in a statement. "Skytap's goal has always been to provide businesses with a path to get these systems into the cloud with little change and less risk. Working with Microsoft, we will bring Skytap's native support for a wide range of legacy applications to Microsoft Azure, including those dependent on IBM i, AIX, and Linux on Power. This will give businesses the ability to extend the life of traditional systems and increase their value by modernizing with Azure services."
+
+As Power-based applications are modernized, Skytap will then bring in DevOps CI/CD toolchains to accelerate software delivery. After moving to Skytap on Azure, customers will be able to integrate Azure DevOps, in addition to CI/CD toolchains for Power, such as Eradani and UrbanCode.
+
+These sound like first steps, which means there will be more to come, especially in terms of the app migration. If it's only in one Azure region, it sounds like they are testing and finding their legs with this project and will likely expand later this year or next.
+
+Join the Network World communities on [Facebook][3] and [LinkedIn][4] to comment on topics that are top of mind.
+ +-------------------------------------------------------------------------------- + +via: https://www.networkworld.com/article/3438904/microsoft-brings-ibm-iron-to-azure-for-on-premises-migrations.html + +作者:[Andy Patrizio][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://www.networkworld.com/author/Andy-Patrizio/ +[b]: https://github.com/lujun9972 +[1]: https://www.openwall.com/lists/oss-security/2019/06/27/7 +[2]: https://www.networkworld.com/article/3119362/hybrid-cloud/how-to-make-hybrid-cloud-work.html#tk.nww-fsb +[3]: https://www.facebook.com/NetworkWorld/ +[4]: https://www.linkedin.com/company/network-world From 8d2a943093b5fe7609cc46a6f443a1d38cef8f00 Mon Sep 17 00:00:00 2001 From: DarkSun Date: Wed, 18 Sep 2019 12:03:09 +0800 Subject: [PATCH 153/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190917=20Here?= =?UTF-8?q?=20Comes=20Oracle=20Autonomous=20Linux=20=E2=80=93=20World?= =?UTF-8?q?=E2=80=99s=20First=20Autonomous=20Operating=20System?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/talk/20190917 Here Comes Oracle Autonomous Linux - World-s First Autonomous Operating System.md --- ...rld-s First Autonomous Operating System.md | 62 +++++++++++++++++++ 1 file changed, 62 insertions(+) create mode 100644 sources/talk/20190917 Here Comes Oracle Autonomous Linux - World-s First Autonomous Operating System.md diff --git a/sources/talk/20190917 Here Comes Oracle Autonomous Linux - World-s First Autonomous Operating System.md b/sources/talk/20190917 Here Comes Oracle Autonomous Linux - World-s First Autonomous Operating System.md new file mode 100644 index 0000000000..f1ecce16c4 --- /dev/null +++ b/sources/talk/20190917 Here Comes Oracle Autonomous Linux - World-s First Autonomous Operating System.md @@ -0,0 +1,62 @@ +[#]: collector: (lujun9972) +[#]: translator: ( ) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (Here Comes Oracle Autonomous Linux – World’s First Autonomous Operating System) +[#]: via: (https://opensourceforu.com/2019/09/here-comes-oracle-autonomous-linux-worlds-first-autonomous-operating-system/) +[#]: author: (Longjam Dineshwori https://opensourceforu.com/author/dineshwori-longjam/) + +Here Comes Oracle Autonomous Linux – World’s First Autonomous Operating System +====== + + * _**Oracle Autonomous Linux**_ _**delivers automated patching, updates and tuning without human intervention.**_ + * _**It can help IT companies improve reliability and protect their systems from cyberthreats**_ + * _**Oracle also introduces Oracle OS Management Service that delivers control and visibility over systems**_ + + + +![Oracle cloud][1] + +Oracle today marked a major milestone in the company’s autonomous strategy with the introduction of Oracle Autonomous Linux – the world’s first autonomous operating system. + +Oracle Autonomous Linux, along with the new Oracle OS Management Service, is the first and only autonomous operating environment that eliminates complexity and human error to deliver unprecedented cost savings, security and availability for customers, the company claims in a just released statement. + +Keeping systems patched and secure is one of the biggest ongoing challenges faced by IT today. 
With Oracle Autonomous Linux, the company says, customers can rely on autonomous capabilities to help ensure their systems are secure and highly available to help prevent cyberattacks. + +“Oracle Autonomous Linux builds on Oracle’s proven history of delivering Linux with extreme performance, reliability and security to run the most demanding enterprise applications,” said Wim Coekaerts, senior vice president of operating systems and virtualization engineering, Oracle. + +“Today we are taking the next step in our autonomous strategy with Oracle Autonomous Linux, providing a rich set of capabilities to help our customers significantly improve reliability and protect their systems from cyberthreats,” he added. + +**Oracle OS Management Service** + +Along with Oracle Autonomous Linux, Oracle introduced Oracle OS Management Service, a highly available Oracle Cloud Infrastructure component that delivers control and visibility over systems whether they run Autonomous Linux, Linux or Windows. + +Combined with resource governance policies, OS Management Service, via the Oracle Cloud Infrastructure console or APIs, also enables users to automate capabilities that will execute common management tasks for Linux systems, including patch and package management, security and compliance reporting, and configuration management. + +It can be further automated with other Oracle Cloud Infrastructure services like auto-scaling as workloads need to grow or shrink to meet elastic demand. + +**Always Free Autonomous Database and Cloud Infrastructure** + +Oracle Autonomous Linux, in conjunction with Oracle OS Management Service, uses advanced machine learning and autonomous capabilities to deliver unprecedented cost savings, security and availability and frees up critical IT resources to tackle more strategic initiatives. + +They are included with Oracle Premier Support at no extra charge with Oracle Cloud Infrastructure compute services. Combined with Oracle Cloud Infrastructure’s other cost advantages, most Linux workload customers can expect to have 30-50 percent TCO savings versus both on-premise and other cloud vendors over five years. + +“Adding autonomous capabilities to the operating system layer, with future plans to expand beyond infrastructure software, goes straight after the OpEx challenges nearly all customers face today,” said Al Gillen, Group VP, Software Development and Open Source, IDC. + +“This capability effectively turns Oracle Linux into a service, freeing customers to focus their IT resources on application and user experience, where they can deliver true competitive differentiation,” he added. 
+ +-------------------------------------------------------------------------------- + +via: https://opensourceforu.com/2019/09/here-comes-oracle-autonomous-linux-worlds-first-autonomous-operating-system/ + +作者:[Longjam Dineshwori][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensourceforu.com/author/dineshwori-longjam/ +[b]: https://github.com/lujun9972 +[1]: https://i2.wp.com/opensourceforu.com/wp-content/uploads/2016/09/Oracle-cloud.jpg?resize=350%2C197&ssl=1 From 8e33bf187618bb901c3043e576c1650dbe607543 Mon Sep 17 00:00:00 2001 From: DarkSun Date: Wed, 18 Sep 2019 12:03:46 +0800 Subject: [PATCH 154/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190917=20What?= =?UTF-8?q?=E2=80=99s=20Good=20About=20TensorFlow=202.0=3F?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/tech/20190917 What-s Good About TensorFlow 2.0.md --- ...190917 What-s Good About TensorFlow 2.0.md | 328 ++++++++++++++++++ 1 file changed, 328 insertions(+) create mode 100644 sources/tech/20190917 What-s Good About TensorFlow 2.0.md diff --git a/sources/tech/20190917 What-s Good About TensorFlow 2.0.md b/sources/tech/20190917 What-s Good About TensorFlow 2.0.md new file mode 100644 index 0000000000..a00306d6c5 --- /dev/null +++ b/sources/tech/20190917 What-s Good About TensorFlow 2.0.md @@ -0,0 +1,328 @@ +[#]: collector: (lujun9972) +[#]: translator: ( ) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (What’s Good About TensorFlow 2.0?) +[#]: via: (https://opensourceforu.com/2019/09/whats-good-about-tensorflow-2-0/) +[#]: author: (Siva Rama Krishna Reddy B https://opensourceforu.com/author/siva-krishna/) + +What’s Good About TensorFlow 2.0? +====== + +[![][1]][2] + +_Version 2.0 of TensorFlow is focused on simplicity and ease of use. It has been strengthened with updates like eager execution and intuitive higher level APIs accompanied by flexible model building. It is platform agnostic, and makes APIs more consistent, while removing those that are redundant._ + +Machine learning and artificial intelligence are experiencing a revolution these days, primarily due to three major factors. The first is the increased computing power available within small form factors such as GPUs, NPUs and TPUs. The second is the breakthrough in machine learning algorithms. State-of-art algorithms and hence models are available to infer faster. Finally, huge amounts of labelled data is essential for deep learning models to perform better, and this is now available. + +TensorFlow is an open source AI framework from Google which arms researchers and developers with the right tools to build novel models. It was made open source in 2015 and, in the past few years, has evolved with various enhancements covering operator support, programming languages, hardware support, data sets, official models, and distributed training and deployment strategies. + +TensorFlow 2.0 was released recently at the TensorFlow Developer Summit. It has major changes across the stack, some of which will be discussed from the developers’ point of view. + +TensorFlow 2.0 is primarily focused on the ease-of-use, power and scalability aspects. Ease is ensured in terms of simplified APIs, Keras being the main high level API interface; eager execution is available by default. 
Version 2.0 is powerful in the sense of being flexible and running much faster than earlier, with more optimisation. Finally, it is more scalable since it can be deployed on high-end distributed environments as well as on small edge devices. + +This new release streamlines the various components involved, from data preparation all the way up to deployment on various targets. High speed data processing pipelines are offered by tf.data, high level APIs are offered by tf.keras, and there are simplified APIs to access various distribution strategies on targets like the CPU, GPU and TPU. TensorFlow 2.0 offers a unique packaging format called SavedModel that can be deployed over the cloud through a TensorFlow Serving. Edge devices can be deployed through TensorFlow Lite, and Web applications through the newly introduced TensorFlow.js and various other language bindings that are also available. + +![Figure 1: The evolution of TensorFlow][3] + +TensorFlow.js was announced at the developer summit with off-the-shelf pretrained models for the browser, node, desktop and mobile native applications. The inclusion of Swift was also announced. Looking at some of the performance improvements since last year, the latest release claims a training speedup of 1.8x on NVIDIA Tesla V100, a 1.6x training speedup on Google Cloud TPUv2 and a 3.3.x inference speedup on Intel Skylake. + +**Upgrade to 2.0** +The new release offers a utility _tf_upgrade_v2_ to convert a 1.x Python application script to a 2.0 compatible script. It does most of the job in converting the 1.x deprecated API to a newer compatibility API. An example of the same can be seen below: + +``` +test-pc:~$cat test-infer-v1.py + +# Tensorflow imports +import tensorflow as tf + +save_path = ‘checkpoints/dev’ +with tf.gfile.FastGFile(“./trained-graph.pb”, ‘rb’) as f: +graph_def = tf.GraphDef() +graph_def.ParseFromString(f.read()) +tf.import_graph_def(graph_def, name=’’) + +with tf.Session(graph=tf.get_default_graph()) as sess: +input_data = sess.graph.get_tensor_by_name(“DecodeJPGInput:0”) +output_data = sess.graph.get_tensor_by_name(“final_result:0”) + +image = ‘elephant-299.jpg’ +if not tf.gfile.Exists(image): +tf.logging.fatal(‘File does not exist %s’, image) +image_data = tf.gfile.FastGFile(image, ‘rb’).read() + +result = sess.run(output_data, {‘DecodeJPGInput:0’: image_data}) +print(result) + +test-pc:~$ tf_upgrade_v2 --infile test-infer-v1.py --outfile test-infer-v2.py + +INFO line 5:5: Renamed ‘tf.gfile.FastGFile’ to ‘tf.compat.v1.gfile.FastGFile’ +INFO line 6:16: Renamed ‘tf.GraphDef’ to ‘tf.compat.v1.GraphDef’ +INFO line 10:9: Renamed ‘tf.Session’ to ‘tf.compat.v1.Session’ +INFO line 10:26: Renamed ‘tf.get_default_graph’ to ‘tf.compat.v1.get_default_graph’ +INFO line 15:15: Renamed ‘tf.gfile.Exists’ to ‘tf.io.gfile.exists’ +INFO line 16:12: Renamed ‘tf.logging.fatal’ to ‘tf.compat.v1.logging.fatal’ +INFO line 17:21: Renamed ‘tf.gfile.FastGFile’ to ‘tf.compat.v1.gfile.FastGFile’ +TensorFlow 2.0 Upgrade Script +----------------------------- +Converted 1 files +Detected 0 issues that require attention +------------------------------------------------------------- +Make sure to read the detailed log ‘report.txt’ + +test-pc:~$ cat test-infer-v2.py + +# Tensorflow imports +import tensorflow as tf + +save_path = ‘checkpoints/dev’ +with tf.compat.v1.gfile.FastGFile(“./trained-graph.pb”, ‘rb’) as f: +graph_def = tf.compat.v1.GraphDef() +graph_def.ParseFromString(f.read()) +tf.import_graph_def(graph_def, name=’’) + +with 
tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph()) as sess:
+input_data = sess.graph.get_tensor_by_name(“DecodeJPGInput:0”)
+output_data = sess.graph.get_tensor_by_name(“final_result:0”)
+
+image = ‘elephant-299.jpg’
+if not tf.io.gfile.exists(image):
+tf.compat.v1.logging.fatal(‘File does not exist %s’, image)
+image_data = tf.compat.v1.gfile.FastGFile(image, ‘rb’).read()
+
+result = sess.run(output_data, {‘DecodeJPGInput:0’: image_data})
+print(result)
+```
+
+As we can see here, the _tf_upgrade_v2_ utility converts all the deprecated APIs to compatible v1 APIs, to make them work with 2.0.
+
+**Eager execution:** Eager execution allows real-time evaluation of Tensors without calling _session.run_. A major advantage of eager execution is that we can print the Tensor values at any time for debugging.
+With TensorFlow 1.x, the code is:
+
+```
+test-pc:~$python3
+Python 3.6.7 (default, Oct 22 2018, 11:32:17)
+[GCC 8.2.0] on linux
+Type "help", "copyright", "credits" or "license" for more information.
+>>> import tensorflow as tf
+>>> print(tf.__version__)
+1.14.0
+>>> tf.add(2,3)
+<tf.Tensor 'Add:0' shape=() dtype=int32>
+```
+
+TensorFlow 2.0, on the other hand, evaluates the result as soon as we call the API:
+
+```
+test-pc:~$python3
+Python 3.6.7 (default, Oct 22 2018, 11:32:17)
+[GCC 8.2.0] on linux
+Type "help", "copyright", "credits" or "license" for more information.
+>>> import tensorflow as tf
+>>> print(tf.__version__)
+2.0.0-beta1
+>>> tf.add(2,3)
+<tf.Tensor: id=2, shape=(), dtype=int32, numpy=5>
+```
+
+In v1.x, the resulting Tensor doesn't display the value and we need to execute the graph under a session to get the value, but in v2.0 the value is implicitly computed and available for debugging.
+
+**Keras**
+Keras (_tf.keras_) is now the official high level API. It has been enhanced with many compatible low level APIs. The redundancy across Keras and TensorFlow is removed, and most of the APIs are now available with Keras. The low level operators are still accessible through tf.raw_ops.
+We can now save a Keras model directly as a TensorFlow SavedModel, as shown below:
+
+```
+# Save Model to SavedModel
+saved_model_path = tf.keras.experimental.export_saved_model(model, '/path/to/model')
+
+# Load the SavedModel
+new_model = tf.keras.experimental.load_from_saved_model(saved_model_path)
+
+# new_model is now a keras Model object.
+new_model.summary()
+```
+
+Earlier, APIs related to various layers, optimisers, metrics and loss functions were distributed across Keras and native TensorFlow. Latest enhancements unify them as _tf.keras.optimizers.*, tf.keras.metrics.*, tf.keras.losses.* and tf.keras.layers.*_ (a short compile example tying these together follows the TensorBoard snippet below).
+The RNN layers are now much simpler compared to v1.x.
+With TensorFlow 1.x, the commands given are:
+
+```
+if tf.test.is_gpu_available():
+    model.add(tf.keras.layers.CuDNNLSTM(32))
+else:
+    model.add(tf.keras.layers.LSTM(32))
+```
+
+With TensorFlow 2.0, the commands given are:
+
+```
+# This will use the Cudnn kernel when the GPU is available.
+model.add(tf.keras.layers.LSTM(32))
+```
+
+TensorBoard integration is now a simple callback, as shown below:
+
+```
+tb_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir)
+
+model.fit(
+    x_train, y_train, epochs=5,
+    validation_data=[x_test, y_test],
+    callbacks=[tb_callback])
+```
+
+With this simple callback addition, TensorBoard is up in the browser to look at all the statistics in real-time.
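+
+Tying the unified namespaces together, here is a small sketch (assuming TensorFlow 2.0; the two-layer model, the input shape and the Adam learning rate are arbitrary illustrative choices, not from the original article) that builds and compiles a model using only the consolidated _tf.keras_ APIs:
+
+```
+import tensorflow as tf
+
+# Layers come from the unified tf.keras.layers namespace
+model = tf.keras.Sequential([
+    tf.keras.layers.Dense(64, activation='relu', input_shape=[10]),
+    tf.keras.layers.Dense(10, activation='softmax')])
+
+# Optimiser, loss and metric likewise come from their unified homes
+model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
+              loss=tf.keras.losses.SparseCategoricalCrossentropy(),
+              metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
+```
+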
+Keras offers unified distribution strategies, and a few lines of code can enable the required strategy as shown below: + +``` +strategy = tf.distribute.MirroredStrategy() + +with strategy.scope() +model = tf.keras.models.Sequential([ +tf.keras.layers.Dense(64, input_shape=[10]), +tf.keras.layers.Dense(64, activation=’relu’), +tf.keras.layers.Dense(10, activation=’softmax’)]) + +model.compile(optimizer=’adam’, +loss=’categorical_crossentropy’, +metrics=[‘accuracy’]) +``` + +As shown above, the model definition under the desired scope is all we need to apply the desired strategy. Very soon, there will be support for multi-node synchronous and TPU strategy, and later, for parameter server strategy. + +![Figure 2: Coral products with edge TPU][4] + +**TensorFlow function** +Function is a major upgrade that impacts the way we write TensorFlow applications. The new version introduces tf.function, which simplifies the applications and makes it very close to writing a normal Python application. +A sample _tf.function_ definition looks like what’s shown in the code snippet below. Here the _tf.function_ declaration makes the user define a function as a TensorFlow operator, and all optimisation is applied automatically. Also, the function is faster than eager execution. APIs like _tf.control_dependencies_, _tf.global_variable_initializer_, and _tf.cond, tf.while_loop_ are no longer needed with _tf.function_. The user defined functions are polymorphic by default, i.e., we may pass mixed type tensors. + +``` +test-pc:~$ cat tf-test.py +import tensorflow as tf + +print(tf.__version__) + +@tf.function +def add(a, b): +return (a+b) + +print(add(tf.ones([2,2]), tf.ones([2,2]))) + +test-pc:~$ python3 tf-test.py +2.0.0-beta1 +tf.Tensor( +[[2. 2.] +[2. 2.]], shape=(2, 2), dtype=float32) +``` + +Here is another example to demonstrate automatic control flows and Autograph in action. Autograph automatically converts the conditions, while it loops Python to TensorFlow operators. + +``` +test-pc:~$ cat tf-test-control.py +import tensorflow as tf + +print(tf.__version__) + +@tf.function +def f(x): +while tf.reduce_sum(x) > 1: +x = tf.tanh(x) +return x + +print(f(tf.random.uniform([10]))) + +test-pc:~$ python3 tf-test-control.py + +2.0.0-beta1 +tf.Tensor( +[0.10785562 0.11102211 0.11347286 0.11239681 0.03989326 0.10335539 +0.11030331 0.1135259 0.11357211 0.07324989], shape=(10,), dtype=float32) +``` + +We can see Autograph in action with the following API over the function. + +``` +print(tf.autograph.to_code(f)) # f is the function name +``` + +**TensorFlow Lite** +The latest advancements in edge devices add neural network accelerators. Google has released EdgeTPU, Intel has the edge inference platform Movidius, Huawei mobile devices have the Kirin based NPU, Qualcomm has come up with NPE SDK to accelerate on the Snapdragon chipsets using Hexagon power and, recently, Samsung released Exynos 9 with NPU. An edge device optimised framework is necessary to support these hardware ecosystems. + +Unlike TensorFlow, which is widely used in high power-consuming server infrastructure, edge devices are challenging in terms of reduced computing power, limited memory and battery constraints. TensorFlow Lite is aimed at bringing in TensorFlow models directly onto the edge with minimal effort. The TF Lite model format is different from TensorFlow. A TF Lite converter is available to convert a TensorFlow SavedBundle to a TF Lite model. 
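+
+A minimal sketch of that conversion path, assuming TensorFlow 2.0 (the directory and file names below are placeholders, not from the original article), uses the _tf.lite.TFLiteConverter_ API on an exported SavedModel:
+
+```
+import tensorflow as tf
+
+# Load a previously exported SavedModel directory into the converter
+converter = tf.lite.TFLiteConverter.from_saved_model('/path/to/saved_model')
+
+# Produce the TF Lite flatbuffer and write it to disk
+tflite_model = converter.convert()
+with open('model.tflite', 'wb') as f:
+    f.write(tflite_model)
+```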
+
+Though TensorFlow Lite is evolving, there are limitations too, such as in the number of operations supported, and unsupported semantics like control-flows and RNNs. In its early days, TF Lite used the TOCO converter, which posed a few challenges for the developer community. A brand new 2.0 converter is planned to be released soon. There are claims that using TF Lite results in huge improvements across the CPU, GPU and TPU.
+
+TF Lite introduces delegates to accelerate parts of the graph on an accelerator. We may choose a specific delegate for a specific sub-graph, if needed.
+
+```
+#import "tensorflow/lite/delegates/gpu/metal_delegate.h"
+
+// Initialize interpreter with GPU delegate
+std::unique_ptr<Interpreter> interpreter;
+InterpreterBuilder(*model, resolver)(&interpreter);
+auto* delegate = NewGpuDelegate(nullptr); // default config
+if (interpreter->ModifyGraphWithDelegate(delegate) != kTfLiteOk) return false;
+
+// Run inference
+while (true) {
+    WriteToInputTensor(interpreter->typed_input_tensor<float>(0));
+    if (interpreter->Invoke() != kTfLiteOk) return false;
+    ReadFromOutputTensor(interpreter->typed_output_tensor<float>(0));
+}
+
+// Clean up
+interpreter = nullptr;
+DeleteGpuDelegate(delegate);
+```
+
+As shown above, we can choose the GPU delegate, and modify the graph with the respective kernel's runtime. TF Lite is going to support the Android NNAPI delegate, in order to support all the hardware that is supported by NNAPI. For edge devices, CPU optimisation is also important, as not all edge devices are equipped with accelerators; hence, there is a plan to support further optimisations for ARM and x86.
+
+Optimisations based on quantisation and pruning are evolving to reduce the size and processing demands of models. Quantisation generally can reduce model size by 4x (i.e., 32-bit to 8-bit). Models with more convolution layers may get faster by 10 to 50 per cent on the CPU. Fully connected and RNN layers may speed up operation by 3x.
+
+TF Lite now supports post-training quantisation, which greatly reduces the size along with the compute demands. TensorFlow 2.0 offers simplified APIs to build models with quantisation and pruning optimisations.
+A normal dense layer without quantisation looks like what follows:
+
+```
+tf.keras.layers.Dense(512, activation='relu')
+```
+
+Whereas a quantised dense layer looks like what's shown below:
+
+```
+quantize.Quantize(tf.keras.layers.Dense(512, activation='relu'))
+```
+
+Pruning is a technique used to drop connections that are ineffective. In general, 'dense' layers contain lots of connections which don't influence the output. Such connections can be dropped by making the weight zero. Tensors with lots of zeros may be represented as 'sparse' and can be compressed, and the number of operations in a sparse tensor is smaller.
+Building a layer with _prune_ is as simple as using the following command:
+
+```
+prune.Prune(tf.keras.layers.Dense(512, activation='relu'))
+```
+
+In a pipeline, there is Keras based quantised training and Keras based connection pruning. These optimisations may push TF Lite further ahead of the competition with regard to other frameworks.
+
+**Coral**
+Coral is a new platform for creating products with on-device ML acceleration. The first product here features Google's Edge TPU in SBC and USB form factors. TensorFlow Lite is officially supported on this platform, with the salient features being very fast inference speed, privacy and no reliance on network connection.
+ +More details related to hardware specifications, pricing, and a getting started guide can be found at __. + +With these advances as well as a wider ecosystem, it’s very evident that TensorFlow may become the leading framework for artificial intelligence and machine learning, similar to how Android evolved in the mobile world. + +-------------------------------------------------------------------------------- + +via: https://opensourceforu.com/2019/09/whats-good-about-tensorflow-2-0/ + +作者:[Siva Rama Krishna Reddy B][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensourceforu.com/author/siva-krishna/ +[b]: https://github.com/lujun9972 +[1]: https://i2.wp.com/opensourceforu.com/wp-content/uploads/2018/09/ML-with-tensorflow.jpg?resize=696%2C328&ssl=1 (ML with tensorflow) +[2]: https://i2.wp.com/opensourceforu.com/wp-content/uploads/2018/09/ML-with-tensorflow.jpg?fit=1200%2C565&ssl=1 +[3]: https://i0.wp.com/opensourceforu.com/wp-content/uploads/2019/09/Figure-1-The-evolution-of-TensorFlow.jpg?resize=350%2C117&ssl=1 +[4]: https://i1.wp.com/opensourceforu.com/wp-content/uploads/2019/09/Figure-2-Coral-products-with-edge-TPU.jpg?resize=350%2C198&ssl=1 From e6f38861178f50055ab4be982f8becd0275a873f Mon Sep 17 00:00:00 2001 From: DarkSun Date: Wed, 18 Sep 2019 12:05:45 +0800 Subject: [PATCH 155/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190916=20How=20?= =?UTF-8?q?Cloud=20Billing=20Can=20Benefit=20Your=20Business?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/talk/20190916 How Cloud Billing Can Benefit Your Business.md --- ...Cloud Billing Can Benefit Your Business.md | 63 +++++++++++++++++++ 1 file changed, 63 insertions(+) create mode 100644 sources/talk/20190916 How Cloud Billing Can Benefit Your Business.md diff --git a/sources/talk/20190916 How Cloud Billing Can Benefit Your Business.md b/sources/talk/20190916 How Cloud Billing Can Benefit Your Business.md new file mode 100644 index 0000000000..9e57b5b019 --- /dev/null +++ b/sources/talk/20190916 How Cloud Billing Can Benefit Your Business.md @@ -0,0 +1,63 @@ +[#]: collector: (lujun9972) +[#]: translator: ( ) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (How Cloud Billing Can Benefit Your Business) +[#]: via: (https://opensourceforu.com/2019/09/how-cloud-billing-can-benefit-your-business/) +[#]: author: (Aashima Sharma https://opensourceforu.com/author/aashima-sharma/) + +How Cloud Billing Can Benefit Your Business +====== + +[![][1]][2] + +_The Digital Age has led to many changes in the way we do business, and also in the way that businesses are run on a day to day basis. Computers are now ubiquitous – even in the smallest of businesses – and are utilised for a wide variety of purposes. When it comes to making things easier, a particularly efficient use of computer technology and software is cloud billing. Let us explain in a little more detail what it’s all about._ + +**What Cloud Billing?** +The idea of [_cloud billing_][3] is relatively simple. Cloud billing is designed to enable the user to perform a number of functions essential to the business – and it can be used in any business – without taking up unnecessary space on computer services. 
For example, you can use a cloud billing solution to ensure that invoices are sent at a set time, subscriptions are efficiently managed and discounts are applied, along with many other regular or one-off functions.
+
+![Figure 1: Tridens Monetization][4]
+
+**How a Cloud Billing Platform Can Benefit Your Business**
+Let's have a quick look at some of the major benefits of a cloud billing platform:
+
+ * Lower costs on IT services: a cloud billing system, like any cloud-based IT solution, does not need to take up any space on your server. Nor does it require you to purchase software or hardware. You don't need to engage the services of an IT professional; it's all done via the service provider.
+ * Reduced operating costs: there is little in the way of operating costs when you use a cloud billing solution, as there is no extra equipment to maintain.
+ * Faster product to market: if you are rolling out new products or services on a regular basis, a cloud billing solution means you can cut out a lot of the time that would normally be taken up by the process.
+ * Grow with the business: if in the future you need to expand or add to your cloud billing solution, you can, and without any additional equipment or software. You pay for what you need and use, and no more.
+ * Quick start: once you decide you want to use cloud billing, there is very little to do before you are ready to go. No software to install, no computer servers or network; it's all in the cloud.
+
+
+
+There are many more benefits to engaging the services of a cloud billing solutions provider, and finding the right one – a service provider you can trust and with whom you can build a reliable working relationship – is the important part of the deal.
+
+**Where to Get the Best Cloud Billing Platform?**
+The market for cloud-based billing services is one that is hotly contested, and there are many potential options for your business. There are many cloud billing platforms available; you can try [_Tridens Monetization_][3] or any of the open source billing software solutions that may be suitable for your business.
+
+Where the best cloud billing solutions come to the fore is in providing real-time capability and flexibility for various areas of industry and commerce. They are ideal, for example, for use in media and communications, in education and utilities, and in the healthcare industry as well as many others. No matter the size, type or area of your business, a cloud-based billing platform offers not just a cost-saving opportunity; it will also help accelerate growth and engender brand loyalty, thanks to more efficient service.
+
+Use a cloud billing solution for recurring billing purposes, for invoicing and revenue management, or for producing a range of reports and real-time analysis of your business performance, and all without the need for expensive hardware and software, or costly in-house IT experts. The best such solutions can be connected to many payment gateways, can handle your business tax requirements, and will reduce the workload on your team, so that each member can dedicate their time to what they do best.
+
+**Summary**
+Put simply, your business needs a cloud billing platform if you want to grow and improve your efficiency – both for you and your clients – without the need for great expenditure and restructuring. We recommend you check it out further, and talk to the experts in more detail about your cloud billing requirements.
+ +**By: Alivia Mallan** + +-------------------------------------------------------------------------------- + +via: https://opensourceforu.com/2019/09/how-cloud-billing-can-benefit-your-business/ + +作者:[Aashima Sharma][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensourceforu.com/author/aashima-sharma/ +[b]: https://github.com/lujun9972 +[1]: https://i0.wp.com/opensourceforu.com/wp-content/uploads/2016/01/Young-man-with-the-head-in-the-clouds-thinking_15259762_xl.jpg?resize=579%2C606&ssl=1 (Young man with the head in the clouds thinking_15259762_xl) +[2]: https://i0.wp.com/opensourceforu.com/wp-content/uploads/2016/01/Young-man-with-the-head-in-the-clouds-thinking_15259762_xl.jpg?fit=579%2C606&ssl=1 +[3]: https://tridenstechnology.com/monetization/ +[4]: https://i1.wp.com/opensourceforu.com/wp-content/uploads/2019/09/diagram-monetization-infinite-v2-1.png?resize=350%2C196&ssl=1 From 577e4a0450c46387a798a90780c8da035d59a935 Mon Sep 17 00:00:00 2001 From: DarkSun Date: Wed, 18 Sep 2019 12:06:04 +0800 Subject: [PATCH 156/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190916=20The=20?= =?UTF-8?q?Emacs=20Series=20Exploring=20ts.el?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/tech/20190916 The Emacs Series Exploring ts.el.md --- ...190916 The Emacs Series Exploring ts.el.md | 366 ++++++++++++++++++ 1 file changed, 366 insertions(+) create mode 100644 sources/tech/20190916 The Emacs Series Exploring ts.el.md diff --git a/sources/tech/20190916 The Emacs Series Exploring ts.el.md b/sources/tech/20190916 The Emacs Series Exploring ts.el.md new file mode 100644 index 0000000000..06e724d4ab --- /dev/null +++ b/sources/tech/20190916 The Emacs Series Exploring ts.el.md @@ -0,0 +1,366 @@ +[#]: collector: (lujun9972) +[#]: translator: ( ) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (The Emacs Series Exploring ts.el) +[#]: via: (https://opensourceforu.com/2019/09/the-emacs-series-exploring-ts-el/) +[#]: author: (Shakthi Kannan https://opensourceforu.com/author/shakthi-kannan/) + +The Emacs Series Exploring ts.el +====== + +[![][1]][2] + +_In this article, the author reviews the ts.el date and time library for Emacs. Written by Adam Porter, ts.el is still in the development phase and has been released under the GNU General Public License v3.0._ + +The ts.el package uses intuitive names for date and time functions. It internally uses UNIX timestamps and depends on both the ‘dash’ and ‘s’ Emacs libraries. The parts of the date are computed lazily and also cached for performance. The source code is available at __. In this article, we will explore the API functions available from the ts.el library. + +**Installation** +The package does not have a tagged release yet; hence, you should download it from and add it to your Emacs load path to use it. You should also have the ‘dash’ and ‘s’ libraries installed and loaded in your Emacs environment. You can then load the library using the following command: + +``` +(require ‘ts) +``` + +**Usage** +Let us explore the various functions available to retrieve parts of the date from the ts.el library. When the examples were executed, the date was ‘Friday July 5, 2019’. 
The ts-dow function can be used to obtain the day of the week, as shown below: + +``` +(ts-dow (ts-now)) +5 +``` + +_ts-now_ is a Lisp construct that returns a timestamp set. It is defined in ts.el as follows: + +``` +(defsubst ts-now () +“Return `ts’ struct set to now.” +(make-ts :unix (float-time))) +``` + +The day of the week starts from Monday (1) and hence Friday has the value of 5. An abbreviated form of the day can be fetched using the _ts-day-abbr_ function. In the following example, ‘Friday’ is shortened to‘Fri’. + +``` +(ts-day-abbr (ts-now)) +"Fri" +``` + +The day of the week in full form can be obtained using the _ts-day-name_ function, as shown below: + +``` +(ts-day-name (ts-now)) +“Friday” +``` + +The twelve months from January to December are numbered from 1 to 12 respectively. Hence, for the month of July, the index number is 7. This numeric value for the month can be retrieved using the ‘ts-month’ API. For example: + +``` +(ts-month (ts-now)) +7 +``` + +If you want a three-character abbreviation for the month’s name, you can use the ts-month-abbr function as shown below: + +``` +(ts-month-abbr (ts-now)) +“Jul” +``` + +The _ts-month-name_ function can be used to obtain the full name of the month. For example: + +``` +(ts-month-name (ts-now)) +“July” +``` + +The day of the week starts from Monday and has an index 1, while Sunday has an index 7. If you need the numeric value for the day of the week, use the ts-day function as indicated below: + +``` +(ts-day (ts-now)) +5 +``` + +The _ts-year_ API returns the year. In our example, it is ‘2019’ as shown below: + +``` +(ts-year (ts-now)) +2019 +``` + +The hour, minute and seconds can be retrieved using the _ts-hour, ts-minute_ and _ts-second_ functions, respectively. Examples of these functions are given below: + +``` +(ts-hour (ts-now)) +18 + +(ts-minute (ts-now)) +19 + +(ts-second (ts-now)) +5 +``` + +The UNIX timestamps are in UTC, by default. The _ts-tz-offset_ function returns the offset from UTC. The Indian Standard Time (IST) is five-and-a-half-hours ahead of UTC and hence this function returns ‘+0530’ as shown below: + +``` +(ts-tz-offset (ts-now)) +"+0530" +``` + +The _ts-tz-abbr_ API returns an abbreviated form of the time zone. In our case, ‘IST’ is returned for the Indian Standard Time. + +``` +(ts-tz-abbr (ts-now)) +"IST" +``` + +The _ts-adjustf_ function applies the time adjustments passed to the timestamp and the _ts-format_ function formats the timestamp as a string. A couple of examples are given below: + +``` +(let ((ts (ts-now))) +(ts-adjustf ts ‘day 1) +(ts-format nil ts)) +“2019-07-06 18:23:24 +0530” + +(let ((ts (ts-now))) +(ts-adjustf ts ‘year 1 ‘month 3 ‘day 5) +(ts-format nil ts)) +“2020-10-10 18:24:07 +0530” +``` + +You can use the _ts-dec_ function to decrement the timestamp. For example: + +``` +(ts-day-name (ts-dec ‘day 1 (ts-now))) +“Thursday” +``` + +The threading macro syntax can also be used with the ts-dec function as shown below: + +``` +(->> (ts-now) (ts-dec ‘day 2) ts-day-name) +“Wednesday” +``` + +The UNIX epoch is the number of seconds that have elapsed since January 1, 1970 (midnight UTC/GMT). The ts-unix function returns an epoch UNIX timestamp as illustrated below: + +``` +(ts-unix (ts-adjust ‘day -2 (ts-now))) +1562158551.0 ;; Wednesday, July 3, 2019 6:25:51 PM GMT+05:30 +``` + +An hour has 3600 seconds and a day has 86400 seconds. 
You can compare epoch timestamps as shown in the following example: + +``` +(/ (- (ts-unix (ts-now)) +(ts-unix (ts-adjust ‘day -4 (ts-now)))) +86400) +4 +``` + +The _ts-difference_ function returns the difference between two timestamps, while the _ts-human-duration_ function returns the property list (_plist_) values of years, days, hours, minutes and seconds. For example: + +``` +(ts-human-duration +(ts-difference (ts-now) +(ts-dec ‘day 3 (ts-now)))) +(:years 0 :days 3 :hours 0 :minutes 0 :seconds 0) +``` + +A number of aliases are available for the hour, minute, second, year, month and day format string constructors. A few examples are given below: + +``` +(ts-hour (ts-now)) +18 +(ts-H (ts-now)) +18 + + +(ts-minute (ts-now)) +46 +(ts-min (ts-now)) +46 +(ts-M (ts-now)) +46 + +(ts-second (ts-now)) +16 +(ts-sec (ts-now)) +16 +(ts-S (ts-now)) +16 + +(ts-year (ts-now)) +2019 +(ts-Y (ts-now)) +2019 + +(ts-month (ts-now)) +7 +(ts-m (ts-now)) +7 + +(ts-day (ts-now)) +5 +(ts-d (ts-now)) +5 +``` + +You can parse a string into a timestamp object using the ts-parse function. For example: + +``` +(ts-format nil (ts-parse “Fri Dec 6 2019 18:48:00”)) +“2019-12-06 18:48:00 +0530” +``` + +You can also format the difference between two timestamps in a human readable format as shown in the following example: + +``` +(ts-human-format-duration +(ts-difference (ts-now) +(ts-adjust ‘day -1 ‘hour -3 ‘minute -2 ‘second -4 (ts-now)))) +“1 days, 3 hours, 2 minutes, 4 seconds” +``` + +The timestamp comparator operations are also defined in ts.el. The ts< function compares if one epoch UNIX timestamp is less than the other. Its definition is as follows: + +``` +(defun ts< (a b) +“Return non-nil if timestamp A is less than timestamp B.” +(< (ts-unix a) (ts-unix b))) +``` + +In the example given below, the current timestamp is not less than the previous day and hence it returns nil. + +``` +(ts< (ts-now) (ts-adjust ‘day -1 (ts-now))) +nil +``` + +Similarly, we have other comparator functions like ts>, ts=, ts>= and ts<=. A few examples of these function use cases are given below: + +``` +(ts> (ts-now) (ts-adjust ‘day -1 (ts-now))) +t + +(ts= (ts-now) (ts-now)) +nil + +(ts>= (ts-now) (ts-adjust ‘day -1 (ts-now))) +t + +(ts<= (ts-now) (ts-adjust ‘day -2 (ts-now))) +nil +``` + +**Benchmarking** +A few performance tests can be conducted to compare the Emacs internal time values versus the UNIX timestamps. The benchmarking tests can be executed by including the bench-multi macro and bench-multi-process-results function available from __ in your Emacs environment. +You will also need to load the dash-functional library to use the -on function. + +``` +(require ‘dash-functional) +``` + +The following tests have been executed on an Intel(R) Core(TM) i7-3740QM CPU at 2.70GHz with eight cores, 16GB RAM and running Ubuntu 18.04 LTS. + +**Formatting** +The first benchmarking exercise is to compare the formatting of the UNIX timestamp and the Emacs internal time. The Emacs Lisp code to run the test is shown below: + +``` +(let ((format “%Y-%m-%d %H:%M:%S”)) +(bench-multi :times 100000 +:forms ((“Unix timestamp” (format-time-string format 1544311232)) +(“Internal time” (format-time-string format ‘(23564 20962 864324 108000)))))) +``` + +The output appears as an s-expression: + +``` +((“Form” “x faster than next” “Total runtime” “# of GCs” “Total GC runtime”) +hline + +(“Internal time” “1.11” “2.626460” 13 “0.838733”) +(“Unix timestamp” “slowest” “2.921408” 13 “0.920814”)) +``` + +The abbreviation ‘GC’ refers to garbage collection. 
A tabular representation of the above results is given below: + +[![][3]][4] + +We observe that formatting the internal time is slightly faster. + +**Getting the current time** +The functions to obtain the current time can be compared using the following test: + +``` +(bench-multi :times 100000 +:forms ((“Unix timestamp” (float-time)) +(“Internal time” (current-time)))) +``` + +The results are shown below: + +[![][5]][6] + +We observe that using the Unix timestamp is faster. + +**Parsing** +The third benchmarking exercise is to compare parsing functions on a date timestamp string. The corresponding test code is given below: + +``` +(let* ((s “Wed 10 Jul 2019”)) +(bench-multi :times 100000 +:forms ((“ts-parse” (ts-parse s)) +(“ts-parse ts-unix” (ts-unix (ts-parse s)))))) +``` + +The _ts-parse_ function is slightly faster than the ts-parse _ts-unix_ function, as seen in the results: + +[![][7]][8] + +**A new timestamp versus blanking fields** +The last performance comparison is between creating a new timestamp and blanking the fields. The relevant test code is as follows: + +``` +(let* ((a (ts-now))) +(bench-multi :times 100000 +:ensure-equal t +:forms ((“New” (let ((ts (copy-ts a))) +(setq ts (ts-fill ts)) +(make-ts :unix (ts-unix ts)))) +(“Blanking” (let ((ts (copy-ts a))) +(setq ts (ts-fill ts)) +(ts-reset ts)))))) +``` + +The output of the benchmarking exercise is given below: + +[![][9]][10] + +We observe that creating a new timestamp is slightly faster than blanking the fields. +You are encouraged to read the ts.el README and notes.org from the GitHub repository __ for more information. + +-------------------------------------------------------------------------------- + +via: https://opensourceforu.com/2019/09/the-emacs-series-exploring-ts-el/ + +作者:[Shakthi Kannan][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensourceforu.com/author/shakthi-kannan/ +[b]: https://github.com/lujun9972 +[1]: https://i1.wp.com/opensourceforu.com/wp-content/uploads/2019/09/GPL-emacs-1.jpg?resize=696%2C435&ssl=1 (GPL emacs) +[2]: https://i1.wp.com/opensourceforu.com/wp-content/uploads/2019/09/GPL-emacs-1.jpg?fit=800%2C500&ssl=1 +[3]: https://i2.wp.com/opensourceforu.com/wp-content/uploads/2019/09/1-1.png?resize=350%2C151&ssl=1 +[4]: https://i2.wp.com/opensourceforu.com/wp-content/uploads/2019/09/1-1.png?ssl=1 +[5]: https://i1.wp.com/opensourceforu.com/wp-content/uploads/2019/09/2-1.png?resize=350%2C191&ssl=1 +[6]: https://i1.wp.com/opensourceforu.com/wp-content/uploads/2019/09/2-1.png?ssl=1 +[7]: https://i2.wp.com/opensourceforu.com/wp-content/uploads/2019/09/3.png?resize=350%2C144&ssl=1 +[8]: https://i2.wp.com/opensourceforu.com/wp-content/uploads/2019/09/3.png?ssl=1 +[9]: https://i0.wp.com/opensourceforu.com/wp-content/uploads/2019/09/4.png?resize=350%2C149&ssl=1 +[10]: https://i0.wp.com/opensourceforu.com/wp-content/uploads/2019/09/4.png?ssl=1 From ddfd5b64a2a8902c4f92b7d1458b108a36592c09 Mon Sep 17 00:00:00 2001 From: DarkSun Date: Wed, 18 Sep 2019 12:07:01 +0800 Subject: [PATCH 157/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190913=20Why=20?= =?UTF-8?q?the=20Blockchain=20is=20Your=20Best=20Bet=20for=20Cyber=20Secur?= =?UTF-8?q?ity?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/talk/20190913 Why the Blockchain is Your Best Bet for Cyber Security.md --- ...ain is Your Best Bet for Cyber 
Security.md | 105 ++++++++++++++++++
 1 file changed, 105 insertions(+)
 create mode 100644 sources/talk/20190913 Why the Blockchain is Your Best Bet for Cyber Security.md

diff --git a/sources/talk/20190913 Why the Blockchain is Your Best Bet for Cyber Security.md b/sources/talk/20190913 Why the Blockchain is Your Best Bet for Cyber Security.md
new file mode 100644
index 0000000000..5b980b3046
--- /dev/null
+++ b/sources/talk/20190913 Why the Blockchain is Your Best Bet for Cyber Security.md
@@ -0,0 +1,105 @@
+[#]: collector: (lujun9972)
+[#]: translator: ( )
+[#]: reviewer: ( )
+[#]: publisher: ( )
+[#]: url: ( )
+[#]: subject: (Why the Blockchain is Your Best Bet for Cyber Security)
+[#]: via: (https://opensourceforu.com/2019/09/why-the-blockchain-is-your-best-bet-for-cyber-security/)
+[#]: author: (Anna Aneena M G https://opensourceforu.com/author/anna-aneena/)
+
+Why the Blockchain is Your Best Bet for Cyber Security
+======
+
+[![][1]][2]
+
+_Blockchain is a tamper-proof, shared digital ledger that records the history of transactions between the peers in a peer-to-peer network. This article describes how blockchain technology can be used to protect data and the network from cyber-attacks._
+
+Cyber security is a set of technologies, processes and controls designed to protect systems, devices, data, networks and programs from cyber-attacks. It secures data from threats such as theft or misuse, and also safeguards the system from viruses.
+
+In today's world, cyber-attacks are major threats faced by each user. Most of us respond to the advertisements on various websites on the Internet, and when asked questions or for personal details, we answer without even thinking of the consequences. Sharing one's personal information is very risky, as one may lose whatever one has. In 2016, the search engine Yahoo! faced a major attack and around one billion accounts were compromised. The attackers were able to get the user names, passwords, phone numbers, and security questions and answers of e-mail users. On September 7, 2017, Equifax, one of the largest consumer credit reporting agencies in the world, faced a massive cyber security breach. It is believed that someone gained unauthorised access to the data held by this agency, from mid-May to July 2017. Around 145.5 million people felt threatened by the news as they had shared personal information like names, social security numbers, birthdays, addresses and driving license numbers with Equifax.
+
+Many people use weak or default passwords for their personal accounts on some Internet sites, which is very risky as these can be cracked, and their personal details can be compromised. This may be even more risky if people use the same password for all their sites, just for convenience. If the attackers crack that password, then it can be used for all the other sites, and all their personal details, including their credit card and bank details, may be harvested. In this digital era, cyber-attacks are a matter of real concern. Cyber criminals are greatly increasing in number and are attempting to steal financial data, personally identifiable information (PII) as well as the identities of common Internet users.
+
+Businesses, governments, and the private as well as public sectors are continuously fighting against such frauds, malicious bugs and so on. Even as the hackers are increasing their expertise, the ways to cope with their attacks are also improving very fast. One of these ways is the blockchain.
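+
+As a toy preview of the mechanism the next section describes (illustrative Python only; the helper names and the SHA-256 choice are this sketch's assumptions, not a production design), each block records the hash of its predecessor, so tampering with any historical entry is immediately detectable:
+
+```
+import hashlib
+import json
+
+def block_hash(data, prev_hash):
+    # Hash the block contents together with the previous block's hash
+    payload = json.dumps({'data': data, 'prev_hash': prev_hash}, sort_keys=True)
+    return hashlib.sha256(payload.encode()).hexdigest()
+
+def make_block(data, prev_hash):
+    return {'data': data, 'prev_hash': prev_hash, 'hash': block_hash(data, prev_hash)}
+
+def verify(chain):
+    # The genesis block must hash its own contents correctly
+    if chain and chain[0]['hash'] != block_hash(chain[0]['data'], chain[0]['prev_hash']):
+        return False
+    # Every later block must do the same and point at its predecessor
+    for prev, curr in zip(chain, chain[1:]):
+        if curr['hash'] != block_hash(curr['data'], curr['prev_hash']):
+            return False
+        if curr['prev_hash'] != prev['hash']:
+            return False
+    return True
+
+chain = [make_block('genesis', '0' * 64)]
+chain.append(make_block('Alice pays Bob 5', chain[-1]['hash']))
+chain.append(make_block('Bob pays Carol 2', chain[-1]['hash']))
+print(verify(chain))   # True
+chain[1]['data'] = 'Alice pays Mallory 500'
+print(verify(chain))   # False - the tampered block no longer matches its hash
+```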
+ +**Blockchain: A brief background** +Each block in a blockchain contains transaction data, a hash function and hash of the previous block. Blockchain is managed by a peer-to-peer (P2P) network. On the network, no central authority exists and all blocks are distributed among all the users in the network. Everyone in the network is responsible for verifying the data that is shared, and ensuring that no existing blocks are being altered and no false data is being added. Blockchain technology enables direct transaction between two individuals, without the involvement of a third party and hence provides transparency. When a transaction happens, the transaction information is shared amongst everyone in the blockchain network. These transactions are individually time stamped. When these transactions are put together in a block, they are time stamped again as a whole block. Blockchain can be used to prevent cyber-attacks in three ways – by being a trusted system, by being immutable and by network consensus. + +A blockchain based system runs on the concept of human trust. A blockchain network is built in such a way that it presumes any individual node could attack it at any time. The consensus protocol, like proof of work, ensures that even if this happens, the network completes its work as intended, regardless of human cheating or intervention. The blockchain allows one to secure stored data using various cryptographic properties such as digital signatures and hashing. As soon as the data enters a block in the blockchain, it cannot be tampered with and this property is called immutability. If anyone tries to tamper with the blockchain database, then the network consensus will recognise the fact and shut down the attempt. + +Blockchains are made up of nodes; these can be within one institution like a hospital, or can be all over the world on the computer of any citizen who wants to participate in the blockchain. For any decision to be made, the majority of the nodes need to come to a consensus. The blockchain has a democratic system instead of a central authoritarian figure. So if any one node is compromised due to malicious action, the rest of the nodes recognise the problem and do not execute the unacceptable activity. Though blockchain has a pretty incredible security feature, it is not used by everyone to store data. + +![Figure 1: Applications of blockchain][3] + +**Common use cases of blockchain in cyber security** +Mitigating DDoS attacks: A distributed denial-of-service attack is a cyber-attack; it involves multiple compromised computer systems that aim at a target and attack it, causing denial of service for users of the targeted resources. This causes the system to slow down and crash, hence denying services to legitimate users. There are some forms of DDoS software that are causing huge problems. One among them is Hide and Seek malware, which has the ability to act even after the system reboots and hence can cause the system to crash over and over again. Currently, the difficulty in handling DDoS attacks is due to the existing DNS (Domain Name System). A solution to this is the implementation of blockchain technology. It will decentralise the DNS, distributing the data to a greater number of nodes and making it impossible for the hackers to hack. + +**More secure DNS:** For hackers, DNS is an easy target. Hence DNS service providers like Twitter, PayPal, etc, can be brought down. 
Adding the blockchain to the DNS will enhance the security, because that one single target which can be compromised is removed. + +**Advanced confidentiality and data integrity:** Initially, blockchain had no particular access controls. But as it evolved, more confidentiality and access controls were added, ensuring that data as a whole or in part was not accessible to any wrong person or organisation. Private keys are generally used for signing documents and other purposes. Since these keys can be tampered with, they need to be protected. Blockchain replaces such secret keys with transparency. + +**Improved PKI:** PKI or Public Key Infrastructure is one of the most popular forms of public key cryptography which keeps the messaging apps, websites, emails and other forms of communications secure. The main issue with this cryptography is that most PKI implementations depend on trusted third party Certificate Authorities (CA). But these certificate authorities can easily be compromised by hackers and spoof user identities. When keys are published on the blockchain, the risk of false key generation is eliminated. Along with that, blockchain enables applications to verify the identity of the person you are communicating with. ‘Certain’ is the first implementation of blockchain based PKI. + +**The major roles of blockchain in cyber security** +**Eliminating the human factor from authentication:** Human intervention is eliminated from the process of authentication. With the help of blockchain technology, businesses are able to authenticate devices and users without the need for a password. Hence, blockchain avoids being a potential attack vector. + +**Decentralised storage:** Blockchain users’ data can be maintained on their computers in their network. This ensures that the chain won’t collapse. If someone other than the owner of a component of data (say, an attacker) attempts to destroy a block, the entire system checks each and every data block to identify the one that differs from the rest. If this block is identified or located by the system, it is recognised as false and is deleted from the chain. + +**Traceability:** All the transactions that are added to a private or public blockchain are time stamped and signed digitally. This means that companies can trace every transaction back to a particular time period. And they can also locate the corresponding party on the blockchain through their public address. + +**DDoS:** Blockchain transactions can be denied easily if the participating units are delayed from sending transactions. For example, the entire attendant infrastructure and the blockchain organisation can be crippled due to the DDoS attack on a set of entities or an entity. These kinds of attacks can introduce integrity risks to a blockchain. + +**Blockchain for cyber security** +One interesting use case is applying the strong integrity assurance feature of blockchain technology to strengthen the cyber security of many other technologies. For example, to ensure the integrity of software downloads like firmware updates, patches, installers, etc, blockchain can be used in the same way that we make use of MD5 hashes today. Our file download that we compare against the hash might be compromised on a vendor website and altered without our knowledge. With a higher level of confidence, we can make a comparison against what is permanently recorded in the blockchain. 
The use of blockchain technologies has great security potential, particularly in the world of cyber-physical systems (CPS) such as IoT, industrial controls, vehicles, robotics, etc. + +Summarising this, for cyber-physical systems the integrity of data is the key concern while the confidentiality in many cases is almost irrelevant. This is the key difference between cyber security for cyber-physical systems and cyber security for traditional enterprise IT systems. Blockchain technology is just what the doctor ordered to address the key cyber-physical systems’ security concerns. + +The key characteristics of a blockchain that establish trust are listed below. + + * _**Identification and authentication:**_ Access is granted via cryptographic keys and access rules. + * _**Transaction rules:**_ At every node standard rules are applied to every transaction. + * _**Transaction concatenation:**_ Every transaction is linked to its previous transaction. + * _**Consensus mechanism:**_ In order to agree upon the transaction integrity, mathematical puzzles are solved for all nodes. + * _**Distributed ledger:**_ There are standards for listing transactions on every node. + * _**Permissioned and unpermissioned:**_ Ability to participate in a blockchain can be open or pre-approved. + + + +**Is blockchain secure?** +Blockchain stores data using sophisticated and innovative software rules that are extremely difficult for attackers to manipulate. The best example is Bitcoin. In Bitcoin’s blockchain, the shared data is the history of every transaction made. Information is stored in multiple copies on a network of computers called nodes. Each time someone submits a transaction to the ledger, the node checks to make sure the transaction is valid or not. A subset of the package validates the transaction into blocks and adds them to the previous chain. + +The blockchain offers a higher level of security to every individual user. This is because it removes the need for easily compromised and weak online identities and passwords. + +**How does a blockchain protect data?** +Instead of uploading data to a cloud server or storing it in a single location, a blockchain breaks everything into several small nodes. A blockchain protects data because: + + * It is decentralised. + * It offers encryption and validation. + * It can be private or public. + * It is virtually impossible to hack. + * It offers quality assurance. + * It ensures fast, cheap and secure transfer of funds across the globe. + * It is well known for its traceability. + * Through it, transactions become more transparent. + + + +**Cyber security is a priority, not an afterthought** +It seems like in the digital age, comfort and convenience have overtaken things like privacy and security in very unfortunate ways. The handing over of personal information to companies like Facebook is a personal choice; however, no one wants to see information leaked to third parties without consent. The blockchain is all about security. It has provided us a simple, effective and affordable way of ensuring that our cyber security needs are not only met, but also exceeded. We need to understand that the technologies we use to improve our lives can also be used to harm us. That is the reality of the era we are living in, one where most of our personal data is on the cloud, on our mobile device, or on our computer. Because of that, it is vital to look at online safety and cyber security as priorities and not afterthoughts. 
The blockchain can assist us in turning that thought into reality, and allow us to build a future where online threats are kept to a minimum. + +-------------------------------------------------------------------------------- + +via: https://opensourceforu.com/2019/09/why-the-blockchain-is-your-best-bet-for-cyber-security/ + +作者:[Anna Aneena M G][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensourceforu.com/author/anna-aneena/ +[b]: https://github.com/lujun9972 +[1]: https://i1.wp.com/opensourceforu.com/wp-content/uploads/2019/09/1.png?resize=615%2C434&ssl=1 (1) +[2]: https://i1.wp.com/opensourceforu.com/wp-content/uploads/2019/09/1.png?fit=615%2C434&ssl=1 +[3]: https://i2.wp.com/opensourceforu.com/wp-content/uploads/2019/09/2.png?resize=350%2C219&ssl=1 From e30ebc3929662e15bb90373432e84f767b853cbe Mon Sep 17 00:00:00 2001 From: DarkSun Date: Wed, 18 Sep 2019 12:23:51 +0800 Subject: [PATCH 158/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020180720=20Buildi?= =?UTF-8?q?ng=20a=20Messenger=20App:=20Conversation=20Page?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/tech/20180720 Building a Messenger App- Conversation Page.md --- ...ding a Messenger App- Conversation Page.md | 269 ++++++++++++++++++ 1 file changed, 269 insertions(+) create mode 100644 sources/tech/20180720 Building a Messenger App- Conversation Page.md diff --git a/sources/tech/20180720 Building a Messenger App- Conversation Page.md b/sources/tech/20180720 Building a Messenger App- Conversation Page.md new file mode 100644 index 0000000000..c721b48161 --- /dev/null +++ b/sources/tech/20180720 Building a Messenger App- Conversation Page.md @@ -0,0 +1,269 @@ +[#]: collector: (lujun9972) +[#]: translator: ( ) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (Building a Messenger App: Conversation Page) +[#]: via: (https://nicolasparada.netlify.com/posts/go-messenger-conversation-page/) +[#]: author: (Nicolás Parada https://nicolasparada.netlify.com/) + +Building a Messenger App: Conversation Page +====== + +This post is the 9th and last in a series: + + * [Part 1: Schema][1] + * [Part 2: OAuth][2] + * [Part 3: Conversations][3] + * [Part 4: Messages][4] + * [Part 5: Realtime Messages][5] + * [Part 6: Development Login][6] + * [Part 7: Access Page][7] + * [Part 8: Home Page][8] + + + +In this post we’ll code the conversation page. This page is the chat between the two users. At the top we’ll show info about the other participant, below, a list of the latest messages and a message form at the bottom. + +### Chat heading + +![chat heading screenshot][9] + +Let’s start by creating the file `static/pages/conversation-page.js` with the following content: + +``` +import http from '../http.js' +import { navigate } from '../router.js' +import { avatar, escapeHTML } from '../shared.js' + +export default async function conversationPage(conversationID) { + let conversation + try { + conversation = await getConversation(conversationID) + } catch (err) { + alert(err.message) + navigate('/', true) + return + } + + const template = document.createElement('template') + template.innerHTML = ` +
+    <div>
+      <a href="/">← Back</a>
+      ${avatar(conversation.otherParticipant)}
+      <span>${conversation.otherParticipant.username}</span>
+    </div>
+ + + ` + const page = template.content + return page +} + +function getConversation(id) { + return http.get('/api/conversations/' + id) +} +``` + +This page receives the conversation ID the router extracted from the URL. + +First it does a GET request to `/api/conversations/{conversationID}` to get info about the conversation. In case of error, we show it and redirect back to `/`. Then we render info about the other participant. + +### Conversation List + +![chat heading screenshot][10] + +We’ll fetch the latest messages too to display them. + +``` +let conversation, messages +try { + [conversation, messages] = await Promise.all([ + getConversation(conversationID), + getMessages(conversationID), + ]) +} +``` + +Update the `conversationPage()` function to fetch the messages too. We use `Promise.all()` to do both request at the same time. + +``` +function getMessages(conversationID) { + return http.get(`/api/conversations/${conversationID}/messages`) +} +``` + +A GET request to `/api/conversations/{conversationID}/messages` gets the latest messages of the conversation. + +``` +
+<ol id="messages"></ol>
+```
+
+Now, add that list to the markup.
+
+```
+const messagesOList = page.getElementById('messages')
+for (const message of messages.reverse()) {
+  messagesOList.appendChild(renderMessage(message))
+}
+```
+
+So we can append each message to the list. The API returns the latest messages first, so we reverse the array to append them in chronological order.
+
+```
+function renderMessage(message) {
+  const messageContent = escapeHTML(message.content)
+  const messageDate = new Date(message.createdAt).toLocaleString()
+
+  const li = document.createElement('li')
+  if (message.mine) {
+    li.classList.add('owned')
+  }
+  li.innerHTML = `
+    <p>${messageContent}</p>
+    <time>${messageDate}</time>
+  `
+  return li
+}
+```
+
+Each message item displays the message content with its timestamp. Using `.mine` we can add a different class to the item so you can, for example, align your own messages to the right.
+
+### Message Form
+
+![message form screenshot][11]
+
+```
+<form id="message-form">
+  <input type="text" placeholder="Type something" maxlength="480" required>
+  <button>Send</button>
+</form>
    +``` + +Add that form to the current markup. + +``` +page.getElementById('message-form').onsubmit = messageSubmitter(conversationID) +``` + +Attach an event listener to the “submit” event. + +``` +function messageSubmitter(conversationID) { + return async ev => { + ev.preventDefault() + + const form = ev.currentTarget + const input = form.querySelector('input') + const submitButton = form.querySelector('button') + + input.disabled = true + submitButton.disabled = true + + try { + const message = await createMessage(input.value, conversationID) + input.value = '' + const messagesOList = document.getElementById('messages') + if (messagesOList === null) { + return + } + + messagesOList.appendChild(renderMessage(message)) + } catch (err) { + if (err.statusCode === 422) { + input.setCustomValidity(err.body.errors.content) + } else { + alert(err.message) + } + } finally { + input.disabled = false + submitButton.disabled = false + + setTimeout(() => { + input.focus() + }, 0) + } + } +} + +function createMessage(content, conversationID) { + return http.post(`/api/conversations/${conversationID}/messages`, { content }) +} +``` + +We make use of [partial application][12] to have the conversation ID in the “submit” event handler. It takes the message content from the input and does a POST request to `/api/conversations/{conversationID}/messages` with it. Then prepends the newly created message to the list. + +### Messages Subscription + +To make it realtime we’ll subscribe to the message stream in this page also. + +``` +page.addEventListener('disconnect', subscribeToMessages(messageArriver(conversationID))) +``` + +Add that line in the `conversationPage()` function. + +``` +function subscribeToMessages(cb) { + return http.subscribe('/api/messages', cb) +} + +function messageArriver(conversationID) { + return message => { + if (message.conversationID !== conversationID) { + return + } + + const messagesOList = document.getElementById('messages') + if (messagesOList === null) { + return + + } + messagesOList.appendChild(renderMessage(message)) + readMessages(message.conversationID) + } +} + +function readMessages(conversationID) { + return http.post(`/api/conversations/${conversationID}/read_messages`) +} +``` + +We also make use of partial application to have the conversation ID here. +When a new message arrives, first we check if it’s from this conversation. If it is, we go a prepend a message item to the list and do a POST request to `/api/conversations/{conversationID}/read_messages` to updated the last time the participant read messages. + +* * * + +That concludes this series. The messenger app is now functional. + +~~I’ll add pagination on the conversation and message list, also user searching before sharing the source code. 
I’ll updated once it’s ready along with a hosted demo 👨‍💻~~ + +[Souce Code][13] • [Demo][14] + +-------------------------------------------------------------------------------- + +via: https://nicolasparada.netlify.com/posts/go-messenger-conversation-page/ + +作者:[Nicolás Parada][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://nicolasparada.netlify.com/ +[b]: https://github.com/lujun9972 +[1]: https://nicolasparada.netlify.com/posts/go-messenger-schema/ +[2]: https://nicolasparada.netlify.com/posts/go-messenger-oauth/ +[3]: https://nicolasparada.netlify.com/posts/go-messenger-conversations/ +[4]: https://nicolasparada.netlify.com/posts/go-messenger-messages/ +[5]: https://nicolasparada.netlify.com/posts/go-messenger-realtime-messages/ +[6]: https://nicolasparada.netlify.com/posts/go-messenger-dev-login/ +[7]: https://nicolasparada.netlify.com/posts/go-messenger-access-page/ +[8]: https://nicolasparada.netlify.com/posts/go-messenger-home-page/ +[9]: https://nicolasparada.netlify.com/img/go-messenger-conversation-page/heading.png +[10]: https://nicolasparada.netlify.com/img/go-messenger-conversation-page/list.png +[11]: https://nicolasparada.netlify.com/img/go-messenger-conversation-page/form.png +[12]: https://en.wikipedia.org/wiki/Partial_application +[13]: https://github.com/nicolasparada/go-messenger-demo +[14]: https://go-messenger-demo.herokuapp.com/ From ecfbe10b3981dd7a0fdc38fd6bbed94a8951b309 Mon Sep 17 00:00:00 2001 From: DarkSun Date: Wed, 18 Sep 2019 12:24:04 +0800 Subject: [PATCH 159/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020180719=20Buildi?= =?UTF-8?q?ng=20a=20Messenger=20App:=20Home=20Page?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/tech/20180719 Building a Messenger App- Home Page.md --- ...719 Building a Messenger App- Home Page.md | 255 ++++++++++++++++++ 1 file changed, 255 insertions(+) create mode 100644 sources/tech/20180719 Building a Messenger App- Home Page.md diff --git a/sources/tech/20180719 Building a Messenger App- Home Page.md b/sources/tech/20180719 Building a Messenger App- Home Page.md new file mode 100644 index 0000000000..ddec2c180f --- /dev/null +++ b/sources/tech/20180719 Building a Messenger App- Home Page.md @@ -0,0 +1,255 @@ +[#]: collector: (lujun9972) +[#]: translator: ( ) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (Building a Messenger App: Home Page) +[#]: via: (https://nicolasparada.netlify.com/posts/go-messenger-home-page/) +[#]: author: (Nicolás Parada https://nicolasparada.netlify.com/) + +Building a Messenger App: Home Page +====== + +This post is the 8th on a series: + + * [Part 1: Schema][1] + * [Part 2: OAuth][2] + * [Part 3: Conversations][3] + * [Part 4: Messages][4] + * [Part 5: Realtime Messages][5] + * [Part 6: Development Login][6] + * [Part 7: Access Page][7] + + + +Continuing the frontend, let’s finish the home page in this post. We’ll add a form to start conversations and a list with the latest ones. + +### Conversation Form + +![conversation form screenshot][8] + +In the `static/pages/home-page.js` file add some markup in the HTML view. + +``` +
+<form id="conversation-form">
+  <input type="search" placeholder="Start conversation with..." required>
+</form>
    +``` + +Add that form just below the section in which we displayed the auth user and logout button. + +``` +page.getElementById('conversation-form').onsubmit = onConversationSubmit +``` + +Now we can listen to the “submit” event to create the conversation. + +``` +import http from '../http.js' +import { navigate } from '../router.js' + +async function onConversationSubmit(ev) { + ev.preventDefault() + + const form = ev.currentTarget + const input = form.querySelector('input') + + input.disabled = true + + try { + const conversation = await createConversation(input.value) + input.value = '' + navigate('/conversations/' + conversation.id) + } catch (err) { + if (err.statusCode === 422) { + input.setCustomValidity(err.body.errors.username) + } else { + alert(err.message) + } + setTimeout(() => { + input.focus() + }, 0) + } finally { + input.disabled = false + } +} + +function createConversation(username) { + return http.post('/api/conversations', { username }) +} +``` + +On submit we do a POST request to `/api/conversations` with the username and redirect to the conversation page (for the next post). + +### Conversation List + +![conversation list screenshot][9] + +In the same file, we are going to make the `homePage()` function async to load the conversations first. + +``` +export default async function homePage() { + const conversations = await getConversations().catch(err => { + console.error(err) + return [] + }) + /*...*/ +} + +function getConversations() { + return http.get('/api/conversations') +} +``` + +Then, add a list in the markup to render conversations there. + +``` +
+<ol id="conversations"></ol>
+```
+
+Add it just below the current markup.
+
+```
+const conversationsOList = page.getElementById('conversations')
+for (const conversation of conversations) {
+  conversationsOList.appendChild(renderConversation(conversation))
+}
+```
+
+So we can append each conversation to the list.
+
+```
+import { avatar, escapeHTML } from '../shared.js'
+
+function renderConversation(conversation) {
+  const messageContent = escapeHTML(conversation.lastMessage.content)
+  const messageDate = new Date(conversation.lastMessage.createdAt).toLocaleString()
+
+  const li = document.createElement('li')
+  li.dataset['id'] = conversation.id
+  if (conversation.hasUnreadMessages) {
+    li.classList.add('has-unread-messages')
+  }
+  li.innerHTML = `
+    <a href="/conversations/${conversation.id}">
+      <div>
+        ${avatar(conversation.otherParticipant)}
+        <span>${conversation.otherParticipant.username}</span>
+      </div>
+      <div>
+        <p>${messageContent}</p>
+        <time>${messageDate}</time>
+      </div>
+    </a>
      + ` + return li +} +``` + +Each conversation item contains a link to the conversation page and displays the other participant info and a preview of the last message. Also, you can use `.hasUnreadMessages` to add a class to the item and do some styling with CSS. Maybe a bolder font or accent the color. + +Note that we’re escaping the message content. That function comes from `static/shared.js`: + +``` +export function escapeHTML(str) { + return str + .replace(/&/g, '&') + .replace(//g, '>') + .replace(/"/g, '"') + .replace(/'/g, ''') +} +``` + +That prevents displaying as HTML the message the user wrote. If the user happens to write something like: + +``` + +``` + +It would be very annoying because that script will be executed 😅 +So yeah, always remember to escape content from untrusted sources. + +### Messages Subscription + +Last but not least, I want to subscribe to the message stream here. + +``` +const unsubscribe = subscribeToMessages(onMessageArrive) +page.addEventListener('disconnect', unsubscribe) +``` + +Add that line in the `homePage()` function. + +``` +function subscribeToMessages(cb) { + return http.subscribe('/api/messages', cb) +} +``` + +The `subscribe()` function returns a function that once called it closes the underlying connection. That’s why I passed it to the “disconnect” event; so when the user leaves the page, the event stream will be closed. + +``` +async function onMessageArrive(message) { + const conversationLI = document.querySelector(`li[data-id="${message.conversationID}"]`) + if (conversationLI !== null) { + conversationLI.classList.add('has-unread-messages') + conversationLI.querySelector('a > div > p').textContent = message.content + conversationLI.querySelector('a > div > time').textContent = new Date(message.createdAt).toLocaleString() + return + } + + let conversation + try { + conversation = await getConversation(message.conversationID) + conversation.lastMessage = message + } catch (err) { + console.error(err) + return + } + + const conversationsOList = document.getElementById('conversations') + if (conversationsOList === null) { + return + } + + conversationsOList.insertAdjacentElement('afterbegin', renderConversation(conversation)) +} + +function getConversation(id) { + return http.get('/api/conversations/' + id) +} +``` + +Every time a new message arrives, we go and query for the conversation item in the DOM. If found, we add the `has-unread-messages` class to the item, and update the view. If not found, it means the message is from a new conversation created just now. We go and do a GET request to `/api/conversations/{conversationID}` to get the conversation in which the message was created and prepend it to the conversation list. + +* * * + +That covers the home page 😊 +On the next post we’ll code the conversation page. 
+ +[Souce Code][10] + +-------------------------------------------------------------------------------- + +via: https://nicolasparada.netlify.com/posts/go-messenger-home-page/ + +作者:[Nicolás Parada][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://nicolasparada.netlify.com/ +[b]: https://github.com/lujun9972 +[1]: https://nicolasparada.netlify.com/posts/go-messenger-schema/ +[2]: https://nicolasparada.netlify.com/posts/go-messenger-oauth/ +[3]: https://nicolasparada.netlify.com/posts/go-messenger-conversations/ +[4]: https://nicolasparada.netlify.com/posts/go-messenger-messages/ +[5]: https://nicolasparada.netlify.com/posts/go-messenger-realtime-messages/ +[6]: https://nicolasparada.netlify.com/posts/go-messenger-dev-login/ +[7]: https://nicolasparada.netlify.com/posts/go-messenger-access-page/ +[8]: https://nicolasparada.netlify.com/img/go-messenger-home-page/conversation-form.png +[9]: https://nicolasparada.netlify.com/img/go-messenger-home-page/conversation-list.png +[10]: https://github.com/nicolasparada/go-messenger-demo From fb4471a35cef840e4e7bf7d4c82c15248e787436 Mon Sep 17 00:00:00 2001 From: DarkSun Date: Wed, 18 Sep 2019 12:24:15 +0800 Subject: [PATCH 160/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020180716=20Buildi?= =?UTF-8?q?ng=20a=20Messenger=20App:=20Access=20Page?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/tech/20180716 Building a Messenger App- Access Page.md --- ...6 Building a Messenger App- Access Page.md | 459 ++++++++++++++++++ 1 file changed, 459 insertions(+) create mode 100644 sources/tech/20180716 Building a Messenger App- Access Page.md diff --git a/sources/tech/20180716 Building a Messenger App- Access Page.md b/sources/tech/20180716 Building a Messenger App- Access Page.md new file mode 100644 index 0000000000..21671b92f6 --- /dev/null +++ b/sources/tech/20180716 Building a Messenger App- Access Page.md @@ -0,0 +1,459 @@ +[#]: collector: (lujun9972) +[#]: translator: ( ) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (Building a Messenger App: Access Page) +[#]: via: (https://nicolasparada.netlify.com/posts/go-messenger-access-page/) +[#]: author: (Nicolás Parada https://nicolasparada.netlify.com/) + +Building a Messenger App: Access Page +====== + +This post is the 7th on a series: + + * [Part 1: Schema][1] + * [Part 2: OAuth][2] + * [Part 3: Conversations][3] + * [Part 4: Messages][4] + * [Part 5: Realtime Messages][5] + * [Part 6: Development Login][6] + + + +Now that we’re done with the backend, lets move to the frontend. I will go with a single-page application. + +Lets start by creating a file `static/index.html` with the following content. + +``` + + + + + + Messenger + + + + + + +``` + +This HTML file must be server for every URL and JavaScript will take care of rendering the correct page. + +So lets go the the `main.go` for a moment and in the `main()` function add the following route: + +``` +router.Handle("GET", "/...", http.FileServer(SPAFileSystem{http.Dir("static")})) + +type SPAFileSystem struct { + fs http.FileSystem +} + +func (spa SPAFileSystem) Open(name string) (http.File, error) { + f, err := spa.fs.Open(name) + if err != nil { + return spa.fs.Open("index.html") + } + return f, nil +} +``` + +We use a custom file system so instead of returning `404 Not Found` for unknown URLs, it serves the `index.html`. 
+ +### Router + +In the `index.html` we loaded two files: `styles.css` and `main.js`. I leave styling to your taste. + +Lets move to `main.js`. Create a `static/main.js` file with the following content: + +``` +import { guard } from './auth.js' +import Router from './router.js' + +let currentPage +const disconnect = new CustomEvent('disconnect') +const router = new Router() + +router.handle('/', guard(view('home'), view('access'))) +router.handle('/callback', view('callback')) +router.handle(/^\/conversations\/([^\/]+)$/, guard(view('conversation'), view('access'))) +router.handle(/^\//, view('not-found')) + +router.install(async result => { + document.body.innerHTML = '' + if (currentPage instanceof Node) { + currentPage.dispatchEvent(disconnect) + } + currentPage = await result + if (currentPage instanceof Node) { + document.body.appendChild(currentPage) + } +}) + +function view(pageName) { + return (...args) => import(`/pages/${pageName}-page.js`) + .then(m => m.default(...args)) +} +``` + +If you are follower of this blog, you already know how this works. That router is the one showed [here][7]. Just download it from [@nicolasparada/router][8] and save it to `static/router.js`. + +We registered four routes. At the root `/` we show the home or access page whether the user is authenticated. At `/callback` we show the callback page. On `/conversations/{conversationID}` we show the conversation or access page whether the user is authenticated and for every other URL, we show a not found page. + +We tell the router to render the result to the document body and dispatch a `disconnect` event to each page before leaving. + +We have each page in a different file and we import them with the new dynamic `import()`. + +### Auth + +`guard()` is a function that given two functions, executes the first one if the user is authenticated, or the sencond one if not. It comes from `auth.js` so lets create a `static/auth.js` file with the following content: + +``` +export function isAuthenticated() { + const token = localStorage.getItem('token') + const expiresAtItem = localStorage.getItem('expires_at') + if (token === null || expiresAtItem === null) { + return false + } + + const expiresAt = new Date(expiresAtItem) + if (isNaN(expiresAt.valueOf()) || expiresAt <= new Date()) { + return false + } + + return true +} + +export function guard(fn1, fn2) { + return (...args) => isAuthenticated() + ? fn1(...args) + : fn2(...args) +} + +export function getAuthUser() { + if (!isAuthenticated()) { + return null + } + + const authUser = localStorage.getItem('auth_user') + if (authUser === null) { + return null + } + + try { + return JSON.parse(authUser) + } catch (_) { + return null + } +} +``` + +`isAuthenticated()` checks for `token` and `expires_at` from localStorage to tell if the user is authenticated. `getAuthUser()` gets the authenticated user from localStorage. + +When we login, we’ll save all the data to localStorage so it will make sense. + +### Access Page + +![access page screenshot][9] + +Lets start with the access page. Create a file `static/pages/access-page.js` with the following content: + +``` +const template = document.createElement('template') +template.innerHTML = ` +

+    <h1>Messenger</h1>
+    <a href="/api/oauth/github" onclick="event.stopPropagation()">
      + Access with GitHub +` + +export default function accessPage() { + return template.content +} +``` + +Because the router intercepts all the link clicks to do its navigation, we must prevent the event propagation for this link in particular. + +Clicking on that link will redirect us to the backend, then to GitHub, then to the backend and then to the frontend again; to the callback page. + +### Callback Page + +Create the file `static/pages/callback-page.js` with the following content: + +``` +import http from '../http.js' +import { navigate } from '../router.js' + +export default async function callbackPage() { + const url = new URL(location.toString()) + const token = url.searchParams.get('token') + const expiresAt = url.searchParams.get('expires_at') + + try { + if (token === null || expiresAt === null) { + throw new Error('Invalid URL') + } + + const authUser = await getAuthUser(token) + + localStorage.setItem('auth_user', JSON.stringify(authUser)) + localStorage.setItem('token', token) + localStorage.setItem('expires_at', expiresAt) + } catch (err) { + alert(err.message) + } finally { + navigate('/', true) + } +} + +function getAuthUser(token) { + return http.get('/api/auth_user', { authorization: `Bearer ${token}` }) +} +``` + +The callback page doesn’t render anything. It’s an async function that does a GET request to `/api/auth_user` using the token from the URL query string and saves all the data to localStorage. Then it redirects to `/`. + +### HTTP + +There is an HTTP module. Create a `static/http.js` file with the following content: + +``` +import { isAuthenticated } from './auth.js' + +async function handleResponse(res) { + const body = await res.clone().json().catch(() => res.text()) + + if (res.status === 401) { + localStorage.removeItem('auth_user') + localStorage.removeItem('token') + localStorage.removeItem('expires_at') + } + + if (!res.ok) { + const message = typeof body === 'object' && body !== null && 'message' in body + ? body.message + : typeof body === 'string' && body !== '' + ? body + : res.statusText + throw Object.assign(new Error(message), { + url: res.url, + statusCode: res.status, + statusText: res.statusText, + headers: res.headers, + body, + }) + } + + return body +} + +function getAuthHeader() { + return isAuthenticated() + ? { authorization: `Bearer ${localStorage.getItem('token')}` } + : {} +} + +export default { + get(url, headers) { + return fetch(url, { + headers: Object.assign(getAuthHeader(), headers), + }).then(handleResponse) + }, + + post(url, body, headers) { + const init = { + method: 'POST', + headers: getAuthHeader(), + } + if (typeof body === 'object' && body !== null) { + init.body = JSON.stringify(body) + init.headers['content-type'] = 'application/json; charset=utf-8' + } + Object.assign(init.headers, headers) + return fetch(url, init).then(handleResponse) + }, + + subscribe(url, callback) { + const urlWithToken = new URL(url, location.origin) + if (isAuthenticated()) { + urlWithToken.searchParams.set('token', localStorage.getItem('token')) + } + const eventSource = new EventSource(urlWithToken.toString()) + eventSource.onmessage = ev => { + let data + try { + data = JSON.parse(ev.data) + } catch (err) { + console.error('could not parse message data as JSON:', err) + return + } + callback(data) + } + const unsubscribe = () => { + eventSource.close() + } + return unsubscribe + }, +} +``` + +This module is a wrapper around the [fetch][10] and [EventSource][11] APIs. 
The most important part is that it adds the JSON web token to the requests. + +### Home Page + +![home page screenshot][12] + +So, when the user login, the home page will be shown. Create a `static/pages/home-page.js` file with the following content: + +``` +import { getAuthUser } from '../auth.js' +import { avatar } from '../shared.js' + +export default function homePage() { + const authUser = getAuthUser() + const template = document.createElement('template') + template.innerHTML = ` +
+    <div>
+      <div>
+        ${avatar(authUser)}
+        <span>${authUser.username}</span>
+      </div>
+      <button id="logout-button">Logout</button>
+    </div>
      + + + ` + const page = template.content + page.getElementById('logout-button').onclick = onLogoutClick + return page +} + +function onLogoutClick() { + localStorage.clear() + location.reload() +} +``` + +For this post, this is the only content we render on the home page. We show the current authenticated user and a logout button. + +When the user clicks to logout, we clear all inside localStorage and do a reload of the page. + +### Avatar + +That `avatar()` function is to show the user’s avatar. Because it’s used in more than one place, I moved it to a `shared.js` file. Create the file `static/shared.js` with the following content: + +``` +export function avatar(user) { + return user.avatarUrl === null + ? `
      ` + : `${user.username}'s avatar` +} +``` + +We use a small figure with the user’s initial in case the avatar URL is null. + +You can show the initial with a little of CSS using the `attr()` function. + +``` +.avatar[data-initial]::after { + content: attr(data-initial); +} +``` + +### Development Login + +![access page with login form screenshot][13] + +In the previous post we coded a login for development. Lets add a form for that in the access page. Go to `static/pages/access-page.js` and modify it a little. + +``` +import http from '../http.js' + +const template = document.createElement('template') +template.innerHTML = ` +

+    <h1>Messenger</h1>
+    <form id="login-form">
+      <input type="text" placeholder="Username" required>
+      <button>Login</button>
+    </form>
+    <a href="/api/oauth/github" onclick="event.stopPropagation()">
      + Access with GitHub +` + +export default function accessPage() { + const page = template.content.cloneNode(true) + page.getElementById('login-form').onsubmit = onLoginSubmit + return page +} + +async function onLoginSubmit(ev) { + ev.preventDefault() + + const form = ev.currentTarget + const input = form.querySelector('input') + const submitButton = form.querySelector('button') + + input.disabled = true + submitButton.disabled = true + + try { + const payload = await login(input.value) + input.value = '' + + localStorage.setItem('auth_user', JSON.stringify(payload.authUser)) + localStorage.setItem('token', payload.token) + localStorage.setItem('expires_at', payload.expiresAt) + + location.reload() + } catch (err) { + alert(err.message) + setTimeout(() => { + input.focus() + }, 0) + } finally { + input.disabled = false + submitButton.disabled = false + } +} + +function login(username) { + return http.post('/api/login', { username }) +} +``` + +I added a login form. When the user submits the form. It does a POST requets to `/api/login` with the username. Saves all the data to localStorage and reloads the page. + +Remember to remove this form once you are done with the frontend. + +* * * + +That’s all for this post. In the next one, we’ll continue with the home page to add a form to start conversations and display a list with the latest ones. + +[Souce Code][14] + +-------------------------------------------------------------------------------- + +via: https://nicolasparada.netlify.com/posts/go-messenger-access-page/ + +作者:[Nicolás Parada][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://nicolasparada.netlify.com/ +[b]: https://github.com/lujun9972 +[1]: https://nicolasparada.netlify.com/posts/go-messenger-schema/ +[2]: https://nicolasparada.netlify.com/posts/go-messenger-oauth/ +[3]: https://nicolasparada.netlify.com/posts/go-messenger-conversations/ +[4]: https://nicolasparada.netlify.com/posts/go-messenger-messages/ +[5]: https://nicolasparada.netlify.com/posts/go-messenger-realtime-messages/ +[6]: https://nicolasparada.netlify.com/posts/go-messenger-dev-login/ +[7]: https://nicolasparada.netlify.com/posts/js-router/ +[8]: https://unpkg.com/@nicolasparada/router +[9]: https://nicolasparada.netlify.com/img/go-messenger-access-page/access-page.png +[10]: https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API +[11]: https://developer.mozilla.org/en-US/docs/Web/API/EventSource +[12]: https://nicolasparada.netlify.com/img/go-messenger-access-page/home-page.png +[13]: https://nicolasparada.netlify.com/img/go-messenger-access-page/access-page-v2.png +[14]: https://github.com/nicolasparada/go-messenger-demo From 6346d2c47e9b645d6ce8c59097a166787d7d9c7b Mon Sep 17 00:00:00 2001 From: DarkSun Date: Wed, 18 Sep 2019 12:24:27 +0800 Subject: [PATCH 161/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020180712=20Buildi?= =?UTF-8?q?ng=20a=20Messenger=20App:=20Development=20Login?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/tech/20180712 Building a Messenger App- Development Login.md --- ...ding a Messenger App- Development Login.md | 145 ++++++++++++++++++ 1 file changed, 145 insertions(+) create mode 100644 sources/tech/20180712 Building a Messenger App- Development Login.md diff --git a/sources/tech/20180712 Building a Messenger App- Development Login.md b/sources/tech/20180712 Building a 
Messenger App- Development Login.md new file mode 100644 index 0000000000..e12fb3c56a --- /dev/null +++ b/sources/tech/20180712 Building a Messenger App- Development Login.md @@ -0,0 +1,145 @@ +[#]: collector: (lujun9972) +[#]: translator: ( ) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (Building a Messenger App: Development Login) +[#]: via: (https://nicolasparada.netlify.com/posts/go-messenger-dev-login/) +[#]: author: (Nicolás Parada https://nicolasparada.netlify.com/) + +Building a Messenger App: Development Login +====== + +This post is the 6th on a series: + + * [Part 1: Schema][1] + * [Part 2: OAuth][2] + * [Part 3: Conversations][3] + * [Part 4: Messages][4] + * [Part 5: Realtime Messages][5] + + + +We already implemented login through GitHub, but if we want to play around with the app, we need a couple of users to test it. In this post we’ll add an endpoint to login as any user just giving an username. This endpoint will be just for development. + +Start by adding this route in the `main()` function. + +``` +router.HandleFunc("POST", "/api/login", requireJSON(login)) +``` + +### Login + +This function handles POST requests to `/api/login` with a JSON body with just an username and returns the authenticated user, a token and expiration date of it in JSON format. + +``` +func login(w http.ResponseWriter, r *http.Request) { + if origin.Hostname() != "localhost" { + http.NotFound(w, r) + return + } + + var input struct { + Username string `json:"username"` + } + if err := json.NewDecoder(r.Body).Decode(&input); err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + defer r.Body.Close() + + var user User + if err := db.QueryRowContext(r.Context(), ` + SELECT id, avatar_url + FROM users + WHERE username = $1 + `, input.Username).Scan( + &user.ID, + &user.AvatarURL, + ); err == sql.ErrNoRows { + http.Error(w, "User not found", http.StatusNotFound) + return + } else if err != nil { + respondError(w, fmt.Errorf("could not query user: %v", err)) + return + } + + user.Username = input.Username + + exp := time.Now().Add(jwtLifetime) + token, err := issueToken(user.ID, exp) + if err != nil { + respondError(w, fmt.Errorf("could not create token: %v", err)) + return + } + + respond(w, map[string]interface{}{ + "authUser": user, + "token": token, + "expiresAt": exp, + }, http.StatusOK) +} +``` + +First it checks we are on localhost or it responds with `404 Not Found`. It decodes the body skipping validation since this is just for development. Then it queries to the database for a user with the given username, if none is found, it returns with `404 Not Found`. Then it issues a new JSON web token using the user ID as Subject. + +``` +func issueToken(subject string, exp time.Time) (string, error) { + token, err := jwtSigner.Encode(jwt.Claims{ + Subject: subject, + Expiration: json.Number(strconv.FormatInt(exp.Unix(), 10)), + }) + if err != nil { + return "", err + } + return string(token), nil +} +``` + +The function does the same we did [previously][2]. I just moved it to reuse code. + +After creating the token, it responds with the user, token and expiration date. + +### Seed Users + +Now you can add users to play with to the database. + +``` +INSERT INTO users (id, username) VALUES + (1, 'john'), + (2, 'jane'); +``` + +You can save it to a file and pipe it to the Cockroach CLI. + +``` +cat seed_users.sql | cockroach sql --insecure -d messenger +``` + +* * * + +That’s it. 
Once you deploy the code to production and use your own domain this login function won’t be available. + +This post concludes the backend. + +[Souce Code][6] + +-------------------------------------------------------------------------------- + +via: https://nicolasparada.netlify.com/posts/go-messenger-dev-login/ + +作者:[Nicolás Parada][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://nicolasparada.netlify.com/ +[b]: https://github.com/lujun9972 +[1]: https://nicolasparada.netlify.com/posts/go-messenger-schema/ +[2]: https://nicolasparada.netlify.com/posts/go-messenger-oauth/ +[3]: https://nicolasparada.netlify.com/posts/go-messenger-conversations/ +[4]: https://nicolasparada.netlify.com/posts/go-messenger-messages/ +[5]: https://nicolasparada.netlify.com/posts/go-messenger-realtime-messages/ +[6]: https://github.com/nicolasparada/go-messenger-demo From 6374b15cc9a0240659ee6152c72c8a5a8ee94edf Mon Sep 17 00:00:00 2001 From: DarkSun Date: Wed, 18 Sep 2019 12:24:39 +0800 Subject: [PATCH 162/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020180710=20Buildi?= =?UTF-8?q?ng=20a=20Messenger=20App:=20Realtime=20Messages?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/tech/20180710 Building a Messenger App- Realtime Messages.md --- ...ding a Messenger App- Realtime Messages.md | 175 ++++++++++++++++++ 1 file changed, 175 insertions(+) create mode 100644 sources/tech/20180710 Building a Messenger App- Realtime Messages.md diff --git a/sources/tech/20180710 Building a Messenger App- Realtime Messages.md b/sources/tech/20180710 Building a Messenger App- Realtime Messages.md new file mode 100644 index 0000000000..71479495b2 --- /dev/null +++ b/sources/tech/20180710 Building a Messenger App- Realtime Messages.md @@ -0,0 +1,175 @@ +[#]: collector: (lujun9972) +[#]: translator: ( ) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (Building a Messenger App: Realtime Messages) +[#]: via: (https://nicolasparada.netlify.com/posts/go-messenger-realtime-messages/) +[#]: author: (Nicolás Parada https://nicolasparada.netlify.com/) + +Building a Messenger App: Realtime Messages +====== + +This post is the 5th on a series: + + * [Part 1: Schema][1] + * [Part 2: OAuth][2] + * [Part 3: Conversations][3] + * [Part 4: Messages][4] + + + +For realtime messages we’ll use [Server-Sent Events][5]. This is an open connection in which we can stream data. We’ll have and endpoint in which the user subscribes to all the messages sended to him. + +### Message Clients + +Before the HTTP part, let’s code a map to have all the clients listening for messages. Initialize this globally like so: + +``` +type MessageClient struct { + Messages chan Message + UserID string +} + +var messageClients sync.Map +``` + +### New Message Created + +Remember in the [last post][4] when we created the message, we left a “TODO” comment. There we’ll dispatch a goroutine with this function. + +``` +go messageCreated(message) +``` + +Insert that line just where we left the comment. + +``` +func messageCreated(message Message) error { + if err := db.QueryRow(` + SELECT user_id FROM participants + WHERE user_id != $1 and conversation_id = $2 + `, message.UserID, message.ConversationID). 
+ Scan(&message.ReceiverID); err != nil { + return err + } + + go broadcastMessage(message) + + return nil +} + +func broadcastMessage(message Message) { + messageClients.Range(func(key, _ interface{}) bool { + client := key.(*MessageClient) + if client.UserID == message.ReceiverID { + client.Messages <- message + } + return true + }) +} +``` + +The function queries for the recipient ID (the other participant ID) and sends the message to all the clients. + +### Subscribe to Messages + +Lets go to the `main()` function and add this route: + +``` +router.HandleFunc("GET", "/api/messages", guard(subscribeToMessages)) +``` + +This endpoint handles GET requests on `/api/messages`. The request should be an [EventSource][6] connection. It responds with an event stream in which the data is JSON formatted. + +``` +func subscribeToMessages(w http.ResponseWriter, r *http.Request) { + if a := r.Header.Get("Accept"); !strings.Contains(a, "text/event-stream") { + http.Error(w, "This endpoint requires an EventSource connection", http.StatusNotAcceptable) + return + } + + f, ok := w.(http.Flusher) + if !ok { + respondError(w, errors.New("streaming unsupported")) + return + } + + ctx := r.Context() + authUserID := ctx.Value(keyAuthUserID).(string) + + h := w.Header() + h.Set("Cache-Control", "no-cache") + h.Set("Connection", "keep-alive") + h.Set("Content-Type", "text/event-stream") + + messages := make(chan Message) + defer close(messages) + + client := &MessageClient{Messages: messages, UserID: authUserID} + messageClients.Store(client, nil) + defer messageClients.Delete(client) + + for { + select { + case <-ctx.Done(): + return + case message := <-messages: + if b, err := json.Marshal(message); err != nil { + log.Printf("could not marshall message: %v\n", err) + fmt.Fprintf(w, "event: error\ndata: %v\n\n", err) + } else { + fmt.Fprintf(w, "data: %s\n\n", b) + } + f.Flush() + } + } +} +``` + +First it checks for the correct request headers and checks the server supports streaming. We create a channel of messages to make a client and store it in the clients map. Each time a new message is created, it will go in this channel, so we can read from it with a `for-select` loop. + +Server-Sent Events uses this format to send data: + +``` +data: some data here\n\n +``` + +We are sending it in JSON format: + +``` +data: {"foo":"bar"}\n\n +``` + +We are using `fmt.Fprintf()` to write to the response writter in this format and flushing the data in each iteration of the loop. + +This will loop until the connection is closed using the request context. We defered the close of the channel and the delete of the client, so when the loop ends, the channel will be closed and the client won’t receive more messages. + +Note aside, the JavaScript API to work with Server-Sent Events (EventSource) doesn’t support setting custom headers 😒 So we cannot set `Authorization: Bearer `. And that’s the reason why the `guard()` middleware reads the token from the URL query string also. + +* * * + +That concludes the realtime messages. I’d like to say that’s everything in the backend, but to code the frontend I’ll add one more endpoint to login. A login that will be just for development. 
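+
+One note before moving on: the `guard()` middleware used on these routes was written back in the OAuth post, so it isn’t repeated here. Below is a rough sketch of the idea just described; the real code may differ, `decodeToken()` is a hypothetical helper standing in for the actual JWT decoding, and imports (`context`, `net/http`, `strings`) are omitted as in the rest of the series. The part that matters here is the query string fallback for EventSource connections:
+
+```
+func guard(handler http.HandlerFunc) http.HandlerFunc {
+	return func(w http.ResponseWriter, r *http.Request) {
+		var token string
+		if a := r.Header.Get("Authorization"); strings.HasPrefix(a, "Bearer ") {
+			token = a[len("Bearer "):]
+		} else if t := r.URL.Query().Get("token"); t != "" {
+			// EventSource cannot set custom headers, so the
+			// token arrives in the query string instead.
+			token = t
+		} else {
+			http.Error(w, "Unauthorized", http.StatusUnauthorized)
+			return
+		}
+
+		// decodeToken is hypothetical: it validates the JWT and
+		// returns its subject (the user ID).
+		userID, err := decodeToken(token)
+		if err != nil {
+			http.Error(w, "Unauthorized", http.StatusUnauthorized)
+			return
+		}
+
+		ctx := context.WithValue(r.Context(), keyAuthUserID, userID)
+		handler(w, r.WithContext(ctx))
+	}
+}
+```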
+ +[Souce Code][7] + +-------------------------------------------------------------------------------- + +via: https://nicolasparada.netlify.com/posts/go-messenger-realtime-messages/ + +作者:[Nicolás Parada][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://nicolasparada.netlify.com/ +[b]: https://github.com/lujun9972 +[1]: https://nicolasparada.netlify.com/posts/go-messenger-schema/ +[2]: https://nicolasparada.netlify.com/posts/go-messenger-oauth/ +[3]: https://nicolasparada.netlify.com/posts/go-messenger-conversations/ +[4]: https://nicolasparada.netlify.com/posts/go-messenger-messages/ +[5]: https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events +[6]: https://developer.mozilla.org/en-US/docs/Web/API/EventSource +[7]: https://github.com/nicolasparada/go-messenger-demo From e53a76b788aef7a086b0157f8acac86796570e21 Mon Sep 17 00:00:00 2001 From: DarkSun Date: Wed, 18 Sep 2019 12:24:55 +0800 Subject: [PATCH 163/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020180710=20Buildi?= =?UTF-8?q?ng=20a=20Messenger=20App:=20Messages?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/tech/20180710 Building a Messenger App- Messages.md --- ...0710 Building a Messenger App- Messages.md | 315 ++++++++++++++++++ 1 file changed, 315 insertions(+) create mode 100644 sources/tech/20180710 Building a Messenger App- Messages.md diff --git a/sources/tech/20180710 Building a Messenger App- Messages.md b/sources/tech/20180710 Building a Messenger App- Messages.md new file mode 100644 index 0000000000..55e596df64 --- /dev/null +++ b/sources/tech/20180710 Building a Messenger App- Messages.md @@ -0,0 +1,315 @@ +[#]: collector: (lujun9972) +[#]: translator: ( ) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (Building a Messenger App: Messages) +[#]: via: (https://nicolasparada.netlify.com/posts/go-messenger-messages/) +[#]: author: (Nicolás Parada https://nicolasparada.netlify.com/) + +Building a Messenger App: Messages +====== + +This post is the 4th on a series: + + * [Part 1: Schema][1] + * [Part 2: OAuth][2] + * [Part 3: Conversations][3] + + + +In this post we’ll code the endpoints to create a message and list them, also an endpoint to update the last time the participant read messages. Start by adding these routes in the `main()` function. + +``` +router.HandleFunc("POST", "/api/conversations/:conversationID/messages", requireJSON(guard(createMessage))) +router.HandleFunc("GET", "/api/conversations/:conversationID/messages", guard(getMessages)) +router.HandleFunc("POST", "/api/conversations/:conversationID/read_messages", guard(readMessages)) +``` + +Messages goes into conversations so the endpoint includes the conversation ID. + +### Create Message + +This endpoint handles POST requests to `/api/conversations/{conversationID}/messages` with a JSON body with just the message content and return the newly created message. It has two side affects: it updates the conversation `last_message_id` and updates the participant `messages_read_at`. 
+ +``` +func createMessage(w http.ResponseWriter, r *http.Request) { + var input struct { + Content string `json:"content"` + } + defer r.Body.Close() + if err := json.NewDecoder(r.Body).Decode(&input); err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + errs := make(map[string]string) + input.Content = removeSpaces(input.Content) + if input.Content == "" { + errs["content"] = "Message content required" + } else if len([]rune(input.Content)) > 480 { + errs["content"] = "Message too long. 480 max" + } + if len(errs) != 0 { + respond(w, Errors{errs}, http.StatusUnprocessableEntity) + return + } + + ctx := r.Context() + authUserID := ctx.Value(keyAuthUserID).(string) + conversationID := way.Param(ctx, "conversationID") + + tx, err := db.BeginTx(ctx, nil) + if err != nil { + respondError(w, fmt.Errorf("could not begin tx: %v", err)) + return + } + defer tx.Rollback() + + isParticipant, err := queryParticipantExistance(ctx, tx, authUserID, conversationID) + if err != nil { + respondError(w, fmt.Errorf("could not query participant existance: %v", err)) + return + } + + if !isParticipant { + http.Error(w, "Conversation not found", http.StatusNotFound) + return + } + + var message Message + if err := tx.QueryRowContext(ctx, ` + INSERT INTO messages (content, user_id, conversation_id) VALUES + ($1, $2, $3) + RETURNING id, created_at + `, input.Content, authUserID, conversationID).Scan( + &message.ID, + &message.CreatedAt, + ); err != nil { + respondError(w, fmt.Errorf("could not insert message: %v", err)) + return + } + + if _, err := tx.ExecContext(ctx, ` + UPDATE conversations SET last_message_id = $1 + WHERE id = $2 + `, message.ID, conversationID); err != nil { + respondError(w, fmt.Errorf("could not update conversation last message ID: %v", err)) + return + } + + if err = tx.Commit(); err != nil { + respondError(w, fmt.Errorf("could not commit tx to create a message: %v", err)) + return + } + + go func() { + if err = updateMessagesReadAt(nil, authUserID, conversationID); err != nil { + log.Printf("could not update messages read at: %v\n", err) + } + }() + + message.Content = input.Content + message.UserID = authUserID + message.ConversationID = conversationID + // TODO: notify about new message. + message.Mine = true + + respond(w, message, http.StatusCreated) +} +``` + +First, it decodes the request body into an struct with the message content. Then, it validates the content is not empty and has less than 480 characters. + +``` +var rxSpaces = regexp.MustCompile("\\s+") + +func removeSpaces(s string) string { + if s == "" { + return s + } + + lines := make([]string, 0) + for _, line := range strings.Split(s, "\n") { + line = rxSpaces.ReplaceAllLiteralString(line, " ") + line = strings.TrimSpace(line) + if line != "" { + lines = append(lines, line) + } + } + return strings.Join(lines, "\n") +} +``` + +This is the function to remove spaces. It iterates over each line, remove more than two consecutives spaces and returns with the non empty lines. + +After the validation, it starts an SQL transaction. First, it queries for the participant existance in the conversation. 
+ +``` +func queryParticipantExistance(ctx context.Context, tx *sql.Tx, userID, conversationID string) (bool, error) { + if ctx == nil { + ctx = context.Background() + } + var exists bool + if err := tx.QueryRowContext(ctx, `SELECT EXISTS ( + SELECT 1 FROM participants + WHERE user_id = $1 AND conversation_id = $2 + )`, userID, conversationID).Scan(&exists); err != nil { + return false, err + } + return exists, nil +} +``` + +I extracted it into a function because it’s reused later. + +If the user isn’t participant of the conversation, we return with a `404 Not Found` error. + +Then, it inserts the message and updates the conversation `last_message_id`. Since this point, `last_message_id` cannot by `NULL` because we don’t allow removing messages. + +Then it commits the transaction and we update the participant `messages_read_at` in a goroutine. + +``` +func updateMessagesReadAt(ctx context.Context, userID, conversationID string) error { + if ctx == nil { + ctx = context.Background() + } + + if _, err := db.ExecContext(ctx, ` + UPDATE participants SET messages_read_at = now() + WHERE user_id = $1 AND conversation_id = $2 + `, userID, conversationID); err != nil { + return err + } + return nil +} +``` + +Before responding with the new message, we must notify about it. This is for the realtime part we’ll code in the next post so I left a comment there. + +### Get Messages + +This endpoint handles GET requests to `/api/conversations/{conversationID}/messages`. It responds with a JSON array with all the messages in the conversation. It also has the same side affect of updating the participant `messages_read_at`. + +``` +func getMessages(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + authUserID := ctx.Value(keyAuthUserID).(string) + conversationID := way.Param(ctx, "conversationID") + + tx, err := db.BeginTx(ctx, &sql.TxOptions{ReadOnly: true}) + if err != nil { + respondError(w, fmt.Errorf("could not begin tx: %v", err)) + return + } + defer tx.Rollback() + + isParticipant, err := queryParticipantExistance(ctx, tx, authUserID, conversationID) + if err != nil { + respondError(w, fmt.Errorf("could not query participant existance: %v", err)) + return + } + + if !isParticipant { + http.Error(w, "Conversation not found", http.StatusNotFound) + return + } + + rows, err := tx.QueryContext(ctx, ` + SELECT + id, + content, + created_at, + user_id = $1 AS mine + FROM messages + WHERE messages.conversation_id = $2 + ORDER BY messages.created_at DESC + `, authUserID, conversationID) + if err != nil { + respondError(w, fmt.Errorf("could not query messages: %v", err)) + return + } + defer rows.Close() + + messages := make([]Message, 0) + for rows.Next() { + var message Message + if err = rows.Scan( + &message.ID, + &message.Content, + &message.CreatedAt, + &message.Mine, + ); err != nil { + respondError(w, fmt.Errorf("could not scan message: %v", err)) + return + } + + messages = append(messages, message) + } + + if err = rows.Err(); err != nil { + respondError(w, fmt.Errorf("could not iterate over messages: %v", err)) + return + } + + if err = tx.Commit(); err != nil { + respondError(w, fmt.Errorf("could not commit tx to get messages: %v", err)) + return + } + + go func() { + if err = updateMessagesReadAt(nil, authUserID, conversationID); err != nil { + log.Printf("could not update messages read at: %v\n", err) + } + }() + + respond(w, messages, http.StatusOK) +} +``` + +First, it begins an SQL transaction in readonly mode. Checks for the participant existance and queries all the messages. 
In each message, we use the current authenticated user ID to know whether the user owns the message (`mine`). Then it commits the transaction, updates the participant `messages_read_at` in a goroutine and respond with the messages. + +### Read Messages + +This endpoint handles POST requests to `/api/conversations/{conversationID}/read_messages`. Without any request or response body. In the frontend we’ll make this request each time a new message arrive in the realtime stream. + +``` +func readMessages(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + authUserID := ctx.Value(keyAuthUserID).(string) + conversationID := way.Param(ctx, "conversationID") + + if err := updateMessagesReadAt(ctx, authUserID, conversationID); err != nil { + respondError(w, fmt.Errorf("could not update messages read at: %v", err)) + return + } + + w.WriteHeader(http.StatusNoContent) +} +``` + +It uses the same function we’ve been using to update the participant `messages_read_at`. + +* * * + +That concludes it. Realtime messages is the only part left in the backend. Wait for it in the next post. + +[Souce Code][4] + +-------------------------------------------------------------------------------- + +via: https://nicolasparada.netlify.com/posts/go-messenger-messages/ + +作者:[Nicolás Parada][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://nicolasparada.netlify.com/ +[b]: https://github.com/lujun9972 +[1]: https://nicolasparada.netlify.com/posts/go-messenger-schema/ +[2]: https://nicolasparada.netlify.com/posts/go-messenger-oauth/ +[3]: https://nicolasparada.netlify.com/posts/go-messenger-conversations/ +[4]: https://github.com/nicolasparada/go-messenger-demo From d4f2810f8f4c83cd3a84944cd9c9e951f2a16613 Mon Sep 17 00:00:00 2001 From: DarkSun Date: Wed, 18 Sep 2019 12:25:09 +0800 Subject: [PATCH 164/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020180708=20Buildi?= =?UTF-8?q?ng=20a=20Messenger=20App:=20Conversations?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/tech/20180708 Building a Messenger App- Conversations.md --- ...Building a Messenger App- Conversations.md | 351 ++++++++++++++++++ 1 file changed, 351 insertions(+) create mode 100644 sources/tech/20180708 Building a Messenger App- Conversations.md diff --git a/sources/tech/20180708 Building a Messenger App- Conversations.md b/sources/tech/20180708 Building a Messenger App- Conversations.md new file mode 100644 index 0000000000..6789d1d4a1 --- /dev/null +++ b/sources/tech/20180708 Building a Messenger App- Conversations.md @@ -0,0 +1,351 @@ +[#]: collector: (lujun9972) +[#]: translator: ( ) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (Building a Messenger App: Conversations) +[#]: via: (https://nicolasparada.netlify.com/posts/go-messenger-conversations/) +[#]: author: (Nicolás Parada https://nicolasparada.netlify.com/) + +Building a Messenger App: Conversations +====== + +This post is the 3rd in a series: + + * [Part 1: Schema][1] + * [Part 2: OAuth][2] + + + +In our messenger app, messages are stacked by conversations between two participants. You start a conversation providing the user you want to chat with, the conversations is created (if not exists already) and you can start sending messages to that conversations. + +On the front-end we’re interested in showing a list of the lastest conversations. 
There we’ll show the last message of it and the name and avatar of the other participant. + +In this post, we’ll code the endpoints to start a conversation, list the latest and find a single one. + +Inside the `main()` function add this routes. + +``` +router.HandleFunc("POST", "/api/conversations", requireJSON(guard(createConversation))) +router.HandleFunc("GET", "/api/conversations", guard(getConversations)) +router.HandleFunc("GET", "/api/conversations/:conversationID", guard(getConversation)) +``` + +These three endpoints require authentication so we use the `guard()` middleware. There is a new middleware that checks for the request content type JSON. + +### Require JSON Middleware + +``` +func requireJSON(handler http.HandlerFunc) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + if ct := r.Header.Get("Content-Type"); !strings.HasPrefix(ct, "application/json") { + http.Error(w, "Content type of application/json required", http.StatusUnsupportedMediaType) + return + } + handler(w, r) + } +} +``` + +If the request isn’t JSON, it responds with a `415 Unsupported Media Type` error. + +### Create Conversation + +``` +type Conversation struct { + ID string `json:"id"` + OtherParticipant *User `json:"otherParticipant"` + LastMessage *Message `json:"lastMessage"` + HasUnreadMessages bool `json:"hasUnreadMessages"` +} +``` + +So, a conversation holds a reference to the other participant and the last message. Also has a bool field to tell if it has unread messages. + +``` +type Message struct { + ID string `json:"id"` + Content string `json:"content"` + UserID string `json:"-"` + ConversationID string `json:"conversationID,omitempty"` + CreatedAt time.Time `json:"createdAt"` + Mine bool `json:"mine"` + ReceiverID string `json:"-"` +} +``` + +Messages are for the next post, but I define the struct now since we are using it. Most of the fields are the same as the database table. We have `Mine` to tell if the message is owned by the current authenticated user and `ReceiverID` will be used to filter messanges once we add realtime capabilities. + +Lets write the HTTP handler then. It’s quite long but don’t be scared. 
+
+```
+func createConversation(w http.ResponseWriter, r *http.Request) {
+    var input struct {
+        Username string `json:"username"`
+    }
+    defer r.Body.Close()
+    if err := json.NewDecoder(r.Body).Decode(&input); err != nil {
+        http.Error(w, err.Error(), http.StatusBadRequest)
+        return
+    }
+
+    input.Username = strings.TrimSpace(input.Username)
+    if input.Username == "" {
+        respond(w, Errors{map[string]string{
+            "username": "Username required",
+        }}, http.StatusUnprocessableEntity)
+        return
+    }
+
+    ctx := r.Context()
+    authUserID := ctx.Value(keyAuthUserID).(string)
+
+    tx, err := db.BeginTx(ctx, nil)
+    if err != nil {
+        respondError(w, fmt.Errorf("could not begin tx: %v", err))
+        return
+    }
+    defer tx.Rollback()
+
+    var otherParticipant User
+    if err := tx.QueryRowContext(ctx, `
+        SELECT id, avatar_url FROM users WHERE username = $1
+    `, input.Username).Scan(
+        &otherParticipant.ID,
+        &otherParticipant.AvatarURL,
+    ); err == sql.ErrNoRows {
+        http.Error(w, "User not found", http.StatusNotFound)
+        return
+    } else if err != nil {
+        respondError(w, fmt.Errorf("could not query other participant: %v", err))
+        return
+    }
+
+    otherParticipant.Username = input.Username
+
+    if otherParticipant.ID == authUserID {
+        http.Error(w, "Try starting a conversation with someone else", http.StatusForbidden)
+        return
+    }
+
+    var conversationID string
+    if err := tx.QueryRowContext(ctx, `
+        SELECT conversation_id FROM participants WHERE user_id = $1
+        INTERSECT
+        SELECT conversation_id FROM participants WHERE user_id = $2
+    `, authUserID, otherParticipant.ID).Scan(&conversationID); err != nil && err != sql.ErrNoRows {
+        respondError(w, fmt.Errorf("could not query common conversation id: %v", err))
+        return
+    } else if err == nil {
+        http.Redirect(w, r, "/api/conversations/"+conversationID, http.StatusFound)
+        return
+    }
+
+    var conversation Conversation
+    if err = tx.QueryRowContext(ctx, `
+        INSERT INTO conversations DEFAULT VALUES
+        RETURNING id
+    `).Scan(&conversation.ID); err != nil {
+        respondError(w, fmt.Errorf("could not insert conversation: %v", err))
+        return
+    }
+
+    if _, err = tx.ExecContext(ctx, `
+        INSERT INTO participants (user_id, conversation_id) VALUES
+            ($1, $2),
+            ($3, $2)
+    `, authUserID, conversation.ID, otherParticipant.ID); err != nil {
+        respondError(w, fmt.Errorf("could not insert participants: %v", err))
+        return
+    }
+
+    if err = tx.Commit(); err != nil {
+        respondError(w, fmt.Errorf("could not commit tx to create conversation: %v", err))
+        return
+    }
+
+    conversation.OtherParticipant = &otherParticipant
+
+    respond(w, conversation, http.StatusCreated)
+}
+```
+
+For this endpoint you do a POST request to `/api/conversations` with a JSON body containing the username of the user you want to chat with.
+
+So first it decodes the request body into a struct with the username. Then it validates that the username is not empty.
+
+```
+type Errors struct {
+    Errors map[string]string `json:"errors"`
+}
+```
+
+This is the `Errors` struct. It’s just a map. If you send an empty username, you get this JSON back with a `422 Unprocessable Entity` error.
+
+```
+{
+    "errors": {
+        "username": "Username required"
+    }
+}
+```
+
+Then, we begin an SQL transaction. We only received a username, but we need the actual user ID. So the first part of the transaction is to query for the ID and avatar of that user (the other participant). If the user is not found, we respond with a `404 Not Found` error. Also, if the user happens to be the same as the current authenticated user, we respond with `403 Forbidden`. 
There should be two different users, not the same one.
+
+Then, we try to find a conversation those two users have in common. We use `INTERSECT` for that. If there is one, we redirect to that conversation at `/api/conversations/{conversationID}` and return.
+
+If no common conversation was found, we continue by creating a new one and adding the two participants. Finally, we `COMMIT` the transaction and respond with the newly created conversation.
+
+### Get Conversations
+
+This endpoint, `/api/conversations`, gets all the conversations of the current authenticated user.
+
+```
+func getConversations(w http.ResponseWriter, r *http.Request) {
+    ctx := r.Context()
+    authUserID := ctx.Value(keyAuthUserID).(string)
+
+    rows, err := db.QueryContext(ctx, `
+        SELECT
+            conversations.id,
+            auth_user.messages_read_at < messages.created_at AS has_unread_messages,
+            messages.id,
+            messages.content,
+            messages.created_at,
+            messages.user_id = $1 AS mine,
+            other_users.id,
+            other_users.username,
+            other_users.avatar_url
+        FROM conversations
+        INNER JOIN messages ON conversations.last_message_id = messages.id
+        INNER JOIN participants other_participants
+            ON other_participants.conversation_id = conversations.id
+                AND other_participants.user_id != $1
+        INNER JOIN users other_users ON other_participants.user_id = other_users.id
+        INNER JOIN participants auth_user
+            ON auth_user.conversation_id = conversations.id
+                AND auth_user.user_id = $1
+        ORDER BY messages.created_at DESC
+    `, authUserID)
+    if err != nil {
+        respondError(w, fmt.Errorf("could not query conversations: %v", err))
+        return
+    }
+    defer rows.Close()
+
+    conversations := make([]Conversation, 0)
+    for rows.Next() {
+        var conversation Conversation
+        var lastMessage Message
+        var otherParticipant User
+        if err = rows.Scan(
+            &conversation.ID,
+            &conversation.HasUnreadMessages,
+            &lastMessage.ID,
+            &lastMessage.Content,
+            &lastMessage.CreatedAt,
+            &lastMessage.Mine,
+            &otherParticipant.ID,
+            &otherParticipant.Username,
+            &otherParticipant.AvatarURL,
+        ); err != nil {
+            respondError(w, fmt.Errorf("could not scan conversation: %v", err))
+            return
+        }
+
+        conversation.LastMessage = &lastMessage
+        conversation.OtherParticipant = &otherParticipant
+        conversations = append(conversations, conversation)
+    }
+
+    if err = rows.Err(); err != nil {
+        respondError(w, fmt.Errorf("could not iterate over conversations: %v", err))
+        return
+    }
+
+    respond(w, conversations, http.StatusOK)
+}
+```
+
+This handler just queries the database. It queries the conversations table with some joins… First, to the messages table to get the last message. Then to the participants, with a condition that the participant’s ID is not that of the current authenticated user; this is the other participant. Then it joins the users table to get that participant’s username and avatar. And finally it joins the participants again, but with the opposite condition, so this participant is the current authenticated user. We compare `messages_read_at` with the message’s `created_at` to know whether the conversation has unread messages, and we use the message’s `user_id` to check whether it’s “mine” or not.
+
+Note that this query assumes a conversation has just two users; it only works for that scenario. Also, if you want to show a count of the unread messages, this design isn’t good. I think you could add an `unread_messages_count` `INT` field on the `participants` table, increment it each time a new message is created, and reset it when the user reads them; a sketch of that idea follows below.
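+
+If you want to try that counter approach, it could look something like this. The `unread_messages_count` column and these statements are only an illustrative sketch; they are not part of the demo code:
+
+```
+ALTER TABLE participants ADD COLUMN unread_messages_count INT NOT NULL DEFAULT 0;
+
+-- Illustrative: on every new message, bump the counter of the other participants.
+UPDATE participants
+SET unread_messages_count = unread_messages_count + 1
+WHERE conversation_id = $1 AND user_id != $2;
+
+-- Illustrative: when a user reads the conversation, reset their counter.
+UPDATE participants
+SET unread_messages_count = 0
+WHERE conversation_id = $1 AND user_id = $2;
+```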
+
+Then it iterates over the rows, scans each one into a slice of conversations, and responds with those at the end.
+
+### Get Conversation
+
+This endpoint, `/api/conversations/{conversationID}`, responds with a single conversation by its ID.
+
+```
+func getConversation(w http.ResponseWriter, r *http.Request) {
+    ctx := r.Context()
+    authUserID := ctx.Value(keyAuthUserID).(string)
+    conversationID := way.Param(ctx, "conversationID")
+
+    var conversation Conversation
+    var otherParticipant User
+    if err := db.QueryRowContext(ctx, `
+        SELECT
+            IFNULL(auth_user.messages_read_at < messages.created_at, false) AS has_unread_messages,
+            other_users.id,
+            other_users.username,
+            other_users.avatar_url
+        FROM conversations
+        LEFT JOIN messages ON conversations.last_message_id = messages.id
+        INNER JOIN participants other_participants
+            ON other_participants.conversation_id = conversations.id
+                AND other_participants.user_id != $1
+        INNER JOIN users other_users ON other_participants.user_id = other_users.id
+        INNER JOIN participants auth_user
+            ON auth_user.conversation_id = conversations.id
+                AND auth_user.user_id = $1
+        WHERE conversations.id = $2
+    `, authUserID, conversationID).Scan(
+        &conversation.HasUnreadMessages,
+        &otherParticipant.ID,
+        &otherParticipant.Username,
+        &otherParticipant.AvatarURL,
+    ); err == sql.ErrNoRows {
+        http.Error(w, "Conversation not found", http.StatusNotFound)
+        return
+    } else if err != nil {
+        respondError(w, fmt.Errorf("could not query conversation: %v", err))
+        return
+    }
+
+    conversation.ID = conversationID
+    conversation.OtherParticipant = &otherParticipant
+
+    respond(w, conversation, http.StatusOK)
+}
+```
+
+The query is quite similar. We’re not interested in showing the last message, so we omit those fields, but we still need the message to know whether the conversation has unread messages. This time we do a `LEFT JOIN` instead of an `INNER JOIN` because `last_message_id` is nullable; otherwise we won’t get any rows. We use an `IFNULL` in the `has_unread_messages` comparison for that reason too. Lastly, we filter by ID.
+
+If the query returns no rows, we respond with a `404 Not Found` error; otherwise, `200 OK` with the found conversation.
+
+* * *
+
+Yeah, that concludes the conversation endpoints.
+
+Wait for the next post to create and list messages 👋
+
+[Source Code][3]

--------------------------------------------------------------------------------

via: https://nicolasparada.netlify.com/posts/go-messenger-conversations/

作者:[Nicolás Parada][a]
选题:[lujun9972][b]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)

本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出

[a]: https://nicolasparada.netlify.com/
[b]: https://github.com/lujun9972
[1]: https://nicolasparada.netlify.com/posts/go-messenger-schema/
[2]: https://nicolasparada.netlify.com/posts/go-messenger-oauth/
[3]: https://github.com/nicolasparada/go-messenger-demo

From 7a84658be3900df7070f8dbe396503ca29207ae9 Mon Sep 17 00:00:00 2001
From: DarkSun
Date: Wed, 18 Sep 2019 12:25:21 +0800
Subject: [PATCH 165/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020180706=20Buildi?=
 =?UTF-8?q?ng=20a=20Messenger=20App:=20OAuth?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

sources/tech/20180706 Building a Messenger App- OAuth.md

---
 ...0180706 Building a Messenger App- OAuth.md | 448 ++++++++++++++++++
 1 file changed, 448 insertions(+)
 create mode 100644 sources/tech/20180706 Building a Messenger App- OAuth.md

diff --git a/sources/tech/20180706 Building a Messenger App- OAuth.md b/sources/tech/20180706 Building a Messenger App- OAuth.md
new file mode 100644
index 0000000000..72f8c4e3f6
--- /dev/null
+++ b/sources/tech/20180706 Building a Messenger App- OAuth.md
@@ -0,0 +1,448 @@
+[#]: collector: (lujun9972)
+[#]: translator: ( )
+[#]: reviewer: ( )
+[#]: publisher: ( )
+[#]: url: ( )
+[#]: subject: (Building a Messenger App: OAuth)
+[#]: via: (https://nicolasparada.netlify.com/posts/go-messenger-oauth/)
+[#]: author: (Nicolás Parada https://nicolasparada.netlify.com/)
+
+Building a Messenger App: OAuth
+======
+
+[Previous part: Schema][1].
+
+In this post we start the backend by adding social login.
+
+This is how it works: the user clicks a link that redirects them to the GitHub authorization page. The user grants access to their info and gets redirected back, logged in. The next time they try to log in, they won’t be asked to grant permission; it is remembered, so the login flow is as fast as a single click.
+
+Internally, the story is more complex though. First we need to register a new [OAuth app on GitHub][2].
+
+The important part is the callback URL. Set it to `http://localhost:3000/api/oauth/github/callback`. In development we are on localhost, so when you ship the app to production, register a new app with the correct callback URL.
+
+This will give you a client ID and a secret key. Don’t share them with anyone 👀
+
+With that out of the way, let’s start writing some code. 
Create a `main.go` file:
+
+```
+package main
+
+import (
+    "context"
+    "database/sql"
+    "encoding/json"
+    "fmt"
+    "log"
+    "net/http"
+    "net/url"
+    "os"
+    "strconv"
+    "strings"
+    "time"
+
+    "github.com/gorilla/securecookie"
+    "github.com/joho/godotenv"
+    "github.com/knq/jwt"
+    _ "github.com/lib/pq"
+    gonanoid "github.com/matoous/go-nanoid"
+    "github.com/matryer/way"
+    "golang.org/x/oauth2"
+    "golang.org/x/oauth2/github"
+)
+
+var origin *url.URL
+var db *sql.DB
+var githubOAuthConfig *oauth2.Config
+var cookieSigner *securecookie.SecureCookie
+var jwtSigner jwt.Signer
+
+func main() {
+    godotenv.Load()
+
+    port := intEnv("PORT", 3000)
+    originString := env("ORIGIN", fmt.Sprintf("http://localhost:%d/", port))
+    databaseURL := env("DATABASE_URL", "postgresql://root@127.0.0.1:26257/messenger?sslmode=disable")
+    githubClientID := os.Getenv("GITHUB_CLIENT_ID")
+    githubClientSecret := os.Getenv("GITHUB_CLIENT_SECRET")
+    hashKey := env("HASH_KEY", "secret")
+    jwtKey := env("JWT_KEY", "secret")
+
+    var err error
+    if origin, err = url.Parse(originString); err != nil || !origin.IsAbs() {
+        log.Fatal("invalid origin")
+        return
+    }
+
+    if i, err := strconv.Atoi(origin.Port()); err == nil {
+        port = i
+    }
+
+    if githubClientID == "" || githubClientSecret == "" {
+        log.Fatalf("remember to set both $GITHUB_CLIENT_ID and $GITHUB_CLIENT_SECRET")
+        return
+    }
+
+    if db, err = sql.Open("postgres", databaseURL); err != nil {
+        log.Fatalf("could not open database connection: %v\n", err)
+        return
+    }
+    defer db.Close()
+    if err = db.Ping(); err != nil {
+        log.Fatalf("could not ping to db: %v\n", err)
+        return
+    }
+
+    githubRedirectURL := *origin
+    githubRedirectURL.Path = "/api/oauth/github/callback"
+    githubOAuthConfig = &oauth2.Config{
+        ClientID:     githubClientID,
+        ClientSecret: githubClientSecret,
+        Endpoint:     github.Endpoint,
+        RedirectURL:  githubRedirectURL.String(),
+        Scopes:       []string{"read:user"},
+    }
+
+    cookieSigner = securecookie.New([]byte(hashKey), nil).MaxAge(0)
+
+    jwtSigner, err = jwt.HS256.New([]byte(jwtKey))
+    if err != nil {
+        log.Fatalf("could not create JWT signer: %v\n", err)
+        return
+    }
+
+    router := way.NewRouter()
+    router.HandleFunc("GET", "/api/oauth/github", githubOAuthStart)
+    router.HandleFunc("GET", "/api/oauth/github/callback", githubOAuthCallback)
+    router.HandleFunc("GET", "/api/auth_user", guard(getAuthUser))
+
+    log.Printf("accepting connections on port %d\n", port)
+    log.Printf("starting server at %s\n", origin.String())
+    addr := fmt.Sprintf(":%d", port)
+    if err = http.ListenAndServe(addr, router); err != nil {
+        log.Fatalf("could not start server: %v\n", err)
+    }
+}
+
+func env(key, fallbackValue string) string {
+    v, ok := os.LookupEnv(key)
+    if !ok {
+        return fallbackValue
+    }
+    return v
+}
+
+func intEnv(key string, fallbackValue int) int {
+    v, ok := os.LookupEnv(key)
+    if !ok {
+        return fallbackValue
+    }
+    i, err := strconv.Atoi(v)
+    if err != nil {
+        return fallbackValue
+    }
+    return i
+}
+```
+
+(The import block above also lists the packages used by the handlers further down in this file, such as `context`, `encoding/json`, `strings`, `time` and `gonanoid`.)
+
+Install dependencies:
+
+```
+go get -u github.com/gorilla/securecookie
+go get -u github.com/joho/godotenv
+go get -u github.com/knq/jwt
+go get -u github.com/lib/pq
+go get -u github.com/matoous/go-nanoid
+go get -u github.com/matryer/way
+go get -u golang.org/x/oauth2
+```
+
+We use a `.env` file to save secret keys and other configuration. Create it with at least this content:
+
+```
+GITHUB_CLIENT_ID=your_github_client_id
+GITHUB_CLIENT_SECRET=your_github_client_secret
+```
+
+The other environment variables we use are:
+
+  * `PORT`: The port the server runs on. Defaults to `3000`.
+  * `ORIGIN`: Your domain. Defaults to `http://localhost:3000/`. 
The port can also be extracted from this.
  * `DATABASE_URL`: The Cockroach address. Defaults to `postgresql://root@127.0.0.1:26257/messenger?sslmode=disable`.
  * `HASH_KEY`: Key to sign cookies. Yeah, we’ll use signed cookies for security.
  * `JWT_KEY`: Key to sign JSON web tokens.
+
+
+
+Because they have default values, you don’t need to write them in the `.env` file.
+
+After reading the configuration and connecting to the database, we create an OAuth config. We use the origin to build the callback URL (the same one we registered on the GitHub page). And we set the scope to “read:user”. This will give us permission to read the public user info; that’s because we just need the username and avatar. Then we initialize the cookie and JWT signers, define some endpoints, and start the server.
+
+Before implementing those HTTP handlers, let’s write a couple of functions to send HTTP responses.
+
+```
+func respond(w http.ResponseWriter, v interface{}, statusCode int) {
+    b, err := json.Marshal(v)
+    if err != nil {
+        respondError(w, fmt.Errorf("could not marshal response: %v", err))
+        return
+    }
+    w.Header().Set("Content-Type", "application/json; charset=utf-8")
+    w.WriteHeader(statusCode)
+    w.Write(b)
+}
+
+func respondError(w http.ResponseWriter, err error) {
+    log.Println(err)
+    http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
+}
+```
+
+The first one sends JSON, and the second one logs the error to the console and returns a `500 Internal Server Error`.
+
+### OAuth Start
+
+So, the user clicks on a link that says “Access with GitHub”… That link points to this endpoint, `/api/oauth/github`, which will redirect the user to GitHub.
+
+```
+func githubOAuthStart(w http.ResponseWriter, r *http.Request) {
+    state, err := gonanoid.Nanoid()
+    if err != nil {
+        respondError(w, fmt.Errorf("could not generate state: %v", err))
+        return
+    }
+
+    stateCookieValue, err := cookieSigner.Encode("state", state)
+    if err != nil {
+        respondError(w, fmt.Errorf("could not encode state cookie: %v", err))
+        return
+    }
+
+    http.SetCookie(w, &http.Cookie{
+        Name:     "state",
+        Value:    stateCookieValue,
+        Path:     "/api/oauth/github",
+        HttpOnly: true,
+    })
+    http.Redirect(w, r, githubOAuthConfig.AuthCodeURL(state), http.StatusTemporaryRedirect)
+}
+```
+
+OAuth2 uses a mechanism to prevent CSRF attacks, so it requires a “state”. We use nanoid to create a random string and use that as the state. We save it as a cookie too.
+
+### OAuth Callback
+
+Once the user grants access to their info on the GitHub page, they will be redirected to this endpoint. 
The URL will come with the state and a code on the query string: `/api/oauth/github/callback?state=&code=`.
+
+```
+const jwtLifetime = time.Hour * 24 * 14
+
+type GithubUser struct {
+    ID        int     `json:"id"`
+    Login     string  `json:"login"`
+    AvatarURL *string `json:"avatar_url,omitempty"`
+}
+
+type User struct {
+    ID        string  `json:"id"`
+    Username  string  `json:"username"`
+    AvatarURL *string `json:"avatarUrl"`
+}
+
+func githubOAuthCallback(w http.ResponseWriter, r *http.Request) {
+    stateCookie, err := r.Cookie("state")
+    if err != nil {
+        http.Error(w, http.StatusText(http.StatusTeapot), http.StatusTeapot)
+        return
+    }
+
+    http.SetCookie(w, &http.Cookie{
+        Name:     "state",
+        Value:    "",
+        MaxAge:   -1,
+        HttpOnly: true,
+    })
+
+    var state string
+    if err = cookieSigner.Decode("state", stateCookie.Value, &state); err != nil {
+        http.Error(w, http.StatusText(http.StatusTeapot), http.StatusTeapot)
+        return
+    }
+
+    q := r.URL.Query()
+
+    if state != q.Get("state") {
+        http.Error(w, http.StatusText(http.StatusTeapot), http.StatusTeapot)
+        return
+    }
+
+    ctx := r.Context()
+
+    t, err := githubOAuthConfig.Exchange(ctx, q.Get("code"))
+    if err != nil {
+        respondError(w, fmt.Errorf("could not fetch github token: %v", err))
+        return
+    }
+
+    client := githubOAuthConfig.Client(ctx, t)
+    resp, err := client.Get("https://api.github.com/user")
+    if err != nil {
+        respondError(w, fmt.Errorf("could not fetch github user: %v", err))
+        return
+    }
+
+    var githubUser GithubUser
+    if err = json.NewDecoder(resp.Body).Decode(&githubUser); err != nil {
+        respondError(w, fmt.Errorf("could not decode github user: %v", err))
+        return
+    }
+    defer resp.Body.Close()
+
+    tx, err := db.BeginTx(ctx, nil)
+    if err != nil {
+        respondError(w, fmt.Errorf("could not begin tx: %v", err))
+        return
+    }
+
+    var user User
+    if err = tx.QueryRowContext(ctx, `
+        SELECT id, username, avatar_url FROM users WHERE github_id = $1
+    `, githubUser.ID).Scan(&user.ID, &user.Username, &user.AvatarURL); err == sql.ErrNoRows {
+        if err = tx.QueryRowContext(ctx, `
+            INSERT INTO users (username, avatar_url, github_id) VALUES ($1, $2, $3)
+            RETURNING id
+        `, githubUser.Login, githubUser.AvatarURL, githubUser.ID).Scan(&user.ID); err != nil {
+            respondError(w, fmt.Errorf("could not insert user: %v", err))
+            return
+        }
+        user.Username = githubUser.Login
+        user.AvatarURL = githubUser.AvatarURL
+    } else if err != nil {
+        respondError(w, fmt.Errorf("could not query user by github ID: %v", err))
+        return
+    }
+
+    if err = tx.Commit(); err != nil {
+        respondError(w, fmt.Errorf("could not commit to finish github oauth: %v", err))
+        return
+    }
+
+    exp := time.Now().Add(jwtLifetime)
+    token, err := jwtSigner.Encode(jwt.Claims{
+        Subject:    user.ID,
+        Expiration: json.Number(strconv.FormatInt(exp.Unix(), 10)),
+    })
+    if err != nil {
+        respondError(w, fmt.Errorf("could not create token: %v", err))
+        return
+    }
+
+    expiresAt, _ := exp.MarshalText()
+
+    data := make(url.Values)
+    data.Set("token", string(token))
+    data.Set("expires_at", string(expiresAt))
+
+    http.Redirect(w, r, "/callback?"+data.Encode(), http.StatusTemporaryRedirect)
+}
+```
+
+First we try to decode the cookie with the state we saved before and compare it with the state that comes in the query string. If they don’t match, we return a `418 I'm a teapot` error.
+
+Then we exchange the code for a token. This token is used to create an HTTP client to make requests to the GitHub API. So we do a GET request to `https://api.github.com/user`. 
This endpoint will give us the current authenticated user info in JSON format. We decode it to get the user ID, login (username), and avatar URL.
+
+Then we try to find a user with that GitHub ID in the database. If none is found, we create one using that data.
+
+Then, with that user, we issue a JSON web token with the user ID as the Subject and redirect to the frontend with the token, alongside the expiration date, in the query string.
+
+The web app will be for another post, but the URL you are being redirected to is `/callback?token=&expires_at=`. There we’ll have some JavaScript to extract the token and expiration date from the URL and do a GET request to `/api/auth_user`, with the token in the `Authorization` header in the form `Bearer token_here`, to get the authenticated user and save it to localStorage.
+
+### Guard Middleware
+
+To get the current authenticated user we use a middleware. That’s because in future posts we’ll have more endpoints that require authentication, and a middleware allows us to share that functionality.
+
+```
+type ContextKey struct {
+    Name string
+}
+
+var keyAuthUserID = ContextKey{"auth_user_id"}
+
+func guard(handler http.HandlerFunc) http.HandlerFunc {
+    return func(w http.ResponseWriter, r *http.Request) {
+        var token string
+        if a := r.Header.Get("Authorization"); strings.HasPrefix(a, "Bearer ") {
+            token = a[7:]
+        } else if t := r.URL.Query().Get("token"); t != "" {
+            token = t
+        } else {
+            http.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)
+            return
+        }
+
+        var claims jwt.Claims
+        if err := jwtSigner.Decode([]byte(token), &claims); err != nil {
+            http.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)
+            return
+        }
+
+        ctx := r.Context()
+        ctx = context.WithValue(ctx, keyAuthUserID, claims.Subject)
+
+        handler(w, r.WithContext(ctx))
+    }
+}
+```
+
+First we try to read the token from the `Authorization` header or from a `token` in the URL query string. If nothing is found, we return a `401 Unauthorized` error. Then we decode the claims in the token and use the Subject as the current authenticated user ID.
+
+Now, we can wrap any `http.HandlerFunc` that needs authentication with this middleware, and we’ll have the authenticated user ID in the context.
+
+```
+var guarded = guard(func(w http.ResponseWriter, r *http.Request) {
+    authUserID := r.Context().Value(keyAuthUserID).(string)
+})
+```
+
+### Get Authenticated User
+
+```
+func getAuthUser(w http.ResponseWriter, r *http.Request) {
+    ctx := r.Context()
+    authUserID := ctx.Value(keyAuthUserID).(string)
+
+    var user User
+    if err := db.QueryRowContext(ctx, `
+        SELECT username, avatar_url FROM users WHERE id = $1
+    `, authUserID).Scan(&user.Username, &user.AvatarURL); err == sql.ErrNoRows {
+        http.Error(w, http.StatusText(http.StatusTeapot), http.StatusTeapot)
+        return
+    } else if err != nil {
+        respondError(w, fmt.Errorf("could not query auth user: %v", err))
+        return
+    }
+
+    user.ID = authUserID
+
+    respond(w, user, http.StatusOK)
+}
+```
+
+We use the guard middleware to get the current authenticated user ID and do a query to the database.
+
+* * *
+
+That covers the OAuth process on the backend. In the next part we’ll see how to start conversations with other users.
+
+[Source Code][3]

--------------------------------------------------------------------------------

via: https://nicolasparada.netlify.com/posts/go-messenger-oauth/

作者:[Nicolás Parada][a]
选题:[lujun9972][b]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)

本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出

[a]: https://nicolasparada.netlify.com/
[b]: https://github.com/lujun9972
[1]: https://nicolasparada.netlify.com/posts/go-messenger-schema/
[2]: https://github.com/settings/applications/new
[3]: https://github.com/nicolasparada/go-messenger-demo

From acf9b6c29c9f7168e88b04a36a74c335eb148337 Mon Sep 17 00:00:00 2001
From: DarkSun
Date: Wed, 18 Sep 2019 12:25:33 +0800
Subject: [PATCH 166/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020180705=20Buildi?=
 =?UTF-8?q?ng=20a=20Messenger=20App:=20Schema?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

sources/tech/20180705 Building a Messenger App- Schema.md

---
 ...180705 Building a Messenger App- Schema.md | 114 ++++++++++++++++++
 1 file changed, 114 insertions(+)
 create mode 100644 sources/tech/20180705 Building a Messenger App- Schema.md

diff --git a/sources/tech/20180705 Building a Messenger App- Schema.md b/sources/tech/20180705 Building a Messenger App- Schema.md
new file mode 100644
index 0000000000..39b9bf97c2
--- /dev/null
+++ b/sources/tech/20180705 Building a Messenger App- Schema.md
@@ -0,0 +1,114 @@
+[#]: collector: (lujun9972)
+[#]: translator: ( )
+[#]: reviewer: ( )
+[#]: publisher: ( )
+[#]: url: ( )
+[#]: subject: (Building a Messenger App: Schema)
+[#]: via: (https://nicolasparada.netlify.com/posts/go-messenger-schema/)
+[#]: author: (Nicolás Parada https://nicolasparada.netlify.com/)
+
+Building a Messenger App: Schema
+======
+
+New post on building a messenger app. You already know this kind of app; they allow you to have conversations with your friends. [Facebook Messenger][1], [WhatsApp][2] and [Skype][3] are a few examples. Those apps allow you to send pictures, stream video, record audio, chat with large groups of people, etc… We’ll try to keep it simple, though, and just send text messages between two users.
+
+We’ll use [CockroachDB][4] as the SQL database, [Go][5] as the backend language, and JavaScript to make a web app.
+
+In this first post, we’re covering the database design.
+
+```
+CREATE TABLE users (
+    id SERIAL NOT NULL PRIMARY KEY,
+    username STRING NOT NULL UNIQUE,
+    avatar_url STRING,
+    github_id INT NOT NULL UNIQUE
+);
+```
+
+Of course, this app requires users. We will go with social login. I selected just [GitHub][6], so we keep a reference to the GitHub user ID there.
+
+```
+CREATE TABLE conversations (
+    id SERIAL NOT NULL PRIMARY KEY,
+    last_message_id INT,
+    INDEX (last_message_id DESC)
+);
+```
+
+Each conversation references the last message. Every time we insert a new message, we’ll go and update this field, as sketched below. (I’ll add the foreign key constraint later.)
+
+You could say that we could group the messages and get the last message that way instead, but that would add much more complexity to the queries.
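+
+By contrast, keeping this denormalized reference fresh is a single cheap statement. Here is an illustrative sketch of it (the real insert logic comes in a later post):
+
+```
+-- Illustrative: after inserting a new message, point its conversation at it.
+UPDATE conversations
+SET last_message_id = $1
+WHERE id = $2;
+```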
+
+```
+CREATE TABLE participants (
+    user_id INT NOT NULL REFERENCES users ON DELETE CASCADE,
+    conversation_id INT NOT NULL REFERENCES conversations ON DELETE CASCADE,
+    messages_read_at TIMESTAMPTZ NOT NULL DEFAULT now(),
+    PRIMARY KEY (user_id, conversation_id)
+);
+```
+
+Even though I said conversations will be between just two users, we’ll go with a design that allows the possibility of adding multiple participants to a conversation. That’s why we have a participants table between conversations and users.
+
+To know whether the user has unread messages, we have the `messages_read_at` field. Every time the user reads a conversation, we update this value, so we can compare it with the conversation’s last message `created_at` field.
+
+```
+CREATE TABLE messages (
+    id SERIAL NOT NULL PRIMARY KEY,
+    content STRING NOT NULL,
+    user_id INT NOT NULL REFERENCES users ON DELETE CASCADE,
+    conversation_id INT NOT NULL REFERENCES conversations ON DELETE CASCADE,
+    created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
+    INDEX(created_at DESC)
+);
+```
+
+Last but not least is the messages table; it saves a reference to the user who created it and the conversation it goes in. It has an index on `created_at`, too, to sort messages.
+
+```
+ALTER TABLE conversations
+ADD CONSTRAINT fk_last_message_id_ref_messages
+FOREIGN KEY (last_message_id) REFERENCES messages ON DELETE SET NULL;
+```
+
+And yep, that’s the foreign key constraint I mentioned.
+
+These four tables will do the trick. You can save those statements to a file and pipe it to the Cockroach CLI. First start a new node:
+
+```
+cockroach start --insecure --host 127.0.0.1
+```
+
+Then create the database and tables:
+
+```
+cockroach sql --insecure -e "CREATE DATABASE messenger"
+cat schema.sql | cockroach sql --insecure -d messenger
+```
+
+* * *
+
+That’s it. In the next part we’ll do the login. Wait for it.
+ +[Souce Code][7] + +-------------------------------------------------------------------------------- + +via: https://nicolasparada.netlify.com/posts/go-messenger-schema/ + +作者:[Nicolás Parada][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://nicolasparada.netlify.com/ +[b]: https://github.com/lujun9972 +[1]: https://www.messenger.com/ +[2]: https://www.whatsapp.com/ +[3]: https://www.skype.com/ +[4]: https://www.cockroachlabs.com/ +[5]: https://golang.org/ +[6]: https://github.com/ +[7]: https://github.com/nicolasparada/go-messenger-demo From 29690cd45d7c41b0b22d390174328e763dbd7e0f Mon Sep 17 00:00:00 2001 From: Xingyu Wang Date: Wed, 18 Sep 2019 12:39:02 +0800 Subject: [PATCH 167/202] PRF @heguangzhi --- ...nsible environments on MacOS with Conda.md | 102 +++++++++--------- 1 file changed, 49 insertions(+), 53 deletions(-) diff --git a/translated/tech/20190828 Managing Ansible environments on MacOS with Conda.md b/translated/tech/20190828 Managing Ansible environments on MacOS with Conda.md index f5d72eaa4e..2154e9f1b2 100644 --- a/translated/tech/20190828 Managing Ansible environments on MacOS with Conda.md +++ b/translated/tech/20190828 Managing Ansible environments on MacOS with Conda.md @@ -1,6 +1,6 @@ [#]: collector: (lujun9972) [#]: translator: (heguangzhi) -[#]: reviewer: ( ) +[#]: reviewer: (wxy) [#]: publisher: ( ) [#]: url: ( ) [#]: subject: (Managing Ansible environments on MacOS with Conda) @@ -11,22 +11,23 @@ 使用 Conda 管理 MacOS 上的 Ansible 环境 ===== -Conda 将 Ansible 所需的一切都收集到虚拟环境中并将其与其他项目分开。 -![CICD with gears][1] +> Conda 将 Ansible 所需的一切都收集到虚拟环境中并将其与其他项目分开。 -如果您是一名使用 MacOS 并参与 Ansible 管理的 Python 开发人员,您可能希望使用 Conda 包管理器将 Ansible 的工作内容与核心操作系统和其他本地项目分开。 +![](https://img.linux.net.cn/data/attachment/album/201909/18/123838m1bcmke570kl6kzm.jpg) -Ansible 基于 Python的。让 Ansible 在 MacOS 上工作 Conda 并不是必须要的,但是它确实让您管理 Python 版本和包依赖变得更加容易。这允许您在 MacOS 上使用升级的 Python 版本,并在您的系统中、Ansible 和其他编程项目之间保持 Python 包的依赖性是相互独立的。 +如果你是一名使用 MacOS 并涉及到 Ansible 管理的 Python 开发人员,你可能希望使用 Conda 包管理器将 Ansible 的工作内容与核心操作系统和其他本地项目分开。 -在 MacOS 上安装 Ansible 还有其他方法。您可以使用[Homebrew][2],但是如果您对 Python 开发(或 Ansible 开发)感兴趣,您可能会发现在一个独立 Python 虚拟环境中管理 Ansible 可以减少一些混乱。我觉得这更简单;与其试图将 Python 版本和依赖项加载到系统或在 **/usr/local** 目录中 ,还不如使用 Conda 帮助我将 Ansible 所需的一切都收集到一个虚拟环境中,并将其与其他项目完全分开。 +Ansible 基于 Python。要让 Ansible 在 MacOS 上工作,Conda 并不是必须要的,但是它确实让你管理 Python 版本和包依赖变得更加容易。这允许你在 MacOS 上使用升级的 Python 版本,并在你的系统中、Ansible 和其他编程项目之间保持 Python 包的依赖性相互独立。 -本文着重于使用 Conda 作为 Python 项目来管理 Ansible ,以保持它的干净并与其他项目分开。请继续阅读,并了解如何安装 Conda、创建新的虚拟环境、安装 Ansible 并对其进行测试。 +在 MacOS 上安装 Ansible 还有其他方法。你可以使用 [Homebrew][2],但是如果你对 Python 开发(或 Ansible 开发)感兴趣,你可能会发现在一个独立 Python 虚拟环境中管理 Ansible 可以减少一些混乱。我觉得这更简单;与其试图将 Python 版本和依赖项加载到系统或 `/usr/local` 目录中 ,还不如使用 Conda 帮助我将 Ansible 所需的一切都收集到一个虚拟环境中,并将其与其他项目完全分开。 + +本文着重于使用 Conda 作为 Python 项目来管理 Ansible,以保持它的干净并与其他项目分开。请继续阅读,并了解如何安装 Conda、创建新的虚拟环境、安装 Ansible 并对其进行测试。 ### 序幕 -最近,我想学习[Ansible][3],所以我需要找到安装它的最佳方法。 +最近,我想学习 [Ansible][3],所以我需要找到安装它的最佳方法。 -我通常对在我的日常工作站上安装东西很谨慎。我尤其不喜欢对供应商的默认操作系统安装应用手动更新(这是我多年作为 Unix 系统管理的首选)。我真的很想使用 Python 3.7,但是 MacOS 包是旧的2.7,我不会安装任何可能干扰核心 MacOS 系统的全局 Python 包。 +我通常对在我的日常工作站上安装东西很谨慎。我尤其不喜欢对供应商的默认操作系统安装应用手动更新(这是我多年作为 Unix 系统管理的习惯)。我真的很想使用 Python 3.7,但是 MacOS 的 Python 包是旧的 2.7,我不会安装任何可能干扰核心 MacOS 系统的全局 Python 包。 所以,我使用本地 Ubuntu 18.04 虚拟机上开始了我的 Ansible 工作。这提供了真正意义上的的安全隔离,但我很快发现管理它是非常乏味的。所以我着手研究如何在本机 MacOS 上获得一个灵活但独立的 Ansible 系统。 @@ -34,19 
+35,19 @@ Ansible 基于 Python的。让 Ansible 在 MacOS 上工作 Conda 并不是必须 ### 安装 Conda -Conda 是一个开源软件,它提供方便的包和环境管理功能。它可以帮助您管理多个版本的 Python 、安装软件包依赖关系、执行升级和维护项目隔离。如果您手动管理 Python 虚拟环境,Conda 将有助于简化和管理您的工作。浏览 [Conda 文档][4]可以了解更多细节。 +Conda 是一个开源软件,它提供方便的包和环境管理功能。它可以帮助你管理多个版本的 Python、安装软件包依赖关系、执行升级和维护项目隔离。如果你手动管理 Python 虚拟环境,Conda 将有助于简化和管理你的工作。浏览 [Conda 文档][4]可以了解更多细节。 -我选择了 [Miniconda][5] Python 3.7 安装在我的工作站中,因为我想要最新的 Python 版本。无论选择哪个版本,您都可以使用其他版本的 Python 安装新的虚拟环境。 +我选择了 [Miniconda][5] Python 3.7 安装在我的工作站中,因为我想要最新的 Python 版本。无论选择哪个版本,你都可以使用其他版本的 Python 安装新的虚拟环境。 -要安装 Conda,请下载 PKG 格式的文件,进行通常的双击,并选择 “Install for me only” 选项。安装在我的系统上占用了大约158兆的空间。 +要安装 Conda,请下载 PKG 格式的文件,进行通常的双击,并选择 “Install for me only” 选项。安装在我的系统上占用了大约 158 兆的空间。 -安装完成后,调出一个终端来查看您有什么了。您应该看到: +安装完成后,调出一个终端来查看你有什么了。你应该看到: - * 一个 **miniconda3** 目录在您的 **home** 目录中 - * shell 提示符被修改为 "(base)" - * **.bash_profile** 文件被 Conda-specific 设置内容更新 + * 在你的家目录中的 `miniconda3` 目录 + * shell 提示符被修改为 `(base)` + * `.bash_profile` 文件更新了一些 Conda 特有的设置内容 -现在已经安装了基础,您就有了第一个 Python 虚拟环境。运行 Python 版本检查可以证明这一点,您的 PATH 将指向新的位置: +现在基础已经安装好了,你有了第一个 Python 虚拟环境。运行 Python 版本检查可以证明这一点,你的 `PATH` 将指向新的位置: ``` (base) $ which python @@ -54,12 +55,11 @@ Conda 是一个开源软件,它提供方便的包和环境管理功能。它 (base) $ python --version Python 3.7.1 ``` -现在安装了 Conda ,下一步是建立一个虚拟环境,然后安装 Ansible 并运行。 + +现在安装了 Conda,下一步是建立一个虚拟环境,然后安装 Ansible 并运行。 ### 为 Ansible 创建虚拟环境 - - 我想将 Ansible 与我的其他 Python 项目分开,所以我创建了一个新的虚拟环境并切换到它: ``` @@ -68,61 +68,57 @@ Python 3.7.1 (ansible-env) $ conda env list ``` +第一个命令将 Conda 库克隆到一个名为 `ansible-env` 的新虚拟环境中。克隆引入了 Python 3.7 版本和一系列默认的 Python 模块,你可以根据需要添加、删除或升级这些模块。 -第一个命令将 Conda 库克隆到一个名为 **ansible-env** 的新虚拟环境中。克隆引入了 Python 3.7 版本和一系列默认的 Python 模块,您可以根据需要添加、删除或升级这些模块。 - -第二个命令将 shell 上下文更改为这个新的环境。它为 Python 及其包含的模块设置了正确的路径。请注意,在 **conda activate ansible-env** 命令后,您的 shell 提示符会发生变化。 +第二个命令将 shell 上下文更改为这个新的环境。它为 Python 及其包含的模块设置了正确的路径。请注意,在 `conda activate ansible-env` 命令后,你的 shell 提示符会发生变化。 第三个命令不是必须的;它列出了安装了哪些 Python 模块及其版本和其他数据。 -您可以随时使用 Conda 的 **activate** 命令切换到另一个虚拟环境。这将带您回到基本的: **conda base**。 +你可以随时使用 Conda 的 `activate` 命令切换到另一个虚拟环境。这将带你回到基本环境:`conda base`。 -### 安装 Ansible +### 安装 Ansible -安装 Ansible 有多种方法,但是使用 Conda 可以将 Ansible 版本和所有需要的依赖项打包在一个地方。Conda 提供了灵活的,既可以将所有内容分开,又可以根据需要添加其他新环境(我将在后面演示)。 +安装 Ansible 有多种方法,但是使用 Conda 可以将 Ansible 版本和所有需要的依赖项打包在一个地方。Conda 提供了灵活性,既可以将所有内容分开,又可以根据需要添加其他新环境(我将在后面演示)。 要安装 Ansible 的相对较新版本,请使用: - ``` (base) $ conda activate ansible-env (ansible-env) $ conda install -c conda-forge ansible ``` -由于 Ansible 不是 Conda 默认的一部分,因此**-c**用于从备用通道搜索和安装。Ansible 现已安装到**ansible-env**虚拟环境中,可以使用了。 - +由于 Ansible 不是 Conda 默认通道的一部分,因此 `-c` 用于从备用通道搜索和安装。Ansible 现已安装到 `ansible-env` 虚拟环境中,可以使用了。 ### 使用 Ansible -既然您已经安装了 Conda 虚拟环境,就可以使用它了。首先,确保要控制的节点已将工作站的 SSH 密钥安装到正确的用户帐户。 - -调出一个新的 shell 并运行一些基本的Ansible命令: +既然你已经安装了 Conda 虚拟环境,就可以使用它了。首先,确保要控制的节点已将工作站的 SSH 密钥安装到正确的用户帐户。 +调出一个新的 shell 并运行一些基本的 Ansible 命令: ``` (base) $ conda activate ansible-env (ansible-env) $ ansible --version ansible 2.8.1 -  config file = None -  configured module search path = ['/Users/jfarrell/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] -  ansible python module location = /Users/jfarrell/miniconda3/envs/ansibleTest/lib/python3.7/site-packages/ansible -  executable location = /Users/jfarrell/miniconda3/envs/ansibleTest/bin/ansible -  python version = 3.7.1 (default, Dec 14 2018, 13:28:58) [Clang 4.0.1 (tags/RELEASE_401/final)] + config file = None + configured module search path = ['/Users/jfarrell/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] + ansible 
python module location = /Users/jfarrell/miniconda3/envs/ansibleTest/lib/python3.7/site-packages/ansible + executable location = /Users/jfarrell/miniconda3/envs/ansibleTest/bin/ansible + python version = 3.7.1 (default, Dec 14 2018, 13:28:58) [Clang 4.0.1 (tags/RELEASE_401/final)] (ansible-env) $ ansible all -m ping -u ansible -192.168.99.200 | SUCCESS => { -    "ansible_facts": { -        "discovered_interpreter_python": "/usr/bin/python" -    }, -    "changed": false, -    "ping": "pong" +192.168.99.200 | SUCCESS => { + "ansible_facts": { + "discovered_interpreter_python": "/usr/bin/python" + }, + "changed": false, + "ping": "pong" } ``` -现在 Ansible 正在工作了,您可以在控制台中抽身,并从您的 MacOS 工作站中使用它们。 +现在 Ansible 工作了,你可以在控制台中抽身,并从你的 MacOS 工作站中使用它们。 ### 克隆新的 Ansible 进行 Ansible 开发 -这部分完全是可选的;只有当您想要额外的虚拟环境来修改 Ansible 或者安全地使用有问题的 Python 模块时,才需要它。您可以通过以下方式将主 Ansible 环境克隆到开发副本中: +这部分完全是可选的;只有当你想要额外的虚拟环境来修改 Ansible 或者安全地使用有问题的 Python 模块时,才需要它。你可以通过以下方式将主 Ansible 环境克隆到开发副本中: ``` (ansible-env) $ conda create --name ansible-dev --clone ansible-env @@ -132,24 +128,24 @@ ansible 2.8.1 ### 需要注意的问题 -偶尔您可能遇到使用 Conda 的麻烦。您通常可以通过以下方式删除不良环境: +偶尔你可能遇到使用 Conda 的麻烦。你通常可以通过以下方式删除不良环境: ``` $ conda activate base $ conda remove --name ansible-dev --all ``` -如果出现无法解决的错误,通常可以通过在 **~/miniconda3/envs** 中找到环境并删除整个目录来直接删除环境。如果基础损坏了,您可以删除整个 **~/miniconda3**,然后从 PKG 文件中重新安装。只要确保保留 **~/miniconda3/envs** ,或使用 Conda 工具导出环境配置并在以后重新创建即可。 -MacOS 上不包括 **sshpass** 程序。只有当您的 Ansible 工作要求您向 Ansible 提供SSH登录密码时,才需要它。您可以在 SourceForge 上找到当前的[sshpass source][6]。 +如果出现无法解决的错误,通常可以通过在 `~/miniconda3/envs` 中找到该环境并删除整个目录来直接删除环境。如果基础环境损坏了,你可以删除整个 `~/miniconda3`,然后从 PKG 文件中重新安装。只要确保保留 `~/miniconda3/envs` ,或使用 Conda 工具导出环境配置并在以后重新创建即可。 +MacOS 上不包括 `sshpass` 程序。只有当你的 Ansible 工作要求你向 Ansible 提供 SSH 登录密码时,才需要它。你可以在 SourceForge 上找到当前的 [sshpass 源代码][6]。 -最后,基础 Conda Python 模块列表可能缺少您工作所需的一些 Python 模块。如果您需要安装一个模块,**conda install <package>** 命令是首选的,但是 **pip** 可以在需要的地方使用,Conda会识别安装模块。 +最后,基础的 Conda Python 模块列表可能缺少你工作所需的一些 Python 模块。如果你需要安装一个模块,首选命令是 `conda install package`,但是需要的话也可以使用 `pip`,Conda 会识别安装的模块。 ### 结论 -Ansible 是一个强大的自动化工具,值得我们去学习。Conda是一个简单有效的 Python 虚拟环境管理工具。 +Ansible 是一个强大的自动化工具,值得我们去学习。Conda 是一个简单有效的 Python 虚拟环境管理工具。 -在您的 MacOS 环境中保持软件安装分离是保持日常工作环境的稳定性和健全性的谨慎方法。Conda 尤其有助于升级您的Python 版本,将 Ansible 从其他项目中分离出来,并安全地使用 Ansible。 +在你的 MacOS 环境中保持软件安装分离是保持日常工作环境的稳定性和健全性的谨慎方法。Conda 尤其有助于升级你的 Python 版本,将 Ansible 从其他项目中分离出来,并安全地使用 Ansible。 -------------------------------------------------------------------------------- @@ -157,8 +153,8 @@ via: https://opensource.com/article/19/8/using-conda-ansible-administration-maco 作者:[James Farrell][a] 选题:[lujun9972][b] -译者:[译者ID](https://github.com/heguangzhi) -校对:[校对者ID](https://github.com/校对者ID) +译者:[heguangzhi](https://github.com/heguangzhi) +校对:[wxy](https://github.com/wxy) 本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 From 88512d559868079240d430d9b20535be5052e689 Mon Sep 17 00:00:00 2001 From: Xingyu Wang Date: Wed, 18 Sep 2019 12:39:35 +0800 Subject: [PATCH 168/202] PUB @heguangzhi https://linux.cn/article-11356-1.html --- ...90828 Managing Ansible environments on MacOS with Conda.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) rename {translated/tech => published}/20190828 Managing Ansible environments on MacOS with Conda.md (99%) diff --git a/translated/tech/20190828 Managing Ansible environments on MacOS with Conda.md b/published/20190828 Managing Ansible environments on MacOS with Conda.md similarity index 99% rename from translated/tech/20190828 Managing 
Ansible environments on MacOS with Conda.md rename to published/20190828 Managing Ansible environments on MacOS with Conda.md index 2154e9f1b2..24e8d65fa0 100644 --- a/translated/tech/20190828 Managing Ansible environments on MacOS with Conda.md +++ b/published/20190828 Managing Ansible environments on MacOS with Conda.md @@ -1,8 +1,8 @@ [#]: collector: (lujun9972) [#]: translator: (heguangzhi) [#]: reviewer: (wxy) -[#]: publisher: ( ) -[#]: url: ( ) +[#]: publisher: (wxy) +[#]: url: (https://linux.cn/article-11356-1.html) [#]: subject: (Managing Ansible environments on MacOS with Conda) [#]: via: (https://opensource.com/article/19/8/using-conda-ansible-administration-macos) [#]: author: (James Farrell https://opensource.com/users/jamesf) From 7aadc31c86703acc22ccc77526d2efa7af8fc3b6 Mon Sep 17 00:00:00 2001 From: heguangzhi <7731226@qq.com> Date: Wed, 18 Sep 2019 13:43:19 +0800 Subject: [PATCH 169/202] translated --- ...823 The Linux kernel- Top 5 innovations.md | 105 ----------------- ...823 The Linux kernel- Top 5 innovations.md | 108 ++++++++++++++++++ 2 files changed, 108 insertions(+), 105 deletions(-) delete mode 100644 sources/tech/20190823 The Linux kernel- Top 5 innovations.md create mode 100644 translated/tech/20190823 The Linux kernel- Top 5 innovations.md diff --git a/sources/tech/20190823 The Linux kernel- Top 5 innovations.md b/sources/tech/20190823 The Linux kernel- Top 5 innovations.md deleted file mode 100644 index 5e35982290..0000000000 --- a/sources/tech/20190823 The Linux kernel- Top 5 innovations.md +++ /dev/null @@ -1,105 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: (heguangzhi) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (The Linux kernel: Top 5 innovations) -[#]: via: (https://opensource.com/article/19/8/linux-kernel-top-5-innovations) -[#]: author: (Seth Kenlon https://opensource.com/users/sethhttps://opensource.com/users/mhaydenhttps://opensource.com/users/mralexjuarez) - -The Linux kernel: Top 5 innovations -====== -Want to know what the actual (not buzzword) innovations are when it -comes to the Linux kernel? Read on. -![Penguin with green background][1] - -The word _innovation_ gets bandied about in the tech industry almost as much as _revolution_, so it can be difficult to differentiate hyperbole from something that’s actually exciting. The Linux kernel has been called innovative, but then again it’s also been called the biggest hack in modern computing, a monolith in a micro world. - -Setting aside marketing and modeling, Linux is arguably the most popular kernel of the open source world, and it’s introduced some real game-changers over its nearly 30-year life span. - -### Cgroups (2.6.24) - -Back in 2007, Paul Menage and Rohit Seth got the esoteric [_control groups_ (cgroups)][2] feature added to the kernel (the current implementation of cgroups is a rewrite by Tejun Heo.) This new technology was initially used as a way to ensure, essentially, quality of service for a specific set of tasks. - -For example, you could create a control group definition (cgroup) for all tasks associated with your web server, another cgroup for routine backups, and yet another for general operating system requirements. You could then control a percentage of resources for each cgroup, such that your OS and web server gets the bulk of system resources while your backup processes have access to whatever is left. - -What cgroups has become most famous for, though, is its role as the technology driving the cloud today: containers. 
In fact, cgroups were originally named [process containers][3]. It was no great surprise when they were adopted by projects like [LXC][4], [CoreOS][5], and Docker. - -The floodgates being opened, the term _containers_ justly became synonymous with Linux, and the concept of microservice-style cloud-based “apps” quickly became the norm. These days, it’s hard to get away from cgroups, they’re so prevalent. Every large-scale infrastructure (and probably your laptop, if you run Linux) takes advantage of cgroups in a meaningful way, making your computing experience more manageable and more flexible than ever. - -For example, you might already have installed [Flathub][6] or [Flatpak][7] on your computer, or maybe you’ve started using [Kubernetes][8] and/or [OpenShift][9] at work. Regardless, if the term “containers” is still hazy for you, you can gain a hands-on understanding of containers from [Behind the scenes with Linux containers][10]. - -### LKMM (4.17) - -In 2018, the hard work of Jade Alglave, Alan Stern, Andrea Parri, Luc Maranget, Paul McKenney, and several others, got merged into the mainline Linux kernel to provide formal memory models. The Linux Kernel Memory [Consistency] Model (LKMM) subsystem is a set of tools describing the Linux memory coherency model, as well as producing _litmus tests_ (**klitmus**, specifically) for testing. - -As systems become more complex in physical design (more CPU cores added, cache and RAM grow, and so on), the harder it is for them to know which address space is required by which CPU, and when. For example, if CPU0 needs to write data to a shared variable in memory, and CPU1 needs to read that value, then CPU0 must write before CPU1 attempts to read. Similarly, if values are written in one order to memory, then there’s an expectation that they are also read in that same order, regardless of which CPU or CPUs are doing the reading. - -Even on a single CPU, memory management requires a specific task order. A simple action such as **x = y** requires a CPU to load the value of **y** from memory, and then store that value in **x**. Placing the value stored in **y** into the **x** variable cannot occur _before_ the CPU has read the value from memory. There are also address dependencies: **x[n] = 6** requires that **n** is loaded before the CPU can store the value of six. - -LKMM helps identify and trace these memory patterns in code. It does this in part with a tool called **herd**, which defines the constraints imposed by a memory model (in the form of logical axioms), and then enumerates all possible outcomes consistent with these constraints. - -### Low-latency patch (2.6.38) - -Long ago, in the days before 2011, if you wanted to do "serious" [multimedia work on Linux][11], you had to obtain a low-latency kernel. This mostly applied to [audio recording][12] while adding lots of real-time effects (such as singing into a microphone and adding reverb, and hearing your voice in your headset with no noticeable delay). There were distributions, such as [Ubuntu Studio][13], that reliably provided such a kernel, so in practice it wasn't much of a hurdle, just a significant caveat when choosing your distribution as an artist. - -However, if you weren’t using Ubuntu Studio, or you had some need to update your kernel before your distribution got around to it, you had to go to the rt-patches web page, download the kernel patches, apply them to your kernel source code, compile, and install manually. 
- -And then, with the release of kernel version 2.6.38, this process was all over. The Linux kernel suddenly, as if by magic, had low-latency code (according to benchmarks, latency decreased by a factor of 10, at least) built-in by default. No more downloading patches, no more compiling. Everything just worked, and all because of a small 200-line patch implemented by Mike Galbraith. - -For open source multimedia artists the world over, it was a game-changer. Things got so good from 2011 on that in 2016, I challenged myself to [build a Digital Audio Workstation (DAW) on a Raspberry Pi v1 (model B)][14] and found that it worked surprisingly well. - -### RCU (2.5) - -RCU, or Read-Copy-Update, is a system defined in computer science that allows multiple processor threads to read from shared memory. It does this by deferring updates, but also marking them as updated, to ensure that the data’s consumers read the latest version. Effectively, this means that reads happen concurrently with updates. - -The typical RCU cycle is a little like this: - - 1. Remove pointers to data to prevent other readers from referencing it. - 2. Wait for readers to complete their critical processes. - 3. Reclaim the memory space. - - - -Dividing the update stage into removal and reclamation phases means the updater performs the removal immediately while deferring reclamation until all active readers are complete (either by blocking them or by registering a callback to be invoked upon completion). - -While the concept of read-copy-update was not invented for the Linux kernel, its implementation in Linux is a defining example of the technology. - -### Collaboration (0.01) - -The final answer to the question of what the Linux kernel innovated will always be, above all else, collaboration. Call it good timing, call it technical superiority, call it hackability, or just call it open source, but the Linux kernel and the many projects that it enabled is a glowing example of collaboration and cooperation. - -And it goes well beyond just the kernel. People from all walks of life have contributed to open source, arguably _because_ of the Linux kernel. The Linux was, and remains to this day, a major force of [Free Software][15], inspiring users to bring their code, art, ideas, or just themselves, to a global, productive, and diverse community of humans. - -### What’s your favorite innovation? - -This list is biased toward my own interests: containers, non-uniform memory access (NUMA), and multimedia. I’ve surely left your favorite kernel innovation off the list. Tell me about it in the comments! 
- --------------------------------------------------------------------------------- - -via: https://opensource.com/article/19/8/linux-kernel-top-5-innovations - -作者:[Seth Kenlon][a] -选题:[lujun9972][b] -译者:[heguangzhi](https://github.com/heguangzhi) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://opensource.com/users/sethhttps://opensource.com/users/mhaydenhttps://opensource.com/users/mralexjuarez -[b]: https://github.com/lujun9972 -[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/linux_penguin_green.png?itok=ENdVzW22 (Penguin with green background) -[2]: https://en.wikipedia.org/wiki/Cgroups -[3]: https://lkml.org/lkml/2006/10/20/251 -[4]: https://linuxcontainers.org -[5]: https://coreos.com/ -[6]: http://flathub.org -[7]: http://flatpak.org -[8]: http://kubernetes.io -[9]: https://www.redhat.com/sysadmin/learn-openshift-minishift -[10]: https://opensource.com/article/18/11/behind-scenes-linux-containers -[11]: http://slackermedia.info -[12]: https://opensource.com/article/17/6/qtractor-audio -[13]: http://ubuntustudio.org -[14]: https://opensource.com/life/16/3/make-music-raspberry-pi-milkytracker -[15]: http://fsf.org diff --git a/translated/tech/20190823 The Linux kernel- Top 5 innovations.md b/translated/tech/20190823 The Linux kernel- Top 5 innovations.md new file mode 100644 index 0000000000..cdf455f02a --- /dev/null +++ b/translated/tech/20190823 The Linux kernel- Top 5 innovations.md @@ -0,0 +1,108 @@ +[#]: collector: (lujun9972) +[#]: translator: (heguangzhi) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (The Linux kernel: Top 5 innovations) +[#]: via: (https://opensource.com/article/19/8/linux-kernel-top-5-innovations) +[#]: author: (Seth Kenlon https://opensource.com/users/sethhttps://opensource.com/users/mhaydenhttps://opensource.com/users/mralexjuarez) + +The Linux kernel: Top 5 innovations +====== +Linux 内核:五大创新 +====== + +想知道什么是真正的(不是那种时髦的)在 Linux 内核上的创新吗?请继续阅读。 +![绿色背景的企鹅][1] + +_创新_ 这个词在科技行业的传播几乎和 _革命_ 一样多,所以很难区分那些夸张和真正令人振奋的东西。Linux 内核被称为创新的,但它又被称为现代计算中最大的黑客,一个微观世界中的庞然大物。 + +撇开市场和模式不谈,Linux 可以说是开源世界中最受欢迎的内核,它在近30年的生命周期中引入了一些真正的游戏改变者。 + +### Cgroups (2.6.24) + +早在2007年,Paul Menage 和 Rohit Seth 就在内核中添加了深奥的[_control groups_ (cgroups)][2]功能(cgroups 的当前实现是由 Tejun Heo 重写的。)这种新技术最初被用作一种方法,从本质上来说,是为了确保一组特定任务的服务质量。 + +例如,您为与您的 WEB 服务相关联的所有任务创建一个控制组定义 ( cgroup ),为常规备份创建另一个 cgroup ,为一般操作系统需求创建另一个cgroup。然后,您可以控制每个组的资源百分比,这样您的操作系统和 WEB 服务就可以获得大部分系统资源,而您的备份进程可以访问剩余的资源。 + +然而,cgroups 最著名的是它作为今天驱动云技术的角色:容器。事实上,cgroups 最初被命名为[进程容器][3]。当它们被 [LXC][4],[CoreOS][5]和 Docker 等项目采用时,这并不奇怪。 + +就像闸门打开后一样,“ _容器_ ”一词恰好成为了 Linux 的同义词,微服务风格的基于云的“应用”概念很快成为了规范。如今,很难脱离 cgroups ,他们是如此普遍。每一个大规模的基础设施(可能还有你的笔记本电脑,如果你运行 Linux 的话)都以一种有意思的方式使用了 cgroups ,使你的计算体验比以往任何时候都更加易于管理和灵活。 + +例如,您可能已经在电脑上安装了[Flathub][6]或[Flatpak][7],或者您已经在工作中使用[Kubernetes][8]和/或[OpenShift][9]。不管怎样,如果“容器”这个术语对你来说仍然模糊不清,你可以在[ Linux 容器背后的应用场景][10] 获得对容器的实际理解。 + +### LKMM (4.17) + +2018年,Jade Alglave, Alan Stern, Andrea Parri, Luc Maranget, Paul McKenney, 和其他几个人的辛勤工作的成果被合并到主线 Linux 内核中,以提供正式的内存模型。Linux 内核内存[一致性]模型(LKMM)子系统是一套描述Linux 内存一致性模型的工具,同时也产生测试用例。 + + +随着系统在物理设计上变得越来越复杂(增加了更多的中央处理器内核,高速缓存和内存增加,等等),它们就越难知道哪个中央处理器需要哪个地址空间,以及何时需要。例如,如果 CPU0 需要将数据写入内存中的共享变量,并且 CPU1 需要读取该值,那么 CPU0 必须在 CPU1 尝试读取之前写入。类似地,如果值是以一种顺序写入内存的,那么期望它们也以同样的顺序被读取,而不管哪个或哪些 CPU 正在读取。 + +即使在单个处理器上,内存管理也需要特定的顺序。像 **x = y** 这样的简单操作需要处理器从内存中加载 **y** 的值,然后将该值存储在 **x** 中。在处理器从内存中读取值之前,是不能将存储在 **y** 中的值放入 **x** 
变量的。还有地址依赖:**x[n] = 6** 要求在处理器能够存储值6之前加载 **n** 。 + +LKMM 帮助识别和跟踪代码中的这些内存模式。这部分是通过一个名为 **herd** 的工具来实现的,该工具定义了内存模型施加的约束(以逻辑公式的形式),然后列举了与这些约束一致性的所有可能的结果。 + +### 低延迟补丁 (2.6.38) + + +很久以前,在2011年之前的日子里,如果你想在 Linux进行 多媒体工作 [multimedia work on Linux][11] ,您必须获得一个低延迟内核。这主要适用于[录音/audio recording][12],同时添加了许多实时效果(如对着麦克风唱歌和添加混音,以及在耳机中无延迟地听到您的声音)。有些发行版,如[Ubuntu Studio][13],可靠地提供了这样一个内核,所以实际上这没有什么障碍,当艺术家选择发行版本时,只是作为一个重要提醒。 + +然而,如果您没有使用 Ubuntu Studio ,或者您需要在分发之前更新您的内核,您必须跳转到 rt-patches 网页,下载内核补丁,将它们应用到您的内核源代码,编译,然后手动安装。 + +然后,随着内核版本2.6.38的发布,这个过程结束了。默认情况下,Linux 内核突然像变魔术一样内置了低延迟代码(根据基准测试,延迟至少降低了10倍)。不再下载补丁,不用编译。一切都很顺利,这都是因为 Mike Galbraith 编写了一个200行的小补丁。 + +对于全世界的开源多媒体艺术家来说,这是一个游戏规则的改变。从2011年开始到2016年事情变得如此美好,我向自己做了一个挑战,要求[在树莓派v1(型号B)上建造一个数字音频工作站(DAW)][14],结果发现它运行得出奇地好。 + +### RCU (2.5) + +RCU,或称读-拷贝-更新,是计算机科学中定义的一个系统,它允许多个处理器线程从共享内存中读取数据。它通过推迟更新来做到这一点,但也将它们标记为已更新,以确保数据读取为最新内容。实际上,这意味着读取与更新同时发生。 + + +典型的 RCU 循环有点像这样: + + 1. 删除指向数据的指针,以防止其他读操作引用它。 + 2. 等待读完成他们的关键处理。 + 3. 回收内存空间。 + +将更新阶段划分为删除和回收阶段意味着更新程序会立即执行删除,同时推迟回收直到所有活动读取完成(通过阻止它们或注册一个回调以便在完成时调用)。 + +虽然读-拷贝-更新的概念不是为 Linux 内核发明的,但它在 Linux 中的实现是该技术的一个定义性的例子。 + +### 合作 (0.01) + +对于 Linux 内核创新的问题,最重要的是协作,最终答案也是。称之为好时机,称之为技术优势,称之为黑客能力,或者仅仅称之为开源,但 Linux 内核及其支持的许多项目是协作与合作的光辉范例。 + +它远远超出了内核范畴。各行各业的人都对开源做出了贡献,可以说是因为 Linux 内核。Linux 曾经是,现在仍然是 [自由软件][15]的主要力量,激励人们把他们的代码、艺术、想法或者仅仅是他们自己带到一个全球化的、有生产力的、多样化的人类社区中。 + +### 你最喜欢的创新是什么? + +这个列表偏向于我自己的兴趣:容器、非统一内存访问(NUMA)和多媒体。我肯定把你最喜欢的内核创新从列表中去掉了。在评论中告诉我! + +-------------------------------------------------------------------------------- + +via: https://opensource.com/article/19/8/linux-kernel-top-5-innovations + +作者:[Seth Kenlon][a] +选题:[lujun9972][b] +译者:[heguangzhi](https://github.com/heguangzhi) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensource.com/users/sethhttps://opensource.com/users/mhaydenhttps://opensource.com/users/mralexjuarez +[b]: https://github.com/lujun9972 +[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/linux_penguin_green.png?itok=ENdVzW22 (Penguin with green background) +[2]: https://en.wikipedia.org/wiki/Cgroups +[3]: https://lkml.org/lkml/2006/10/20/251 +[4]: https://linuxcontainers.org +[5]: https://coreos.com/ +[6]: http://flathub.org +[7]: http://flatpak.org +[8]: http://kubernetes.io +[9]: https://www.redhat.com/sysadmin/learn-openshift-minishift +[10]: https://opensource.com/article/18/11/behind-scenes-linux-containers +[11]: http://slackermedia.info +[12]: https://opensource.com/article/17/6/qtractor-audio +[13]: http://ubuntustudio.org +[14]: https://opensource.com/life/16/3/make-music-raspberry-pi-milkytracker +[15]: http://fsf.org From ed1c7845673e386f1649196a83b6816e840b5049 Mon Sep 17 00:00:00 2001 From: "Xingyu.Wang" Date: Wed, 18 Sep 2019 16:13:00 +0800 Subject: [PATCH 170/202] Rename sources/talk/20190917 Here Comes Oracle Autonomous Linux - World-s First Autonomous Operating System.md to sources/news/20190917 Here Comes Oracle Autonomous Linux - World-s First Autonomous Operating System.md --- ...utonomous Linux - World-s First Autonomous Operating System.md | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename sources/{talk => news}/20190917 Here Comes Oracle Autonomous Linux - World-s First Autonomous Operating System.md (100%) diff --git a/sources/talk/20190917 Here Comes Oracle Autonomous Linux - World-s First Autonomous Operating System.md b/sources/news/20190917 Here Comes Oracle Autonomous Linux - World-s First Autonomous 
Operating System.md similarity index 100% rename from sources/talk/20190917 Here Comes Oracle Autonomous Linux - World-s First Autonomous Operating System.md rename to sources/news/20190917 Here Comes Oracle Autonomous Linux - World-s First Autonomous Operating System.md From f1f794e1131c8c12731d2681daab6a23954a3513 Mon Sep 17 00:00:00 2001 From: "Xingyu.Wang" Date: Wed, 18 Sep 2019 16:16:19 +0800 Subject: [PATCH 171/202] Rename sources/tech/20190917 3 steps to developing psychological safety.md to sources/talk/20190917 3 steps to developing psychological safety.md --- .../20190917 3 steps to developing psychological safety.md | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename sources/{tech => talk}/20190917 3 steps to developing psychological safety.md (100%) diff --git a/sources/tech/20190917 3 steps to developing psychological safety.md b/sources/talk/20190917 3 steps to developing psychological safety.md similarity index 100% rename from sources/tech/20190917 3 steps to developing psychological safety.md rename to sources/talk/20190917 3 steps to developing psychological safety.md From 5fadad844c9be692da1ac4a394861975969a0d41 Mon Sep 17 00:00:00 2001 From: "Xingyu.Wang" Date: Wed, 18 Sep 2019 16:19:29 +0800 Subject: [PATCH 172/202] Rename sources/tech/20190917 How Ansible brought peace to my home.md to sources/talk/20190917 How Ansible brought peace to my home.md --- .../20190917 How Ansible brought peace to my home.md | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename sources/{tech => talk}/20190917 How Ansible brought peace to my home.md (100%) diff --git a/sources/tech/20190917 How Ansible brought peace to my home.md b/sources/talk/20190917 How Ansible brought peace to my home.md similarity index 100% rename from sources/tech/20190917 How Ansible brought peace to my home.md rename to sources/talk/20190917 How Ansible brought peace to my home.md From 5c3b197a6507b92803937ca13d168d883fb8eef5 Mon Sep 17 00:00:00 2001 From: "Xingyu.Wang" Date: Wed, 18 Sep 2019 16:23:11 +0800 Subject: [PATCH 173/202] Rename sources/tech/20190918 Amid Epstein Controversy, Richard Stallman is Forced to Resign as FSF President.md to sources/news/20190918 Amid Epstein Controversy, Richard Stallman is Forced to Resign as FSF President.md --- ...ersy, Richard Stallman is Forced to Resign as FSF President.md | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename sources/{tech => news}/20190918 Amid Epstein Controversy, Richard Stallman is Forced to Resign as FSF President.md (100%) diff --git a/sources/tech/20190918 Amid Epstein Controversy, Richard Stallman is Forced to Resign as FSF President.md b/sources/news/20190918 Amid Epstein Controversy, Richard Stallman is Forced to Resign as FSF President.md similarity index 100% rename from sources/tech/20190918 Amid Epstein Controversy, Richard Stallman is Forced to Resign as FSF President.md rename to sources/news/20190918 Amid Epstein Controversy, Richard Stallman is Forced to Resign as FSF President.md From 2263de0da557acc7680f071381621d0debd8bfd8 Mon Sep 17 00:00:00 2001 From: Name1e5s Date: Wed, 18 Sep 2019 16:28:56 +0800 Subject: [PATCH 174/202] [translating] name1e5s translating --- ...sy, Richard Stallman is Forced to Resign as FSF President.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sources/news/20190918 Amid Epstein Controversy, Richard Stallman is Forced to Resign as FSF President.md b/sources/news/20190918 Amid Epstein Controversy, Richard Stallman is Forced to Resign as FSF President.md index 
8d3c853ca3..838a585c3c 100644 --- a/sources/news/20190918 Amid Epstein Controversy, Richard Stallman is Forced to Resign as FSF President.md +++ b/sources/news/20190918 Amid Epstein Controversy, Richard Stallman is Forced to Resign as FSF President.md @@ -1,5 +1,5 @@ [#]: collector: (lujun9972) -[#]: translator: ( ) +[#]: translator: (name1e5s ) [#]: reviewer: ( ) [#]: publisher: ( ) [#]: url: ( ) From 9a212c81fd49a89aa0a6393bf23b76f8cd838ded Mon Sep 17 00:00:00 2001 From: Morisun029 <54652937+Morisun029@users.noreply.github.com> Date: Wed, 18 Sep 2019 21:05:22 +0800 Subject: [PATCH 175/202] translation --- ...0190917 How to Check Linux Mint Version Number - Codename.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sources/tech/20190917 How to Check Linux Mint Version Number - Codename.md b/sources/tech/20190917 How to Check Linux Mint Version Number - Codename.md index 843ab133a6..35856e4cc2 100644 --- a/sources/tech/20190917 How to Check Linux Mint Version Number - Codename.md +++ b/sources/tech/20190917 How to Check Linux Mint Version Number - Codename.md @@ -1,5 +1,5 @@ [#]: collector: (lujun9972) -[#]: translator: ( ) +[#]: translator: (Morisun029) [#]: reviewer: ( ) [#]: publisher: ( ) [#]: url: ( ) From dc66492c5afd796abbf63f7efc54454f7ee63be8 Mon Sep 17 00:00:00 2001 From: name1e5s Date: Wed, 18 Sep 2019 22:58:17 +0800 Subject: [PATCH 176/202] [Translated] Amid Epstein Controversy, Richard Stallman is Forced to Resign as FSF President --- ...an is Forced to Resign as FSF President.md | 145 ------------------ ...an is Forced to Resign as FSF President.md | 142 +++++++++++++++++ 2 files changed, 142 insertions(+), 145 deletions(-) delete mode 100644 sources/news/20190918 Amid Epstein Controversy, Richard Stallman is Forced to Resign as FSF President.md create mode 100644 translated/news/20190918 Amid Epstein Controversy, Richard Stallman is Forced to Resign as FSF President.md diff --git a/sources/news/20190918 Amid Epstein Controversy, Richard Stallman is Forced to Resign as FSF President.md b/sources/news/20190918 Amid Epstein Controversy, Richard Stallman is Forced to Resign as FSF President.md deleted file mode 100644 index 838a585c3c..0000000000 --- a/sources/news/20190918 Amid Epstein Controversy, Richard Stallman is Forced to Resign as FSF President.md +++ /dev/null @@ -1,145 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: (name1e5s ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Amid Epstein Controversy, Richard Stallman is Forced to Resign as FSF President) -[#]: via: (https://itsfoss.com/richard-stallman-controversy/) -[#]: author: (Abhishek Prakash https://itsfoss.com/author/abhishek/) - -Amid Epstein Controversy, Richard Stallman is Forced to Resign as FSF President -====== - -_**Richard Stallman, founder and president of the Free Software Foundation, has resigned as the president and from its board of directors. The announcement has come after a relentless campaign by a few activists and media person to remove Stallman for his views on the Epstein victims. Read more to get the details.**_ - -![][1] - -### A little background to the Stallman controversy - -If you are not aware of the context, let me provide some details. - -[Richard Stallman][2], a 66 years old computer scientist at [MIT][3], is best known for founding the [free software movement][4] in 1983. He also developed several software like GCC, Emacs under the GNU project. 
The free software movement inspired a number of projects to choose the open source GPL license. Linux is one of those projects. - -[Jeffrey Epstein][5] was a billionaire American financier. He was convicted as a sex offender for running an escort service (included underage girls) for the rich and elites in his social service. He committed suicide in his prison cell while still being tried for sex trafficking charges. - -[Marvin Lee Minsky][6] was an eminent computer scientist at MIT. He founded the Artificial Intelligence lab at MIT. He died at the age of 88 in 2016. After his death, an Epstein victim named Misky as one of the people she was “directed to have sex” with on Jeffrey Epstein’s private island while she was a minor. - -So what all this has to do with Richard Stallman? It all started with an email Stallman sent to MIT Computer Science and Artificial Intelligence Laboratory (CSAIL) mailing list over proposed protest by MIT students and affiliates regarding Jeffrey Epstein’s donation (to MIT’s AI lab). - -The announcement of the Friday event does an injustice to Marvin Minsky: - -“deceased AI ‘pioneer’ Marvin Minsky (who is accused of assaulting -one of Epstein’s victims [2])” - -The injustice is in the word “assaulting”. The term “sexual assault” is so vague and slippery that it facilitates accusation inflation: taking claims that someone did X and leading people to think of it as Y, which is much worse than X. - -The accusation quoted is a clear example of inflation. The reference reports the claim that Minsky had sex with one of Epstein’s harem. (See .) -Let’s presume that was true (I see no reason to disbelieve it). - -The word “assaulting” presumes that he applied force or violence, in some unspecified way, but the article itself says no such thing. -Only that they had sex. - -We can imagine many scenarios, but the most plausible scenario is that she presented herself to him as entirely willing. Assuming she was being coerced by Epstein, he would have had every reason to tell her to conceal that from most of his associates. - -I’ve concluded from various examples of accusation inflation that it is absolutely wrong to use the term “sexual assault” in an accusation. - -Whatever conduct you want to criticize, you should describe it with a specific term that avoids moral vagueness about the nature of the criticism. - -### The call for removing Stallman - -‘Epstein’ is an extremely controversial ‘topic’ in the USA. Stallman’s reckless ‘intellectual discourse’ on a sensitive matter like this would not have gone well and it didn’t go well. - -A robotics engineer received this forwarded email from her friend and started a [campaign to remove Stallman][7]. She didn’t want a clarification or apology. All she wanted was to remove Stallman even if it means ‘burning MIT to the ground’. - -> At least Richard Stallman is not accused of raping anyone. But is that our highest standard? The standard that this prestigious institution holds itself to? If this is what MIT wants to defend; if this is what MIT wants to stand for, then, yes, burn it to the ground… -> -> …Remove everyone, if we must, and let something much better be built from the ashes. -> -> Salem, Robotics student who started Remove Stallman campaign - -Salem’s rant was initially ignored by mainstream digital media. But it was picked by activists who fight against meritocracy and gender bias in the software industry. - -> [#epstein][8] [#MIT][9] Hi I'm angry and reporters didn't respond to me so i wrote this story myself. 
its such a fun time to be an mit alumn right now🙃 -> -> — SZJG (@selamjie) [September 12, 2019][10] - -> are we done with "brilliant jerks" defending child sexual exploitation with "maybe it was consensual" -> -> — Tracy Chou 👩🏻‍💻 (@triketora) [September 13, 2019][11] - -> I've tweeted for many years about how awful Richard "RMS" Stallman is – the pedophilia, the ableism, the misogyny. -> -> Inevitably, each time I do, dudes examine my receipts & then say "all those incidents are from years ago! he's changed now!" -> -> NOPE. -> -> — Sarah Mei (@sarahmei) [September 12, 2019][12] - -A Twitter thread by Sage Sharp on how Stallman’s behavior negatively impact people in tech: - -> 👇Thread about the impact of Richard Stallman on marginalized groups in tech, especially women. [CW: rape, incest, ableism, sex trafficking] -> -> The [@fsf][13] needs to permanently remove Richard Stallman from being President and Chair of the Free Software Foundation Board of Directors. -> -> — Sage Sharp (@_sagesharp_) [September 16, 2019][14] - -It’s not that Stallman is a saint. His crude, insensitive and sexist jokes have been doing the rounds for years. You can read about it [here][15] and [here][16]. - -Soon the news was picked by the big media houses like [The Vice][17], [The Daily Beast][18], [Futurism][19] etc. They painted Stallman as a defender of Jeffrey Epstein. Amidst the outcry, [executive director of GNOME threatened to end the relationship between GNOME and FSF][20]. - -Eventually, Stallman resigned first from MIT and now [from Free Software Foundation][21]. - -![][22] - -### A dangerous precedence? - -All it took five days of activism to remove a person from an organization he created and worked for more than thirty years. And this is when Stallman wasn’t even remotely involved in the sex trafficking scandal. - -Some of these ‘activists’ have also targeted [Linux creator Linus Torvalds in the past][23]. The management behind the Linux Foundation foresaw the growing trend of activism in the tech industry and hence they put up a [code of conduct for Linux kernel development][24] in place and forced [Torvalds to undergo training to improve his behavior][25]. If they had not taken the corrective step, probably Torvalds would have been a goner by now. - -Ignoring reckless and sexist behavior of tech stalwarts is not acceptable but neither is the mob mentality of lynching anyone who disagrees with a certain popular view. I don’t agree with Stallman and his past remarks but I am also not happy that he has been (forced to?) resign in this manner. - -Techrights has some interesting take on it that you can read [here][26] and [here][27]. - -_**What do you think of the entire episode? Please share your views and opinion but in a civilized manner. Abusive comments will not be published. 
Arguments and discussion must be civil.**_ - --------------------------------------------------------------------------------- - -via: https://itsfoss.com/richard-stallman-controversy/ - -作者:[Abhishek Prakash][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://itsfoss.com/author/abhishek/ -[b]: https://github.com/lujun9972 -[1]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2019/09/stallman-conroversy.png?ssl=1 -[2]: https://en.wikipedia.org/wiki/Richard_Stallman -[3]: https://en.wikipedia.org/wiki/Massachusetts_Institute_of_Technology -[4]: https://en.wikipedia.org/wiki/Free_software_movement -[5]: https://en.wikipedia.org/wiki/Jeffrey_Epstein -[6]: https://en.wikipedia.org/wiki/Marvin_Minsky -[7]: https://medium.com/@selamie/remove-richard-stallman-fec6ec210794 -[8]: https://twitter.com/hashtag/epstein?src=hash&ref_src=twsrc%5Etfw -[9]: https://twitter.com/hashtag/MIT?src=hash&ref_src=twsrc%5Etfw -[10]: https://twitter.com/selamjie/status/1172244207978897408?ref_src=twsrc%5Etfw -[11]: https://twitter.com/triketora/status/1172443389536555009?ref_src=twsrc%5Etfw -[12]: https://twitter.com/sarahmei/status/1172283772428906496?ref_src=twsrc%5Etfw -[13]: https://twitter.com/fsf?ref_src=twsrc%5Etfw -[14]: https://twitter.com/_sagesharp_/status/1173637138413318144?ref_src=twsrc%5Etfw -[15]: https://geekfeminism.wikia.org/wiki/Richard_Stallman -[16]: https://medium.com/@selamie/remove-richard-stallman-appendix-a-a7e41e784f88 -[17]: https://www.vice.com/en_us/article/9ke3ke/famed-computer-scientist-richard-stallman-described-epstein-victims-as-entirely-willing -[18]: https://www.thedailybeast.com/famed-mit-computer-scientist-richard-stallman-defends-epstein-victims-were-entirely-willing -[19]: https://futurism.com/richard-stallman-epstein-scandal -[20]: https://blog.halon.org.uk/2019/09/gnome-foundation-relationship-gnu-fsf/ -[21]: https://www.fsf.org/news/richard-m-stallman-resigns -[22]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2019/09/richard-stallman.png?resize=800%2C94&ssl=1 -[23]: https://www.newyorker.com/science/elements/after-years-of-abusive-e-mails-the-creator-of-linux-steps-aside -[24]: https://itsfoss.com/linux-code-of-conduct/ -[25]: https://itsfoss.com/torvalds-takes-a-break-from-linux/ -[26]: http://techrights.org/2019/09/15/media-attention-has-been-shifted/ -[27]: http://techrights.org/2019/09/16/stallman-removed/ diff --git a/translated/news/20190918 Amid Epstein Controversy, Richard Stallman is Forced to Resign as FSF President.md b/translated/news/20190918 Amid Epstein Controversy, Richard Stallman is Forced to Resign as FSF President.md new file mode 100644 index 0000000000..959e369e20 --- /dev/null +++ b/translated/news/20190918 Amid Epstein Controversy, Richard Stallman is Forced to Resign as FSF President.md @@ -0,0 +1,142 @@ +[#]: collector: (lujun9972) +[#]: translator: (name1e5s) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (Amid Epstein Controversy, Richard Stallman is Forced to Resign as FSF President) +[#]: via: (https://itsfoss.com/richard-stallman-controversy/) +[#]: author: (Abhishek Prakash https://itsfoss.com/author/abhishek/) + +Richard Stallman 被迫辞去 FSF 主席的职务 +====== + +_**Richard Stallman,自由软件基金会的创建者以及主席,已经辞去他的职务,开始寻求下一任主席。此前,因为 Stallman 对于爱泼斯坦事件中的受害者的观点,一小撮活动家以及媒体人发起了清除 Stallman 的运动。这份声明就是在这些活动后发生的。阅读全文以获得更多信息。**_ + +![][1] + +### Stallman 事件的背景概述 + 
+如果您不知道这次事件发生的前因后果,请看本段的详细信息。
+
+[Richard Stallman][2],66 岁,是就职于 [MIT][3] 的计算机科学家。他最著名的成就就是在 1983 年发起了[自由软件运动][4]。他也开发了 GNU 项目旗下的部分软件,比如 GCC 和 Emacs。受自由软件运动影响选择使用 GPL 开源协议的项目不计其数。Linux 是其中最出名的项目之一。
+
+[Jeffrey Epstein][5],美国亿万富翁,金融大佬。其因为社会上流精英提供性交易服务(其中包括未成年少女)而被定罪为性犯罪者。在性交易相关指控的审理期间,爱泼斯坦在监狱中自杀身亡。
+
+[Marvin Lee Minsky][6],MIT 知名计算机科学家。他在 MIT 建立了人工智能实验室。2016 年,88 岁的 Minsky 逝世。在 Minsky 逝世后,爱泼斯坦事件的一位受害者指认 Minsky 是她未成年时在爱泼斯坦私人岛屿上被“指示与之发生性关系”的人之一。
+
+但是这些与 Richard Stallman 有什么关系?这要从 Stallman 发到 MIT 计算机科学与人工智能实验室(CSAIL)邮件列表的一封邮件说起。当时,MIT 的学生和相关人士正准备就爱泼斯坦向 MIT 人工智能实验室的捐款举行抗议,这封邮件正是对此事的回应。邮件全文翻译如下:
+
+> 周五事件的公告对 Marvin Minsky 来说是不公正的。
+>
+> “已故的人工智能‘先锋’ Marvin Minsky(被控告侵害了爱泼斯坦事件的受害者之一 [2])”
+>
+> 不公正之处在于“侵害(assaulting)”这个用语。“性侵犯(sexual assault)”这个用语非常模糊,夸大了指控的严重性:宣称某人做了 X 但误导别人,让别人觉得这个人做了 Y,而 Y 远远比 X 严重。
+>
+> 上面引用的指控显然就是夸大。报导声称 Minsky 与爱泼斯坦的女眷之一发生了性关系(详见 )。我们假设这是真的(我找不到理由不相信)。
+>
+> “侵害(assaulting)”这个词,意味着他使用了某种暴力。但那篇报道并没有提到这个,只说了他们发生了性关系。
+>
+> 我们可以想像很多种情况,但最合理的情况是,她在 Marvin 面前表现得像是完全自愿的。假设她是被爱泼斯坦强迫的,那爱泼斯坦有充足的理由让她对大多数人守口如瓶。
+>
+> 从各种的指控夸大事例中,我总结出,在指控时使用“性侵犯(sexual assault)”是绝对错误的。
+>
+> 无论你想要批判什么行为,你都应该使用特定的词汇来描述,以此避免批判本身天然的道德上的模糊性。
+
+### “清除 Stallman”的呼吁
+
+“爱泼斯坦”在美国是颇具争议的话题。Stallman 对这样的敏感事件做出如此鲁莽的“知识分子式论述”,不会有好结果,事实也是如此。
+
+一位机器人学工程师从她的朋友那里收到了转发的邮件,并发起了一个[清除 Stallman 的活动][7]。她要的不是澄清或者道歉,她只想要清除 Stallman,就算这意味着“将 MIT 夷为平地”也在所不惜。
+
+> 是,至少 Stallman 没有被控强奸任何人。但这就是我们的最高标准吗?这所声望极高的学院坚持的标准就是这样的吗?如果这是麻省理工学院想要捍卫的、想要代表的标准的话,还不如把这破学校夷为平地……
+>
+> 如果有必要的话,就把所有人都清除出去,之后从废墟中建立出更好的秩序。
+>
+> Salem,发起“清除 Stallman”运动的机器人学专业学生
+
+Salem 的大字报最初没有被主流媒体重视,但它还是被反对软件行业内精英崇拜以及性别偏见的积极分子发现了。
+
+> [#epstein][8] [#MIT][9] 嗨,记者没有回复我,我很生气,就自己写了这么个故事。作为 MIT 的校友,我现在还真是高兴啊 🙃
+>
+> — SZJG (@selamjie) [September 12, 2019][10]
+
+> 是不是对于性侵儿童的“杰出混蛋”,我们也可以辩护说“万一这是你情我愿的”?
+>
+> — Tracy Chou 👩🏻‍💻 (@triketora) [September 13, 2019][11]
+
+> 多年来我就一直发推说 Richard “RMS” Stallman 这人有多恶心 —— 恋童癖、厌女症、还有残障歧视。
+>
+> 不可避免的是,每次我这样做,都会有老哥检查我的数据来源,然后说“这都是几年前的事了!他现在变了!”
+>
+> 变个屁。
+>
+> — Sarah Mei (@sarahmei) [September 12, 2019][12]
+
+下面是 Sage Sharp 发布的一串推文,讲述 Stallman 的行为如何对科技从业者产生负面影响:
+
+> 👇大家说下 Richard Stallman 对科技从业者的影响吧,尤其是女性。[例如:强奸、乱伦、残障歧视、性交易]
+>
+> [@fsf][13] 有必要永久禁止 Richard Stallman 担任自由软件基金会董事会主席。
+>
+> — Sage Sharp (@_sagesharp_) [September 16, 2019][14]
+
+Stallman 一直以来也不是一个圣人。他那些粗俗、不合时宜、带有性别歧视的笑话多年来从未间断。你可以在[这里][15]和[这里][16]读到相关内容。
+
+很快,[The Vice][17]、[每日野兽][18]、[未来主义][19]等大媒体就报道了这个消息。他们把 Stallman 描绘成爱泼斯坦的捍卫者。在强烈的抗议声中,[GNOME 执行董事威胁要结束 GNOME 和 FSF 之间的关系][20]。
+
+最后,Stallman 先是从 MIT 辞职,现在又从[自由软件基金会][21]辞职。
+
+![][22]
+
+### 危险的先例?
+
+我们见识到了,把一个人从他创建并为之工作了三十多年的组织中驱逐出去仅仅需要五天,而且这还是在 Stallman 完全没有参与性交易丑闻的情况下。
+
+其中一些“活动家”过去也曾[针对 Linux 的作者 Linus Torvalds][23]。Linux 基金会的管理层预见到了科技行业激进主义的增长趋势,因此他们制定了[适用于 Linux 内核开发的行为准则][24],并[强制 Torvalds 接受培训以改善他的行为][25]。如果他们没有采取纠正措施,恐怕 Torvalds 现在也已经被批倒批臭了。
+
+忽视技术界大佬们的鲁莽言行和性别歧视是不可接受的,但对任何不同意某种流行观点的人就贴大字报、施以“私刑”,同样是不可取的做法。我不支持 Stallman 和他过去的言论,但我也不能接受他以这种方式(被迫?)辞职。
+
+Techrights 对此有一些有趣的评论,你可以在[这里][26]和[这里][27]看到。
+
+_**您对此事有何看法?
请文明分享您的观点和意见。过激评论将不会公布。**_ + +-------------------------------------------------------------------------------- + +via: https://itsfoss.com/richard-stallman-controversy/ + +作者:[Abhishek Prakash][a] +选题:[lujun9972][b] +译者:[name1e5s](https://github.com/name1e5s) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://itsfoss.com/author/abhishek/ +[b]: https://github.com/lujun9972 +[1]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2019/09/stallman-conroversy.png?ssl=1 +[2]: https://en.wikipedia.org/wiki/Richard_Stallman +[3]: https://en.wikipedia.org/wiki/Massachusetts_Institute_of_Technology +[4]: https://en.wikipedia.org/wiki/Free_software_movement +[5]: https://en.wikipedia.org/wiki/Jeffrey_Epstein +[6]: https://en.wikipedia.org/wiki/Marvin_Minsky +[7]: https://medium.com/@selamie/remove-richard-stallman-fec6ec210794 +[8]: https://twitter.com/hashtag/epstein?src=hash&ref_src=twsrc%5Etfw +[9]: https://twitter.com/hashtag/MIT?src=hash&ref_src=twsrc%5Etfw +[10]: https://twitter.com/selamjie/status/1172244207978897408?ref_src=twsrc%5Etfw +[11]: https://twitter.com/triketora/status/1172443389536555009?ref_src=twsrc%5Etfw +[12]: https://twitter.com/sarahmei/status/1172283772428906496?ref_src=twsrc%5Etfw +[13]: https://twitter.com/fsf?ref_src=twsrc%5Etfw +[14]: https://twitter.com/_sagesharp_/status/1173637138413318144?ref_src=twsrc%5Etfw +[15]: https://geekfeminism.wikia.org/wiki/Richard_Stallman +[16]: https://medium.com/@selamie/remove-richard-stallman-appendix-a-a7e41e784f88 +[17]: https://www.vice.com/en_us/article/9ke3ke/famed-computer-scientist-richard-stallman-described-epstein-victims-as-entirely-willing +[18]: https://www.thedailybeast.com/famed-mit-computer-scientist-richard-stallman-defends-epstein-victims-were-entirely-willing +[19]: https://futurism.com/richard-stallman-epstein-scandal +[20]: https://blog.halon.org.uk/2019/09/gnome-foundation-relationship-gnu-fsf/ +[21]: https://www.fsf.org/news/richard-m-stallman-resigns +[22]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2019/09/richard-stallman.png?resize=800%2C94&ssl=1 +[23]: https://www.newyorker.com/science/elements/after-years-of-abusive-e-mails-the-creator-of-linux-steps-aside +[24]: https://itsfoss.com/linux-code-of-conduct/ +[25]: https://itsfoss.com/torvalds-takes-a-break-from-linux/ +[26]: http://techrights.org/2019/09/15/media-attention-has-been-shifted/ +[27]: http://techrights.org/2019/09/16/stallman-removed/ From e838464c94a34e11fd0c50271d8e636427f94732 Mon Sep 17 00:00:00 2001 From: Morisun029 <54652937+Morisun029@users.noreply.github.com> Date: Thu, 19 Sep 2019 00:16:41 +0800 Subject: [PATCH 177/202] Translated --- ...ck Linux Mint Version Number - Codename.md | 145 ---------------- ...ck Linux Mint Version Number - Codename.md | 159 ++++++++++++++++++ 2 files changed, 159 insertions(+), 145 deletions(-) delete mode 100644 sources/tech/20190917 How to Check Linux Mint Version Number - Codename.md create mode 100644 translated/tech/20190917 How to Check Linux Mint Version Number - Codename.md diff --git a/sources/tech/20190917 How to Check Linux Mint Version Number - Codename.md b/sources/tech/20190917 How to Check Linux Mint Version Number - Codename.md deleted file mode 100644 index 35856e4cc2..0000000000 --- a/sources/tech/20190917 How to Check Linux Mint Version Number - Codename.md +++ /dev/null @@ -1,145 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: (Morisun029) -[#]: reviewer: ( ) -[#]: publisher: ( ) 
-[#]: url: ( ) -[#]: subject: (How to Check Linux Mint Version Number & Codename) -[#]: via: (https://itsfoss.com/check-linux-mint-version/) -[#]: author: (Sergiu https://itsfoss.com/author/sergiu/) - -How to Check Linux Mint Version Number & Codename -====== - -Linux Mint has a major release (like Mint 19) every two years and minor releases (like Mint 19.1, 19.2 etc) every six months or so. You can upgrade Linux Mint version on your own or it may get automatically update for the minor releases. - -Between all these release, you may wonder which Linux Mint version you are using. Knowing the version number is also helpful in determining whether a particular software is available for your system or if your system has reached end of life. - -There could be a number of reasons why you might require the Linux Mint version number and there are various ways you can obtain this information. Let me show you both graphical and command line ways to get the Mint release information. - - * [Check Linux Mint version using command line][1] - * [Check Linux Mint version information using GUI][2] - - - -### Ways to check Linux Mint version number using terminal - -![][3] - -I’ll go over several ways you can check your Linux Mint version number and codename using very simple commands. You can open up a **terminal** from the **Menu** or by pressing **CTRL+ALT+T** (default hotkey). - -The **last two entries** in this list also output the **Ubuntu release** your current Linux Mint version is based on. - -#### 1\. /etc/issue - -Starting out with the simplest CLI method, you can print out the contents of **/etc/issue** to check your **Version Number** and **Codename**: - -``` -[email protected]:~$ cat /etc/issue -Linux Mint 19.2 Tina \n \l -``` - -#### 2\. hostnamectl - -![hostnamectl][4] - -This single command (**hostnamectl**) prints almost the same information as that found in **System Info**. You can see your **Operating System** (with **version number**), as well as your **kernel version**.3. - -#### 3\. lsb_release - -**lsb_release** is a very simple Linux utility to check basic information about your distribution: - -``` -[email protected]:~$ lsb_release -a -No LSB modules are available. -Distributor ID: LinuxMint -Description: Linux Mint 19.2 Tina -Release: 19.2 -Codename: tina -``` - -**Note:** *I used the **–***_**a**_ _tag to print all parameters, but you can also use **-s** for short form, **-d** for description etc. (check **man lsb_release** for all tags)._ - -#### 4\. /etc/linuxmint/info - -![/etc/linuxmint/info][5] - -This isn’t a command, but rather a file on any Linux Mint install. Simply use cat command to print it’s contents to your terminal and see your **Release Number** and **Codename**. - -[][6] - -Suggested read  Get Rid Of Two Google Chrome Icons From Dock In Elementary OS Freya [Quick Tip] - -#### 5\. Use /etc/os-release to get Ubuntu codename as well - -![/etc/os-release][7] - -Linux Mint is based on Ubuntu. Each Linux Mint release is based on a different Ubuntu release. Knowing which Ubuntu version your Linux Mint release is based on is helpful in cases where you’ll have to use Ubuntu codename while adding a repository like when you need to [install the latest Virtual Box in Linux Mint][8]. - -**os-release** is yet another file similar to **info**, showing you the codename for the **Ubuntu** release your Linux Mint is based on. - -#### 6\. 
Use /etc/upstream-release/lsb-release to get only Ubuntu base info - -If you only ****want to see information about the **Ubuntu** base, output **/etc/upstream-release/lsb-release**: - -``` -[email protected]:~$ cat /etc/upstream-release/lsb-release -DISTRIB_ID=Ubuntu -DISTRIB_RELEASE=18.04 -DISTRIB_CODENAME=bionic -DISTRIB_DESCRIPTION="Ubuntu 18.04 LTS" -``` - -Bonus Tip: [You can just check Linux kernel version][9] with the **uname** command: - -``` -[email protected]:~$ uname -r -4.15.0-54-generic -``` - -**Note:** _**-r** stands for **release**, however you can check the other flags with **man uname**._ - -### Check Linux Mint version information using GUI - -If you are not comfortable with the terminal and commands, you can use the graphical method. As you would expect, this one is pretty straight-forward. - -Open up the **Menu** (bottom-left corner) and then go to **Preferences > System Info**: - -![Linux Mint Menu][10] - -Alternatively, in the Menu you can search for **System Info**: - -![Menu Search System Info][11] - -Here you can see both your operating system (including version number), your kernel and the version number of your DE: - -![System Info][12] - -**Wrapping Up** - -I have covered some different ways you can quickly check the version and name (as well as the Ubuntu base and kernel) of the Linux Mint release you are running. I hope you found this beginner tutorial helpful. Let us know in the comments which one is your favorite method! - --------------------------------------------------------------------------------- - -via: https://itsfoss.com/check-linux-mint-version/ - -作者:[Sergiu][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://itsfoss.com/author/sergiu/ -[b]: https://github.com/lujun9972 -[1]: tmp.pL5Hg3N6Qt#terminal -[2]: tmp.pL5Hg3N6Qt#GUI -[3]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2019/09/check-linux-mint-version.png?ssl=1 -[4]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2019/09/hostnamectl.jpg?ssl=1 -[5]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2019/09/linuxmint_info.jpg?ssl=1 -[6]: https://itsfoss.com/rid-google-chrome-icons-dock-elementary-os-freya/ -[7]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2019/09/os_release.jpg?ssl=1 -[8]: https://itsfoss.com/install-virtualbox-ubuntu/ -[9]: https://itsfoss.com/find-which-kernel-version-is-running-in-ubuntu/ -[10]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2019/09/linux_mint_menu.jpg?ssl=1 -[11]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2019/09/menu_search_system_info.jpg?ssl=1 -[12]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2019/09/system_info.png?ssl=1 diff --git a/translated/tech/20190917 How to Check Linux Mint Version Number - Codename.md b/translated/tech/20190917 How to Check Linux Mint Version Number - Codename.md new file mode 100644 index 0000000000..dee320ad88 --- /dev/null +++ b/translated/tech/20190917 How to Check Linux Mint Version Number - Codename.md @@ -0,0 +1,159 @@ +[#]: collector: (lujun9972) +[#]: translator: (Morisun029) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (How to Check Linux Mint Version Number & Codename) +[#]: via: (https://itsfoss.com/check-linux-mint-version/) +[#]: author: (Sergiu https://itsfoss.com/author/sergiu/) + +如何查看 Linux Mint 版本号和代号 +====== + + +Linux Mint 每两年发布一次主版本(如 Mint 19),每六个月左右发布一次次版本(如 Mint 19.1,19.2等)。 你可以自己升级 Linux Mint 
版本,次版本也会自动更新。
+
+
+在所有这些版本中,你可能想知道自己正在使用的是哪个版本。了解 Linux Mint 版本号可以帮助你确定某个特定软件是否适用于你的系统,或者检查你的系统是否已到达生命周期终点。
+
+需要 Linux Mint 版本号的原因可能有很多种,获取此信息的方法也不止一种。下面我将向你展示用图形界面和命令行两种方式获取 Mint 版本信息。
+
+
+ * [使用命令行查看 Linux Mint 版本信息][1]
+ * [使用 GUI(图形用户界面)查看 Linux Mint 版本信息][2]
+
+
+
+### 使用终端查看 Linux Mint 版本号的方法
+
+![][3]
+
+我将介绍几种使用非常简单的命令查看 Linux Mint 版本号和代号的方法。你可以从**菜单**中打开**终端**,或按 **CTRL+ALT+T**(默认热键)打开。
+
+
+本文中的**最后两种方法**还会输出你当前的 Linux Mint 版本所基于的 **Ubuntu 版本**。
+
+
+#### 1\. /etc/issue
+
+
+从最简单的 CLI 方法开始,你可以打印出 **/etc/issue** 的内容来查看你的**版本号**和**代号**:
+
+
+```
+[email protected]:~$ cat /etc/issue
+Linux Mint 19.2 Tina \n \l
+```
+
+#### 2\. hostnamectl
+
+![hostnamectl][4]
+
+
+仅这一条命令(**hostnamectl**)打印出的信息就几乎与**系统信息**中的信息相同。你可以看到你的**操作系统**(带有**版本号**)以及你的**内核版本**。
+
+#### 3\. lsb_release
+
+**lsb_release** 是一个非常简单的 Linux 实用程序,用于查看你的发行版的基本信息:
+
+```
+[email protected]:~$ lsb_release -a
+No LSB modules are available.
+Distributor ID: LinuxMint
+Description: Linux Mint 19.2 Tina
+Release: 19.2
+Codename: tina
+```
+
+**注:** _我使用 **-a** 标签打印所有参数,但你也可以用 **-s** 输出简短格式、用 **-d** 只输出描述等(用 **man lsb_release** 查看所有标签)。_
+
+
+#### 4\. /etc/linuxmint/info
+
+![/etc/linuxmint/info][5]
+
+这不是一个命令,而是所有 Linux Mint 系统上都有的一个文件。只需使用 cat 命令将其内容打印到终端,即可查看你的**版本号**和**代号**。
+
+#### 5\. 使用 /etc/os-release 文件也可以获取到 Ubuntu 代号
+
+![/etc/os-release][7]
+
+
+Linux Mint 基于 Ubuntu,每个 Linux Mint 版本都基于不同的 Ubuntu 版本。了解你的 Linux Mint 版本所基于的 Ubuntu 版本,在需要使用 Ubuntu 代号添加软件仓库时非常有用,比如在 [Linux Mint 中安装最新的 VirtualBox][8] 的时候。(文末附有一个直接提取该代号的小示例。)
+
+**os-release** 则是另一个类似于 **info** 的文件,它会向你展示 Linux Mint 所基于的 Ubuntu 版本的代号。
+
+
+#### 6\. 使用 /etc/upstream-release/lsb-release 只获取 Ubuntu 的基本信息
+
+
+如果你只想查看有关 **Ubuntu** 的基本信息,请输出 **/etc/upstream-release/lsb-release** 的内容:
+
+```
+[email protected]:~$ cat /etc/upstream-release/lsb-release
+DISTRIB_ID=Ubuntu
+DISTRIB_RELEASE=18.04
+DISTRIB_CODENAME=bionic
+DISTRIB_DESCRIPTION="Ubuntu 18.04 LTS"
+```
+
+特别提示:[你可以使用 **uname** 命令查看 Linux 内核版本][9]:
+
+```
+[email protected]:~$ uname -r
+4.15.0-54-generic
+```
+
+**注:** _**-r** 代表 **release**,你可以用 **man uname** 查看其他标签。_
+
+### 使用 GUI 查看 Linux Mint 版本信息
+
+如果你不习惯使用终端和命令行,也可以使用图形界面。正如你所期望的那样,这种方式非常直观。
+
+打开**菜单**(左下角),然后转到 **Preferences > System Info**:
+
+![Linux Mint 菜单][10]
+
+或者,你也可以在菜单中搜索 **System Info**:
+
+![Menu Search System Info][11]
+
+在这里,你可以看到你的操作系统(包括版本号)、内核以及桌面环境的版本号:
+
+![System Info][12]
+
+**总结**
+
+我已经介绍了几种不同的方法,用这些方法你可以快速查看你正在使用的 Linux Mint 的版本和代号(以及所基于的 Ubuntu 版本和内核)。我希望这个初学者教程对你有所帮助。请在评论中告诉我们你最喜欢哪个方法!
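+
+**补充示例:** **/etc/upstream-release/lsb-release** 是普通的 `KEY=value` 文本文件,因此可以直接在 shell 中 `source` 它,再读取其中的变量来提取 Ubuntu 代号(正如第 5、6 节所说,添加软件仓库时常常需要这个代号)。下面只是一个小示意,假设该文件内容与上文的输出一致:
+
+```
+$ source /etc/upstream-release/lsb-release && echo "$DISTRIB_CODENAME"
+bionic
+```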
+ +-------------------------------------------------------------------------------- + +via: https://itsfoss.com/check-linux-mint-version/ + +作者:[Sergiu][a] +选题:[lujun9972][b] +译者:[Morisun029](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://itsfoss.com/author/sergiu/ +[b]: https://github.com/lujun9972 +[1]: tmp.pL5Hg3N6Qt#terminal +[2]: tmp.pL5Hg3N6Qt#GUI +[3]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2019/09/check-linux-mint-version.png?ssl=1 +[4]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2019/09/hostnamectl.jpg?ssl=1 +[5]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2019/09/linuxmint_info.jpg?ssl=1 +[6]: https://itsfoss.com/rid-google-chrome-icons-dock-elementary-os-freya/ +[7]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2019/09/os_release.jpg?ssl=1 +[8]: https://itsfoss.com/install-virtualbox-ubuntu/ +[9]: https://itsfoss.com/find-which-kernel-version-is-running-in-ubuntu/ +[10]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2019/09/linux_mint_menu.jpg?ssl=1 +[11]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2019/09/menu_search_system_info.jpg?ssl=1 +[12]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2019/09/system_info.png?ssl=1 From 83780b7af7a543cdba0d40649140273879569657 Mon Sep 17 00:00:00 2001 From: DarkSun Date: Thu, 19 Sep 2019 00:53:56 +0800 Subject: [PATCH 178/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190918=20Adding?= =?UTF-8?q?=20themes=20and=20plugins=20to=20Zsh?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/tech/20190918 Adding themes and plugins to Zsh.md --- ...190918 Adding themes and plugins to Zsh.md | 210 ++++++++++++++++++ 1 file changed, 210 insertions(+) create mode 100644 sources/tech/20190918 Adding themes and plugins to Zsh.md diff --git a/sources/tech/20190918 Adding themes and plugins to Zsh.md b/sources/tech/20190918 Adding themes and plugins to Zsh.md new file mode 100644 index 0000000000..60af63d667 --- /dev/null +++ b/sources/tech/20190918 Adding themes and plugins to Zsh.md @@ -0,0 +1,210 @@ +[#]: collector: (lujun9972) +[#]: translator: ( ) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (Adding themes and plugins to Zsh) +[#]: via: (https://opensource.com/article/19/9/adding-plugins-zsh) +[#]: author: (Seth Kenlon https://opensource.com/users/sethhttps://opensource.com/users/sethhttps://opensource.com/users/sethhttps://opensource.com/users/seth) + +Adding themes and plugins to Zsh +====== +Expand Z-shell's capabilities with themes and plugins installed with Oh +My Zsh. +![Someone wearing a hardhat and carrying code ][1] + +In my [previous article][2], I explained how to get started with [Z-shell][2] (Zsh). For some users, the most exciting thing about Zsh is its ability to adopt new themes. It's so easy to theme Zsh both because of the active community designing visuals for the shell and also because of the [Oh My Zsh][3] project, which makes it trivial to install them. + +Theming is one of those changes you notice immediately, so if you don't feel like you changed shells when you installed Zsh, you'll definitely feel it once you've adopted one of the 100+ themes bundled with Oh My Zsh. There's a lot more to Oh My Zsh than just pretty themes, though; there are also hundreds of plugins that add features to your Z-shell environment. 
+ +### Installing Oh My Zsh + +The [ohmyz.sh][3] website encourages you to install the framework by running a script over the internet from your computer. While the Oh My Zsh project is almost certainly trustworthy, it's generally ill-advised to blindly run scripts on your system. If you want to run the install script, you can download it, read it, and run it after you're satisfied you understand what it's doing. + +If you download the script and read it, you may notice that installation is only a three-step process: + +#### 1\. Clone oh-my-zsh + +First, clone the oh-my-zsh repository into a directory called **~/.oh-my-zsh**: + + +``` +`% git clone http://github.com/robbyrussell/oh-my-zsh ~/.oh-my-zsh` +``` + +#### 2\. Switch the config file + +Next, back up your existing **.zshrc** file and move the default one from the oh-my-zsh install into its place. You can do this in one command using the **-b** (backup) option for **mv**, as long as your version of the **mv** command includes that option: + + +``` +% mv -b \ +~/.oh-my-zsh/templates/zshrc.zsh-template \ +~/.zshrc +``` + +#### 3\. Edit the config + +By default, Oh My Zsh's configuration is pretty bland, so you might want to reintegrate your custom **~/.zshrc** into the **.oh-my-zsh** config. To do that, append your old config to the end of the new one using the [cat command][4]: + + +``` +`% cat ~/.zshrc~ >> ~/.zshrc` +``` + +To see the default configuration and learn about some of the options it provides, open **~/.zshrc** in your favorite text editor. The file is well-commented, so it's a great way to get a good idea of what's possible. + +For instance, you can change the location of your **.oh-my-zsh** directory. At installation, it resides at the base of your home directory, but modern Linux convention, as defined by the [Free Desktop][5] specification, is to place directories that extend the functionality of applications in the **~/.local/share** directory. You can change it in **~/.zshrc** by editing the line: + + +``` +# Path to your oh-my-zsh installation. +export ZSH=$HOME/.local/share/oh-my-zsh +``` + +then moving the directory to that location: + + +``` +% mv ~/.oh-my-zsh \ +$HOME/.local/share/oh-my-zsh +``` + +If you're using MacOS, the specification is less clear, but arguably the most appropriate place for the directory is **$HOME/Library/Application\ Support**. + +### Relaunching Zsh + +After editing the config, you have to relaunch your shell. Before you do that, make sure you've finished any in-progress config changes; for instance, don't change the path of **.oh-my-zsh** then forget to move the directory to its new location. If you don't want to relaunch your shell, you can **source** the config file, just as you can with Bash: + + +``` +% source ~/.zshrc +➜  .oh-my-zsh git:(master) ✗ +``` + +You can ignore any warnings about missing update files; they will be resolved upon relaunch. + +### Changing your theme + +Installing Oh My Zsh sets your Z-shell theme to **robbyrussell**, a theme by the project's maintainer. This theme's changes are minimal, mostly involving the color of your prompt. + +To view all the available themes, list the contents of the **.oh-my-zsh** theme directory: + + +``` +➜  .oh-my-zsh git:(master) ✗ ls \ +~/.local/share/oh-my-zsh/themes +3den.zsh-theme +adben.zsh-theme +af-magic.zsh-theme +afowler.zsh-theme +agnoster.zsh-theme +[...] +``` + +To see screenshots of themes before trying them, visit the Oh My Zsh [wiki][6]. For even more themes, visit the [External themes][7] wiki page. 
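+
+If you would rather sample themes before settling on one, Oh My Zsh also accepts a special theme name that picks a bundled theme for you at each launch. The sketch below shows the relevant **~/.zshrc** line; note that the **random** pseudo-theme and the **$RANDOM_THEME** variable it sets are Oh My Zsh conventions that may differ slightly between versions:
+
+```
+# In ~/.zshrc: load a randomly chosen bundled theme in each new session
+ZSH_THEME="random"
+
+# Then, in the new shell, ask which theme was picked:
+# echo $RANDOM_THEME
+```
+
+If you like what the random pick gives you, set **ZSH_THEME** to that theme's name permanently and reload your config.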
+
+Most themes are simple to set up and use. Just change the value of the theme name in **.zshrc** and reload the config:
+
+
+```
+➜ ~ sed -i \
+'s/_THEME=\"robbyrussell\"/_THEME=\"linuxonly\"/g' \
+~/.zshrc
+➜ ~ source ~/.zshrc
+seth@darkstar:pts/0->/home/skenlon (0) ➜
+```
+
+Other themes require extra configuration. For example, to use the **agnoster** theme, you must first install the Powerline font. This is an open source font, and it's probably in your software repository if you're running Linux. Install it with:
+
+
+```
+`➜ ~ sudo dnf install powerline-fonts`
+```
+
+Set your theme in the config:
+
+
+```
+➜ ~ sed -i \
+'s/_THEME=\"linuxonly\"/_THEME=\"agnoster\"/g' \
+~/.zshrc
+```
+
+and then relaunch (a simple **source** won't work). Upon relaunch, you will see the new theme:
+
+![agnoster theme][8]
+
+### Installing plugins
+
+Over 200 plugins ship with Oh My Zsh, and you can see them by looking in **.oh-my-zsh/plugins**. Each plugin directory has a README file explaining what the plugin does.
+
+Some plugins are relatively simple. For instance, the **dnf**, **ubuntu**, **brew**, and **macports** plugins are collections of aliases to simplify interactions with the DNF, Apt, Homebrew, and MacPorts package managers.
+
+Others are more complex. The **git** plugin, active by default, detects when you're working in a [Git repository][9] and updates your shell prompt so that it lists the current branch and even indicates whether there are unmerged changes.
+
+To activate a plugin, add it to the plugin setting in **~/.zshrc**. For example, to add the **dnf** and **pass** plugins, open **~/.zshrc** in your favorite text editor:
+
+
+```
+`plugins=(git dnf pass)`
+```
+
+Save your changes and reload your Zsh session:
+
+
+```
+`% source ~/.zshrc`
+```
+
+The plugins are now active. You can test the **dnf** plugin by using one of the aliases it provides:
+
+
+```
+% dnfs fop
+====== Name Exactly Matched: fop ======
+fop.noarch : XSL-driven print formatter
+```
+
+Different plugins do different things, so you may want to install only one or two at a time to help you learn the new capabilities of your shell.
+
+#### Cheating
+
+Some Oh My Zsh plugins are pretty generic. If you look at a plugin that claims to be a Z-shell plugin and the code is also compatible with Bash, then you can use it in your Bash shell. Some plugins require Z-shell-specific functions, so this won't work with all of them. But you can load plugins like **dnf**, **ubuntu**, **[firewalld][10]**, and others into a Bash shell by using **source** to load the plugin of your choice. For example:
+
+
+```
+if [ -d $HOME/.local/share/oh-my-zsh/plugins ]; then
+        source $HOME/.local/share/oh-my-zsh/plugins/dnf/dnf.plugin.zsh
+fi
+```
+
+### To Z or not to Z
+
+Z-shell is a powerful shell both for its built-in features and the plugins contributed by its passionate community. Whether you use it as your primary shell or just as a shell you visit on weekends or holidays, you owe it to yourself to try it out.
+
+What are your favorite Z-shell themes and plugins? Tell us in the comments!
+ +-------------------------------------------------------------------------------- + +via: https://opensource.com/article/19/9/adding-plugins-zsh + +作者:[Seth Kenlon][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensource.com/users/sethhttps://opensource.com/users/sethhttps://opensource.com/users/sethhttps://opensource.com/users/seth +[b]: https://github.com/lujun9972 +[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/build_structure_tech_program_code_construction.png?itok=nVsiLuag (Someone wearing a hardhat and carrying code ) +[2]: https://opensource.com/article/19/9/getting-started-zsh +[3]: https://ohmyz.sh/ +[4]: https://opensource.com/article/19/2/getting-started-cat-command +[5]: http://freedesktop.org +[6]: https://github.com/robbyrussell/oh-my-zsh/wiki/Themes +[7]: https://github.com/robbyrussell/oh-my-zsh/wiki/External-themes +[8]: https://opensource.com/sites/default/files/uploads/zsh-agnoster.jpg (agnoster theme) +[9]: https://opensource.com/resources/what-is-git +[10]: https://opensource.com/article/19/7/make-linux-stronger-firewalls From 207285f6ca5781d75ef3a7d363c71c68314848ee Mon Sep 17 00:00:00 2001 From: DarkSun Date: Thu, 19 Sep 2019 00:54:40 +0800 Subject: [PATCH 179/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190918=20The=20?= =?UTF-8?q?community-led=20renaissance=20of=20open=20source?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/tech/20190918 The community-led renaissance of open source.md --- ...ommunity-led renaissance of open source.md | 79 +++++++++++++++++++ 1 file changed, 79 insertions(+) create mode 100644 sources/tech/20190918 The community-led renaissance of open source.md diff --git a/sources/tech/20190918 The community-led renaissance of open source.md b/sources/tech/20190918 The community-led renaissance of open source.md new file mode 100644 index 0000000000..2cea5c3985 --- /dev/null +++ b/sources/tech/20190918 The community-led renaissance of open source.md @@ -0,0 +1,79 @@ +[#]: collector: (lujun9972) +[#]: translator: ( ) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (The community-led renaissance of open source) +[#]: via: (https://opensource.com/article/19/9/community-led-renaissance) +[#]: author: (Donald Fischer https://opensource.com/users/dff) + +The community-led renaissance of open source +====== +Moving beyond the scarcity mindset of "open core." +![shapes of people symbols][1] + +With few commercial participants, early free software and open source communities were, by definition, community-led. Software was designed and created organically by communities of users in response to their needs and inspiration. The results, to a degree nobody predicted, were often magical. + +However, there were some missing pieces that prevented this magic from being unleashed at greater scale. As professional developers in larger organizations began to depend on open source software for key functions, they started looking for the same kinds of commercial support they were used to having for the proprietary software they purchased from companies like Microsoft, Oracle, and SAP. 
Someone to be accountable for fixing a new security vulnerability in a timely manner, or providing basic intellectual property assurances such as license verification and indemnification, or just delivering the everyday maintenance necessary to keep the software in good working order. + +First-generation open source businesses like Red Hat emerged to respond to these needs. They combined the best of both worlds: the flexibility and control of raw open source with the commercial support that enterprises depend on. These new open source businesses found their opportunity by _adding_ the missing—but necessary—commercial services to community-led open source projects. These services would be costly for organizations to provide on their own and potentially even more costly to do without. One early leader of that era, Cygnus Solutions, even adopted the counter-intuitive tagline "Making free software affordable." + +But back then, it was always overwhelmingly clear: The commercial vendors were in service of the community, filling in around the edges to enable commercial applications. The community was the star, and the companies were the supporting cast. + +### The dark ages of open core + +With the success of the original commercial open source companies like Red Hat, investment dollars flowed toward startups looking to harness the newfound commercial power of open source. + +Unfortunately, by and large, this generation of open source companies drew the wrong lessons from first-generation players like Red Hat. + +Witnessing the powerful combination of community-created technology with vendor-provided commercial capabilities, this new breed of company concluded that the vendors were the stars of the show, not the community. + +This marked a turning point for open source. + +In this vendor-centric view of the world, it was imagined that a single organization could generate the insights and set the roadmap for open source technologies. This drove a pervasive new belief that open source communities primarily represented a capital-efficient marketing channel rather than a new form of internet-enabled co-creation. + +These companies approached open source with a scarcity mindset. Instead of investing in community-led projects to unlock the potential of the crowd, they created vendor-dominated projects, released demo versions under open source licenses, and directed the bulk of their resources to companion proprietary technology that they withheld as paid-only, closed-source products. By reverting to some of [the worst aspects of traditional proprietary software][2]—like uncertain licensing terms, unclear support horizons, and unknowable cost—these businesses crowded out the best aspects of open source. + +As so often happens, this misreading of the open source model took on a new life when it was assigned an innocent-sounding brand name: "open core." + +The open core dog chased its tail into an escalating flurry of blog posts, pitch decks, and even dedicated open core conferences. In its worst moments, leading players in this movement even [sought to redefine the very meaning of the words open source][3]. + +In the worldview of open core, the vendors are at the center of the universe, and open source users are just a commodity to be exploited. + +### A community-led renaissance to restore balance + +While business interests whipped themselves into a frenzy around open core, the community of creators at the heart of open source just kept on building. 
While a handful of high-profile companies occupied the industry headlines, thousands of individual creators and teams kept on building software, one pull request at a time. + +It added up. Today, the modern application development platform isn't from a single vendor, or even a collection of vendors. It's the union of thousands of discrete open source packages—implemented in languages like JavaScript, Python, PHP, Ruby, Java, Go, .NET, Rust, R, and many others. Each element built for its own purpose, but together creating a beautiful tapestry that has become the foundation of all modern applications. + +In some cases, the creators behind these projects are assisted by organizations that arose naturally from the communities, like Ruby Together, the Apache Software Foundation, and the Python Software Foundation. But by and large, these creators are self-supported, making time in the margins of their day jobs and central pursuits to collaborate on the software that makes their work possible while collectively building a huge commons of open source software that's available for any individual or organization to use. + +But now, there's an emerging way for open source maintainers to participate in the value they create, which isn't about withholding value, but instead is about [creating _additional_ value][4]. + +In a revival and expansion of the principles that drove the first generation of community-led open source commercial players, creators are now coming together in a new form of collaboration. Rather than withholding software under a different license, they're partnering with each other to provide [the same kinds of professional assurances][5] that companies such as Red Hat discovered were necessary back in the day, but for the thousands of discrete components that make up the modern development platform. + +Today's generation of entrepreneurial open source creators is leaving behind the scarcity mindset that bore open core and its brethren. Instead, they're advancing an optimistic, additive, and still practical model that [adds missing commercial value][6] on top of raw open source. + +And by emulating first-generation open source companies, these creators are rediscovering a wide-open opportunity for value creation that benefits everyone. As commercial organizations engage with managed open source services sourced directly from the creators themselves, there's an immediate clarity in the alignment of interests between producers and consumers. + +The result? The end of the scarcity-mindset dark ages of open core, and a renaissance of technology driven by a new class of thriving, independent, full-time open source creators. 
+ +-------------------------------------------------------------------------------- + +via: https://opensource.com/article/19/9/community-led-renaissance + +作者:[Donald Fischer][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensource.com/users/dff +[b]: https://github.com/lujun9972 +[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/Open%20Pharma.png?itok=GP7zqNZE (shapes of people symbols) +[2]: https://blog.tidelift.com/the-closed-source-sustainability-crisis +[3]: https://opensource.com/article/19/4/fauxpen-source-bad-business +[4]: https://www.techrepublic.com/article/the-key-to-open-source-sustainability-is-good-old-fashioned-self-interest/ +[5]: https://tidelift.com/subscription/video/what-is-managed-open-source +[6]: https://blog.tidelift.com/what-is-managed-open-source From 71615cc99a2f9b85557df54a70d357963a8af8b4 Mon Sep 17 00:00:00 2001 From: DarkSun Date: Thu, 19 Sep 2019 00:55:26 +0800 Subject: [PATCH 180/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190918=20Electi?= =?UTF-8?q?on=20fraud:=20Is=20there=20an=20open=20source=20solution=3F?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/tech/20190918 Election fraud- Is there an open source solution.md --- ...fraud- Is there an open source solution.md | 107 ++++++++++++++++++ 1 file changed, 107 insertions(+) create mode 100644 sources/tech/20190918 Election fraud- Is there an open source solution.md diff --git a/sources/tech/20190918 Election fraud- Is there an open source solution.md b/sources/tech/20190918 Election fraud- Is there an open source solution.md new file mode 100644 index 0000000000..775afe501a --- /dev/null +++ b/sources/tech/20190918 Election fraud- Is there an open source solution.md @@ -0,0 +1,107 @@ +[#]: collector: (lujun9972) +[#]: translator: ( ) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (Election fraud: Is there an open source solution?) +[#]: via: (https://opensource.com/article/19/9/voting-fraud-open-source-solution) +[#]: author: (Jeff Macharyas https://opensource.com/users/jeffmacharyashttps://opensource.com/users/mbrownhttps://opensource.com/users/luis-ibanezhttps://opensource.com/users/jhibbets) + +Election fraud: Is there an open source solution? +====== +The Trust The Vote project is developing open source technology to help +keep elections honest. +![Team checklist][1] + +Can open source technology help keep our elections honest? With its [Trust The Vote Project][2], the [Open Source Election Technology (OSET) Institute][3] is working on making that a reality for elections in the United States and around the world. + +The project is developing an open, adaptable, flexible, full-featured, and innovative elections technology platform called [ElectOS][4]. It will support all aspects of elections administration and voting, including creating, marking, casting, and counting ballots and managing all back-office functions. The software is freely available under an Open Source Initiative (OSI)-recognized public license for adoption, adaptation, and deployment by anyone, including elections jurisdictions directly or, more commonly, commercial vendors or systems integrators. 
+ +With elections coming under more and more scrutiny due to vulnerable designs, aging machinery, hacking, foreign influence, and human incompetence, the OSET Institute is working on technology that will help ensure that every vote is counted as it was cast. + +### Mississippi churning + +The Mississippi Republican gubernatorial primary in August 2019 was called into question when a voting machine malfunctioned, denying a vote for candidate Bill Waller as the machine changed it to his opponent, Tate Reeves. The incident was [caught on camera][5]. + +"Around 40 states have voting machines that are a decade old or older," J. Alex Halderman, a computer science professor at the University of Michigan, tells [Politifact][6]. "There are reports of machines misbehaving in this manner after almost every election." That's rather alarming. Can open source be a solution to this problem? + +The OSET Institute was founded in January 2007 by venture capital advisers who are alumni of Apple, Mozilla, Netscape, and Sun Microsystems. It is a non-partisan, non-profit 501(c)3 organization that researches and designs public technology—open source technology—for use in the US and around the world. + +"For a long time, there have been systemic problems—market malformation and dysfunction—in the election industry. Due to market dynamics and industry performance issues, there has been no commercial incentive to make the investment in the necessary research and development to produce the kind of high-assurance, mission-critical systems for verifiable and accurate elections," says Gregory Miller, co-founder and COO of the OSET Institute. + +Reflecting back to the 2000 presidential election between Vice President Al Gore and Texas Governor George W. Bush, Miller says: "Nobody was thinking about digital attacks during the ['chadfest' of Florida][7] and even as recently as 2007." Miller adds that one of the most important aspects of public technology is that it must be entirely trustworthy. + +### Essential election technologies + +Most voting systems in use are based around proprietary, black-box hardware and software built on 1990s technology—Windows 2000 or older. Some newer machines are running Windows 7—which is scheduled to lose maintenance and support in January 2020. + +Miller says there are two crucial parts of the election process: the election administration systems and the voting systems. + + * Election administration systems in the back office are responsible for elections setup, administration, and operation, especially casting and counting ballots; voter registration; ballot design, layout, and distribution; poll-book configuration; ballot-marking device configuration; and election results reporting and analytics. + * Voting systems are simply the ballot marking, casting, and counting components. + + + +The most important element of the system is the bridge between the voting systems in polling places and the back-office administration systems. This behind-the-scenes process aggregates vote tallies into tabulations to determine the results. + +The key device—and arguably the Achilles Heel of the entire ecosystem—is the election management system (EMS), which is the connection between election administration and the voting systems. The EMS machine is a back-office element but also a component of the voting system. Because the EMS software typically resides on a desktop machine used for a variety of government functions that serve citizens, it is the element most vulnerable to attacks. 
+
+Despite the vote-changing problem in the Mississippi primary, Miller warns, "the vulnerability of attack is not to the voting machinery in the polling place but to the tabulation component of the back-office EMS or other vital configuration tools, including the configuration of poll books (those stacks of papers, binders, or … apps that check a voter in to cast a ballot). As a result, attackers need only to find a highly contentious swing state precinct to be disruptive."
+
+### Code causes change
+
+Because voting security vulnerabilities are largely software-based, "code causes change," Miller declares. But there are barriers to getting this done in time for the next US presidential election in November 2020. Foremost is the fact that there is little time left for OSET's team of 12 full-time people to finish the core voting platform and create separate EMS and voting system components, all based on a non-black-box, non-proprietary model that uses public, open source technology with off-the-shelf hardware.
+
+However, there is a lot more to do in addition to developing code for off-the-shelf hardware. OSET is developing new open data standards with NIST and the Election Assistance Commission (EAC). A new component-based certification process must be invented, and new design guidelines must be issued.
+
+Meanwhile, service contracts that last for decades or more are protecting legacy vendors and making migration to new systems a challenge. Miller explains, "there are three primary vendors that control 85% of voting systems in the US and 70% globally; with the barriers to entry that exist, it will take a finished system to display the opportunity for change."
+
+Getting there with open code is a process too. Miller says, "there is a very closely managed code-commit process, and the work faces far more scrutiny than an open source operating system, web server, or [content management system]. So, we are implementing a dual-sandbox model where one sandbox allows for wide-ranging, free-wheeling development and contributions. The other is the controlled environment that must pass security muster with the federal government in order for states to feel confident that the actual code for production systems is verifiable, accurate, secure, and transparent. We'll use a process for [passing] code across a review membrane that enables work in the less secure environment to be committed to the production environment. It's a process still in design."
+
+The licenses are a bit complex: for governments or vendors that have regulatory issues with procuring and deploying commercial systems composed of open source software, commercial hardware, and professional services, OSET offers the OSET Public License (OPL), an OSI-approved open source license based on the Mozilla Public License. For other research, development, and non-commercial use, or commercial use where certain procurement regulations are not an issue, the code is available under the GPL 2.0 license.
+
+Of OSET's activities, Miller says, "85% is code development—a democracy software foundry. Another 10% is cybersecurity advisory for election administrators, and the final 5% is public policy work (we are informers, not influencers in legislation). As a 501(c)3, we cannot—and do not—perform lobbying, but a portion of our mission is education. So, our work remains steadfastly nonpartisan and philanthropically funded by grant-making organizations and the public."
+
+Explains Miller: "We have a fairly straightforward charter: to build a trustworthy codebase that can exist on an inherently untrustworthy hardware base. Certain types of election administration apps and services can live in the cloud, while the voting system—a marriage of that software layer and hardware, federally certified to be deployed—must be built up from a hardened Linux kernel. So, a portion of our R&D lies in the arenas of trusted boot with hardware attestation and other important security architectures such as computer-assisted code randomization and so forth."
+
+### Work with the code
+
+There are two levels of access for people who want to work with the OSET Institute's Trust the Vote code. One is to contact the project to request access to certain code to advance development efforts; all legitimate requests will be honored, but the code is not yet publicly accessible. The other access point is the extensive work that the OSET Institute has done for third-party voter registration systems, such as Rock The Vote. That source code is publicly available [online][8].
+
+One component of this is [RockyOVR][9] for online voter registration services (it is operated by the OSET Institute and Rock The Vote with support from AWS). Another is [Grommet][10], an Android-native mobile app used by voter registration drives. [Siggy][11] and [VoteReady][12] are prototypes for new voter services under development that will be announced soon.
+
+The OSET Institute is continually on the lookout for talented volunteers to advance the project. Top needs include system architecture, engineering, and software development for both cloud-based and offline applications. These are not entry-level projects or positions, and in some cases, they require advanced skills in BIOS and firmware development; operating system internals; device drivers; and more.
+
+All developers at the OSET Institute start as volunteers, and the best are moved into contract and staff positions, as project funding allows, in a meritocratic process. The Institute is based in Palo Alto, Calif., but operations are distributed and virtual with offices and centers of development excellence in Arlington, Va.; Boston; Mountain View, Calif.; Portland, Ore.; San Francisco; and the University of Edinburgh, Scotland.
+ +-------------------------------------------------------------------------------- + +via: https://opensource.com/article/19/9/voting-fraud-open-source-solution + +作者:[Jeff Macharyas][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensource.com/users/jeffmacharyashttps://opensource.com/users/mbrownhttps://opensource.com/users/luis-ibanezhttps://opensource.com/users/jhibbets +[b]: https://github.com/lujun9972 +[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/checklist_todo_clock_time_team.png?itok=1z528Q0y (Team checklist) +[2]: https://trustthevote.org/ +[3]: https://www.osetfoundation.org/ +[4]: https://bit.ly/EOSt1 +[5]: https://twitter.com/STaylorRayburn/status/1166347828152680449 +[6]: https://www.politifact.com/truth-o-meter/article/2019/aug/29/viral-video-voting-machine-malfunction-mississippi/ +[7]: https://en.wikipedia.org/wiki/2000_United_States_presidential_election_in_Florida +[8]: https://github.com/TrustTheVote-Project +[9]: https://github.com/TrustTheVote-Project/Rocky-OVR +[10]: https://github.com/TrustTheVote-Project/Grommet +[11]: https://github.com/TrustTheVote-Project/Siggy +[12]: https://github.com/TrustTheVote-Project/VoteReady From b2772b90e2750a9229d8b79f556b7a0a8c2445a6 Mon Sep 17 00:00:00 2001 From: DarkSun Date: Thu, 19 Sep 2019 00:57:11 +0800 Subject: [PATCH 181/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190918=20How=20?= =?UTF-8?q?to=20remove=20carriage=20returns=20from=20text=20files=20on=20L?= =?UTF-8?q?inux?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/tech/20190918 How to remove carriage returns from text files on Linux.md --- ...rriage returns from text files on Linux.md | 114 ++++++++++++++++++ 1 file changed, 114 insertions(+) create mode 100644 sources/tech/20190918 How to remove carriage returns from text files on Linux.md diff --git a/sources/tech/20190918 How to remove carriage returns from text files on Linux.md b/sources/tech/20190918 How to remove carriage returns from text files on Linux.md new file mode 100644 index 0000000000..c51de1b918 --- /dev/null +++ b/sources/tech/20190918 How to remove carriage returns from text files on Linux.md @@ -0,0 +1,114 @@ +[#]: collector: (lujun9972) +[#]: translator: ( ) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (How to remove carriage returns from text files on Linux) +[#]: via: (https://www.networkworld.com/article/3438857/how-to-remove-carriage-returns-from-text-files-on-linux.html) +[#]: author: (Sandra Henry-Stocker https://www.networkworld.com/author/Sandra-Henry_Stocker/) + +How to remove carriage returns from text files on Linux +====== +When carriage returns (also referred to as Ctrl+M's) get on your nerves, don't fret. There are several easy ways to show them the door. +[Kim Siever][1] + +Carriage returns go back a long way – as far back as typewriters on which a mechanism or a lever swung the carriage that held a sheet of paper to the right so that suddenly letters were being typed on the left again. They have persevered in text files on Windows, but were never used on Linux systems. This incompatibility sometimes causes problems when you’re trying to process files on Linux that were created on Windows, but it's an issue that is very easily resolved. 
+
+The carriage return character, also referred to as **Ctrl+M**, would show up as an octal 15 if you were looking at the file with an **od** (octal dump) command. The characters **CRLF** are often used to represent the carriage return and linefeed sequence that ends lines on Windows text files. Those who like to gaze at octal dumps will spot the **\r \n**. Linux text files, by comparison, end with just linefeeds.
+
+**[ Two-Minute Linux Tips: [Learn how to master a host of Linux commands in these 2-minute video tutorials][2] ]**
+
+Here's a sample of **od** output with the lines containing the **CRLF** characters in both octal and character form highlighted.
+
+```
+$ od -bc testfile.txt
+0000000 124 150 151 163 040 151 163 040 141 040 164 145 163 164 040 146
+          T   h   i   s       i   s       a       t   e   s   t       f
+0000020 151 154 145 040 146 162 157 155 040 127 151 156 144 157 167 163
+          i   l   e       f   r   o   m       W   i   n   d   o   w   s
+0000040 056 015 012 111 164 047 163 040 144 151 146 146 145 162 145 156   <==
+          .  \r  \n   I   t   '   s       d   i   f   f   e   r   e   n   <==
+0000060 164 040 164 150 141 156 040 141 040 125 156 151 170 040 164 145
+          t       t   h   a   n       a       U   n   i   x       t   e
+0000100 170 164 040 146 151 154 145 015 012 167 157 165 154 144 040 142   <==
+          x   t       f   i   l   e  \r  \n   w   o   u   l   d       b   <==
+```
+
+While these characters don't represent a huge problem, they can sometimes interfere when you want to parse the text files in some way and don't want to have to code around their presence or absence.
+
+### 3 ways to remove carriage return characters from text files
+
+Fortunately, there are several ways to easily remove carriage return characters. Here are three options:
+
+#### dos2unix
+
+You might need to go through the trouble of installing it, but **dos2unix** is probably the easiest way to turn Windows text files into Unix/Linux text files. One command with one argument, and you're done. No second file name is required. The file will be changed in place.
+
+```
+$ dos2unix testfile.txt
+dos2unix: converting file testfile.txt to Unix format...
+```
+
+You should see the file length decrease, depending on how many lines it contains. A file with 100 lines would likely shrink by 99 characters, since only the last line will not end with the **CRLF** characters.
+
+Before:
+
+```
+-rw-rw-r-- 1 shs shs 121 Sep 14 19:11 testfile.txt
+```
+
+After:
+
+```
+-rw-rw-r-- 1 shs shs 118 Sep 14 19:12 testfile.txt
+```
+
+If you need to convert a large collection of files, don't fix them one at a time. Instead, put them all in a directory by themselves and run a command like this:
+
+```
+$ find . -type f -exec dos2unix {} \;
+```
+
+In this command, we use **find** to locate regular files and then run the **dos2unix** command to convert them one at a time. The {} in the command is replaced by the filename. You should be sitting in the directory with the files when you run it. This command could damage other types of files, such as those that contain octal 15 characters in some context other than a text file (e.g., bytes in an image file).
+
+#### sed
+
+You can also use **sed**, the stream editor, to remove carriage returns. You will, however, have to supply a second file name. Here's an example:
+
+```
+$ sed -e "s/^M//" before.txt > after.txt
+```
+
+One important thing to note is that you DON'T type what that command appears to be. You must enter **^M** by typing **Ctrl+V** followed by **Ctrl+M**. The "s" is the substitute command. The slashes separate the text we're looking for (the Ctrl+M) and the text (nothing in this case) that we're replacing it with.
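+
+As an aside, if you would rather script this cleanup than type control characters into a terminal, the same substitution is easy to express in a few lines of Python. The snippet below is a minimal sketch (the file name **fix_crlf.py** and the in-place rewrite strategy are this article's illustration, not part of any standard tool); it reads each file named on the command line as raw bytes and rewrites it with the carriage returns removed:
+
+```
+#!/usr/bin/env python3
+# fix_crlf.py - rewrite the named files in place, converting CRLF line
+# endings to plain LF. Working in binary mode avoids any guessing about
+# text encodings, and replacing the CRLF pair (rather than every \r)
+# leaves stray carriage returns inside a line untouched.
+import sys
+
+for path in sys.argv[1:]:
+    with open(path, "rb") as f:
+        data = f.read()
+    with open(path, "wb") as f:
+        f.write(data.replace(b"\r\n", b"\n"))
+```
+
+Like the **dos2unix** example above, this changes files in place, so try it on a copy first.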
+ +#### vi + +You can even remove carriage return (**Ctrl+M**) characters with **vi**, although this assumes you’re not running through hundreds of files and are maybe making some other changes, as well. You would type “**:**” to go to the command line and then type the string shown below. As with **sed**, the **^M** portion of this command requires typing **Ctrl+V** to get the **^** and then **Ctrl+M** to insert the **M**. The **%s** is a substitute operation, the slashes again separate the characters we want to remove and the text (nothing) we want to replace it with. The “**g**” (global) means to do this on every line in the file. + +``` +:%s/^M//g +``` + +#### Wrap-up + +The **dos2unix** command is probably the easiest to remember and most reliable way to remove carriage returns from text files. Other options are a little trickier to use, but they provide the same basic function. + +Join the Network World communities on [Facebook][3] and [LinkedIn][4] to comment on topics that are top of mind. + +-------------------------------------------------------------------------------- + +via: https://www.networkworld.com/article/3438857/how-to-remove-carriage-returns-from-text-files-on-linux.html + +作者:[Sandra Henry-Stocker][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://www.networkworld.com/author/Sandra-Henry_Stocker/ +[b]: https://github.com/lujun9972 +[1]: https://www.flickr.com/photos/kmsiever/5895380540/in/photolist-9YXnf5-cNmpxq-2KEvib-rfecPZ-9snnkJ-2KAcDR-dTxzKW-6WdgaG-6H5i46-2KzTZX-7cnSw7-e3bUdi-a9meh9-Zm3pD-xiFhs-9Hz6YM-ar4DEx-4PXAhw-9wR4jC-cihLcs-asRFJc-9ueXvG-aoWwHq-atwL3T-ai89xS-dgnntH-5en8Te-dMUDd9-aSQVn-dyZqij-cg4SeS-abygkg-f2umXt-Xk129E-4YAeNn-abB6Hb-9313Wk-f9Tot-92Yfva-2KA7Sv-awSCtG-2KDPzb-eoPN6w-FE9oi-5VhaNf-eoQgx7-eoQogA-9ZWoYU-7dTGdG-5B1aSS +[2]: https://www.youtube.com/playlist?list=PL7D2RMSmRO9J8OTpjFECi8DJiTQdd4hua +[3]: https://www.facebook.com/NetworkWorld/ +[4]: https://www.linkedin.com/company/network-world From f4501c4a9142658cb5538b29beaae9d084a5751c Mon Sep 17 00:00:00 2001 From: DarkSun Date: Thu, 19 Sep 2019 00:57:40 +0800 Subject: [PATCH 182/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190918=20The=20?= =?UTF-8?q?Protocols=20That=20Help=20Things=20to=20Communicate=20Over=20th?= =?UTF-8?q?e=20Internet?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/talk/20190918 The Protocols That Help Things to Communicate Over the Internet.md --- ...Things to Communicate Over the Internet.md | 143 ++++++++++++++++++ 1 file changed, 143 insertions(+) create mode 100644 sources/talk/20190918 The Protocols That Help Things to Communicate Over the Internet.md diff --git a/sources/talk/20190918 The Protocols That Help Things to Communicate Over the Internet.md b/sources/talk/20190918 The Protocols That Help Things to Communicate Over the Internet.md new file mode 100644 index 0000000000..6fbfa24bb0 --- /dev/null +++ b/sources/talk/20190918 The Protocols That Help Things to Communicate Over the Internet.md @@ -0,0 +1,143 @@ +[#]: collector: (lujun9972) +[#]: translator: ( ) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (The Protocols That Help Things to Communicate Over the Internet) +[#]: via: (https://opensourceforu.com/2019/09/the-protocols-that-help-things-to-communicate-over-the-internet/) +[#]: author: (Sapna Panchal 
https://opensourceforu.com/author/sapna-panchal/)
+
+The Protocols That Help Things to Communicate Over the Internet
+======
+
+[![][1]][2]
+
+_The Internet of Things is a system of connected, interrelated objects. These objects transmit data to servers for processing and, in turn, receive messages from the servers. These messages are sent and received using different protocols. This article discusses some of the protocols related to the IoT._
+
+The Internet of Things (IoT) is beginning to pervade more and more aspects of our lives. Everyone, everywhere is using it. Connected things use the Internet to collect information, send information back, or do both. The IoT is an architecture built from a combination of available technologies, and it helps to make our daily lives more pleasant and convenient.
+
+![Figure 1: IoT architecture][3]
+
+![Figure 2: Message Queuing Telemetry Transport protocol][4]
+
+**IoT architecture**
+Basically, IoT architecture has four components. In this article, we will explore each component to understand the architecture better.
+
+  * **Sensors:** These are present everywhere. They help to collect data from any location and then share it with the IoT gateway. As an example, sensors sense the temperature at different locations, which helps to gauge the weather conditions. This information is then passed to the IoT gateway. This is a basic example of how the IoT operates.
+  * **IoT gateway:** Once the information is collected from the sensors, it is passed on to the gateway. The gateway is a mediator between the sensor nodes and the World Wide Web. It processes the data that is collected from the sensor nodes and then transmits it to the Internet infrastructure.
+  * **Cloud server:** Once data is transmitted through the gateway, it is stored and processed in the cloud server.
+  * **Mobile app:** Using a mobile application, the user can view and access the data processed in the cloud server.
+
+
+
+This is the basic idea of the IoT and its architecture, along with its components. We now move on to the basic ideas behind the different IoT protocols.
+
+**IoT protocols**
+As mentioned earlier, connected things collect information, send information back, or do both, using the Internet. This is the fundamental basis of the IoT. To convey or send information, we need a protocol, which is a set of procedures used to transmit data between electronic devices.
+Essentially, we have two types of IoT protocols — the IoT network protocols and the IoT data protocols. This article discusses the IoT data protocols.
+
+![Figure 3: Advanced Message Queuing Protocol][5]
+
+![Figure 4: CoAP][6]
+
+**MQTT**
+The Message Queuing Telemetry Transport (MQTT) protocol was primarily designed for low-bandwidth networks, but is very popular today as an IoT protocol. It is used to exchange data between clients and the server, and it is a lightweight messaging protocol.
+
+This protocol has many advantages:
+
+  * It is small in size and has low power usage.
+  * It is a lightweight protocol.
+  * It uses very little network bandwidth.
+  * It works in real time.
+
+
+
+Considering all the above reasons, MQTT emerges as the perfect IoT data protocol.
+
+**How MQTT works:** MQTT is based on a client-server relationship. The server (the broker) manages the requests that come from different clients and sends the required information to clients. MQTT is based on two operations:
+i) _Publish:_ When the client sends data to the MQTT broker, this operation is known as 'Publish'.
+ii) _Subscribe:_ When the client receives data from the broker, this operation is known as 'Subscribe'.
+
+The MQTT broker is the mediator that handles these operations, primarily taking messages and delivering them to the application or client.
+
+Let's look at the example of a device temperature sensor, which sends readings to the MQTT broker, and then information is delivered to desktop or mobile applications. As stated earlier, 'Publish' means sending readings to the MQTT broker and 'Subscribe' means delivering the information to the desktop/mobile application.
+
+**AMQP**
+The Advanced Message Queuing Protocol is a peer-to-peer protocol, where one peer plays the role of the client application and the other peer plays the role of the delivery service or broker. It is built from a hard-and-fast (fixed) set of components that route and store messages within the delivery service or broker.
+
+The benefits of AMQP are:
+
+  * It helps to send messages without them getting lost.
+  * It helps to guarantee a 'one-time only' and secured delivery.
+  * It provides a secure connection.
+  * It always supports acknowledgments for message delivery or failure.
+
+
+
+**How AMQP works and its architecture:** The AMQP architecture is made up of the following parts.
+
+_**Exchange**_ – Messages that come from the publisher are accepted by Exchange, which routes them to the message queue.
+
+_**Message queue**_ – This is the combination of multiple queues and is helpful for processing the messages.
+
+_**Binding**_ – This helps to maintain the connectivity between Exchange and the message queue.
+The combination of Exchange and the message queues is known as the broker or AMQP broker.
+
+![Figure 5: Constrained Application Protocol architecture][7]
+
+**Constrained Application Protocol (CoAP)**
+This was initially used as a machine-to-machine (M2M) protocol, and later began to be used as an IoT protocol. It is a Web transfer protocol that is used with constrained nodes and constrained networks. CoAP uses the RESTful architecture, just like the HTTP protocol.
+The advantages CoAP offers are:
+
+  * It works as a REST model for small devices.
+  * As this is like HTTP, it's easy for developers to work on.
+  * It is a one-to-one protocol for transferring information between the client and server, directly.
+  * It is very simple to parse.
+
+
+
+**How CoAP works and its architecture:** From Figure 4, we can understand that CoAP is the combination of 'Request/Response and Message'. We can also say it has two layers – 'Request/Response' and 'Message'.
+Figure 5 clearly explains that the CoAP architecture is based on the client-server relationship, where…
+
+  * The client sends requests to the server.
+  * The server receives requests from the client and responds to them.
+
+
+
+**Extensible Messaging and Presence Protocol (XMPP)**
+This protocol is used to exchange messages in real time. It is used not only to communicate with others, but also to get information on the status of the user (away, offline, active). This protocol is widely used in real life, like in WhatsApp.
+The Extensible Messaging and Presence Protocol should be used because:
+
+  * It is free, open and easy to understand. Hence, it is very popular.
+  * It has secure authentication, and is extensible and flexible (see the sketch that follows).
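+
+To make that client-server flow concrete, here is a minimal echo client sketched with slixmpp, a third-party Python XMPP library. The library choice, the account name and the password below are illustrative assumptions rather than anything prescribed by the protocol; the architecture behind this exchange is described after the sketch.
+
+```
+import slixmpp
+
+
+class EchoBot(slixmpp.ClientXMPP):
+    """Log in, announce presence, and echo back any chat message."""
+
+    def __init__(self, jid, password):
+        super().__init__(jid, password)
+        self.add_event_handler("session_start", self.on_start)
+        self.add_event_handler("message", self.on_message)
+
+    async def on_start(self, event):
+        # Tell the server we are online and fetch the contact list.
+        self.send_presence()
+        await self.get_roster()
+
+    def on_message(self, msg):
+        # Reply only to ordinary chat messages, not errors or headlines.
+        if msg["type"] in ("chat", "normal"):
+            msg.reply("Echo: %(body)s" % msg).send()
+
+
+xmpp = EchoBot("user@example.com", "not-a-real-password")
+xmpp.connect()
+xmpp.process(forever=True)
+```
+
+Note how even this tiny client exercises both halves of the protocol's name: messaging (the echo reply) and presence (the send_presence() call).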
+ + + +![Figure 6: Extensible Messaging and Presence Protocol][8] + +**How XMPP works and its architecture:** In the XMPP architecture, each client has a unique name associated with it and communicates to other clients via the XMPP server. The XMPP client has either the same domain or a different one. + +In Figure 6, the XMPP client belongs to the same domain in which one XMPP client sends the information to the XMPP server. The server translates it and conveys the information to another client. +Basically, this protocol is the backbone that provides universal connectivity between different endpoint protocols. + +-------------------------------------------------------------------------------- + +via: https://opensourceforu.com/2019/09/the-protocols-that-help-things-to-communicate-over-the-internet/ + +作者:[Sapna Panchal][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensourceforu.com/author/sapna-panchal/ +[b]: https://github.com/lujun9972 +[1]: https://i0.wp.com/opensourceforu.com/wp-content/uploads/2019/09/Internet-of-things-illustration.jpg?resize=696%2C439&ssl=1 (Internet of things illustration) +[2]: https://i0.wp.com/opensourceforu.com/wp-content/uploads/2019/09/Internet-of-things-illustration.jpg?fit=1125%2C710&ssl=1 +[3]: https://i2.wp.com/opensourceforu.com/wp-content/uploads/2019/09/Figure-1-IoT-architecture.jpg?resize=350%2C133&ssl=1 +[4]: https://i0.wp.com/opensourceforu.com/wp-content/uploads/2019/09/Figure-2-Messaging-Queuing-Telemetry-Transmit-protocol.jpg?resize=350%2C206&ssl=1 +[5]: https://i0.wp.com/opensourceforu.com/wp-content/uploads/2019/09/Figure-3-Advance-Message-Queuing-Protocol.jpg?resize=350%2C160&ssl=1 +[6]: https://i0.wp.com/opensourceforu.com/wp-content/uploads/2019/09/Figure-4-CoAP.jpg?resize=350%2C84&ssl=1 +[7]: https://i0.wp.com/opensourceforu.com/wp-content/uploads/2019/09/Figure-5-Constrained-Application-Protocol-architecture.jpg?resize=350%2C224&ssl=1 +[8]: https://i1.wp.com/opensourceforu.com/wp-content/uploads/2019/09/Figure-6-Extensible-Messaging-and-Presence-Protocol.jpg?resize=350%2C46&ssl=1 From b06d137aa213a7c8ed7916ccf18dc1c7fdedf041 Mon Sep 17 00:00:00 2001 From: DarkSun Date: Thu, 19 Sep 2019 00:58:27 +0800 Subject: [PATCH 183/202] =?UTF-8?q?=E9=80=89=E9=A2=98:=2020190918=20Oracle?= =?UTF-8?q?=20Unleashes=20World=E2=80=99s=20Fastest=20Database=20Machine?= =?UTF-8?q?=20=E2=80=98Exadata=20X8M=E2=80=99?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sources/talk/20190918 Oracle Unleashes World-s Fastest Database Machine ‘Exadata X8M.md --- ...s Fastest Database Machine ‘Exadata X8M.md | 67 +++++++++++++++++++ 1 file changed, 67 insertions(+) create mode 100644 sources/talk/20190918 Oracle Unleashes World-s Fastest Database Machine ‘Exadata X8M.md diff --git a/sources/talk/20190918 Oracle Unleashes World-s Fastest Database Machine ‘Exadata X8M.md b/sources/talk/20190918 Oracle Unleashes World-s Fastest Database Machine ‘Exadata X8M.md new file mode 100644 index 0000000000..add06c37a7 --- /dev/null +++ b/sources/talk/20190918 Oracle Unleashes World-s Fastest Database Machine ‘Exadata X8M.md @@ -0,0 +1,67 @@ +[#]: collector: (lujun9972) +[#]: translator: ( ) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (Oracle Unleashes World’s Fastest Database Machine ‘Exadata X8M’) +[#]: via: 
(https://opensourceforu.com/2019/09/oracle-unleashes-worlds-fastest-database-machine-exadata-x8m/)
+[#]: author: (Longjam Dineshwori https://opensourceforu.com/author/dineshwori-longjam/)
+
+Oracle Unleashes World’s Fastest Database Machine ‘Exadata X8M’
+======
+
+  * _**Exadata X8M is the first database machine with integrated persistent memory and RoCE**_
+  * _**Oracle also announced availability of Oracle Zero Data Loss Recovery Appliance X8M (ZDLRA)**_
+
+
+
+![][1]
+
+Oracle has released its new Exadata Database Machine X8M with an aim to set a new bar in the database infrastructure market.
+
+Exadata X8M combines Intel Optane DC persistent memory and 100 gigabit remote direct memory access (RDMA) over Converged Ethernet (RoCE) to remove storage bottlenecks and dramatically increase performance for the most demanding workloads such as Online Transaction Processing (OLTP), analytics, IoT, fraud detection and high frequency trading.
+
+“With Exadata X8M, we deliver in-memory performance with all the benefits of shared storage for both OLTP and analytics,” said Juan Loaiza, executive vice president, mission-critical database technologies, Oracle.
+
+“Reducing response times by an order of magnitude using direct database access to shared persistent memory accelerates every OLTP application and is a game changer for applications that need real-time access to large amounts of data such as fraud detection and personalized shopping,” the official added.
+
+**What’s unique about it?**
+
+Oracle Exadata X8M uses RDMA directly from the database to access persistent memory in smart storage servers, bypassing the entire OS, IO and network software stacks. This results in lower latency and higher throughput. Using RDMA to bypass software stacks also frees CPU resources on storage servers to execute more Smart Scan queries in support of analytics workloads.
+
+**No More Storage Bottlenecks**
+
+“High-performance OLTP applications require a demanding mixture of high Input/Output Operations Per Second (IOPS) with low latency. Direct database access to shared persistent memory increases peak performance to 16 million SQL read IOPS, 2.5X greater than the industry-leading Exadata X8,” Oracle said in a statement.
+
+Additionally, Exadata X8M dramatically reduces the latency of critical database IOs by enabling remote IO latencies below 19 microseconds – more than 10X faster than the Exadata X8. These ultra-low latencies are achieved even for workloads requiring millions of IOs per second.
+
+**More Efficient and Better than AWS and Azure**
+
+The company claimed that compared to the fastest Amazon RDS storage for Oracle, Exadata X8M delivers up to 50X lower latency, 200X more IOPS and 15X more capacity.
+
+Compared to Azure SQL Database Service storage, it said, Exadata X8M delivers 100X lower latency, 150X more IOPS and 300X more capacity.
+
+According to Oracle, a single rack Exadata X8M delivers up to 2X the OLTP read IOPS, 3X the throughput and 5X lower latency than shared storage systems with persistent memory such as a single rack of Dell EMC PowerMax 8000.
+
+“By simultaneously supporting faster OLTP queries and greater throughput for analytics workloads, Exadata X8M is the ideal platform on which to converge mixed-workload environments to decrease IT costs and complexity,” it said.
+ +**Oracle Zero Data Loss Recovery Appliance X8** + +Oracle today also announced availability of Oracle Zero Data Loss Recovery Appliance X8M (ZDLRA), which uses new 100Gb RoCE for high throughput internal data transfers between compute and storage servers. + +Exadata and ZDLRA customers can now choose between RoCE or InfiniBand-based Engineered Systems for optimal flexibility in their architectural deployments. + +-------------------------------------------------------------------------------- + +via: https://opensourceforu.com/2019/09/oracle-unleashes-worlds-fastest-database-machine-exadata-x8m/ + +作者:[Longjam Dineshwori][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensourceforu.com/author/dineshwori-longjam/ +[b]: https://github.com/lujun9972 +[1]: https://i2.wp.com/opensourceforu.com/wp-content/uploads/2019/02/Oracle-Cloud.jpg?resize=350%2C212&ssl=1 From 2d5814be4db5ac0b863a59418de58cf8290f8f42 Mon Sep 17 00:00:00 2001 From: qfzy1233 Date: Thu, 19 Sep 2019 08:17:37 +0800 Subject: [PATCH 184/202] qfzy1233 is translating --- sources/tech/20190912 An introduction to Markdown.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sources/tech/20190912 An introduction to Markdown.md b/sources/tech/20190912 An introduction to Markdown.md index df13f64f6d..1e0a990913 100644 --- a/sources/tech/20190912 An introduction to Markdown.md +++ b/sources/tech/20190912 An introduction to Markdown.md @@ -1,5 +1,5 @@ [#]: collector: (lujun9972) -[#]: translator: ( ) +[#]: translator: (qfzy1233) [#]: reviewer: ( ) [#]: publisher: ( ) [#]: url: ( ) From 081d1a57cfe526891291dacc7d437d304f09d1cb Mon Sep 17 00:00:00 2001 From: geekpi Date: Thu, 19 Sep 2019 08:50:12 +0800 Subject: [PATCH 185/202] translated --- ...introduction to Virtual Machine Manager.md | 102 ------------------ ...introduction to Virtual Machine Manager.md | 99 +++++++++++++++++ 2 files changed, 99 insertions(+), 102 deletions(-) delete mode 100644 sources/tech/20190913 An introduction to Virtual Machine Manager.md create mode 100644 translated/tech/20190913 An introduction to Virtual Machine Manager.md diff --git a/sources/tech/20190913 An introduction to Virtual Machine Manager.md b/sources/tech/20190913 An introduction to Virtual Machine Manager.md deleted file mode 100644 index de43386f8f..0000000000 --- a/sources/tech/20190913 An introduction to Virtual Machine Manager.md +++ /dev/null @@ -1,102 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: (geekpi) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (An introduction to Virtual Machine Manager) -[#]: via: (https://opensource.com/article/19/9/introduction-virtual-machine-manager) -[#]: author: (Alan Formy-Duval https://opensource.com/users/alanfdosshttps://opensource.com/users/alanfdosshttps://opensource.com/users/bgamrathttps://opensource.com/users/marcobravo) - -An introduction to Virtual Machine Manager -====== -Virt-manager provides a full range of options for spinning up virtual -machines on Linux. -![A person programming][1] - -In my [series][2] about [GNOME Boxes][3], I explained how Linux users can quickly spin up virtual machines on their desktop without much fuss. Boxes is ideal for creating virtual machines in a pinch when a simple configuration is all you need. 
- -But if you need to configure more detail in your virtual machine, you need a tool that provides a full range of options for disks, network interface cards (NICs), and other hardware. This is where [Virtual Machine Manager][4] (virt-manager) comes in. If you don't see it in your applications menu, you can install it from your package manager or via the command line: - - * On Fedora: **sudo dnf install virt-manager** - * On Ubuntu: **sudo apt install virt-manager** - - - -Once it's installed, you can launch it from its application menu icon or from the command line by entering **virt-manager**. - -![Virtual Machine Manager's main screen][5] - -To demonstrate how to create a virtual machine using virt-manager, I'll go through the steps to set one up for Red Hat Enterprise Linux 8. - -To start, click **File** then **New Virtual Machine**. Virt-manager's developers have thoughtfully titled each step of the process (e.g., Step 1 of 5) to make it easy. Click **Local install media** and **Forward**. - -![Step 1 virtual machine creation][6] - -On the next screen, browse to select the ISO file for the operating system you want to install. (My RHEL 8 image is located in my Downloads directory.) Virt-manager automatically detects the operating system. - -![Step 2 Choose the ISO File][7] - -In Step 3, you can specify the virtual machine's memory and CPU. The defaults are 1,024MB memory and one CPU. - -![Step 3 Set CPU and Memory][8] - -I want to give RHEL ample room to run—and the hardware I'm using can accommodate it—so I'll increase them (respectively) to 4,096MB and two CPUs. - -The next step configures storage for the virtual machine; the default setting is a 10GB disk image. (I'll keep this setting, but you can adjust it for your needs.) You can also choose an existing disk image or create one in a custom location. - -![Step 4 Configure VM Storage][9] - -Step 5 is the place to name your virtual machine and click Finish. This is equivalent to creating a virtual machine or a Box in GNOME Boxes. While it's technically the last step, you have several options (as you can see in the screenshot below). Since the advantage of virt-manager is the ability to customize a virtual machine, I'll check the box labeled **Customize configuration before install** before I click **Finish**. - -Since I chose to customize the configuration, virt-manager opens a screen displaying a bunch of devices and settings. This is the fun part! - -Here you have another chance to name the virtual machine. In the list on the left, you can view details on various aspects, such as CPU, memory, disks, controllers, and many other items. For example, I can click on **CPUs** to verify the change I made in Step 3. - -![Changing the CPU count][10] - -I can also confirm the amount of memory I set. - -When installing a VM to run as a server, I usually disable or remove its sound capability. To do so, select **Sound** and click **Remove** or right-click on **Sound** and choose **Remove Hardware**. - -You can also add hardware with the **Add Hardware** button at the bottom. This brings up the **Add New Virtual Hardware** screen where you can add additional storage devices, memory, sound, etc. It's like having access to a very well-stocked (if virtual) computer hardware warehouse. - -![The Add New Hardware screen][11] - -Once you are happy with your VM configuration, click **Begin Installation**, and the system will boot and begin installing your specified operating system from the ISO. 
- -![Begin installing the OS][12] - -Once it completes, it reboots, and your new VM is ready for use. - -![Red Hat Enterprise Linux 8 running in VMM][13] - -Virtual Machine Manager is a powerful tool for desktop Linux users. It is open source and an excellent alternative to proprietary and closed virtualization products. - -Learn how Vagrant and Ansible can be used to provision virtual machines for web development. - --------------------------------------------------------------------------------- - -via: https://opensource.com/article/19/9/introduction-virtual-machine-manager - -作者:[Alan Formy-Duval][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://opensource.com/users/alanfdosshttps://opensource.com/users/alanfdosshttps://opensource.com/users/bgamrathttps://opensource.com/users/marcobravo -[b]: https://github.com/lujun9972 -[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/computer_keyboard_laptop_development_code_woman.png?itok=vbYz6jjb (A person programming) -[2]: https://opensource.com/sitewide-search?search_api_views_fulltext=GNOME%20Box -[3]: https://wiki.gnome.org/Apps/Boxes -[4]: https://virt-manager.org/ -[5]: https://opensource.com/sites/default/files/1-vmm_main_0.png (Virtual Machine Manager's main screen) -[6]: https://opensource.com/sites/default/files/2-vmm_step1_0.png (Step 1 virtual machine creation) -[7]: https://opensource.com/sites/default/files/3-vmm_step2.png (Step 2 Choose the ISO File) -[8]: https://opensource.com/sites/default/files/4-vmm_step3default.png (Step 3 Set CPU and Memory) -[9]: https://opensource.com/sites/default/files/6-vmm_step4.png (Step 4 Configure VM Storage) -[10]: https://opensource.com/sites/default/files/9-vmm_customizecpu.png (Changing the CPU count) -[11]: https://opensource.com/sites/default/files/11-vmm_addnewhardware.png (The Add New Hardware screen) -[12]: https://opensource.com/sites/default/files/12-vmm_rhelbegininstall.png -[13]: https://opensource.com/sites/default/files/13-vmm_rhelinstalled_0.png (Red Hat Enterprise Linux 8 running in VMM) diff --git a/translated/tech/20190913 An introduction to Virtual Machine Manager.md b/translated/tech/20190913 An introduction to Virtual Machine Manager.md new file mode 100644 index 0000000000..786efdb14b --- /dev/null +++ b/translated/tech/20190913 An introduction to Virtual Machine Manager.md @@ -0,0 +1,99 @@ +[#]: collector: (lujun9972) +[#]: translator: (geekpi) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (An introduction to Virtual Machine Manager) +[#]: via: (https://opensource.com/article/19/9/introduction-virtual-machine-manager) +[#]: author: (Alan Formy-Duval https://opensource.com/users/alanfdosshttps://opensource.com/users/alanfdosshttps://opensource.com/users/bgamrathttps://opensource.com/users/marcobravo) + +Virtual Machine Manager 简介 +====== +Virt-manager 为 Linux 虚拟化提供了全方位的选择。 +![A person programming][1] + +在我关于 [GNOME Boxes][3] 的[系列文章][2]中,我已经解释了 Linux 用户如何能够在他们的桌面上快速启动虚拟机。当你只需要简单的配置时,Box 可以在紧要关头创建虚拟机。 + +但是,如果你需要在虚拟机中配置更多详细信息,那么你就需要一个工具,为磁盘,网卡(NIC)和其他硬件提供全面的选项。这时就需要 [Virtual Machine Manager][4](virt-manager)了。如果在应用菜单中没有看到它,你可以从包管理器或命令行安装它: + + * 在 Fedora 上:**sudo dnf install virt-manager** +  * 在 Ubuntu 上:**sudo apt install virt-manager** + + + +安装完成后,你可以从应用菜单或在命令行中输入 **virt-manager** 启动。 + +![Virtual Machine Manager's main screen][5] + +为了演示如何使用 virt-manager 
创建虚拟机,我将设置一个 Red Hat Enterprise Linux 8 虚拟机。 + +首先,单击 **File** 然后点击 **New Virtual Machine**。Virt-manager 的开发者已经标记好了每一步(例如,第 1 步,共 5 步)来使其变得简单。单击 **Local install media** 和 **Forward**。 + +![Step 1 virtual machine creation][6] + +在下个页面中,选择要安装的操作系统的 ISO 文件。(RHEL 8 镜像位于我的下载目录中。)Virt-manager 自动检测操作系统。 + +![Step 2 Choose the ISO File][7] + +在步骤 3 中,你可以指定虚拟机的内存和 CPU。默认值为内存 1,024MB 和一个 CPU。 + +![Step 3 Set CPU and Memory][8] + +我想给 RHEL 充足的配置运行,我使用的硬件配置也充足 - 所以我将它们(分别)增加到 4,096MB 和两个 CPU。 + +下一步为虚拟机配置存储。默认设置是 10GB 硬盘。 (我保留此设置,但你可以根据需要进行调整。)你还可以选择现有磁盘镜像或在自定义位置创建一个磁盘镜像。 + +![Step 4 Configure VM Storage][9] + +步骤 5 是命名虚拟机并单击“完成”。这相当于创建了一台虚拟机或 GNOME Boxes 中的 Box。虽然技术上讲是最后一步,但你有几个选择(如下面的截图所示)。由于 virt-manager 的优点是能够自定义虚拟机,因此在单击 **Finish** 之前,我将选中 **Customize configuration before install** 的复选框。 + +因为我选择了自定义配置,virt-manager 打开了一个有一组设备和设置的页面。这个很有趣! + +这里你也可以命名虚拟机。在左侧列表中,你可以查看各个方面的详细信息,例如 CPU、内存、磁盘、控制器和许多其他项目。例如,我可以单击 **CPU** 来验证我在步骤 3 中所做的更改。 + +![Changing the CPU count][10] + +我也可以确认我设置的内存量。 + +当 VM 作为服务器运行时,我通常会禁用或删除声卡。为此,请选择 **Sound** 并单击 **Remove** 或右键单击 **Sound** 并选择 **Remove Hardware**。 + +你还可以使用底部的 **Add Hardware** 按钮添加硬件。这会打开 **Add New Virtual Hardware件** 页面,你可以在其中添加其他存储设备、内存、声卡等。这就像可以访问一个库存充足(如果虚拟)的计算机硬件仓库。 + +![The Add New Hardware screen][11] + +对 VM 配置感到满意后,单击 **Begin Installation**,系统将启动并开始从 ISO 安装指定的操作系统。 + +![Begin installing the OS][12] + +完成后,它会重新启动,你的新 VM 就可以使用了。 + +![Red Hat Enterprise Linux 8 running in VMM][13] + +Virtual Machine Manager 是桌面 Linux 用户的强大工具。它是开源的,是专有和封闭虚拟化产品的绝佳替代品。 + +-------------------------------------------------------------------------------- + +via: https://opensource.com/article/19/9/introduction-virtual-machine-manager + +作者:[Alan Formy-Duval][a] +选题:[lujun9972][b] +译者:[geekpi](https://github.com/geekpi) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensource.com/users/alanfdosshttps://opensource.com/users/alanfdosshttps://opensource.com/users/bgamrathttps://opensource.com/users/marcobravo +[b]: https://github.com/lujun9972 +[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/computer_keyboard_laptop_development_code_woman.png?itok=vbYz6jjb (A person programming) +[2]: https://opensource.com/sitewide-search?search_api_views_fulltext=GNOME%20Box +[3]: https://wiki.gnome.org/Apps/Boxes +[4]: https://virt-manager.org/ +[5]: https://opensource.com/sites/default/files/1-vmm_main_0.png (Virtual Machine Manager's main screen) +[6]: https://opensource.com/sites/default/files/2-vmm_step1_0.png (Step 1 virtual machine creation) +[7]: https://opensource.com/sites/default/files/3-vmm_step2.png (Step 2 Choose the ISO File) +[8]: https://opensource.com/sites/default/files/4-vmm_step3default.png (Step 3 Set CPU and Memory) +[9]: https://opensource.com/sites/default/files/6-vmm_step4.png (Step 4 Configure VM Storage) +[10]: https://opensource.com/sites/default/files/9-vmm_customizecpu.png (Changing the CPU count) +[11]: https://opensource.com/sites/default/files/11-vmm_addnewhardware.png (The Add New Hardware screen) +[12]: https://opensource.com/sites/default/files/12-vmm_rhelbegininstall.png +[13]: https://opensource.com/sites/default/files/13-vmm_rhelinstalled_0.png (Red Hat Enterprise Linux 8 running in VMM) From 9e155a34ea215f87af1f91c5fea1082bad9f8124 Mon Sep 17 00:00:00 2001 From: geekpi Date: Thu, 19 Sep 2019 09:16:59 +0800 Subject: [PATCH 186/202] translating --- ...ze and lock your Linux system (and why you would want to).md | 2 
+- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sources/tech/20190916 How to freeze and lock your Linux system (and why you would want to).md b/sources/tech/20190916 How to freeze and lock your Linux system (and why you would want to).md index 367113a47b..886974a8c0 100644 --- a/sources/tech/20190916 How to freeze and lock your Linux system (and why you would want to).md +++ b/sources/tech/20190916 How to freeze and lock your Linux system (and why you would want to).md @@ -1,5 +1,5 @@ [#]: collector: (lujun9972) -[#]: translator: ( ) +[#]: translator: (geekpi) [#]: reviewer: ( ) [#]: publisher: ( ) [#]: url: ( ) From f106e93bad9580ab58df33e84a6e89426c892461 Mon Sep 17 00:00:00 2001 From: Xingyu Wang Date: Thu, 19 Sep 2019 09:41:11 +0800 Subject: [PATCH 187/202] PRF MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit @name1e5s 翻译的很棒! --- ...an is Forced to Resign as FSF President.md | 58 ++++++++++--------- 1 file changed, 31 insertions(+), 27 deletions(-) diff --git a/translated/news/20190918 Amid Epstein Controversy, Richard Stallman is Forced to Resign as FSF President.md b/translated/news/20190918 Amid Epstein Controversy, Richard Stallman is Forced to Resign as FSF President.md index 959e369e20..aa5ab6bece 100644 --- a/translated/news/20190918 Amid Epstein Controversy, Richard Stallman is Forced to Resign as FSF President.md +++ b/translated/news/20190918 Amid Epstein Controversy, Richard Stallman is Forced to Resign as FSF President.md @@ -1,6 +1,6 @@ [#]: collector: (lujun9972) [#]: translator: (name1e5s) -[#]: reviewer: ( ) +[#]: reviewer: (wxy) [#]: publisher: ( ) [#]: url: ( ) [#]: subject: (Amid Epstein Controversy, Richard Stallman is Forced to Resign as FSF President) @@ -10,60 +10,64 @@ Richard Stallman 被迫辞去 FSF 主席的职务 ====== -_**Richard Stallman,自由软件基金会的创建者以及主席,已经辞去他的职务,开始寻求下一任主席。此前,因为 Stallman 对于爱泼斯坦事件中的受害者的观点,一小撮活动家以及媒体人发起了清除 Stallman 的运动。这份声明就是在这些活动后发生的。阅读全文以获得更多信息。**_ +> Richard Stallman,自由软件基金会的创建者以及主席,已经辞去主席及董事会职务。此前,因为 Stallman 对于爱泼斯坦事件中的受害者的观点,一小撮活动家以及媒体人发起了清除 Stallman 的运动。这份声明就是在这些活动后发生的。阅读全文以获得更多信息。 ![][1] ### Stallman 事件的背景概述 -如果您不知道这次事件发生的前因后果,请看本段的详细信息。 +如果你不知道这次事件发生的前因后果,请看本段的详细信息。 [Richard Stallman][2],66岁,是就职于 [MIT][3] 的计算机科学家。他最著名的成就就是在 1983 年发起了[自由软件运动][4]。他也开发了 GNU 项目旗下的部分软件,比如 GCC 和 Emacs。受自由软件运动影响选择使用 GPL 开源协议的项目不计其数。Linux 是其中最出名的项目之一。 -[Jeffrey Epstein][5],美国亿万富翁,金融大佬。其涉嫌为社会上流精英提供性交易服务(其中有未成年少女)而被指控成为性犯罪者。在受审期间,爱泼斯坦在监狱中自杀身亡。 +[Jeffrey Epstein][5](爱泼斯坦),美国亿万富翁,金融大佬。其涉嫌为社会上流精英提供性交易服务(其中有未成年少女)而被指控成为性犯罪者。在受审期间,爱泼斯坦在监狱中自杀身亡。 -[Marvin Lee Minsky][6],MIT 知名计算机科学家。他在 MIT 建立了人工智能实验室。2016 年,88 岁的 Minsky 逝世。在 Minsky 逝世后,一位名为 Misky 的爱泼斯坦事件受害者生成其在未成年时曾被“诱导”到爱泼斯坦的私人岛屿,与之发生性关系。 +[Marvin Lee Minsky][6],MIT 知名计算机科学家。他在 MIT 建立了人工智能实验室。2016 年,88 岁的 Minsky 逝世。在 Minsky 逝世后,一位名为 Misky 的爱泼斯坦事件受害者声称其在未成年时曾被“诱导”到爱泼斯坦的私人岛屿,与之发生性关系。 -但是这些与 Richard Stallman 有什么关系?这要从 Stallman 发给 MIT 计算机科学与人工智能实验室(CSAIL) 的学生以及附属机构就爱泼斯坦的捐款提出抗议的邮件列表的邮件说起。邮件全文翻译如下: +但是这些与 Richard Stallman 有什么关系?这要从 Stallman 发给 MIT 计算机科学与人工智能实验室(CSAIL)的学生以及附属机构就爱泼斯坦的捐款提出抗议的邮件列表的邮件说起。邮件全文翻译如下: > 周五事件的公告对 Marvin Minsky 来说是不公正的。 > -> “已故的人工智能 ’先锋‘ Marvin Minsky (被控告侵害了爱泼斯坦事件的受害者之一[2])” +> “已故的人工智能 ‘先锋’ Marvin Minsky (被控告侵害了爱泼斯坦事件的受害者之一\[2])” > -> 不公正之处在于 “侵害(assulting)” 这个用语。“性侵犯(sexual assault)” 这个用语非常的糢糊,夸大了指控的严重性:宣称某人做了 X 但误导别人,让别人觉得这个人做了 Y,Y 远远比 X 严重。 +> 不公正之处在于 “侵害assulting” 这个用语。“性侵犯sexual assault” 这个用语非常的糢糊,夸大了指控的严重性:宣称某人做了 X 但误导别人,让别人觉得这个人做了 Y,Y 远远比 X 严重。 > -> 上面引用的指控显然就是夸大。报导声称 Minksy 与爱泼斯坦的女眷之一发生了性关系(详见 )。我们假设这是真的(我找不到理由不相信)。 +> 上面引用的指控显然就是夸大。报导声称 Minksy 
与爱泼斯坦的女眷harem之一发生了性关系(详见 )。我们假设这是真的(我找不到理由不相信)。 > -> “侵害(assulting)” 这个词,意味着他使用了某种暴力。但那篇报道并没有提到这个,只说了他们发生了性关系。 +> “侵害assulting” 这个词,意味着他使用了某种暴力。但那篇报道并没有提到这个,只说了他们发生了性关系。 > > 我们可以想像很多种情况,但最合理的情况是,她在 Marvin 面前表现的像是完全自愿的。假设她是被爱泼斯坦强迫的,那爱泼斯坦有充足的理由让她对大多数人守口如瓶。 > -> 从各种的指控夸大事例中,我总结出,在指控时使用“性侵犯(sexual assault)”是绝对错误的。 +> 从各种的指控夸大事例中,我总结出,在指控时使用“性侵犯sexual assault”是绝对错误的。 > -> 无论你想要批判什么行为,你都应该使用特定的词汇来描述,以此避免批判本身天然的道德上的模糊性。 +> 无论你想要批判什么行为,你都应该使用特定的词汇来描述,以此避免批判的本质的道德模糊性。 ### “清除 Stallman” 的呼吁 -‘爱泼斯坦’在美国是颇具争议的话题。Stallman 对该敏感事件做出如此鲁莽的 “知识陈述” 不会有好结果,事实也是如此。 +‘爱泼斯坦’在美国是颇具争议的‘话题’。Stallman 对该敏感事件做出如此鲁莽的 “知识陈述” 不会有好结果,事实也是如此。 一位机器人学工程师从她的朋友那里收到了转发的邮件并发起了一个[清除 Stallman 的活动][7]。她要的不是澄清或者道歉,她只想要清除斯托曼,就算这意味着 “将 MIT 夷为平地” 也在所不惜。 -> 是,至少 Stallman 没有的确被控强奸任何人。但这就是我们的最高标准吗?这所声望极高的学院坚持的标准就是这样的吗? 如果这是麻省理工学院想要捍卫的、想要代表的标准的话,还不如把这破学校夷为平地… +> 是,至少 Stallman 没有被控强奸任何人。但这就是我们的最高标准吗?这所声望极高的学院坚持的标准就是这样的吗?如果这是麻省理工学院想要捍卫的、想要代表的标准的话,还不如把这破学校夷为平地… > > 如果有必要的话,就把所有人都清除出去,之后从废墟中建立出更好的秩序。 > -> Salem,发起“清除 Stallman“运动的机器人学专业学生 +> —— Salem,发起“清除 Stallman“运动的机器人学专业学生 -Salem 的大字报最初没有被主流媒体重视。但它还是被反对软件行业内精英崇拜以及性别偏见的积极分子发现了。 +Salem 的声讨最初没有被主流媒体重视。但它还是被反对软件行业内的精英崇拜以及性别偏见的积极分子发现了。 -> [#epstein][8] [#MIT][9] 嗨 记者没有回复我我很生气就自己写了这么个故事。 作为 MIT 的校友我还真是高兴啊🙃 +> [#epstein][8] [#MIT][9] 嗨 记者没有回复我我很生气就自己写了这么个故事。作为 MIT 的校友我还真是高兴啊🙃 > > — SZJG (@selamjie) [September 12, 2019][10] +. + > 是不是对于性侵儿童的 “杰出混蛋” 我们也可以辩护说 “万一这是你情我愿的” > > — Tracy Chou 👩🏻‍💻 (@triketora) [September 13, 2019][11] +. + > 多年来我就一直发推说 Richard "RMS" Stallman 这人有多恶心 —— 恋童癖、厌女症、还残障歧视 > > 不可避免的是,每次我这样做,都会有老哥检查我的数据来源,然后说 “这都是几年前的事了!他现在变了!” @@ -74,15 +78,15 @@ Salem 的大字报最初没有被主流媒体重视。但它还是被反对软 下面是 Sage Sharp 开头的一篇关于 Stallman 的行为如何对科技人员产生负面影响的帖子: -> 👇大家说下 Richard Stallman 对科技从业者的影响吧,尤其是女性。 [例如: 强奸,乱伦,残障歧视,性交易] +> 👇大家说下 Richard Stallman 对科技从业者的影响吧,尤其是女性。 [例如: 强奸、乱伦、残障歧视、性交易] > > [@fsf][13] 有必要永久禁止 Richard Stallman 担任自由软件基金会董事会主席。 > -> — Sage Sharp (@_sagesharp_) [September 16, 2019][14] +> — Sage Sharp (@\_sagesharp\_) [September 16, 2019][14] -Stallman 一直以来也不是一个圣人。 他粗暴,不合时宜、带有性别歧视的笑话多年来一直在进行。你可以在[这里][15]和[这里][16]读到。 +Stallman 一直以来也不是一个圣人。他粗暴,不合时宜、多年来一直在开带有性别歧视的笑话。你可以在[这里][15]和[这里][16]读到。 -很快这个消息就被 [The Vice][17],[每日野兽][18],[未来主义][19]等大媒体采访。他们把 Stallman 描绘成爱泼斯坦的捍卫者。在强烈的抗议声中,[GNOME 执行董事威胁要结束 GNOME 和 FSF 之间的关系][20]。 +很快这个消息就被 [The Vice][17]、[每日野兽][18],[未来主义][19]等大媒体采访。他们把 Stallman 描绘成爱泼斯坦的捍卫者。在强烈的抗议声中,[GNOME 执行董事威胁要结束 GNOME 和 FSF 之间的关系][20]。 最后,Stallman 先是从 MIT 辞职,现在又从 [自由软件基金会][21] 辞职。 @@ -92,13 +96,13 @@ Stallman 一直以来也不是一个圣人。 他粗暴,不合时宜、带有 我们见识到了,把一个人从他创建并为之工作了三十多年的组织中驱逐出去仅仅需要五天。这甚至还是在 Stallman 没有参与性交易丑闻的情况下。 -其中一些 “活动家” 过去也曾[针对 Linux 的作者 Linus Torvalds][23]。 Linux 基金会背后的管理层预见到了科技行业激进主义的增长趋势,因此他们制定了[适用于 Linux 内核开发的行为准则][24]并[强制 Torvalds 接受培训以改善他的行为][25]。 如果他们没有采取纠正措施,可能 Torvalds 也已经被批倒批臭了。 +其中一些 “活动家” 过去也曾[针对过 Linux 的作者 Linus Torvalds][23]。Linux 基金会背后的管理层预见到了科技行业激进主义的增长趋势,因此他们制定了[适用于 Linux 内核开发的行为准则][24]并[强制 Torvalds 接受培训以改善他的行为][25]。如果他们没有采取纠正措施,可能 Torvalds 也已经被批倒批臭了。 -忽视技术支持者的鲁莽行为和性别歧视是不可接受的,但是对于那些遇到不同意某种流行观点的人就贴大字报,施以私刑也是不道德的做法。我不支持 Stallman 和他过去的言论,但我也不能接受他以这种方式(被迫?)辞职。 +忽视技术支持者的鲁莽行为和性别歧视是不可接受的,但是对于那些遇到不同意某种流行观点的人就进行声讨,施以私刑也是不道德的做法。我不支持 Stallman 和他过去的言论,但我也不能接受他以这种方式(被迫?)辞职。 -Techrights 对此有一些有趣的评论,你可以在 [这里][26] 和 [这里][27] 看到。 +Techrights 对此有一些有趣的评论,你可以在[这里][26]和[这里][27]看到。 -_**您对此事有何看法? 
请文明分享您的观点和意见。过激评论将不会公布。**_ +*你对此事有何看法?请文明分享你的观点和意见。过激评论将不会公布。* -------------------------------------------------------------------------------- @@ -107,13 +111,13 @@ via: https://itsfoss.com/richard-stallman-controversy/ 作者:[Abhishek Prakash][a] 选题:[lujun9972][b] 译者:[name1e5s](https://github.com/name1e5s) -校对:[校对者ID](https://github.com/校对者ID) +校对:[wxy](https://github.com/wxy) 本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 [a]: https://itsfoss.com/author/abhishek/ [b]: https://github.com/lujun9972 -[1]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2019/09/stallman-conroversy.png?ssl=1 +[1]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2019/09/stallman-conroversy.png?w=800&ssl=1 [2]: https://en.wikipedia.org/wiki/Richard_Stallman [3]: https://en.wikipedia.org/wiki/Massachusetts_Institute_of_Technology [4]: https://en.wikipedia.org/wiki/Free_software_movement From 26dc5824619f796c21d7ce798d0957699f3bc04d Mon Sep 17 00:00:00 2001 From: Xingyu Wang Date: Thu, 19 Sep 2019 09:41:33 +0800 Subject: [PATCH 188/202] PUB @name1e5s https://linux.cn/article-11358-1.html --- ..., Richard Stallman is Forced to Resign as FSF President.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) rename {translated/news => published}/20190918 Amid Epstein Controversy, Richard Stallman is Forced to Resign as FSF President.md (99%) diff --git a/translated/news/20190918 Amid Epstein Controversy, Richard Stallman is Forced to Resign as FSF President.md b/published/20190918 Amid Epstein Controversy, Richard Stallman is Forced to Resign as FSF President.md similarity index 99% rename from translated/news/20190918 Amid Epstein Controversy, Richard Stallman is Forced to Resign as FSF President.md rename to published/20190918 Amid Epstein Controversy, Richard Stallman is Forced to Resign as FSF President.md index aa5ab6bece..e8a658cebc 100644 --- a/translated/news/20190918 Amid Epstein Controversy, Richard Stallman is Forced to Resign as FSF President.md +++ b/published/20190918 Amid Epstein Controversy, Richard Stallman is Forced to Resign as FSF President.md @@ -1,8 +1,8 @@ [#]: collector: (lujun9972) [#]: translator: (name1e5s) [#]: reviewer: (wxy) -[#]: publisher: ( ) -[#]: url: ( ) +[#]: publisher: (wxy) +[#]: url: (https://linux.cn/article-11358-1.html) [#]: subject: (Amid Epstein Controversy, Richard Stallman is Forced to Resign as FSF President) [#]: via: (https://itsfoss.com/richard-stallman-controversy/) [#]: author: (Abhishek Prakash https://itsfoss.com/author/abhishek/) From 311e76331f007f9bfcabac853b708877032c2537 Mon Sep 17 00:00:00 2001 From: "Xingyu.Wang" Date: Thu, 19 Sep 2019 09:46:03 +0800 Subject: [PATCH 189/202] =?UTF-8?q?Rename=20sources/talk/20190918=20Oracle?= =?UTF-8?q?=20Unleashes=20World-s=20Fastest=20Database=20Machine=20?= =?UTF-8?q?=E2=80=98Exadata=20X8M.md=20to=20sources/news/20190918=20Oracle?= =?UTF-8?q?=20Unleashes=20World-s=20Fastest=20Database=20Machine=20?= =?UTF-8?q?=E2=80=98Exadata=20X8M.md?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- ...cle Unleashes World-s Fastest Database Machine ‘Exadata X8M.md | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename sources/{talk => news}/20190918 Oracle Unleashes World-s Fastest Database Machine ‘Exadata X8M.md (100%) diff --git a/sources/talk/20190918 Oracle Unleashes World-s Fastest Database Machine ‘Exadata X8M.md b/sources/news/20190918 Oracle Unleashes World-s Fastest Database Machine ‘Exadata X8M.md similarity index 100% rename 
from sources/talk/20190918 Oracle Unleashes World-s Fastest Database Machine ‘Exadata X8M.md rename to sources/news/20190918 Oracle Unleashes World-s Fastest Database Machine ‘Exadata X8M.md From 5bd571e52134725d384a9d5fdac0710e513495f8 Mon Sep 17 00:00:00 2001 From: "Xingyu.Wang" Date: Thu, 19 Sep 2019 09:49:27 +0800 Subject: [PATCH 190/202] Rename sources/tech/20190918 Election fraud- Is there an open source solution.md to sources/talk/20190918 Election fraud- Is there an open source solution.md --- .../20190918 Election fraud- Is there an open source solution.md | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename sources/{tech => talk}/20190918 Election fraud- Is there an open source solution.md (100%) diff --git a/sources/tech/20190918 Election fraud- Is there an open source solution.md b/sources/talk/20190918 Election fraud- Is there an open source solution.md similarity index 100% rename from sources/tech/20190918 Election fraud- Is there an open source solution.md rename to sources/talk/20190918 Election fraud- Is there an open source solution.md From ead9a16a0adcb503b221eeb35af65244fac221ea Mon Sep 17 00:00:00 2001 From: "Xingyu.Wang" Date: Thu, 19 Sep 2019 09:51:02 +0800 Subject: [PATCH 191/202] Rename sources/tech/20190918 The community-led renaissance of open source.md to sources/talk/20190918 The community-led renaissance of open source.md --- .../20190918 The community-led renaissance of open source.md | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename sources/{tech => talk}/20190918 The community-led renaissance of open source.md (100%) diff --git a/sources/tech/20190918 The community-led renaissance of open source.md b/sources/talk/20190918 The community-led renaissance of open source.md similarity index 100% rename from sources/tech/20190918 The community-led renaissance of open source.md rename to sources/talk/20190918 The community-led renaissance of open source.md From cfea4daa5aed8a3fd4c54cba2db297b7908cfd45 Mon Sep 17 00:00:00 2001 From: Xingyu Wang Date: Thu, 19 Sep 2019 10:03:44 +0800 Subject: [PATCH 192/202] PRF @qfzy1223 --- ...0905 How to Change Themes in Linux Mint.md | 58 +++++++++---------- 1 file changed, 26 insertions(+), 32 deletions(-) diff --git a/translated/tech/20190905 How to Change Themes in Linux Mint.md b/translated/tech/20190905 How to Change Themes in Linux Mint.md index 54bc21cdca..30b4da73c1 100644 --- a/translated/tech/20190905 How to Change Themes in Linux Mint.md +++ b/translated/tech/20190905 How to Change Themes in Linux Mint.md @@ -1,6 +1,6 @@ [#]: collector: (lujun9972) [#]: translator: (qfzy1233) -[#]: reviewer: ( ) +[#]: reviewer: (wxy) [#]: publisher: ( ) [#]: url: ( ) [#]: subject: (How to Change Themes in Linux Mint) @@ -10,9 +10,11 @@ 如何在 Linux Mint 中更换主题 ====== -自始至终,使用 Cinnamon 桌面环境的 Linux Mint 都是一种卓越的体验。这也是[为何我喜爱 Linux Mint ][1]的主要原因之一。 +![](https://img.linux.net.cn/data/attachment/album/201909/19/100317ixxp3y1l7lljl47a.jpg) -自从 Mint 的开发团队 [开始更为严肃的对待设计][2], “桌面主题” 应用便成为了更换新主题,图标,按钮样式,窗口边框以及鼠标指针的重要方式,当然你也可以直接通过它安装新的主题。感兴趣么? 
让我们开始吧。 +一直以来,使用 Cinnamon 桌面环境的 Linux Mint 都是一种卓越的体验。这也是[为何我喜爱 Linux Mint][1]的主要原因之一。 + +自从 Mint 的开发团队[开始更为严肃的对待设计][2], “桌面主题” 应用便成为了更换新主题、图标、按钮样式、窗口边框以及鼠标指针的重要方式,当然你也可以直接通过它安装新的主题。感兴趣么?让我们开始吧。 ### 如何在 Linux Mint 中更换主题 @@ -20,70 +22,62 @@ ![Theme Applet provides an easy way of installing and changing themes][3] -在应用中中有一个“添加/删除”按钮,非常简单,不是么?而且,点击它,我们可以看到Cinnamon Spices( Cinnamon 的官方插件库)的主题按流行程度排序。 +在应用中有一个“添加/删除”按钮,非常简单吧。点击它,我们可以看到按流行程度排序的 Cinnamon Spices(Cinnamon 的官方插件库)的主题。 ![Installing new themes in Linux Mint Cinnamon][4] -要安装主题,你所要做的就是点击你喜欢的一个,然后等待它下载。之后,主题将在应用第一页的“Desktop”选项中显示可用。只需双击已安装的主题之一就可以开始使用它。 +要安装主题,你所要做的就是点击你喜欢的主题,然后等待它下载。之后,主题将在应用第一页的“Desktop”选项中显示可用。只需双击已安装的主题之一就可以开始使用它。 ![Changing themes in Linux Mint Cinnamon][5] -下面是默认的 Linux Mint 外观: +下面是默认的 Linux Mint 外观: ![Linux Mint Default Theme][6] -这是在我更换主题之后: +这是在我更换主题之后: ![Linux Mint with Carta Theme][7] 所有的主题都可以在 Cinnamon Spices 网站上获得更多的信息和更大的截图,这样你就可以更好地了解你的系统的外观。 -[浏览 Cinnamon 主题][8] +- [浏览 Cinnamon 主题][8] ### 在 Linux Mint 中安装第三方主题 -_“我在另一个网站上看到了这个优异的主题,但 Cinnamon Spices 网站上没有……”_ +> “我在另一个网站上看到了这个优异的主题,但 Cinnamon Spices 网站上没有……” -Cinnamon Spices 集成了许多优秀的主题,但你仍然会发现,你看到的主题并没有被 Cinnamon Spices 官方网站收录。 +Cinnamon Spices 集成了许多优秀的主题,但你仍然会发现,你看到的主题并没有被 Cinnamon Spices 官方网站收录。 -这时你可能会想:如果有别的办法就好了,对么?你可能会认为有(我的意思是……当然啦)。首先,我们可以在其他网站上找到一些很酷的主题。 +这时你可能会想:如果有别的办法就好了,对么?你可能会认为有(我的意思是……当然啦)。首先,我们可以在其他网站上找到一些很酷的主题。 -我推荐你去 Cinnamon 浏览主题。如果你喜欢什么,就下载吧。 +我推荐你去 Cinnamon Look 浏览一下那儿的主题。如果你喜欢什么,就下载吧。 -[在 Cinnamon 外观中获取更多主题][9] +- [在 Cinnamon Look 中获取更多主题][9] -下载了首选主题之后,你现在将得到一个压缩文件,其中包含安装所需的所有内容。提取它并保存到 ~/.themes. 迷糊么? “~” 代表了你的 home 文件夹的对应路径: /home/{YOURUSER}/.themes. +下载了首选主题之后,你现在将得到一个压缩文件,其中包含安装所需的所有内容。提取它并保存到 `~/.themes`。迷糊么? `~` 代表了你的家目录的对应路径:`/home/{YOURUSER}/.themes`。 -[][10] +然后跳转到你的家目录。按 `Ctrl+H` 来[显示 Linux 中的隐藏文件][11]。如果没有看到 `.themes` 文件夹,创建一个新文件夹并命名为 `.themes`。记住,文件夹名称开头的点很重要。 -建议在登录 Ubuntu 16.04 时读取“启动会话失败”的修复程序。 - -然后跳转到你的主目录。按Ctrl+H[显示Linux中的隐藏文件][11]。如果没有看到.themes文件夹,创建一个新文件夹并命名为.themes。记住,文件夹名称开头的点很重要。 - -将提取的主题文件夹从下载目录复制到你的 home 中的.themes文件夹。 +将提取的主题文件夹从下载目录复制到你的家目录中的 `.themes` 文件夹中。 最后,在上面提到的应用中查找已安装的主题。 -注记 +> 注记 +> +> 请记住,主题必须是 Cinnamon 相对应的,即使它是一个从 GNOME 复刻的系统也不行,并不是所有的 GNOME 主题都适用于 Cinnamon。 -请记住,主题必须是 Cinnamon 相对应的,即使它是一个从 GNOME 复刻的系统也不行,并不是所有的 GNOME 主题都适用于 Cinnamon 。 +改变主题是 Cinnamon 定制工作的一部分。你还可以[通过更改图标来更改 Linux Mint 的外观][12]。 -改变主题是 Cinnamon 定制的一部分。你还可以[通过更改图标来更改 Linux Mint 的外观][12]。 - -我希望你现在已经知道如何在 Linux Mint 中更改主题了。快去选取你喜欢的主题吧? 
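+作为参考,上面“解压并复制到 `~/.themes`”这一步大致相当于下面这几条命令(仅为示意,压缩包文件名和下载路径都是假设的,请按实际情况替换):
+
+```
+# 若 ~/.themes 不存在则先创建它(注意开头的点)
+mkdir -p ~/.themes
+
+# 将下载的主题压缩包解压到 ~/.themes 中(文件名仅为示例)
+unzip ~/Downloads/some-theme.zip -d ~/.themes/
+```
+
+之后,新主题就会出现在“桌面主题”应用的已安装列表里。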
- -### João Gondim - -来自巴西的Linux爱好者。 +我希望你现在已经知道如何在 Linux Mint 中更改主题了。快去选取你喜欢的主题吧。 -------------------------------------------------------------------------------- via: https://itsfoss.com/install-themes-linux-mint/ -作者:[It's FOSS Community][a] +作者:[It's FOSS][a] 选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) +译者:[qfzy1233](https://github.com/qfzy1233) +校对:[wxy](https://github.com/wxy) 本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 From c35b7a16eaf20d674735db48970af781560f7c1d Mon Sep 17 00:00:00 2001 From: Xingyu Wang Date: Thu, 19 Sep 2019 10:04:44 +0800 Subject: [PATCH 193/202] PUB @qfzy1223 https://linux.cn/article-11359-1.html --- .../20190905 How to Change Themes in Linux Mint.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) rename {translated/tech => published}/20190905 How to Change Themes in Linux Mint.md (98%) diff --git a/translated/tech/20190905 How to Change Themes in Linux Mint.md b/published/20190905 How to Change Themes in Linux Mint.md similarity index 98% rename from translated/tech/20190905 How to Change Themes in Linux Mint.md rename to published/20190905 How to Change Themes in Linux Mint.md index 30b4da73c1..dd2f69b044 100644 --- a/translated/tech/20190905 How to Change Themes in Linux Mint.md +++ b/published/20190905 How to Change Themes in Linux Mint.md @@ -1,8 +1,8 @@ [#]: collector: (lujun9972) [#]: translator: (qfzy1233) [#]: reviewer: (wxy) -[#]: publisher: ( ) -[#]: url: ( ) +[#]: publisher: (wxy) +[#]: url: (https://linux.cn/article-11359-1.html) [#]: subject: (How to Change Themes in Linux Mint) [#]: via: (https://itsfoss.com/install-themes-linux-mint/) [#]: author: (It's FOSS Community https://itsfoss.com/author/itsfoss/) From 1f49739a118f2f6854f19fdb82cfbf099a4bc953 Mon Sep 17 00:00:00 2001 From: Xingyu Wang Date: Thu, 19 Sep 2019 10:31:00 +0800 Subject: [PATCH 194/202] PRF @Morisun029 --- ...ck Linux Mint Version Number - Codename.md | 82 +++++++------------ 1 file changed, 31 insertions(+), 51 deletions(-) diff --git a/translated/tech/20190917 How to Check Linux Mint Version Number - Codename.md b/translated/tech/20190917 How to Check Linux Mint Version Number - Codename.md index dee320ad88..82fe028b60 100644 --- a/translated/tech/20190917 How to Check Linux Mint Version Number - Codename.md +++ b/translated/tech/20190917 How to Check Linux Mint Version Number - Codename.md @@ -1,6 +1,6 @@ [#]: collector: (lujun9972) [#]: translator: (Morisun029) -[#]: reviewer: ( ) +[#]: reviewer: (wxy) [#]: publisher: ( ) [#]: url: ( ) [#]: subject: (How to Check Linux Mint Version Number & Codename) @@ -10,52 +10,41 @@ 如何查看 Linux Mint 版本号和代号 ====== +Linux Mint 每两年发布一次主版本(如 Mint 19),每六个月左右发布一次次版本(如 Mint 19.1、19.2 等)。 你可以自己升级 Linux Mint 版本,而次版本也会自动更新。 -Linux Mint 每两年发布一次主版本(如 Mint 19),每六个月左右发布一次次版本(如 Mint 19.1,19.2等)。 你可以自己升级 Linux Mint 版本,次版本也会自动更新。 - - -在所有这些版本中,你可能想知道你正在使用的是哪个版本。 了解 Linux Mint 版本号可以帮助你确定某个特定软件是否适用于你的系统,或者检查你的系统是否已达到使用寿命。 - -你可能需要 Linux Mint 版本号有多种原因,你也有多种方法可以获取此信息。 -让我向你展示用图形和命令行的方式获取 Mint 版本信息。 - - - * [使用命令行查看 Linux Mint 版本信息][1] - * [使用GUI(图形用户界面)查看 Linux Mint 版本信息][2] +在所有这些版本中,你可能想知道你正在使用的是哪个版本。了解 Linux Mint 版本号可以帮助你确定某个特定软件是否适用于你的系统,或者检查你的系统是否已达到使用寿命。 +你可能需要 Linux Mint 版本号有多种原因,你也有多种方法可以获取此信息。让我向你展示用图形和命令行的方式获取 Mint 版本信息。 +* [使用命令行查看 Linux Mint 版本信息][1] +* [使用 GUI(图形用户界面)查看 Linux Mint 版本信息][2] ### 使用终端查看 Linux Mint 版本号的方法 ![][3] -我将介绍几种使用非常简单的命令查看 Linux Mint 版本号和代号的方法。 你可以从 **菜单** 中打开**终端** ,或按**CTRL+ALT+T** (默认热键)打开。 +我将介绍几种使用非常简单的命令查看 Linux 
Mint 版本号和代号的方法。 你可以从 “菜单” 中打开终端,或按 `CTRL+ALT+T`(默认热键)打开。 +本文中的最后两个命令还会输出你当前的 Linux Mint 版本所基于的 Ubuntu 版本。 -本文中的 **最后两个命令** 还会输出你当前的 Linux Mint 版本所基于的 **Ubuntu 版本**。 - -#### 1\. /etc/issue - - -从最简单的 CLI 方法开始,你可以打印出 **/etc/issue** 的内容来检查你的 **版本号** 和 **代号** : +#### 1、/etc/issue +从最简单的 CLI 方法开始,你可以打印出 `/etc/issue` 的内容来检查你的版本号和代号: ``` $ cat /etc/issue Linux Mint 19.2 Tina \n \l ``` -#### 2\. hostnamectl +#### 2、hostnamectl ![hostnamectl][4] +这个命令(`hostnamectl`)打印的信息几乎与“系统信息”中的信息相同。你可以看到你的操作系统(带有版本号)以及你的内核版本。 -这一个命令(**hostnamectl**)打印的信息几乎与 **系统信息** 中的信息相同。 你可以看到你的 **操作系统**(带有**版本号**)以及你的 **内核版本**。3. -#### 3\. lsb_release +#### 3、lsb_release -**lsb_release** 是一个非常简单的Linux实用程序,用于查看有关你的发行版本的基本信息: +`lsb_release` 是一个非常简单的 Linux 实用程序,用于查看有关你的发行版本的基本信息: ``` $ lsb_release -a No LSB modules are available. Distributor ID: Linuxmint Description: Linux Mint 19.2 Tina Release: 19.2 Codename: tina ``` -**注:** *我使用 **–***_**a**_ _标签打印所有参数, 但你也可以使用 **-s** 作为简写格式, **-d** 用于描述等 (检查所有标签的 **man lsb_release** )._ +**注:** 我使用 `-a` 标签打印所有参数,但你也可以使用 `-s` 作为简写格式,`-d` 用于描述等(用 `man lsb_release` 查看所有选项) -#### 4\. /etc/linuxmint/info +#### 4、/etc/linuxmint/info ![/etc/linuxmint/info][5] -This isn't a command, but rather a file on any Linux Mint install. Simply use cat command to print it's contents to your terminal and see your **Release Number** and **Codename**. -这不是命令,而是所有Linux Mint 安装上的文件。 只需使用 cat 命令将其内容打印到终端,然后查看你的**版本号** 和**代号** 。 +这不是命令,而是 Linux Mint 系统上的文件。只需使用 `cat` 命令将其内容打印到终端,然后查看你的版本号和代号。 -[][6] - -建议阅读避免在 ELemetary OS Freya 中出现两个 Chrome 图标[快速提示] -#### 5\. 使用 /etc/os-release 命令也可以获取到 Ubuntu 代号 +#### 5、使用 /etc/os-release 命令也可以获取到 Ubuntu 代号 ![/etc/os-release][7] +Linux Mint 基于 Ubuntu。每个 Linux Mint 版本都基于不同的 Ubuntu 版本。了解你的 Linux Mint 版本所基于的 Ubuntu 版本有助你在必须要使用 Ubuntu 版本号的情况下使用(比如你需要在 [Linux Mint 中安装最新的 Virtual Box][8]添加仓库时)。 -Linux Mint 基于 Ubuntu。 每个 Linux Mint 版本都基于不同的 Ubuntu 版本。了解 Linux Mint 版本所基于的 Ubuntu 版本有助你在必须要使用 Ubuntu 版本号的情况下使用-在你需要在 [Linux Mint 中安装最新的Virtual Box][8]. 添加仓库时。 +`os-release` 则是另一个类似于 `info` 的文件,向你展示 Linux Mint 所基于的 Ubuntu 版本代号。 -os-release 则是另一个类似于**info**的文件,向你展示 Linux Mint 所基于的 Ubuntu 版本代号。 -#### 6\. 使用 /etc/upstream-release/lsb-release 只能获取到 Ubuntu 的基本信息 +#### 6、使用 /etc/upstream-release/lsb-release 只获取 Ubuntu 的基本信息 -如果你只想要查看有关 **Ubuntu** 的基本信息,请输出 **/etc/upstream-release/lsb-release**: +如果你只想要查看有关 Ubuntu 的基本信息,请输出 `/etc/upstream-release/lsb-release`: ``` $ cat /etc/upstream-release/lsb-release DISTRIB_ID=Ubuntu DISTRIB_RELEASE=18.04 DISTRIB_CODENAME=bionic DISTRIB_DESCRIPTION="Ubuntu 18.04 LTS" ``` -特别提示: [你可以使用 **uname** 命令查看 Linux 内核版本][9]: +特别提示:[你可以使用 uname 命令查看 Linux 内核版本][9]。 ``` $ uname -r 4.15.0-54-generic ``` -**注:** _**-r** 代表 **release**, 你可以使用 **man uname** 查看其他信息。 +**注:** `-r` 代表 release,你可以使用 `man uname` 查看其他信息。 ### 使用 GUI 查看 Linux Mint 版本信息 如果你对终端和命令行不满意,可以使用图形方法。如你所料,这个非常明了。 -打开 **Menu** (左下角), 然后转到 **Preferences > System Info**: +打开“菜单” (左下角),然后转到“偏好设置 > 系统信息”: ![Linux Mint 菜单][10] -或者,在菜单中,你可以搜索 **System Info**: +或者,在菜单中,你可以搜索“System Info”: ![Menu Search System Info][11] -在这里,你可以看到你的操作系统(包括版本号),内核和 DE 的版本号: +在这里,你可以看到你的操作系统(包括版本号),内核和桌面环境的版本号: ![System Info][12] -**总结** +### 总结 -我已经介绍了一些不同的方法,用这些方法你可以快速查看你正在使用的 Linux Mint 的版本和代号(以及所基于的Ubuntu 版本和内核)。 我希望这个初学者教程对你有所帮助。请在评论中告诉我们你最喜欢哪个方法! +我已经介绍了一些不同的方法,用这些方法你可以快速查看你正在使用的 Linux Mint 的版本和代号(以及所基于的 Ubuntu 版本和内核)。我希望这个初学者教程对你有所帮助。请在评论中告诉我们你最喜欢哪个方法!
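+作为补充,这里有一个把前面几种命令行查询方式汇总起来的示意脚本(仅供参考,假设上述文件和命令在你的系统上都可用):
+
+```
+#!/bin/bash
+# 示意脚本:一次性输出 Linux Mint 版本信息(命令均来自上文)
+echo "== /etc/issue =="
+cat /etc/issue
+
+echo "== lsb_release =="
+lsb_release -a 2>/dev/null
+
+echo "== 上游 Ubuntu 版本 =="
+cat /etc/upstream-release/lsb-release
+
+echo "== 内核版本 =="
+uname -r
+```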
-------------------------------------------------------------------------------- @@ -138,8 +118,8 @@ via: https://itsfoss.com/check-linux-mint-version/ 作者:[Sergiu][a] 选题:[lujun9972][b] -译者:[Morisun029](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) +译者:[Morisun029](https://github.com/Morisun029) +校对:[wxy](https://github.com/wxy) 本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 From 9a2ed48867249c9461b5a4c278e0178da09d01bf Mon Sep 17 00:00:00 2001 From: Xingyu Wang Date: Thu, 19 Sep 2019 10:31:31 +0800 Subject: [PATCH 195/202] PUB @Morisun029 https://linux.cn/article-11360-1.html --- ...90917 How to Check Linux Mint Version Number - Codename.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) rename {translated/tech => published}/20190917 How to Check Linux Mint Version Number - Codename.md (98%) diff --git a/translated/tech/20190917 How to Check Linux Mint Version Number - Codename.md b/published/20190917 How to Check Linux Mint Version Number - Codename.md similarity index 98% rename from translated/tech/20190917 How to Check Linux Mint Version Number - Codename.md rename to published/20190917 How to Check Linux Mint Version Number - Codename.md index 82fe028b60..5f102dfa89 100644 --- a/translated/tech/20190917 How to Check Linux Mint Version Number - Codename.md +++ b/published/20190917 How to Check Linux Mint Version Number - Codename.md @@ -1,8 +1,8 @@ [#]: collector: (lujun9972) [#]: translator: (Morisun029) [#]: reviewer: (wxy) -[#]: publisher: ( ) -[#]: url: ( ) +[#]: publisher: (wxy) +[#]: url: (https://linux.cn/article-11360-1.html) [#]: subject: (How to Check Linux Mint Version Number & Codename) [#]: via: (https://itsfoss.com/check-linux-mint-version/) [#]: author: (Sergiu https://itsfoss.com/author/sergiu/) From 6f9db2e391267fabcc18ea00e64a0dc8836ec259 Mon Sep 17 00:00:00 2001 From: laingke Date: Thu, 19 Sep 2019 18:45:26 +0800 Subject: [PATCH 196/202] =?UTF-8?q?20190129-Create-an-online-store-with-th?= =?UTF-8?q?is-Java-based-framework=20=E7=BF=BB=E8=AF=91=E5=AE=8C=E6=88=90?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- ...ne store with this Java-based framework.md | 150 +++++++++--------- 1 file changed, 75 insertions(+), 75 deletions(-) diff --git a/sources/tech/20190129 Create an online store with this Java-based framework.md b/sources/tech/20190129 Create an online store with this Java-based framework.md index 6fb9bc5a6b..5c0a9ab78e 100644 --- a/sources/tech/20190129 Create an online store with this Java-based framework.md +++ b/sources/tech/20190129 Create an online store with this Java-based framework.md @@ -7,20 +7,20 @@ [#]: via: (https://opensource.com/article/19/1/scipio-erp) [#]: author: (Paul Piper https://opensource.com/users/madppiper) -Create an online store with this Java-based framework +使用这个 Java 框架创建一个在线商店 ====== -Scipio ERP comes with a large range of applications and functionality. +Scipio ERP 具有广泛的应用程序和功能。 ![](https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/osdc_whitehurst_money.png?itok=ls-SOzM0) -So you want to sell products or services online, but either can't find a fitting software or think customization would be too costly? [Scipio ERP][1] may just be what you are looking for. +所以,你想在网上销售产品或服务,但要么找不到合适的软件,要么认为定制成本太高? [Scipio ERP][1] 也许正是你想要的。 -Scipio ERP is a Java-based open source e-commerce framework that comes with a large range of applications and functionality. 
The project was forked from [Apache OFBiz][2] in 2014 with a clear focus on better customization and a more modern appeal. The e-commerce component is quite extensive and works in a multi-store setup, internationally, and with a wide range of product configurations, and it's also compatible with modern HTML frameworks. The software also provides standard applications for many other business cases, such as accounting, warehouse management, or sales force automation. It's all highly standardized and therefore easy to customize, which is great if you are looking for more than a virtual cart. +Scipio ERP 是一个基于 Java 的开放源码电子商务框架,具有广泛的应用程序和功能。这个项目在 2014 年从 [Apache OFBiz][2] fork 而来,侧重于更好的定制能力和更现代的观感。其电子商务组件相当全面,支持多商店部署和国际化,提供丰富的产品配置,而且它还兼容现代 HTML 框架。该软件还为许多其他业务案例提供标准应用程序,例如会计、仓库管理或销售人员自动化。这一切都是高度标准化的,因此易于定制,如果您想要的不仅仅是一个虚拟购物车,这是非常棒的。 -The system makes it very easy to keep up with modern web standards, too. All screens are constructed using the system's "[templating toolkit][3]," an easy-to-learn macro set that separates HTML from all applications. Because of it, every application is already standardized to the core. Sounds confusing? It really isn't—it all looks a lot like HTML, but you write a lot less of it. +该系统也使得跟上现代 web 标准变得非常容易。所有界面都是使用系统的“[模板工具包][3]”构建的,这是一个易于学习的宏集,可以将 HTML 与所有应用程序分开。正因为如此,每个应用程序都已经标准化到核心。听起来令人困惑?其实并不难,它看起来很像 HTML,但要写的内容少得多。 -### Initial setup +### 初始安装 -Before you get started, make sure you have Java 1.8 (or greater) SDK and a Git client installed. Got it? Great! Next, check out the master branch from GitHub: +在您开始之前,请确保您已经安装了 Java 1.8(或更高版本)的 SDK 以及一个 Git 客户端。完成了?太棒了!接下来,从 Github 检出主分支: ``` git clone https://github.com/ilscipio/scipio-erp.git cd scipio-erp git checkout master ``` -To set up the system, simply run **./install.sh** and select either option from the command line. Throughout development, it is best to stick to an **installation for development** (Option 1), which will also install a range of demo data. For professional installations, you can modify the initial config data ("seed data") so it will automatically set up the company and catalog data for you. By default, the system will run with an internal database, but it [can also be configured][4] with a wide range of relational databases such as PostgreSQL and MariaDB. +要安装系统,只需要运行 **./install.sh** 并从命令行中选择任一选项。在开发过程中,最好一直使用 **installation for development** (选项 1),它还将安装一系列演示数据。对于专业安装,您可以修改初始配置数据(“种子数据”),以便自动为您设置公司和目录数据。默认情况下,系统将使用内部数据库运行,但是它[也可以配置][4]使用各种关系数据库,比如 PostgreSQL 和 MariaDB 等。 ![Setup wizard][6] +![安装向导][6] -Follow the setup wizard to complete your initial configuration, +按照安装向导完成初始配置。 -Start the system with **./start.sh** and head over to **** to complete the configuration. If you installed with demo data, you can log in with username **admin** and password **scipio**. During the setup wizard, you can set up a company profile, accounting, a warehouse, your product catalog, your online store, and additional user profiles. Keep the website entries on the product store configuration screen for now. The system allows you to run multiple webstores with different underlying code; unless you want to do that, it is easiest to stick to the defaults. +通过命令 **./start.sh** 启动系统,然后打开链接 **** 完成配置。如果您安装了演示数据,您可以使用用户名 **admin** 和密码 **scipio** 进行登录。在安装向导中,您可以设置公司简介、会计、仓库、产品目录、在线商店和额外的用户配置信息。暂时在产品商店配置界面上跳过网站实体的配置。系统允许您使用不同的底层代码运行多个在线商店;除非您想这样做,一直选择默认值是最简单的。 -Congratulations, you just installed Scipio ERP!
Play around with the screens for a minute or two to get a feel for the functionality. +祝贺您,您刚刚安装了 Scipio ERP!在界面上操作一两分钟,感受一下它的功能。 -### Shortcuts +### 捷径 -Before you jump into the customization, here are a few handy commands that will help you along the way: +在您进入自定义之前,这里有一些方便的命令可以帮助您: - * Create a shop-override: **./ant create-component-shop-override** - * Create a new component: **./ant create-component** - * Create a new theme component: **./ant create-theme** - * Create admin user: **./ant create-admin-user-login** - * Various other utility functions: **./ant -p** - * Utility to install & update add-ons: **./git-addons help** + * 创建一个 shop-override:**./ant create-component-shop-override** + * 创建一个新组件:**./ant create-component** + * 创建一个新主题组件:**./ant create-theme** + * 创建管理员用户:**./ant create-admin-user-login** + * 各种其他实用功能:**./ant -p** + * 用于安装和更新插件的实用程序:**./git-addons help** -Also, make a mental note of the following locations: +另外,请记下以下位置: - * Scripts to run Scipio as a service: **/tools/scripts/** - * Log output directory: **/runtime/logs** - * Admin application: **** - * E-commerce application: **** + * 将 Scipio 作为服务运行的脚本:**/tools/scripts/** + * 日志输出目录:**/runtime/logs** + * 管理应用程序:**** + * 电子商务应用程序:**** -Last, Scipio ERP structures all code in the following five major directories: +最后,Scipio ERP 在以下五个主要目录中构建了所有代码: - * Framework: framework-related sources, the application server, generic screens, and configurations - * Applications: core applications - * Addons: third-party extensions - * Themes: modifies the look and feel - * Hot-deploy: your own components + * Framework: 框架相关的源,应用程序服务器,通用界面和配置 + * Applications: 核心应用程序 + * Addons: 第三方扩展 + * Themes: 修改界面外观 + * Hot-deploy: 您自己的组件 -Aside from a few configurations, you will be working within the hot-deploy and themes directories. +除了一些配置,您将在 hot-deploy 和 themes 目录中工作。 -### Webstore customizations +### 在线商店定制 -To really make the system your own, start thinking about [components][7]. Components are a modular approach to override, extend, and add to the system. Think of components as self-contained web modules that capture information on databases ([entity][8]), functions ([services][9]), screens ([views][10]), [events and actions][11], and web applications. Thanks to components, you can add your own code while remaining compatible with the original sources. +要真正使系统成为您自己的系统,请开始考虑使用[组件][7]。组件是一种模块化方法,可以覆盖,扩展和添加到系统中。您可以将组件视为可以捕获有关数据库([实体][8]),功能([服务][9]),界面([视图][10]),[事件和操作][11]和 Web 应用程序信息的独立 Web 模块。由于组件功能,您可以添加自己的代码,同时保持与原始源兼容。 -Run **./ant create-component-shop-override** and follow the steps to create your webstore component. A new directory will be created inside of the hot-deploy directory, which extends and overrides the original e-commerce application. +运行命令 **./ant create-component-shop-override** 并按照步骤创建您的在线商店组件。该操作将会在 hot-deploy 目录内创建一个新目录,该目录将扩展并覆盖原始的电子商务应用程序。 -![component directory structure][13] +![组件目录结构][13] -A typical component directory structure. 
+一个典型的组件目录结构。 -Your component will have the following directory structure: +您的组件将具有以下目录结构: - * config: configurations - * data: seed data - * entitydef: database table definitions - * script: Groovy script location - * servicedef: service definitions - * src: Java classes - * webapp: your web application - * widget: screen definitions + * config: 配置 + * data: 种子数据 + * entitydef: 数据库表定义 + * script: Groovy 脚本的位置 + * servicedef: 服务定义 + * src: Java 类 + * webapp: 您的 web 应用程序 + * widget: 界面定义 -Additionally, the **ivy.xml** file allows you to add Maven libraries to the build process and the **ofbiz-component.xml** file defines the overall component and web application structure. Apart from the obvious, you will also find a **controller.xml** file inside the web apps' **WEB-INF** directory. This allows you to define request entries and connect them to events and screens. For screens alone, you can also use the built-in CMS functionality, but stick to the core mechanics first. Familiarize yourself with **/applications/shop/** before introducing changes. +此外,**ivy.xml** 文件允许您将 Maven 库添加到构建过程中,**ofbiz-component.xml** 文件定义整个组件和 Web 应用程序结构。除了一些在当前目录所能够看到的,您还可以在 Web 应用程序的 **WEB-INF** 目录中找到 **controller.xml** 文件。这允许您定义请求实体并将它们连接到事件和界面。仅对于界面来说,您还可以使用内置的 CMS 功能,但优先要坚持使用核心机制。在引入更改之前,请熟悉**/applications/shop/**。 -#### Adding custom screens +#### 添加自定义界面 -Remember the [templating toolkit][3]? You will find it used on every screen. Think of it as a set of easy-to-learn macros that structure all content. Here's an example: +还记得[模板工具包][3]吗?您会发现它在每个界面都有使用到。您可以将其视为一组易于学习的宏,它用来构建所有内容。下面是一个例子: ``` <@section title="Title"> @@ -116,11 +116,11 @@ Remember the [templating toolkit][3]? You will find it used on every screen. Thi ``` -Not too difficult, right? Meanwhile, themes contain the HTML definitions and styles. This hands the power over to your front-end developers, who can define the output of each macro and otherwise stick to their own build tools for development. +不是很难,对吧?同时,主题包含 HTML 定义和样式。这将权力交给您的前端开发人员,他们可以定义每个宏的输出,并坚持使用自己的构建工具进行开发。 -Let's give it a quick try. First, define a request on your own webstore. You will modify the code for this. A built-in CMS is also available at **** , which allows you to create new templates and screens in a much more efficient way. It is fully compatible with the templating toolkit and comes with example templates that can be adopted to your preferences. But since we are trying to understand the system here, let's go with the more complicated way first. +我们快点试试吧。首先,在您自己的在线商店上定义一个请求。您将修改此代码。一个内置的 CMS 系统也可以通过 **** 进行访问,它允许您以更有效的方式创建新模板和界面。它与模板工具包完全兼容,并附带可根据您的喜好采用的示例模板。但是既然我们试图在这里理解系统,那么首先让我们采用更复杂的方法。 -Open the **[controller.xml][14]** file inside of your shop's webapp directory. The controller keeps track of request events and performs actions accordingly. The following will create a new request under **/shop/test** : +打开您商店 webapp 目录中的 **[controller.xml][14]** 文件。Controller 跟踪请求事件并相应地执行操作。下面的操作将会在 **/shop/test** 下创建一个新的请求: ``` @@ -130,14 +130,14 @@ Open the **[controller.xml][14]** file inside of your shop's webapp directory. T ``` -You can define multiple responses and, if you want, you could use an event or a service call inside the request to determine which response you may want to use. I opted for a response of type "view." A view is a rendered response; other types are request-redirects, forwards, and alike. 
The system comes with various renderers and allows you to determine the output later; to do so, add the following: +您可以定义多个响应,如果需要,可以在请求中使用事件或服务调用来确定您可能要使用的响应。我选择了“视图”类型的响应。视图是渲染后的响应;其他类型是请求重定向、转发等。系统附带各种渲染器,可让您稍后确定输出;为此,请添加以下内容: ``` ``` -Replace **my-component** with your own component name. Then you can define your very first screen by adding the following inside the tags within the **widget/CommonScreens.xml** file: +用您自己的组件名称替换 **my-component**。然后,您可以通过在 **widget/CommonScreens.xml** 文件的标签内添加以下内容来定义您的第一个界面: ``` ...     ``` -Screens are actually quite modular and consist of multiple elements ([widgets, actions, and decorators][15]). For the sake of simplicity, leave this as it is for now, and complete the new webpage by adding your very first templating toolkit file. For that, create a new **webapp/mycomponent/test/test.ftl** file and add the following: +商店界面实际上非常模块化,由多个元素组成([小部件、动作和装饰器][15])。为简单起见,请暂时保留原样,并通过添加第一个模板工具包文件来完成新网页。为此,创建一个新的 **webapp/mycomponent/test/test.ftl** 文件并添加以下内容: ``` <@alert type="info">Success! ``` ![Custom screen][17] +![自定义的界面][17] A custom screen. +一个自定义的界面。 -Open **** and marvel at your own accomplishments. +打开 **** 并惊叹于您自己的成就。 -#### Custom themes +#### 自定义主题 -Modify the look and feel of the shop by creating your very own theme. All themes can be found as components inside of the themes folder. Run **./ant create-theme** to add your own. +通过创建自己的主题来修改商店的界面外观。所有主题都可以作为组件在 themes 文件夹中找到。运行命令 **./ant create-theme** 来创建您自己的主题。 ![theme component layout][19] +![主题组件布局][19] A typical theme component layout. +一个典型的主题组件布局。 -Here's a list of the most important directories and files: +以下是最重要的目录和文件列表: - * Theme configuration: **data/*ThemeData.xml** - * Theme-specific wrapping HTML: **includes/*.ftl** - * Templating Toolkit HTML definition: **includes/themeTemplate.ftl** - * CSS class definition: **includes/themeStyles.ftl** - * CSS framework: **webapp/theme-title/*** + * 主题配置:**data/\*ThemeData.xml** + * 特定主题封装的 HTML:**includes/\*.ftl** + * 模板工具包 HTML 定义:**includes/themeTemplate.ftl** + * CSS 类定义:**includes/themeStyles.ftl** + * CSS 框架: **webapp/theme-title/** -Take a quick look at the Metro theme in the toolkit; it uses the Foundation CSS framework and makes use of all the things above. Afterwards, set up your own theme inside your newly constructed **webapp/theme-title** directory and start developing. The Foundation-shop theme is a very simple shop-specific theme implementation that you can use as a basis for your own work. +快速浏览工具包中的 Metro 主题;它使用 Foundation CSS 框架并且充分利用了这个框架。然后,在新构建的 **webapp/theme-title** 目录中设置自己的主题并开始开发。Foundation-shop 主题是一个非常简单的特定于商店的主题实现,您可以将其用作您自己工作的基础。 -Voila! You have set up your own online store and are ready to customize! +瞧!您已经建立了自己的在线商店,准备个性化定制吧! ![Finished Scipio ERP shop][21] +![搭建完成的 Scipio ERP 在线商店][21] A finished shop based on Scipio ERP. +一个搭建完成的基于 Scipio ERP 的在线商店。 -### What's next? +### 接下来是什么? -Scipio ERP is a powerful framework that simplifies the development of complex e-commerce applications. For a more complete understanding, check out the project [documentation][7], try the [online demo][22], or [join the community][23]. +Scipio ERP 是一个功能强大的框架,可简化复杂的电子商务应用程序的开发。为了更完整地理解,请查看项目[文档][7],尝试[在线演示][22],或者[加入社区][23]。
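+最后,附上一个小小的示意片段作为练习起点:它把前文出现过的几个模板工具包宏(@section、@heading、@row、@cell、@alert)组合在一起,演示它们如何搭配使用。这只是一个草图,各个宏的完整参数请以官方文档为准:
+
+```
+<#-- 示意:一个两栏布局的界面片段,仅使用前文介绍过的宏 -->
+<@section title="商品概览">
+    <@heading>最新商品</@heading>
+    <@row>
+        <@cell columns=6>
+            <@alert type="info">左栏:这里可以渲染商品列表。</@alert>
+        </@cell>
+        <@cell columns=6>
+            <@alert type="info">右栏:这里可以渲染促销信息。</@alert>
+        </@cell>
+    </@row>
+</@section>
+```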
-------------------------------------------------------------------------------- @@ -203,7 +203,7 @@ via: https://opensource.com/article/19/1/scipio-erp 作者:[Paul Piper][a] 选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) +译者:[laingke](https://github.com/laingke) 校对:[校对者ID](https://github.com/校对者ID) 本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 From f66616591c064e5196611f5947f0d739a0695cb7 Mon Sep 17 00:00:00 2001 From: laingke Date: Thu, 19 Sep 2019 19:02:38 +0800 Subject: [PATCH 197/202] =?UTF-8?q?20190129-Create-an-online-store-with-th?= =?UTF-8?q?is-Java-based-framework=20=E7=A7=BB=E5=8A=A8=E5=88=B0=20transla?= =?UTF-8?q?ted=20=E7=9B=AE=E5=BD=95?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- ...90129 Create an online store with this Java-based framework.md | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename {sources => translated}/tech/20190129 Create an online store with this Java-based framework.md (100%) diff --git a/sources/tech/20190129 Create an online store with this Java-based framework.md b/translated/tech/20190129 Create an online store with this Java-based framework.md similarity index 100% rename from sources/tech/20190129 Create an online store with this Java-based framework.md rename to translated/tech/20190129 Create an online store with this Java-based framework.md From f90610c5282dbc80e7ac19e28dcae85ba24c6ae9 Mon Sep 17 00:00:00 2001 From: Xingyu Wang Date: Thu, 19 Sep 2019 21:56:47 +0800 Subject: [PATCH 198/202] =?UTF-8?q?=E6=B8=85=E9=99=A4=E8=BF=87=E6=9C=9F?= =?UTF-8?q?=E6=96=87=E7=AB=A0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- ...ll EMC updates PowerMax storage systems.md | 57 ------ sources/tech/20181228 2018- Year in review.md | 173 ------------------ .../20190216 --Con 2019- submit a talk.md | 144 --------------- ...aking a year to explain computer things.md | 63 ------- 4 files changed, 437 deletions(-) delete mode 100644 sources/news/20190913 Dell EMC updates PowerMax storage systems.md delete mode 100644 sources/tech/20181228 2018- Year in review.md delete mode 100644 sources/tech/20190216 --Con 2019- submit a talk.md delete mode 100644 sources/tech/20190913 Taking a year to explain computer things.md diff --git a/sources/news/20190913 Dell EMC updates PowerMax storage systems.md b/sources/news/20190913 Dell EMC updates PowerMax storage systems.md deleted file mode 100644 index b2a2559a30..0000000000 --- a/sources/news/20190913 Dell EMC updates PowerMax storage systems.md +++ /dev/null @@ -1,57 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Dell EMC updates PowerMax storage systems) -[#]: via: (https://www.networkworld.com/article/3438325/dell-emc-updates-powermax-storage-systems.html) -[#]: author: (Andy Patrizio https://www.networkworld.com/author/Andy-Patrizio/) - -Dell EMC updates PowerMax storage systems -====== -Dell EMC's new PowerMax enterprise storage systems add support for Intel Optane drives and NVMe over Fabric. -Getty Images/Dell EMC - -Dell EMC has updated its PowerMax line of enterprise storage systems to offer Intel’s Optane persistent storage and NVMe-over-Fabric, both of which will give the PowerMax a big boost in performance. 
- -Last year, Dell launched the PowerMax line with high-performance storage, specifically targeting industries that need very low latency and high resiliency, such as banking, healthcare, and cloud service providers. - -The company claims the new PowerMax is the first-to-market with dual port Intel Optane SSDs and the use of storage-class memory (SCM) as persistent storage. The Optane is a new type of non-volatile storage that sits between SSDs and memory. It has the persistence of a SSD but almost the speed of a DRAM. Optane storage also has a ridiculous price tag. For example, a 512 GB stick costs nearly $8,000. - -**[ Read also: [Mass data fragmentation requires a storage rethink][1] | Get regularly scheduled insights: [Sign up for Network World newsletters][2] ]** - -The other big change is support for NVMe-oF, which allows SSDs to talk directly to each other via Fibre Channel rather than making multiple hops through the network. PowerMax already supports NVMe SSDs, but this update adds end-to-end NVMe support. - -The coupling of NVMe and Intel Optane on dual port gives the new PowerMax systems up to 15 million IOPS, a 50% improvement over the previous generation released just one year ago, with up to 50% better response times and twice the bandwidth. Response time is under 100 microseconds. - -In addition, the new Dell EMC PowerMax systems are validated for Dell Technologies Cloud, an architecture designed to bridge multi-cloud deployments. Dell offers connections between private clouds and Amazon Web Services (AWS), Microsoft Azure, and Google Cloud. - -PowerMax comes with a built-in machine learning engine for predictive analytics and pattern recognition to automatically place data on the correct media type, SCM or Flash, based on its I/O profile. PowerMax analyzes and forecasts 40 million data sets in real time, driving 6 billion decisions per day. - -It also has several important software integrations. The first is VMware’s vRealize Orchestrator (vRO) plug-in, which allows customers to develop end-to-end automation routines, including provisioning, data protection, and host operations. - -Second, it has pre-built Red Hat Ansible modules to allow customers to create Playbooks for storage provisioning, snapshots, and data management workflows for consistent and automated operations. These modules are available on GitHub now. - -Finally, there is a container storage interface (CSI) plugin that provisions and manages storage for workloads running on Kubernetes. The CSI plugin, available now on GitHub, extends PowerMax's performance and data services to a growing number of applications built on a micro-services-based architecture. - -The new PowerMax systems and PowerBricks will be available Monday, Sept.16. - -Join the Network World communities on [Facebook][3] and [LinkedIn][4] to comment on topics that are top of mind. 
- --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3438325/dell-emc-updates-powermax-storage-systems.html - -作者:[Andy Patrizio][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Andy-Patrizio/ -[b]: https://github.com/lujun9972 -[1]: https://www.networkworld.com/article/3323580/mass-data-fragmentation-requires-a-storage-rethink.html -[2]: https://www.networkworld.com/newsletters/signup.html -[3]: https://www.facebook.com/NetworkWorld/ -[4]: https://www.linkedin.com/company/network-world diff --git a/sources/tech/20181228 2018- Year in review.md b/sources/tech/20181228 2018- Year in review.md deleted file mode 100644 index 91099492ac..0000000000 --- a/sources/tech/20181228 2018- Year in review.md +++ /dev/null @@ -1,173 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (2018: Year in review) -[#]: via: (https://jvns.ca/blog/2018/12/23/2018--year-in-review/) -[#]: author: (Julia Evans https://jvns.ca/) - -2018: Year in review -====== - -I wrote these in [2015][1] and [2016][2] and [2017][3] and it’s always interesting to look back at them, so here’s a summary of what went on in my side projects in 2018. - -### ruby profiler! - -At the beginning of this year I wrote [rbspy][4] (docs: ). It inspired a Python version called [py-spy][5] and a PHP profiler called [phpspy][6], both of which are excellent. I think py-spy in particular is [probably _better_][7] than rbspy which makes me really happy. - -Writing a program that does something innovative (`top` for your Ruby program’s functions!) and inspiring other people to make amazing new tools is something I’m really proud of. - -### started a side business! - -A very surprising thing that happened in 2018 is that I started a business! This is the website: , and I sell programming zines. - -It’s been astonishingly successful (it definitely made me enough money that I could have lived on just the revenue from the business this year), and I’m really grateful to everyone’s who’s supported that work. I hope the zines have helped you. I always thought that it was impossible to make anywhere near as much money teaching people useful things as I can as a software developer, and now I think that’s not true. I don’t think that I’d _want_ to make that switch (I like working as a programmer!), but now I actually think that if I was serious about it and was interested in working on my business skills, I could probably make it work. - -I don’t really know what’s next, but I plan to write at least one zine next year. I learned a few things about business this year, mainly from: - - * [stephanie hurlburt’s twitter][8] - * [amy hoy][9] - * the book [growing a business by paul hawken][10] - * seeing what joel hooks is doing with [egghead.io][11] - * a little from [indie hackers][12] - - - -I used to think that sales / marketing had to be gross, but reading some of these business books made me think that it’s actually possible to run a business by being honest & just building good things. - -### work! - -this is mostly about side projects, but a few things about work: - - * I still have the same manager ([jay][13]). He’s been really great to work with. The [help! 
i have a manager!][14] zine is secretly largely things I learned from working with him. - * my team made some big networking infrastructure changes and it went pretty well. I learned a lot about proxies/TLS and a little bit about C++. - * I mentored another intern, and the intern I mentored last year joined us full time! - - - -When I go back to work I’m going to switch to working on something COMPLETELY DIFFERENT (writing code that sends messages to banks!) for 3 months. It’s a lot closer to the company’s core business, and I think it’ll be neat to learn more about how financial infastracture works. - -I struggled a bit with understanding/defining my job this year. I wrote [What’s a senior engineer’s job?][15] about that, but I have not yet reached enlightenment. - -### talks! - -I gave 4 talks in 2018: - - * [So you want to be a wizard][16] at StarCon - * [Building a Ruby profiler][17] at the Recurse Center’s localhost series - * [Build Impossible Programs][18] in May at Deconstruct. - * [High Reliability Infrastructure Migrations][19] at Kubecon. I’m pretty happy about this talk because I’ve wanted to give a good talk about what I do at work for a long time and I think I finally succeeded. Previously when I gave talks about my work I think I fell into the trap of just describing what we do (“we do X Y Z” … “okay, so what?“). With this one, I think I was able to actually say things that were useful to other people. - - - -In past years I’ve mostly given talks which can mostly be summarized “here are some cool tools” and “here is how to learn hard things”. This year I changed focus to giving talks about the actual work I do – there were two talks about building a Ruby profiler, and one about what I do at work (I spend a lot of time on infrastructure migrations!) - -I’m not sure whether if I’ll give any talks in 2019. I travelled more than I wanted to in 2018, and to stay sane I ended up having to cancel on a talk I was planning to give with relatively short notice which wasn’t good. - -### podcasts! - -I also experimented a bit with a new format: the podcast! These were basically all really fun! They don’t take that long (about 2 hours total?). - - * [Software Engineering Daily][20], on rbspy and how to use a profiler - * [FLOSS weekly][21], again about rbspy. They told me I’m the guest that asked _them_ the most questions, which I took as a compliment :) - * [CodeNewbie][22] on computer networking & how the Internet works - * [Hanselminutes with Scott Hanselman][23] on writing zines / teaching / learning - * [egghead.io][24], on making zines & running a business - - - -what I learned about doing podcasts: - - * It’s really important to give the hosts a list of good questions to ask, and to be prepared to give good answers to those questions! I’m not a super polished podcast guest. - * you need a good microphone. At least one of these people told me I actually couldn’t be on their podcast unless I had a good enough microphone, so I bought a [medium fancy microphone][25]. It wasn’t too expensive and it’s nice to have a better quality microphone! Maybe I will use it more to record audio/video at some point! - - - -### !!Con - -I co-organized [!!Con][26] for the 4th time – I ran sponsorships. It’s always such a delight and the speakers are so great. - -!!Con is expanding [to the west coast in 2019][27] – I’m not directly involved with that but it’s going to be amazing. - -### blog posts! - -I apparently wrote 54 blog posts in 2018. 
A couple of my favourites are [What’s a senior engineer’s job?][15] , [How to teach yourself hard things][28], and [batch editing files with ed][29]. - -There were basically 4 themes in blogging for 2018: - - * progress on the rbspy project while I was working on it ([this category][30]) - * computer networking / infrastructure engineering (basically all I did at work this year was networking, though I didn’t write about it as much as I might have) - * musings about zines / business / developer education, for instance [why sell zines?][31] and [who pays to educate developers?][32] - * a few of the usual “how do you learn things” / “how do you succeed at your job” posts as I figure things about about that, for instance [working remotely, 4 years in][33] - - - -### a tiny inclusion project: a guide to performance reviews - -[Last year][3] in addition to my actual job, I did a couple of projects at work towards helping make sure the performance/promotion process works well for folks – i collaborated with the amazing [karla][34] on the idea of a “brag document”, and redid our engineering levels. - -This year, in the same vein, I wrote a document called the “Unofficial guide to the performance reviews”. A lot of folks said it helped them but probably it’s too early to celebrate. I think explaining to folks how the performance review process actually works and how to approach it is really valuable and I might try to publish a more general version here at some point. - -I like that I work at a place where it’s possible/encouraged to do projects like this. I spend a relatively small amount of time on them (maybe I spent 15 hours on this one?) but it feels good to be able to make tiny steps towards building a better workplace from time to time. It’s really hard to judge the results though! - -### conclusions? 
- -some things that worked in 2018: - - * setting [boundaries][15] around what my job is - * doing open source work while being paid for it - * starting a side business - * doing small inclusion projects at work - * writing zines is very time consuming but I feel happy about the time I spent on that - * blogging is always great - - - --------------------------------------------------------------------------------- - -via: https://jvns.ca/blog/2018/12/23/2018--year-in-review/ - -作者:[Julia Evans][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://jvns.ca/ -[b]: https://github.com/lujun9972 -[1]: https://jvns.ca/blog/2015/12/26/2015-year-in-review/ -[2]: https://jvns.ca/blog/2016/12/21/2016--year-in-review/ -[3]: https://jvns.ca/blog/2017/12/31/2017--year-in-review/ -[4]: https://github.com/rbspy/rbspy -[5]: https://github.com/benfred/py-spy -[6]: https://github.com/adsr/phpspy/ -[7]: https://jvns.ca/blog/2018/09/08/an-awesome-new-python-profiler--py-spy-/ -[8]: https://twitter.com/sehurlburt -[9]: https://stackingthebricks.com/ -[10]: https://www.amazon.com/Growing-Business-Paul-Hawken/dp/0671671642 -[11]: https://egghead.io/ -[12]: https://www.indiehackers.com/ -[13]: https://twitter.com/jshirley -[14]: https://wizardzines.com/zines/manager/ -[15]: https://jvns.ca/blog/senior-engineer/ -[16]: https://www.youtube.com/watch?v=FBMC9bm-KuU -[17]: https://jvns.ca/blog/2018/04/16/rbspy-talk/ -[18]: https://www.deconstructconf.com/2018/julia-evans-build-impossible-programs -[19]: https://www.youtube.com/watch?v=obB2IvCv-K0 -[20]: https://softwareengineeringdaily.com/2018/06/05/profilers-with-julia-evans/ -[21]: https://twit.tv/shows/floss-weekly/episodes/487 -[22]: https://www.codenewbie.org/podcast/how-does-the-internet-work -[23]: https://hanselminutes.com/643/learning-how-to-be-a-wizard-programmer-with-julia-evans -[24]: https://player.fm/series/eggheadio-developer-chats-1728019/exploring-concepts-and-teaching-using-focused-zines-with-julia-evans -[25]: https://www.amazon.com/gp/product/B000EOPQ7E/ref=as_li_tl?ie=UTF8&camp=1789&creative=390957&creativeASIN=B000EOPQ7E&linkCode=as2&tag=diabeticbooks&linkId=ZBZBIVR4EB7V6JFL -[26]: http://bangbangcon.com -[27]: http://bangbangcon.com/west/ -[28]: https://jvns.ca/blog/2018/09/01/learning-skills-you-can-practice/ -[29]: https://jvns.ca/blog/2018/05/11/batch-editing-files-with-ed/ -[30]: https://jvns.ca/categories/ruby-profiler/ -[31]: https://jvns.ca/blog/2018/09/23/why-sell-zines/ -[32]: https://jvns.ca/blog/2018/09/01/who-pays-to-educate-developers-/ -[33]: https://jvns.ca/blog/2018/02/18/working-remotely--4-years-in/ -[34]: https://karla.io/ diff --git a/sources/tech/20190216 --Con 2019- submit a talk.md b/sources/tech/20190216 --Con 2019- submit a talk.md deleted file mode 100644 index 7a28651f6f..0000000000 --- a/sources/tech/20190216 --Con 2019- submit a talk.md +++ /dev/null @@ -1,144 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (!!Con 2019: submit a talk!) -[#]: via: (https://jvns.ca/blog/2019/02/16/--con-2019--submit-a-talk-/) -[#]: author: (Julia Evans https://jvns.ca/) - -!!Con 2019: submit a talk! -====== - -As some of you might know, for the last 5 years I’ve been one of the organizers for a conferences called [!!Con][1]. This year it’s going to be held on **May 11-12 in NYC**. 
- -The submission deadline is **Sunday, March 3** and you can [submit a talk here][2]. - -(we also expanded to the west coast this year: [!!Con West][3] is next week!! I’m not on the !!Con West team since I live on the east coast but they’re doing amazing work, I have a ticket, and I’m so excited for there to be more !!Con in the world) - -### !!Con is about the joy, excitement, and surprise of computing - -Computers are AMAZING. You can make programs that seem like magic, computer science has all kind of fun and surprising tidbits, there are all kinds of ways to make really cool art with computers, the systems that we use every day (like DNS!) are often super fascinating, and sometimes our computers do REALLY STRANGE THINGS and it’s very fun to figure out why. - -!!Con is about getting together for 2 days to share what we all love about computing. The only rule of !!Con talks is that the talk has to have an exclamation mark in the title :) - -We originally considered calling !!Con ExclamationMarkCon but that was too unwieldy so we went with !!Con :). - -### !!Con is inclusive - -The other big thing about !!Con is that we think computing should include everyone. To make !!Con a space where everyone can participate, we - - * have open captioning for all talks (so that people who can’t hear well can read the text of the talk as it’s happening). This turns out to be great for LOTS of people – if you just weren’t paying attention for a second, you can look at the live transcript to see what you missed! - * pay our speakers & pay for speaker travel - * have a code of conduct (of course) - * use the RC [social rules][4] - * make sure our washrooms work for people of all genders - * let people specify on their badges if they don’t want photos taken of them - * do a lot of active outreach to make sure our set of speakers is diverse - - - -### past !!Con talks - -I think maybe the easiest way to explain !!Con if you haven’t been is through the talk titles! Here are a few arbitrarily chosen talks from past !!Cons: - - * [Four Fake Filesystems!][5] - * [Islamic Geometry: Hankin’s Polygons in Contact Algorithm!!!][6] - * [Don’t know about you, but I’m feeling like SHA-2!: Checksumming with Taylor Swift][7] - * [MissingNo., my favourite Pokémon!][8] - * [Music! Programming! Arduino! (Or: Building Electronic Musical Interfaces to Create Awesome)][9] - * [How I Code and Use a Computer at 1,000 WPM!!][10] - * [The emoji that Killed Chrome!!][11] - * [We built a map to aggregate real-time flood data in under two days!][12] - * [PUSH THE BUTTON! 🔴 Designing a fun game where the only input is a BIG RED BUTTON! 🔴 !!!][13] - * [Serious programming with jq?! 
A practical and purely functional programming language!][14] - * [I wrote to a dead address in a deleted PDF and now I know where all the airplanes are!!][15] - * [Making Mushrooms Glow!][16] - * [HDR Photography in Microsoft Excel?!][17] - * [DHCP: IT’S MOSTLY YELLING!!][18] - * [Lossy text compression, for some reason?!][19] - * [Plants are Recursive!!: Using L-Systems to Generate Realistic Weeds][20] - - - -If you want to see more (or get an idea of what !!Con talk descriptions usually look like), here’s every past year of the conference: - - * 2018: [talk descriptions][21] and [recordings][22] - * 2017: [talk descriptions][23] and [recordings][24] - * 2016: [talk descriptions][25] and [recordings][26] - * 2015: [talk descriptions][27] and [recordings][28] - * 2014: [talk descriptions][29] and [recordings][30] - - - -### this year you can also submit a play / song / performance! - -One difference from previous !!Cons is that if you want submit a non-talk-talk to !!Con this year (like a play!), you can! I’m very excited to see what people come up with. For more of that see [Expanding the !!Con aesthetic][31]. - -### all talks are reviewed anonymously - -One big choice that we’ve made is to review all talks anonymously. This means that we’ll review your talk the same way whether you’ve never given a talk before or if you’re an internationally recognized public speaker. I love this because many of our best talks are from first time speakers or people who I’d never heard of before, and I think anonymous review makes it easier to find great people who aren’t well known. - -### writing a good outline is important - -We can’t rely on someone’s reputation to determine if they’ll give a good talk, but we do need a way to see that people have a plan for how to present their material in an engaging way. So we ask everyone to give a somewhat detailed outline explaining how they’ll spend their 10 minutes. Some people do it minute-by-minute and some people just say “I’ll explain X, then Y, then Z, then W”. - -Lindsey Kuper wrote some good advice about writing a clear !!Con outline here which has some examples of really good outlines [which you can see here][32]. - -### We’re looking for sponsors - -!!Con is pay-what-you-can (if you can’t afford a $300 conference ticket, we’re the conference for you!). Because of that, we rely on our incredible sponsors (companies who want to build an inclusive future for tech with us!) to help make up the difference so that we can pay our speakers for their amazing work, pay for speaker travel, have open captioning, and everything else that makes !!Con the amazing conference it is. - -If you love !!Con, a huge way you can help support the conference is to ask your company to sponsor us! Here’s our [sponsorship page][33] and you can email me at [[email protected]][34] if you’re interested. - -### hope to see you there ❤ - -I’ve met so many fantastic people through !!Con, and it brings me a lot of joy every year. The thing that makes !!Con great is all the amazing people who come to share what they’re excited about every year, and I hope you’ll be one of them. 
- --------------------------------------------------------------------------------- - -via: https://jvns.ca/blog/2019/02/16/--con-2019--submit-a-talk-/ - -作者:[Julia Evans][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://jvns.ca/ -[b]: https://github.com/lujun9972 -[1]: http://bangbangcon.com -[2]: http://bangbangcon.com/give-a-talk.html -[3]: http://bangbangcon.com/west/ -[4]: https://www.recurse.com/social-rules -[5]: https://youtube.com/watch?v=pfHpDDXJQVg -[6]: https://youtube.com/watch?v=ld4gpQnaziU -[7]: https://youtube.com/watch?v=1QgamEwwPro -[8]: https://youtube.com/watch?v=yX7tDROZUt8 -[9]: https://youtube.com/watch?v=67Y-wH0FJFg -[10]: https://youtube.com/watch?v=G1r55efei5c -[11]: https://youtube.com/watch?v=UE-fJjMasec -[12]: https://youtube.com/watch?v=hfatYo2J8gY -[13]: https://youtube.com/watch?v=KqEc2Ek4GzA -[14]: https://youtube.com/watch?v=PS_9pyIASvQ -[15]: https://youtube.com/watch?v=FhVob_sRqQk -[16]: https://youtube.com/watch?v=T75FvUDirNM -[17]: https://youtube.com/watch?v=bkQJdaGGVM8 -[18]: https://youtube.com/watch?v=enRY9jd0IJw -[19]: https://youtube.com/watch?v=meovx9OqWJc -[20]: https://youtube.com/watch?v=0eXg4B1feOY -[21]: http://bangbangcon.com/2018/speakers.html -[22]: http://bangbangcon.com/2018/recordings.html -[23]: http://bangbangcon.com/2017/speakers.html -[24]: http://bangbangcon.com/2017/recordings.html -[25]: http://bangbangcon.com/2016/speakers.html -[26]: http://bangbangcon.com/2016/recordings.html -[27]: http://bangbangcon.com/2015/speakers.html -[28]: http://bangbangcon.com/2015/recordings.html -[29]: http://bangbangcon.com/2014/speakers.html -[30]: http://bangbangcon.com/2014/recordings.html -[31]: https://organicdonut.com/2019/01/expanding-the-con-aesthetic/ -[32]: http://composition.al/blog/2017/06/30/how-to-write-a-timeline-for-a-bangbangcon-talk-proposal/ -[33]: http://bangbangcon.com/sponsors -[34]: https://jvns.ca/cdn-cgi/l/email-protection diff --git a/sources/tech/20190913 Taking a year to explain computer things.md b/sources/tech/20190913 Taking a year to explain computer things.md deleted file mode 100644 index 43dae546ad..0000000000 --- a/sources/tech/20190913 Taking a year to explain computer things.md +++ /dev/null @@ -1,63 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Taking a year to explain computer things) -[#]: via: (https://jvns.ca/blog/2019/09/13/a-year-explaining-computer-things/) -[#]: author: (Julia Evans https://jvns.ca/) - -Taking a year to explain computer things -====== - -I’ve been working on explaining computer things I’m learning on this blog for 6 years. I wrote one of my first posts, [what does a shell even do?][1] on Sept 30, 2013. Since then, I’ve written 11 zines, 370,000 words on this blog, and given 20 or so talks. So it seems like I like explaining things a lot. - -### tl;dr: I’m going to work on explaining computer things for a year - -Here’s the exciting news: I left my job a month ago and my plan is to spend the next year working on explaining computer things! - -As for why I’m doing this – I was talking through some reasons with my friend Mat last night and he said “well, sometimes there are things you just feel compelled to do”. I think that’s all there is to it :) - -### what does “explain computer things” mean? - -I’m planning to: - - 1. 
write some more zines (maybe I can write 10 zines in a year? we’ll see! I want to tackle both general-interest and slightly more niche topics, we’ll see what happens). - 2. work on some more interactive ways to learn things. I learn things best by trying things out and breaking them, so I want to see if I can facilitate that a little bit for other people. I started a project around this in May which has been on the backburner for a bit but which I’m excited about. Hopefully I’ll release it soon and then you can try it out and tell me what you think! - - - -I say “a year” because I think I have at least a year’s worth of ideas and I can’t predict how I’ll feel after doing this for a year. - -### how: run a business - -I started a corporation almost exactly a year ago, and I’m planning to keep running my explaining-things efforts as a business. This business has been making more than I made in my first programming job (that is, definitely enough money to live on!), which has been really surprising and great (thank you!). - -some parameters of the business: - - * I’m not planning to hire employees or anything, it’ll just be me and some (awesome) freelancers. The biggest change I have in mind is that I’m hoping to find a freelance editor to help me with editing. - * I also don’t have any specific plans for world domination or to work 80-hour weeks. I’m just going to make zines & things that explain computer concepts and sell them on the internet, like I’ve been doing. - * No commissions or consulting work, just building ideas I have - - - -It’s been pretty interesting to learn more about running a small business and so far I like it more than I thought I would. (except for taxes, which I like exactly as much as I thought I would) - -### that’s all! - -I’m excited to keep making explanations of computer things and to have more time to do it. This blog might change a bit away from “here’s what I’m learning at work these days” and towards “here are attempts at explaining things that I mostly already know”. It’ll be different! We’ll see how it goes! 
- --------------------------------------------------------------------------------- - -via: https://jvns.ca/blog/2019/09/13/a-year-explaining-computer-things/ - -作者:[Julia Evans][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://jvns.ca/ -[b]: https://github.com/lujun9972 -[1]: https://jvns.ca/blog/2013/09/30/hacker-school-day-2-what-does-a-shell-even-do/ From db261e9f8f16c314f244d2827c73c13a763444a3 Mon Sep 17 00:00:00 2001 From: geekpi Date: Fri, 20 Sep 2019 08:57:10 +0800 Subject: [PATCH 199/202] translated --- ...How to put an HTML page on the internet.md | 69 ------------------ ...How to put an HTML page on the internet.md | 70 +++++++++++++++++++ 2 files changed, 70 insertions(+), 69 deletions(-) delete mode 100644 sources/tech/20190906 How to put an HTML page on the internet.md create mode 100644 translated/tech/20190906 How to put an HTML page on the internet.md diff --git a/sources/tech/20190906 How to put an HTML page on the internet.md b/sources/tech/20190906 How to put an HTML page on the internet.md deleted file mode 100644 index 55c63aa2d4..0000000000 --- a/sources/tech/20190906 How to put an HTML page on the internet.md +++ /dev/null @@ -1,69 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: (geekpi) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (How to put an HTML page on the internet) -[#]: via: (https://jvns.ca/blog/2019/09/06/how-to-put-an-html-page-on-the-internet/) -[#]: author: (Julia Evans https://jvns.ca/) - -How to put an HTML page on the internet -====== - -One thing I love about the internet is that it’s SO EASY to put static HTML websites on the internet. Someone asked me today how to do it, so I thought I’d write down how really quickly! - -### just an HTML page - -All of my sites are just static HTML and CSS. My web design skills are relatively minimal ( is the most complicated site I’ve developed on my own), so keeping all my internet sites relatively simple means that I have some hope of being able to make changes / fix things without spending a billion hours on it. - -So we’re going to take as minimal of an approach as possible in this blog post – just one HTML page. - -### the HTML page - -The website we’re going to put on the internet is just one file, called `index.html`. You can find it at , which is a Github repository with exactly one file in it. - -The HTML file has some CSS in it to make it look a little less boring, which is partly copied from . - -### how to put the HTML page on the internet - -Here are the steps: - - 1. sign up for a [Neocities][1] account - 2. copy the index.html into the index.html in your neocities site - 3. done - - - -The index.html page above is on the internet at [julia-example-website.neocities.com][2], if you view source you’ll see that it’s the same HTML as in the github repo. - -I think this is probably the simplest way to put an HTML page on the internet (and it’s a throwback to Geocities, which is how I made my first website in 2003) :). I also like that Neocities (like [glitch][3], which I also love) is about experimentation and learning and having fun.. - -### other options - -This is definitely not the only easy way – Github pages and Gitlab pages and Netlify will all automatically publish a site when you push to a Git repository, and they’re all very easy to use (just connect them to your github repository and you’re done). 
-
-If you want to actually use your website for a Real Thing and not just to play around, you probably want to buy a domain and link it to your website so that you can change hosting providers in the future, but that is a bit less simple.
-
-### this is a good possible jumping off point for learning HTML
-
-If you are a person who is comfortable editing files in a Git repository but wants to practice HTML/CSS, I think this is a fun way to put a website on the internet and play around! I really like the simplicity of it – there’s literally just one file, so there’s no fancy extra magic to get in the way of understanding what’s going on.
-
-There are also a bunch of ways to complicate/extend this, like this blog is actually generated with [Hugo][4], which generates a bunch of HTML files which then go on the internet, but it’s always nice to start with the basics.
-
--------------------------------------------------------------------------------
-
-via: https://jvns.ca/blog/2019/09/06/how-to-put-an-html-page-on-the-internet/
-
-作者:[Julia Evans][a]
-选题:[lujun9972][b]
-译者:[译者ID](https://github.com/译者ID)
-校对:[校对者ID](https://github.com/校对者ID)
-
-本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
-
-[a]: https://jvns.ca/
-[b]: https://github.com/lujun9972
-[1]: https://neocities.org/
-[2]: https://julia-example-website.neocities.org/
-[3]: https://glitch.com
-[4]: https://gohugo.io/

diff --git a/translated/tech/20190906 How to put an HTML page on the internet.md b/translated/tech/20190906 How to put an HTML page on the internet.md
new file mode 100644
index 0000000000..61339a2c63
--- /dev/null
+++ b/translated/tech/20190906 How to put an HTML page on the internet.md
@@ -0,0 +1,70 @@
+[#]: collector: (lujun9972)
+[#]: translator: (geekpi)
+[#]: reviewer: ( )
+[#]: publisher: ( )
+[#]: url: ( )
+[#]: subject: (How to put an HTML page on the internet)
+[#]: via: (https://jvns.ca/blog/2019/09/06/how-to-put-an-html-page-on-the-internet/)
+[#]: author: (Julia Evans https://jvns.ca/)
+
+如何在互联网上放置 HTML 页面
+======
+
+我喜欢互联网的一点是,把静态 HTML 网站放上互联网是如此简单。今天有人问我该怎么做,所以我想我会快速地写下来!
+
+### 只是一个 HTML 页面
+
+我的所有网站都只是静态 HTML 和 CSS。我的网页设计技巧相对不高(是我自己开发的最复杂的网站),因此保持我所有的网站相对简单意味着我可以做一些改变/修复,而不会花费大量时间。
+
+因此,我们将在此文章中采用尽可能简单的方式 - 只需一个 HTML 页面。
+
+### HTML 页面
+
+我们要放在互联网上的网站只是一个名为 `index.html` 的文件。你可以在 找到它,它是一个 Github 仓库,其中只包含一个文件。
+
+HTML 文件中包含一些 CSS,使其看起来不那么无聊,部分复制自 <https://example.com>。
+
+### 如何将 HTML 页面放在互联网上
+
+有以下几步:
+
+ 1. 注册 [Neocities][1] 帐户
+ 2. 将 index.html 复制到你自己的 neocities 站点的 index.html 中
+ 3. 完成
+
+
+
+上面的 index.html 页面位于 [julia-example-website.neocities.org][2],如果你查看源代码,你将看到它与 github 仓库中的 HTML 相同。
+
+我认为这可能是把 HTML 页面放上互联网的最简单的方法(这也是一次向 Geocities 的回归,它是我在 2003 年制作我的第一个网站的方式):)。我也喜欢 Neocities(像 [glitch][3] 一样,我也很喜欢它),因为它是关于实验、学习和乐趣的。
+
+### 其他选择
+
+这绝不是唯一简单的方式 - 在你推送 Git 仓库时,Github pages 和 Gitlab pages 以及 Netlify 都将会自动发布站点,并且它们都非常易于使用(只需将它们连接到你的 github 仓库即可)。我个人使用 Git 仓库的方式,因为东西不在 Git 里会让我感到紧张 - 我喜欢知道我向网站实际推送的是哪些更改。但我想,如果你只是第一次想把 HTML/CSS 制作的站点放上互联网玩玩,那么 Neocities 是一个非常好的方法。
+
+
+如果你不只是玩玩,而是要将网站用于真实用途,那么你或许会需要买一个域名,并把它链接到你的网站,以便你将来可以更改托管服务提供商,但这就没那么简单了。
+
+### 这是学习 HTML 的一个很好的起点
+
+如果你熟悉在 Git 中编辑文件,同时想练习 HTML/CSS 的话,我认为这是一种把网站放上互联网玩玩的有趣方式!我真的很喜欢它的简单性 - 实际上只有一个文件,所以没有其他花哨的东西妨碍你理解发生了什么。
+
+还有很多方法可以复杂化/扩展它,比如这个博客实际上是用 [Hugo][4] 生成的,它生成一堆 HTML 文件再放上互联网,但从基础开始总是不错的。
+
+--------------------------------------------------------------------------------
+
+via: https://jvns.ca/blog/2019/09/06/how-to-put-an-html-page-on-the-internet/
+
+作者:[Julia Evans][a]
+选题:[lujun9972][b]
+译者:[geekpi](https://github.com/geekpi)
+校对:[校对者ID](https://github.com/校对者ID)
+
+本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
+
+[a]: https://jvns.ca/
+[b]: https://github.com/lujun9972
+[1]: https://neocities.org/
+[2]: https://julia-example-website.neocities.org/
+[3]: https://glitch.com
+[4]: https://gohugo.io/

From 9a5bbac1d78c08c00952184fed582dc7f501972b Mon Sep 17 00:00:00 2001
From: Xingyu Wang
Date: Fri, 20 Sep 2019 09:33:44 +0800
Subject: [PATCH 200/202] PRF

---
 ... a Mail About New User Account Creation.md | 58 ++++++++-----------
 1 file changed, 24 insertions(+), 34 deletions(-)

diff --git a/translated/tech/20190912 Bash Script to Send a Mail About New User Account Creation.md b/translated/tech/20190912 Bash Script to Send a Mail About New User Account Creation.md
index ff0832ada1..2330cad84f 100644
--- a/translated/tech/20190912 Bash Script to Send a Mail About New User Account Creation.md
+++ b/translated/tech/20190912 Bash Script to Send a Mail About New User Account Creation.md
@@ -1,6 +1,6 @@
 [#]: collector: (lujun9972)
 [#]: translator: (geekpi)
-[#]: reviewer: ( )
+[#]: reviewer: (wxy)
 [#]: publisher: ( )
 [#]: url: ( )
 [#]: subject: (Bash Script to Send a Mail About New User Account Creation)
 [#]: via: (https://www.2daygeek.com/linux-shell-script-to-monitor-user-creation-send-email/)
 [#]: author: (Magesh Maruthamuthu https://www.2daygeek.com/author/magesh/)
@@ -10,37 +10,27 @@
 用 Bash 脚本发送新用户帐户创建的邮件
 ======

-出于某些原因,你可能需要跟踪 Linux 上的新用户创建信息。
+![](https://img.linux.net.cn/data/attachment/album/201909/20/093308a615tcuiopctvp5t.jpg)

-同时,你可能需要通过邮件发送详细信息。
-
-这或许是审计目标的一部分,或者安全团队出于跟踪目的可能希望对此进行监控。
+出于某些原因,你可能需要跟踪 Linux 上的新用户创建信息。同时,你可能需要通过邮件发送详细信息。这或许是审计目标的一部分,或者安全团队出于跟踪目的可能希望对此进行监控。

 我们可以通过其他方式进行此操作,正如我们在上一篇文章中已经描述的那样。

-  * **[在系统中创建新用户帐户时发送邮件的 Bash 脚本][1]**
+* [在系统中创建新用户帐户时发送邮件的 Bash 脚本][1]

-
-
-Linux 有许多开源监控工具可以使用。
-
-但我不认为他们有办法跟踪新用户创建过程,并在发生时提醒管理员。
+Linux 有许多开源监控工具可以使用。但我不认为他们有办法跟踪新用户创建过程,并在发生时提醒管理员。

 那么我们怎样才能做到这一点?

-我们可以编写自己的 Bash 脚本来实现这一目标。
-
-我们过去写过许多有用的 shell 脚本。如果你想了解,请进入下面的链接。
-
-  * **[如何使用 shell 脚本自动化日常活动?][2]**
-
+我们可以编写自己的 Bash 脚本来实现这一目标。我们过去写过许多有用的 shell 脚本。如果你想了解,请进入下面的链接。
+* [如何使用 shell 脚本自动化日常活动?][2]

### 这个脚本做了什么?
这将每天两次(一天的开始和结束)备份 `/etc/passwd` 文件,这将使你能够获取指定日期的新用户创建详细信息。

我们需要添加以下两个 cron 任务来复制 `/etc/passwd` 文件。

```
# crontab -e

1 0 * * * cp /etc/passwd /opt/scripts/passwd-start-$(date +"%Y-%m-%d")
59 23 * * * cp /etc/passwd /opt/scripts/passwd-end-$(date +"%Y-%m-%d")
```

它使用 `diff` 命令来检测文件之间的差异,如果发现与昨日有任何差异,脚本将向指定的邮箱发送新用户详细信息。

我们不用经常运行此脚本,因为用户创建不经常发生。但是,我们计划每天运行一次此脚本。

```
#!/bin/bash
mv /opt/scripts/passwd-start-$(date --date='yesterday' '+%Y-%m-%d') /opt/scripts/passwd-start
mv /opt/scripts/passwd-end-$(date --date='yesterday' '+%Y-%m-%d') /opt/scripts/passwd-end
ucount=$(diff /opt/scripts/passwd-start /opt/scripts/passwd-end | grep ">" | cut -d":" -f6 | cut -d"/" -f3 | wc -l)
if [ $ucount -gt 0 ]
then
-SUBJECT="ATTENTION: New User Account is created on server : `date --date='yesterday' '+%b %e'`"
-MESSAGE="/tmp/new-user-logs.txt"
-TO="2daygeek@gmail.com"
-echo "Hostname: `hostname`" >> $MESSAGE
-echo -e "\n" >> $MESSAGE
-echo "The New User Details are below." >> $MESSAGE
-echo "+------------------------------+" >> $MESSAGE
-diff /opt/scripts/passwd-start /opt/scripts/passwd-end | grep ">" | cut -d":" -f6 | cut -d"/" -f3 >> $MESSAGE
-echo "+------------------------------+" >> $MESSAGE
-mail -s "$SUBJECT" "$TO" < $MESSAGE
-rm $MESSAGE
-fi
+ SUBJECT="ATTENTION: New User Account is created on server : `date --date='yesterday' '+%b %e'`"
+ MESSAGE="/tmp/new-user-logs.txt"
+ TO="2daygeek@gmail.com"
+ echo "Hostname: `hostname`" >> $MESSAGE
+ echo -e "\n" >> $MESSAGE
+ echo "The New User Details are below." >> $MESSAGE
+ echo "+------------------------------+" >> $MESSAGE
+ diff /opt/scripts/passwd-start /opt/scripts/passwd-end | grep ">" | cut -d":" -f6 | cut -d"/" -f3 >> $MESSAGE
+ echo "+------------------------------+" >> $MESSAGE
+ mail -s "$SUBJECT" "$TO" < $MESSAGE
+ rm $MESSAGE
+fi
```

给 `new-user-detail.sh` 文件添加可执行权限。

```
$ chmod +x /opt/scripts/new-user-detail.sh
```

@@ -116,7 +106,7 @@ via: https://www.2daygeek.com/linux-shell-script-to-monitor-user-creation-send-e
 作者:[Magesh Maruthamuthu][a]
 选题:[lujun9972][b]
 译者:[geekpi](https://github.com/geekpi)
-校对:[校对者ID](https://github.com/校对者ID)
+校对:[wxy](https://github.com/wxy)

 本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出

From 011140e3152da1f163b5ed6eb375b03a6f8372c6 Mon Sep 17 00:00:00 2001
From: Xingyu Wang
Date: Fri, 20 Sep 2019 09:34:20 +0800
Subject: [PATCH 201/202] PUB @geekpi https://linux.cn/article-11362-1.html

---
 ...h Script to Send a Mail About New User Account Creation.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
 rename {translated/tech => published}/20190912 Bash Script to Send a Mail About New User Account Creation.md (98%)

diff --git a/translated/tech/20190912 Bash Script to Send a Mail About New User Account Creation.md b/published/20190912 Bash Script to Send a Mail About New User Account Creation.md
similarity index 98%
rename from translated/tech/20190912 Bash Script to Send a Mail About New User Account Creation.md
rename to published/20190912 Bash Script to Send a Mail About New User Account Creation.md
index 2330cad84f..849d7c5597 100644
--- a/translated/tech/20190912 Bash Script to Send a Mail About New User Account Creation.md
+++ b/published/20190912 Bash Script to Send a Mail About New User Account Creation.md
@@ -1,8 +1,8 @@
 [#]: collector: (lujun9972)
 [#]: translator: (geekpi)
 [#]: reviewer: (wxy)
-[#]: publisher: ( )
-[#]: url: ( )
+[#]: publisher: (wxy)
+[#]: url: (https://linux.cn/article-11362-1.html)
 [#]: subject: (Bash Script to Send a Mail About New User Account Creation)
 [#]: via: (https://www.2daygeek.com/linux-shell-script-to-monitor-user-creation-send-email/)
 [#]: author: (Magesh Maruthamuthu https://www.2daygeek.com/author/magesh/)
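
The trickiest part of the monitoring script in the patches above is its detection pipeline: `diff` prefixes lines that exist only in the newer snapshot with `>`, field 6 of a passwd entry is the home directory (for example `/home/alice`), and the final `cut` strips `/home/` to leave just the username. Here is a standalone sketch of only that step; it assumes both snapshot files already exist and that new accounts get home directories under `/home`:

```
#!/bin/bash
# Standalone sketch of the detection step used by new-user-detail.sh.
# Assumes both snapshots exist and new accounts live under /home.
start=/opt/scripts/passwd-start
end=/opt/scripts/passwd-end

# diff marks lines only present in the newer snapshot with ">",
# i.e. accounts created between the two copies.
new_users=$(diff "$start" "$end" | grep "^>" | cut -d":" -f6 | cut -d"/" -f3)

if [ -n "$new_users" ]
then
    echo "New accounts since the last snapshot:"
    echo "$new_users"
fi
```

Note that an account with a home directory outside `/home` (such as `/var/lib/foo`) would confuse the last `cut`, which is worth keeping in mind before relying on this for an audit trail.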
From dee31d01f8d4a97fdde78acdf72990388f9adbc5 Mon Sep 17 00:00:00 2001
From: geekpi
Date: Fri, 20 Sep 2019 09:56:12 +0800
Subject: [PATCH 202/202] translating

---
 ...8 How to remove carriage returns from text files on Linux.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sources/tech/20190918 How to remove carriage returns from text files on Linux.md b/sources/tech/20190918 How to remove carriage returns from text files on Linux.md
index c51de1b918..45b8a8b89d 100644
--- a/sources/tech/20190918 How to remove carriage returns from text files on Linux.md
+++ b/sources/tech/20190918 How to remove carriage returns from text files on Linux.md
@@ -1,5 +1,5 @@
 [#]: collector: (lujun9972)
-[#]: translator: ( )
+[#]: translator: (geekpi)
 [#]: reviewer: ( )
 [#]: publisher: ( )
 [#]: url: ( )