Merge pull request #3 from LCTT/master

Update the Repositories
This commit is contained in:
ZTinoZ 2014-12-25 11:35:17 +08:00
commit 7720875b8d
103 changed files with 5394 additions and 3635 deletions

View File

@ -73,21 +73,21 @@ Nagios安装过程中可以设置邮件服务器安装后也可以进行自
root@mrtg:/etc/nagios3/conf.d/# vim linux-server.cfg
-
define host{
name linux-server ; 名称,需修改
name linux-server ; 名称,需修改
notifications_enabled 1
event_handler_enabled 1
flap_detection_enabled 1
failure_prediction_enabled 1
process_perf_data 1
process_perf_data 1
retain_status_information 1
retain_nonstatus_information 1
check_command example-host-check ; 检查所用脚本,需修改
check_interval 3 ; 连续检查的间隔,需修改
max_check_attempts 3 ; 产生邮件告警前的自检次数,需修改
notification_interval 0
check_command example-host-check ; 检查所用脚本,需修改
check_interval 3 ; 连续检查的间隔,需修改
max_check_attempts 3 ; 产生邮件告警前的自检次数,需修改
notification_interval 0
notification_period 24x7
notification_options d,u,r
contact_groups admins ; 邮件将要发送至的组,需修改
contact_groups admins ; 邮件将要发送至的组,需修改
register 0
}
@ -100,22 +100,22 @@ Nagios安装过程中可以设置邮件服务器安装后也可以进行自
root@mrtg:/etc/nagios3/conf.d/# vim cisco-device.cfg
-
define host{
name cisco-device ;名称,需修改
name cisco-device ;名称,需修改
notifications_enabled 1
event_handler_enabled 1
flap_detection_enabled 1
failure_prediction_enabled 1
process_perf_data 1
process_perf_data 1
retain_status_information 1
retain_nonstatus_information 1
check_command example-host-check ; 检查时使用的脚本,需修改
check_interval 3 ; 连续检查间隔,需修改
max_check_attempts 3 ; 产生邮件告警前的自检次数,需修改
notification_interval 0
notification_period 24x7
check_command example-host-check ; 检查时使用的脚本,需修改
check_interval 3 ; 连续检查间隔,需修改
max_check_attempts 3 ; 产生邮件告警前的自检次数,需修改
notification_interval 0
notification_period 24x7
notification_options d,u,r
contact_groups admins ; 邮件将要发至的组,需修改
register 0
contact_groups admins ; 邮件将要发至的组,需修改
register 0
}
### 添加主机 ###
@ -148,13 +148,13 @@ Nagios安装过程中可以设置邮件服务器安装后也可以进行自
root@mrtg:/etc/nagios3/conf.d/# vim hostgroups_nagios2.cfg
-
define hostgroup {
hostgroup_name linux-server ; 主机组名
hostgroup_name linux-server ; 主机组名
alias Linux Servers
members our-server ; 组员列表
}
define hostgroup {
hostgroup_name cisco-device ; 主机组名
hostgroup_name cisco-device ; 主机组名
alias Cisco Devices
members our-server ; comma separated list of members
}
@ -176,18 +176,18 @@ Nagios安装过程中可以设置邮件服务器安装后也可以进行自
-
define service {
hostgroup_name linux-server
service_description Linux Servers
check_command example-host-check
use generic-service
notification_interval 0 ; 初始化设置为0
service_description Linux Servers
check_command example-host-check
use generic-service
notification_interval 0 ; 初始化设置为0
}
define service {
hostgroup_name cisco-device
service_description Cisco Devices
check_command example-host-check
use generic-service
notification_interval 0 ; 初始化设置为0
check_command example-host-check
use generic-service
notification_interval 0 ; 初始化设置为0
}
### 联系人定义 ###
@ -205,12 +205,12 @@ Nagios安装过程中可以设置邮件服务器安装后也可以进行自
host_notification_options d,r
service_notification_commands notify-service-by-email
host_notification_commands notify-host-by-email
email root@localhost, sentinel@example.tst
email root@localhost, sentinel@example.tst
}
最后试运行初始化检测是否有配置错误。如果没有错误Nagios开始安全运行。
root@mrtg:~#nagios v /etc/nagios3/nagios.cfg
root@mrtg:~# nagios -v /etc/nagios3/nagios.cfg
root@mrtg:~# service nagios3 restart
## CentOS/RHEL上的Nagios配置 ##
@ -229,33 +229,33 @@ Redhat系统中Nagios的配置文件地址如下所示。
[root@mrtg objects]# vim templates.cfg
-
define host{
name linux-server
use generic-host
name linux-server
use generic-host
check_period 24x7
check_interval 3
retry_interval 1
check_interval 3
retry_interval 1
max_check_attempts 3
check_command example-host-check
notification_period 24x7
check_command example-host-check
notification_period 24x7
notification_interval 0
notification_options d,u,r
contact_groups admins
register 0
notification_options d,u,r
contact_groups admins
register 0
}
define host{
name cisco-router
use generic-host
define host{
name cisco-router
use generic-host
check_period 24x7
check_interval 3
retry_interval 1
check_interval 3
retry_interval 1
max_check_attempts 3
check_command example-host-check
notification_period 24x7
check_command example-host-check
notification_period 24x7
notification_interval 0
notification_options d,u,r
contact_groups admins
register 0
notification_options d,u,r
contact_groups admins
register 0
}
### 添加主机和主机组 ###
@ -267,7 +267,7 @@ Redhat系统中Nagios的配置文件地址如下所示。
-
#Adding Linux server
define host{
use linux-server
use linux-server
host_name our-server
alias our-server
address 172.17.1.23
@ -275,7 +275,7 @@ Redhat系统中Nagios的配置文件地址如下所示。
#Adding Cisco Router
define host{
use cisco-router
use cisco-router
host_name our-router
alias our-router
address 172.17.1.1
@ -310,10 +310,10 @@ Redhat系统中Nagios的配置文件地址如下所示。
告警要发送的邮件地址添加至Nagios中。
[root@objects objects]# vim contacts.cfg
-
-
define contact{
contact_name nagiosadmin
use generic-contact
use generic-contact
alias Nagios Admin
email nagios@localhost, sentinel@example.tst
}
@ -326,7 +326,7 @@ Redhat系统中Nagios的配置文件地址如下所示。
### 配置后访问Nagios ###
现在一切就绪可以开始Nagios之旅了。Ubuntu/Debian用户可以通过打开http://IP地址/nagios3网页访问NagiosCentOS/RHEL用户可以打开http://IP地址/nagios如http://172.17.1.23/nagios3来访问Nagios。“nagiosadmin”用户则需要认证来访问页面。
现在一切就绪可以开始Nagios之旅了。Ubuntu/Debian用户可以通过打开 http://IP地址/nagios3 网页访问NagiosCentOS/RHEL用户可以打开 http://IP地址/nagios ,如 http://172.17.1.23/nagios3 来访问Nagios。“nagiosadmin”用户则需要认证来访问页面。
[![](http://farm4.staticflickr.com/3834/11198394806_4f4a753778_z.jpg)][9]

View File

@ -1,18 +1,21 @@
为什么你的公司需要参与更多开源软件的编写
为什么公司需要参与更多开源软件的编写
================================================================================
>闭关锁国是产生不了创新的。
> 闭门造车是产生不了创新的。
![](http://a5.files.readwrite.com/image/upload/c_fill,h_900,q_70,w_1600/MTE5NDg0MDYxMTkxMzQxNTgz.jpg)
**华尔街日报 [称][1]有消息表明Zulily正在开发** 更多的内部软件,但实际上根本不是。多年前[Eric Raymond写道][2]全世界95%的软件写来用的而不是售卖。原因很多但是其中有一个比较突出正如Zulily的CIO Luke Friang所说几乎没有一个[非定制]软件解决方案能跟上我们的步伐。
[据华尔街日报称][1]有消息表明Zulily正在开发更多的内部软件,但实际上根本不是。多年前[Eric Raymond写道][2]全世界95%的软件是写来用的而不是售卖。原因很多但是其中有一个比较突出正如Zulily的CIO Luke Friang所说几乎没有一个[非定制]软件解决方案能跟上我们的步伐。
20年前是这样现在也是这样。
但是有一点是不同的,这也正是华尔街日报完全忽略的地方。而这也正是历史上开发的内部软件始终保持着专有的原因了,因为她是一个公司的 核心竞争力。然而今天,越来越多的公司意识到另一面:开源内部软件将会比保持专有获益更多。
但是有一点是不同的,这也正是华尔街日报完全忽略的地方。而这也正是历史上开发的内部软件始终保持着专有的原因了,因为它是一个公司的核心竞争力。然而今天,越来越多的公司意识到另一面:开源内部软件将会比保持专有获益更多。
这也就是为什么你的公司需要为开源项目做出更多的贡献。记住是更多。
### 不寻常的那些年
我们刚刚经历了一个很不一样的20年那时很多软件的开发都是为了内部的使用大多数人的精力都放在由SAP和微软这样的厂商建立的应用广泛的企业级解决方案。
不管怎么说,这都是一个理论。
@ -27,32 +30,37 @@
然而,开源的道路上,一些公司也发现,有些销售商不能很好地描述他们所想要的,即便是很好理解的产品类别,如像内容管理系统,他们需要 知道的是产品亮点,而不希望是一个模子刻出来的。
所以顾客没了,他们中有一部分变成了供应商。
所以顾客没了,他们中有一部分转变成了供应商。
这也是常有的事,[O'Grady指出了][4]这一点。2010年O'Grady发现了一个有趣的现象“软件提供商正面对着一个强有力的市场竞争者他们 的顾客。”
### 自己动手,丰衣足食
这也是常有的事,[O'Grady指出了][4]这一点。2010年O'Grady发现了一个有趣的现象“软件提供商正面对着一个强有力的市场竞争者他们的顾客。”
回想一下今天的高科技大多数都是开源的几乎所有的项目一开始都是某些公司的内部项目或者仅仅是有些开发者的爱好LinuxGitHadoopCassandraMongDBAndroid等等。没有一个项目起初是为了售卖而产生的。
相反,这些项目通常是由一些公司维护,他们使用开源的资源来构建软件并[完善软件][5]这主要是一些Web公司。不像以前银行医院和一些组织开发的软件只供内部使用他们开源源码。
虽然,[有些公司避免定制软件][6],因为他们不想自己维护它,开源(稍微)减轻了这些发展中公司来维护一个项目的压力。从而为项目发起人均摊项目的开发成本Yahoo开始于Hadoop但是现在最大的贡献者是Cloudera和Hortonworks。Facebook开始于Cassandra但是现在主要是靠DataStax在维护。等等。
虽然,[有些公司避免定制软件][6],因为他们不想自己维护它,开源(稍微)减轻了这些发展中公司来维护一个项目的压力。从而为项目发起人均摊项目的开发成本Yahoo建立了 Hadoop但是现在最大的贡献者是Cloudera和Hortonworks。Facebook 建立了 Cassandra但是现在主要是靠DataStax在维护。等等。
### 现在就走出来吧!
今天,真正的软件创新并不是闭门造车能造出来的,即便是可以,它也不会在那儿,开源项目颠覆了几十年的软件开发传统。
这不仅仅是一个人的一点点力量。
最好的开源项目都[发展得很快][7],但是这并不意味着别人在乎你的开源代码。[开放你的源码有显著的优缺点][8],其中一个很重要的优点是 很多伟大的开发者都希望为开源做出贡献:如果你也想找一个伟大的开发者跟你一起,你需要给他们一个开放的源代码来让他们工作。([Netflix][9]说)
最好的开源项目都[发展得很快][7],但是这并不意味着别人在乎你的开源代码。[开放你的源码有显著的优缺点][8],其中一个很重要的优点是很多伟大的开发者都希望为开源做出贡献:如果你也想找一个伟大的开发者跟你一起,你需要给他们一个开放的源代码来让他们工作。([Netflix][9]说)
但是,我们没有理由站在一边看,现在正是时候参与开源社区了,而不是一些不清楚的社区。是的,开源最大的参与者正是你们和你们的公司。 赶紧开始吧。
但是,我们没有理由站在一边看,现在正是时候参与开源社区了,而不是把“社区”妖魔化。是的,开源最大的参与者正是你们和你们的公司。 赶紧开始吧。
主要图片来自于Shutterstock. (注Shutterstock是美国的一家摄影图片网站。)
--------------------------------------------------------------------------------
via: http://readwrite.com/2014/08/16/open-source-software-business-zulily-erp-wall-street-journal
作者:[Matt Asay][a]
译者:[barney-ro](https://github.com/barney-ro)
校对:[校对者ID](https://github.com/校对者ID)
校对:[wxy](https://github.com/wxy)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出

View File

@ -1,16 +1,16 @@
为什么一些古老的编程语言不会消亡?
================================================================================
> 我们中意于我们所知道的。
> 我们钟爱我们已知的。
![](http://a4.files.readwrite.com/image/upload/c_fill,h_900,q_70,w_1600/MTIzMDQ5NjY0MTUxMjU4NjM2.jpg)
当今许多知名的编程语言已经都非常古老了。PHP 语言20年、Python 语言23年、HTML 语言21年、Ruby 语言和 JavaScript 语言已经19年C 语言更是高达42年之久。
这是没人能预料得到的,即使是计算机科学家 [Brian Kernighan][1] 也一样。他是写著第一本关于 C 语言的作者之一,到今天这本书还在印刷着。C 语言本身的发明者 [Dennis Ritchie][2] 是 Kernighan 的合著者,他于 2011 年已辞世。)
这是没人能预料得到的,即使是计算机科学家 [Brian Kernighan][1] 也一样。他是写著第一本关于 C 语言的作者之一,到今天这本书还在印刷着。C 语言本身的发明者 [Dennis Ritchie][2] 是 Kernighan 的合著者,他于 2011 年已辞世。)
“我依稀记得早期跟编辑们的谈话告诉他们我们已经卖出了5000册左右的量”最近采访 Kernighan 时他告诉我说。“我们设法做的更好。我没有想到的是在2014年的教科书里学生仍然在使用第一个版本的书。”
关于 C 语言的持久性特别显著的就是 Google 开发出了新的语言 Go,解决同一问题比用 C 语言更有效率
关于 C 语言的持久性特别显著的就是 Google 开发出了新的语言 Go,解决同一问题比用 C 语言更有效率。不过,我仍然很难想象 Go 能彻底杀死 C无论它有多么好
“大多数语言并不会消失或者至少很大一部分用户承认它们不会消失”他说。“C 语言仍然在一定的领域独领风骚,所以它很接地气。”
@ -20,13 +20,13 @@
分别来自普林斯顿大学和加州大学伯克利分校的研究者 Ari Rabkin 和 Leo Meyerovich 花费了两年时间来研究解决上面的问题。他们的研究报告,[《编程语言使用情况实例分析》][3],记录了对超过 200,000 个 Sourceforge 项目和超过 13,000 个程序员投票结果的分析。
他们主要的发现呢?大多数时候程序员选择的编程语言都是他们所熟悉的。
他们主要的发现是什么呢?大多数时候程序员选择的编程语言都是他们所熟悉的。
存在着我们使用的语言是因为我们经常使用他们,” Rabkin 告诉我。“例如:天文学家就经常使用 IDL [交互式数据语言]来开发他们的计算机程序,并不是因为它具有什么特殊的星级功能或其它特点,而是因为用它形成习惯了。他们已经用些语言构建出很优秀的程序了,并且想保持原状。”
“这些我们使用的语言还继续存在是因为我们经常使用他们,” Rabkin 告诉我。“例如:天文学家就经常使用 IDL [交互式数据语言]来开发他们的计算机程序,并不是因为它具有什么特殊的亮点功能或其它特点,而是因为用它形成习惯了。他们已经用这些语言构建出很优秀的程序了,并且想保持原状。”
换句话说,它部分要归功于创建其的语言的的知名度仍保留较大劲头。当然这并不意味着流行的语言不会变化。Rabkin 指出我们今天在使用的 C 语言就跟 Kernighan 第一次创建时的一点都不同,那时的 C 编译器跟现代的也不是完全兼容。
换句话说,它部分要归功于这些语言所创立的知名度仍保持较高。当然这并不意味着流行的语言不会变化。Rabkin 指出我们今天在使用的 C 语言就跟 Kernighan 第一次创建时的一点都不同,那时的 C 编译器跟现代的也不是完全兼容。
“有一个古老的关于工程师的笑话。工程师被问到哪一种编程语言人们会使用30年他说我不知道但它总会被叫做 Fortran” Rabkin 说到。“长期存活的语言跟他们在70年代和80年代刚设计出来的时候不一样了。人们通常都是在上面增加功能而不会删除功能因为要保持向后兼容但有些功能会被修正。”
“有一个古老的关于工程师的笑话。工程师被问到哪一种编程语言人们会使用30年他说我不知道但它总会被叫做 Fortran” Rabkin 说到。“长期存活的语言跟他们在70年代和80年代刚设计出来的时候不一样了。人们通常都是在上面增加功能,而不会删除功能,因为要保持向后兼容,但有些功能会被修正。”
向后兼容意思就是当语言升级后,程序员不仅可以使用升级语言的新特性,也不用回去重写已经实现的老代码块。老的“遗留代码”的语法规则已经不用了,但舍弃是要花成本的。只要它们存在,我们就有理由相信相关的语言也会存在。
@ -34,17 +34,17 @@
遗留代码指的是用过时的源代码编写的程序或部分程序。想想看,一个企业或工程项目的关键程序功能部分是用没人维护的编程语言写出来的。因为它们仍起着作用,用现代的源代码重写非常困难或着代价太高,所以它们不得不保留下来,即使其它部分的代码都变动了,程序员也必须不断折腾以保证它们能正常工作。
任何编程语言,存在了超过几十年时间都具有某种形式的遗留代码问题, PHP 也不例外。PHP 是一个很有趣的例子,因为它的遗留代码跟现在的代码明显不同,支持者或评论家都承认这是一个巨大的进步。
任何编程语言,存在了超过几十年时间都具有某种形式的遗留代码问题, PHP 也不例外。PHP 是一个很有趣的例子,因为它的遗留代码跟现在的代码明显不同,支持者或评论家都承认这是一个巨大的进步。
Andi Gutmans 是 已经成为 PHP4 的标准编译器的 Zend Engine 的发明者之一。Gutmans 说他和搭档本来是想改进完善 PHP3 的,他们的工作如此成功,以至于 PHP 的原发明者 Rasmus Lerdorf 也加入他们的项目。结果就成为了 PHP4 和他的后续者 PHP5 的编译器。
Andi Gutmans 是已经成为 PHP4 的标准编译器的 Zend Engine 的发明者之一。Gutmans 说他和搭档本来是想改进完善 PHP3 的,他们的工作如此成功,以至于 PHP 的原发明者 Rasmus Lerdorf 也加入他们的项目。结果就成为了 PHP4 和他的后续者 PHP5 的编译器。
因此,当今的 PHP 与它的祖先即最开始的 PHP 是完全不同的。然而,在 Gutmans 看来,在用古老的 PHP 语言版本写的遗留代码的地方一直存在着偏见以至于上升到整个语言的高度。比如 PHP 充满着安全漏洞或没有“集群”功能来支持大规模的计算任务等概念。
因此,当今的 PHP 与它的祖先——即最开始的 PHP 是完全不同的。然而,在 Gutmans 看来,在用古老的 PHP 语言版本写的遗留代码的地方一直存在着偏见以至于上升到整个语言的高度。比如 PHP 充满着安全漏洞或没有“集群”功能来支持大规模的计算任务等概念。
“批评 PHP 的人们通常批评的是在 1998 年时候的 PHP 版本,”他说。“这些人都没有与时俱进。当今的 PHP 已经有了很成熟的生态系统了。”
如今Gutmans 说他作为一个管理者最重要的事情就是鼓励人们升级到最新版本。“PHP有个很大的社区足以支持您的遗留代码的问题”他说。“但总的来说我们的社区大部分都在 PHP5.3 及以上的。”
问题是,任何语言用户都不会全部升级到最新版本。这就是为什么 Python 用户仍在使用 2000 年发布的 Python 2而不是使用 2008 年发布的 Python 3 的原因。甚至是已经六年了喜欢 Google 的大多数用户仍没有升级。这种情况是多种原因造成的,但它使得很多开发者在承担风险。
问题是,任何语言用户都不会全部升级到最新版本。这就是为什么 Python 用户仍在使用 2000 年发布的 Python 2而不是使用 2008 年发布的 Python 3 的原因。甚至在六年后,大多数像 Google 这样的用户仍没有升级。这种情况是多种原因造成的,但它使得很多开发者在承担风险。
“任何东西都不会消亡的”Rabkin 说。“任何语言的遗留代码都会一直存在。重写的代价是非常高昂的,如果它们不出问题就不要去改动。”
@ -54,15 +54,15 @@ Andi Gutmans 是 已经成为 PHP4 的标准编译器的 Zend Engine 的发明
> 有一件事使我们被深深震撼到了。这事最重要的就是我们给人们按年龄分组然后询问他们知道多少编程语言。我们主观的认为随着年龄的增长知道的会越来越多但实际上却不是25岁年龄组和45岁年龄组知道的语言数目是一样的。几个反复询问的问题这里持续不变的。您知道一种语言的几率并不与您的年龄挂钩。
换句话说,不仅仅年长的开发者坚持传统,年轻的程序员会认并采用古老的编程语言作为他们的第一们语言。这可能是因为这些语言具有很有趣的开发库及功能特点,也可能是因为在社区里开发者都是一个组的都喜爱这种开发语言。
换句话说,不仅仅年长的开发者坚持传统,年轻的程序员会认可并采用古老的编程语言作为他们的第一门语言。这可能是因为这些语言具有很有趣的开发库及功能特点,也可能是因为在社区里开发者都是喜爱这种开发语言的一伙人。
“在全球程序员关注的语言的数量是有定数的,” Rabkin 说。“如果一们语言表现出足够独特的价值,人们将会学习和使用它。如果是和您交流代码和知识的的某个人分享一门编程语言,您将会学习它。因此,例如,只要那些开发库是 Python 库和社区特长是 Python 语言的经验,那么 Python 将会大行其道。”
“在全球程序员关注的语言的数量是有定数的,” Rabkin 说。“如果一门语言表现出足够独特的价值,人们将会学习和使用它。如果是和您交流代码和知识的某个人分享一门编程语言,您将会学习它。因此,例如,只要那些 Python 库存在、社区也对 Python 语言很有经验的话,那么 Python 仍将会大行其道。”
研究人员发现关于语言实现的功能,社区是一个巨大的因素。虽然像 Python 和 Ruby 这样的高级语言并没有太大的差别,但,例如程序员就更容易觉得一种比另一种优越。
研究人员发现关于语言实现的功能,社区是一个巨大的因素。虽然像 Python 和 Ruby 这样的高级语言并没有太大的差别,但,程序员总是容易觉得一种比另一种优越。
“Rails 不一定要用 Ruby 语言编写,但它用了,这就是社因素在起作用,” Rabkin 说。“例如,复活 Objective-C 语言这件事就是苹果的工程师团队说‘让我们使用它吧,’ 他们就没得选择了。”
“Rails 不一定要用 Ruby 语言编写,但它用了,这就是社区因素在起作用,” Rabkin 说。“例如,复活 Objective-C 语言这件事就是苹果的工程师团队说‘让我们使用它吧,’ 他们就没得选择了。”
通观社会的影响及老旧代码这些问题我们发现最古老的和最新的计算机语言都有巨大的惰性。Go 语言怎么样能超越 C 语言呢?如果有合适的人或公司说它超越它就超越。
通观社会的影响及老旧代码这些问题我们发现最古老的和最新的计算机语言都有巨大的惰性。Go 语言怎么样能超越 C 语言呢?如果有合适的人或公司说它超越它就超越。
“它归结为谁传播的更好谁就好,” Rabkin 说。
@ -74,7 +74,7 @@ via: http://readwrite.com/2014/09/02/programming-language-coding-lifetime
作者:[Lauren Orsini][a]
译者:[runningwater](https://github.com/runningwater)
校对:[校对者ID](https://github.com/校对者ID)
校对:[wxy](https://github.com/wxy)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出

View File

@ -1,24 +1,26 @@
使用条块化I/O管理多个逻辑卷管理磁盘
使用条块化I/O管理多个LVM磁盘第五部分
================================================================================
在本文中我们将了解逻辑卷是如何通过条块化I/O来写入数据到磁盘的。逻辑卷管理的酷炫特性之一就是它能通过条块化I/O跨多个磁盘写入数据。
![Manage LVM Disks Using Striping I/O](http://www.tecmint.com/wp-content/uploads/2014/09/LVM-Striping.jpeg)
使用条块化I/O管理LVM磁盘
### LVM条块化是什么 ###
**LVM条块化**是LVM功能之一该技术会跨多个磁盘写入数据而不是对单一物理卷持续写入。
![Manage LVM Disks Using Striping I/O](http://www.tecmint.com/wp-content/uploads/2014/09/LVM-Striping.jpeg)
*使用条块化I/O管理LVM磁盘*
#### 条块化特性 ####
- 它会改善磁盘性能。
- 挽救对单一磁盘的重复硬写入。
- 避免对单一磁盘的不断的大量写入。
- 使用对多个磁盘的条块化写入,可以减少磁盘填满的几率。
在逻辑卷管理中,如果我们需要创建一个逻辑卷,扩展的卷会完全映射到卷组和物理卷。在此种情形中,如果其中一个**PV**物理卷被填满我们需要从其它物理卷中添加更多扩展。这样添加更多扩展到PV中后我们可以指定逻辑卷使用特定的物理卷写入I/O。
假设我们**四个磁盘**驱动器,分别指向了四个物理卷,如果各个物理卷总计可以达到**100 I/O**,我们卷组就可以获得**400 I/O**。
假设我们有**四个磁盘**驱动器,分别指向了四个物理卷,如果各个物理卷总计可以达到**100 I/O**,我们卷组就可以获得**400 I/O**。
如果我们不使用**条块化方法**文件系统将横跨基础物理卷写入。例如写入一些数据到物理卷达到100 I/O这些数据只会写入到第一个PV**sdb1**。如果我们在写入时使用条块化选项创建逻辑卷它会分割100 I/O分别写入到四个驱动器中这就是说每个驱动器中都会接收到25 I/O。
@ -41,27 +43,31 @@
# fdisk -l | grep sd
![List Hard Drives](http://www.tecmint.com/wp-content/uploads/2014/09/List-Hard-Drives.png)
列出硬盘驱动器
现在我们必须为这4个硬盘驱动器**sdb****sdc****sdd**和**sde**创建分区,我们将用‘**fdisk**’命令来完成该工作。要创建分区,请遵从本文**第一部分**中**步骤#4**的说明,并在创建分区时确保你已将类型修改为**LVM8e**。
*列出硬盘驱动器*
现在我们必须为这4个硬盘驱动器**sdb****sdc****sdd**和**sde**创建分区,我们将用‘**fdisk**’命令来完成该工作。要创建分区,请遵从本文**[第一部分][1]**中**步骤#4**的说明,并在创建分区时确保你已将类型修改为**LVM8e**。
# pvcreate /dev/sd[b-e]1 -v
![Create Physical Volumes in LVM](http://www.tecmint.com/wp-content/uploads/2014/09/Create-Physical-Volumes-in-LVM.png)
在LVM中创建物理卷
*在LVM中创建物理卷*
PV创建完成后你可以使用**pvs**’命令将它们列出来。
# pvs
![Verify Physical Volumes](http://www.tecmint.com/wp-content/uploads/2014/09/Verify-Physical-Volumes.png)
验证物理卷
*验证物理卷*
现在我们需要使用这4个物理卷来定义卷组。这里我定义了一个物理扩展大小PE为**16MB**,名为**vg_strip**的卷组。
# vgcreate -s 16M vg_strip /dev/sd[b-e]1 -v
上面命令中选项的说明。
上面命令中选项的说明:
- **[b-e]1** 定义硬盘驱动器名称如sdb1sdc1sdd1sde1。
- **-s** 定义物理扩展大小。
- **-v** 详情。
@ -71,14 +77,16 @@ PV创建完成后你可以使用**pvs**’命令将它们列出来。
# vgs vg_strip
![Verify Volume Group](http://www.tecmint.com/wp-content/uploads/2014/09/Verify-Volume-Group.png)
验证卷组
*验证卷组*
要获取VG更详细的信息可以在**vgdisplay**命令中使用‘-v选项它将给出**vg_strip**卷组中所使用的全部物理卷的详细情况。
# vgdisplay vg_strip -v
![Volume Group Information](http://www.tecmint.com/wp-content/uploads/2014/09/Volume-Group-Information.png)
卷组信息
*卷组信息*
回到我们的话题,现在在创建逻辑卷时,我们需要定义条块化值,就是数据需要如何使用条块化方法来写入到我们的逻辑卷中。
@ -91,46 +99,54 @@ PV创建完成后你可以使用**pvs**’命令将它们列出来。
- **-i** –条块化
![Create Logical Volumes](http://www.tecmint.com/wp-content/uploads/2014/09/Create-Logical-Volumes.png)
创建逻辑卷
*创建逻辑卷*
在上面的图片中,我们可以看到条块尺寸的默认大小为**64 KB**,如果我们需要自定义条块值,我们可以使用**-I**大写I。要确认逻辑卷是否已经创建请使用以下命令。
# lvdisplay vg_strip/lv_tecmint_strp1
![Confirm Logical Volumes](http://www.tecmint.com/wp-content/uploads/2014/09/Confirm-Logical-Volumes.png)
确认逻辑卷
*确认逻辑卷*
现在接下来的问题是我们怎样才能知道条块被写入到了4个驱动器。这里我们可以使用**lvdisplay**’和**-m**(显示逻辑卷映射)命令来验证。
# lvdisplay vg_strip/lv_tecmint_strp1 -m
![Check Logical Volumes](http://www.tecmint.com/wp-content/uploads/2014/09/Check-Logical-Volumes.png)
检查逻辑卷
*检查逻辑卷*
要创建自定义的条块尺寸,我们需要用我们自定义的条块大小**256KB**来创建一个**1GB**大小的逻辑卷。现在我打算将条块分布到3个PV上。这里我们可以定义我们想要哪些pv条块化。
# lvcreate -L 1G -i3 -I 256 -n lv_tecmint_strp2 vg_strip /dev/sdb1 /dev/sdc1 /dev/sdd1
![Define Stripe Size](http://www.tecmint.com/wp-content/uploads/2014/09/Define-Stripe-Size.png)
定义条块大小
*定义条块大小*
接下来,检查条块大小和条块化的卷。
# lvdisplay vg_strip/lv_tecmint_strp2 -m
![Check Stripe Size](http://www.tecmint.com/wp-content/uploads/2014/09/Check-Stripe-Size.png)
检查条块大小
*检查条块大小*
是时候使用设备映射了,我们使用‘**dmsetup**’命令来完成这项工作。它是一个低级别的逻辑卷管理工具,它用于管理使用了设备映射驱动的逻辑设备。
# dmsetup deps /dev/vg_strip/lv_tecmint_strp[1-2]
![Device Mapper](http://www.tecmint.com/wp-content/uploads/2014/09/Device-Mapper.png)
设备映射
*设备映射*
这里我们可以看到strp1依赖于4个驱动器strp2依赖于3个设备。
希望你已经明白,我们怎样能让逻辑卷条块化来写入数据。对于此项设置,必须掌握逻辑卷管理基础知识。在我的下一篇文章中,我将给大家展示怎样在逻辑卷管理中迁移数据。到那时,请静候更新。同时,别忘了对本文提出有价值的建议。
希望你已经明白,我们怎样能让逻辑卷条块化来写入数据。对于此项设置,必须掌握逻辑卷管理基础知识。
在我的下一篇文章中,我将给大家展示怎样在逻辑卷管理中迁移数据。到那时,请静候更新。同时,别忘了对本文提出有价值的建议。
--------------------------------------------------------------------------------
@ -138,8 +154,9 @@ via: http://www.tecmint.com/manage-multiple-lvm-disks-using-striping-io/
作者:[Babin Lonston][a]
译者:[GOLinux](https://github.com/GOLinux)
校对:[校对者ID](https://github.com/校对者ID)
校对:[wxy](https://github.com/wxy)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://www.tecmint.com/author/babinlonston/
[1]:http://linux.cn/article-3965-1.html

View File

@ -1,10 +1,11 @@
迁移LVM分区到新的逻辑卷(驱动器)——第六部分
迁移LVM分区到新的逻辑卷/驱动器(第六部分)
================================================================================
这是我们正在开展的逻辑卷管理系列的第六部分。在本文中,我们将为大家展示怎样在线将现存的逻辑卷迁移到其它新的驱动器。在开始之前我想要先来介绍一下LVM迁移及其特性。
这是我们正在进行的LVM系列的第六部分。在本文中我们将为大家展示怎样在线将现存的逻辑卷迁移到其它新的驱动器。在开始之前我想要先来介绍一下LVM迁移及其特性。
![LVM Storage Migration](http://www.tecmint.com/wp-content/uploads/2014/10/LVM-Migrations.png)
LVM存储迁移
*LVM存储迁移*
### 什么是LVM迁移 ###
@ -17,7 +18,7 @@ LVM存储迁移
- 我们可以使用任何类型的磁盘如SATA、SSD、SAS、SAN storage iSCSI或者FC。
- 在线迁移磁盘,而且数据不会丢失。
在LVM迁移中我们将交换各个卷、文件系统以及位于现存存储中的数据。例如,如果我们有一个单一逻辑卷,它已经映射到了物理卷,而该物理卷是一个物理硬盘驱动器。
在LVM迁移中我们将交换各个卷、文件系统以及位于已有的存储中的数据。例如,如果我们有一个单一逻辑卷,它已经映射到了物理卷,而该物理卷是一个物理硬盘驱动器。
现在如果我们需要升级服务器存储为SSD硬盘驱动器我们首先需要考虑什么重新格式化磁盘我们不必重新格式化服务器LVM可以选择将这些旧的SATA驱动器上的数据迁移到新的SSD驱动器上。在线迁移将会支持任何类型的磁盘不管是本地驱动器还是SAN或者光纤通道都可以。
@ -35,7 +36,8 @@ LVM存储迁移
# lvs
![Check Logical Volume Disk](http://www.tecmint.com/wp-content/uploads/2014/10/Check-Logical-Volume-Disk.png)
检查逻辑卷磁盘
*检查逻辑卷磁盘*
### 步骤2 检查新添加的驱动器 ###
@ -44,7 +46,8 @@ LVM存储迁移
# fdisk -l | grep dev
![Check New Added Drive](http://www.tecmint.com/wp-content/uploads/2014/10/Check-New-Added-Drive.png)
检查新添加的驱动器
*检查新添加的驱动器*
**注意**:你看到上面屏幕中的内容了吗?新的驱动器已经被成功添加了,其名称为“**/dev/sda**”。
@ -57,7 +60,8 @@ LVM存储迁移
# cat tecmint.txt
![Check Logical Volume Data](http://www.tecmint.com/wp-content/uploads/2014/10/Check-Logical-Volume-Data.png)
检查逻辑卷数据
*检查逻辑卷数据*
**注意**:出于演示的目的,我们已经在**/mnt/lvm**挂载点下创建了两个文件,我们将在线将这些数据迁移到新的驱动器中。
@ -67,7 +71,8 @@ LVM存储迁移
# vgs -o+devices | grep tecmint_vg
![Confirm Logical Volume Names](http://www.tecmint.com/wp-content/uploads/2014/10/Confirm-Logical-Volume-Names.png)
确认逻辑卷名称
*确认逻辑卷名称*
**注意**:看到上面屏幕中的内容了吗?“**vdb**”容纳了卷组**tecmint_vg**。
@ -79,7 +84,8 @@ LVM存储迁移
# pvs
![Create Physical Volume](http://www.tecmint.com/wp-content/uploads/2014/10/Create-Physical-Volume.png)
创建物理卷
*创建物理卷*
**6.**接下来使用vgextend命令来添加新创建的物理卷到现存卷组tecmint_vg。
@ -87,14 +93,16 @@ LVM存储迁移
# vgs
![Add Physical Volume](http://www.tecmint.com/wp-content/uploads/2014/10/Add-Physical-Volume.png)
添加物理卷
*添加物理卷*
**7.**要获得卷组的完整信息列表请使用vgdisplay命令。
# vgdisplay tecmint_vg -v
![List Volume Group Info](http://www.tecmint.com/wp-content/uploads/2014/10/List-Volume-Group-Info.png)
列出卷组信息
*列出卷组信息*
**注意**在上面屏幕中我们可以看到在输出结果的结束处我们的PV已经添加到了卷组中。
@ -108,7 +116,8 @@ LVM存储迁移
# ls -l /dev | grep vd
![List Device Information](http://www.tecmint.com/wp-content/uploads/2014/10/List-Device-Information.png)
列出设备信息
*列出设备信息*
**注意**:在上面的命令中,我们可以看到主设备号是**252**,次设备号是**17**,它连接到了**vdb1**。希望你理解了上面命令的输出。
@ -122,7 +131,8 @@ LVM存储迁移
- **1** = 添加单个镜像
![Mirroring Method Migration](http://www.tecmint.com/wp-content/uploads/2014/10/Mirroring-Method-Migration.png)
镜像法迁移
*镜像法迁移*
**注意**:上面的迁移过程根据卷的大小会花费一段时间。
@ -131,14 +141,16 @@ LVM存储迁移
# lvs -o+devices
![Verify Converted Mirror](http://www.tecmint.com/wp-content/uploads/2014/10/Verify-Converted-Mirror.png)
验证转换的镜像
*验证转换的镜像*
**11.**当你确认转换的镜像没有任何问题后,你可以移除旧的虚拟磁盘**vdb1**。**-m**选项将移除镜像,先前我们使用**1**来添加镜像。
# lvconvert -m 0 /dev/tecmint_vg/tecmint_lv /dev/vdb1
![Remove Virtual Disk](http://www.tecmint.com/wp-content/uploads/2014/10/Remove-Virtual-Disk.png)
移除虚拟磁盘
*移除虚拟磁盘*
**12.**在旧虚拟磁盘移除后,你可以使用以下命令来再次检查逻辑卷设备。
@ -147,7 +159,8 @@ LVM存储迁移
# ls -l /dev | grep sd
![Check New Mirrored Device](http://www.tecmint.com/wp-content/uploads/2014/10/Check-New-Mirrored-Device.png)
检查新镜像的设备
*检查新镜像的设备*
在上面的图片中,你看到了吗?我们的逻辑卷现在依赖于**8,1**,名称为**sda1**。这说明我们的迁移过程已经完成了。
@ -157,7 +170,8 @@ LVM存储迁移
    # cat tecmint.txt
![Check Mirrored Data](http://www.tecmint.com/wp-content/uploads/2014/10/Check-Mirrored-Data.png)
检查镜像的数据
*检查镜像的数据*
# vgreduce /dev/tecmint_vg /dev/vdb1
@ -170,7 +184,8 @@ LVM存储迁移
# lvs
![Delete Virtual Disk](http://www.tecmint.com/wp-content/uploads/2014/10/Delete-Virtual-Disk.png)
删除虚拟磁盘
*删除虚拟磁盘*
### 步骤6 LVM pvmove镜像法 ###
@ -190,7 +205,7 @@ via: http://www.tecmint.com/lvm-storage-migration/#comment-331336
作者:[Babin Lonston][a]
译者:[GOLinux](https://github.com/GOLinux)
校对:[校对者ID](https://github.com/校对者ID)
校对:[wxy](https://github.com/wxy)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出

View File

@ -1,4 +1,4 @@
在Linux上使用smartmontools查看硬盘的健康状态
使用 smartmontools 查看硬盘的健康状态
================================================================================
要说Linux用户最不愿意看到的事情莫过于在毫无警告的情况下发现硬盘崩溃了。诸如[RAID][2]的[备份][1]和存储技术可以在任何时候帮用户恢复数据,但为预防硬件突然崩溃造成数据丢失所花费的代价却是相当可观的,特别是在用户从来没有提前考虑过在这些情况下的应对措施时。
@ -28,7 +28,7 @@
![](https://farm4.staticflickr.com/3953/15352881249_96c09f7ccc_o.png)
其中sdx代表分配给机器上对应硬盘上的设备名。
其中sdX代表分配给机器上对应硬盘上的设备名。
如果想要显示出某个指定硬盘的信息比如设备模式、S/N、固件版本、大小、ATA版本/修订号、SMART功能的可用性和状态,在运行smartctl命令时添加"--info"选项,并按如下所示指定硬盘的设备名。
@ -67,8 +67,8 @@
- **THRESH**在报告硬盘FAILED状态前WORST可以允许的最小值。
- **TYPE**属性的类型Pre-fail或Old_age。Pre-fail类型的属性可被看成一个关键属性表示参与磁盘的整体SMART健康评估PASSED/FAILED。如果任何Pre-fail类型的属性故障那么可视为磁盘将要发生故障。另一方面Old_age类型的属性可被看成一个非关键的属性如正常的磁盘磨损表示不会使磁盘本身发生故障。
- **UPDATED**表示属性的更新频率。Offline代表磁盘上执行离线测试的时间。
- **WHEN_FAILED**如果VALUE小于等于THRESH会被设置成“FAILING_NOW”如果WORST小于等于THRESH会被设置成“In_the_past”如果都不是会被设置成“-”。在“FAILING_NOW”情况下需要备份重要文件ASAP特别是属性是Pre-fail类型时。“In_the_past”代表属性已经故障了但在运行测试的时候没问题。“-”代表这个属性从没故障过。
- **RAW_VALUE**制造商定义的原始值从VALUE派生。
- **WHEN\_FAILED**如果VALUE小于等于THRESH会被设置成“FAILING\_NOW”如果WORST小于等于THRESH会被设置成“In\_the\_past”如果都不是会被设置成“-”。在“FAILING\_NOW”情况下需要尽快备份重要文件特别是属性是Pre-fail类型时。“In\_the\_past”代表属性已经故障了但在运行测试的时候没问题。“-”代表这个属性从没故障过。
- **RAW\_VALUE**制造商定义的原始值从VALUE派生。
这时候你可能会想“是的smartctl看起来是个不错的工具但我更想知道如何避免手动运行的麻烦。”如果能够以指定的间隔运行同时又能通知我测试结果那不是更好吗
@ -134,7 +134,7 @@ via: http://xmodulo.com/check-hard-disk-health-linux-smartmontools.html
作者:[Gabriel Cánepa][a]
译者:[KayGuoWhu](https://github.com/KayGuoWhu)
校对:[校对者ID](https://github.com/校对者ID)
校对:[wxy](https://github.com/wxy)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出

View File

@ -76,7 +76,7 @@ via: http://www.ubuntugeek.com/configuring-layer-two-peer-to-peer-vpn-using-n2n.
作者:[ruchi][a]
译者:[GOLinux](https://github.com/GOLinux)
校对:[校对者ID](https://github.com/校对者ID)
校对:[wxy](https://github.com/wxy)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出

View File

@ -1,8 +1,8 @@
Linux 系统中使用 logwatch 监控日志文件
================================================================================
Linux 操作系统和许多应用程序会创建特殊的文件来记录它们的运行事件,这些文件通常被称作“日志”。当要了解操作系统或第三方应用程序的行为或进行故障排队的话,这些系统日志或特写的应用程序日志文件是必不可少的的工具。但是,日志文件并没有您们所谓的“清晰”或“容易”这种程度的可读性。手工分析原始的日志文件简直是浪费时间,并且单调乏味。出于这个原因,对于系统管理员来说,发现任何一款能把原始的日志文件转换成更人性化的记录摘要的工具,将会受益无穷。
Linux 操作系统和许多应用程序会创建特殊的文件来记录它们的运行事件,这些文件通常被称作“日志”。当要了解操作系统或第三方应用程序的行为或进行故障排查时,这些系统日志或特定的应用程序日志文件是必不可少的工具。但是,日志文件并没有您们所谓的“清晰”或“容易”这种程度的可读性。手工分析原始的日志文件简直是浪费时间,并且单调乏味。出于这个原因,对于系统管理员来说,发现任何一款能把原始的日志文件转换成更人性化的记录摘要的工具,将会受益无穷。
[logwatch][1] 是一款用 Perl 语言编写的开源日志解析分析器。它能对原始的日志文件进行解析并转换成结构化格式的文档也能根据您的使用情况和需求来定制报告。logwatch 的主要目的是生成更易于使用的日志摘要并不是用来对日志进行实时的处理和监控的。正因为如此logwatch 通常被设定好时间和频率的自动定时任务来调度运行或者是有需要日志处理的时候从命令行里手动运行。一旦日志报告生成logwatch 通过电子邮件把这报告发送给您,您可以把它保存成文件或者在屏幕上直接显示
[logwatch][1] 是一款用 Perl 语言编写的开源日志解析分析器。它能对原始的日志文件进行解析并转换成结构化格式的文档也能根据您的使用情况和需求来定制报告。logwatch 的主要目的是生成更易于使用的日志摘要并不是用来对日志进行实时的处理和监控的。正因为如此logwatch 通常被设定好时间和频率的自动定时任务来调度运行或者是有需要日志处理的时候从命令行里手动运行。一旦日志报告生成logwatch 可以通过电子邮件把这报告发送给您,您可以把它保存成文件或者直接显示在屏幕上。
Logwatch 报告的详细程度和报告覆盖范围是完全可定制化的。Logwatch 的日志处理引擎也是可扩展的,从某种意义上来说,如果您想在一个新的应用程序中使用 logwatch 功能的话,只需要为这个应用程序的日志文件编写一个日志处理脚本(使用 Perl 语言),然后挂接到 logwatch 上就行。
@ -20,13 +20,13 @@ logwatch 有一点不好的就是,在它生成的报告中没有详细的时
### 配置 Logwatch ###
安装时主要的配置文件logwatch.conf被放到 **/etc/logwatch/conf** 目录中。此文件定义的设置选项会覆盖掉定义在 /usr/share/logwatch/default.conf/logwatch.conf 文件中的系统级设置。
安装时主要的配置文件logwatch.conf被放到 **/etc/logwatch/conf** 目录中。此文件(默认是空的)定义的设置选项会覆盖掉定义在 /usr/share/logwatch/default.conf/logwatch.conf 文件中的系统级设置。
在命令行中,启动 logwatch, 如果不带参数的话,将会使用 /etc/logwatch/conf/logwatch.conf 文件中定义的自定义选项。但,只要一指定参数,它们就会覆盖 /etc/logwatch/conf/logwatch.conf 文件中的任意默认/自定义设置。
在命令行中,启动 logwatch, 如果不带参数的话,将会使用 /etc/logwatch/conf/logwatch.conf 文件中定义的选项。但,只要一指定参数,它们就会覆盖 /etc/logwatch/conf/logwatch.conf 文件中的任意默认/自定义设置。
这篇文章里,我们会编辑 /etc/logwatch/conf/logwatch.conf 文件来对一些默认的设置项做些个性化设置。
Detail = <Low, Med, High, or a number>
Detail = <Low, Med, High, 或数字>
“Detail” 配置指令控制着 logwatch 报告的详细程度。它可以是个正整数也可以是分别代表着10、5和0数字的 High、Med、Low 几个选项。
@ -53,7 +53,7 @@ logwatch 有一点不好的就是,在它生成的报告中没有详细的时
Service = <service-name-2>
. . .
“Service” 选项指定想要监控的一个或多个服务。在 /usr/share/logwatch/scripts/services 目录下列出的服务都能被监控,它们已经涵盖了重要的系统服务(例如,pam,secure,iptables,syslogd 等),也涵盖了一些像 sudo、sshd、http、fail2ban、samba等主流的应用服务。如果您想添加新的服务到列表中得编写一个相应的日志处理 Perl 脚本,并把它放在这个目录中。
“Service” 选项指定想要监控的一个或多个服务。在 /usr/share/logwatch/scripts/services 目录下列出的服务都能被监控,它们已经涵盖了重要的系统服务(例如pam,secure,iptables,syslogd 等),也涵盖了一些像 sudo、sshd、http、fail2ban、samba等主流的应用服务。如果您想添加新的服务到列表中得编写一个相应的日志处理 Perl 脚本,并把它放在这个目录中。
如果这个选项要用来选择特定的服务话,您需要把 /usr/share/logwatch/default.conf/logwatch.conf 文件中的 "Service = All " 这一行注释掉。
@ -123,7 +123,7 @@ via: http://xmodulo.com/monitor-log-file-linux-logwatch.html
作者:[Gabriel Cánepa][a]
译者:[runningwater](https://github.com/runningwater)
校对:[校对者ID](https://github.com/校对者ID)
校对:[wxy](https://github.com/wxy)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出

View File

@ -0,0 +1,295 @@
你值得拥有 —— 25 个 Linux 性能监控工具
================================================================================
一段时间以来我们在网上向读者介绍了如何为Linux以及类Linux操作系统配置多种不同的性能监控工具。在这篇文章中我们将罗列一系列使用最频繁的性能监控工具并对介绍到的每一个工具提供了相应的简介链接大致将其划分为两类基于命令行的和提供图形化接口的。
### 基于命令行的性能监控工具 ###
#### 1. dstat - 多类型资源统计工具 ####
该命令整合了**vmstat****iostat**和**ifstat**三种命令。同时增加了新的特性和功能可以让你能及时看到各种的资源使用情况,从而能够使你对比和整合不同的资源使用情况。通过不同颜色和区块布局的界面帮助你能够更加清晰容易的获取信息。它也支持将信息数据导出到**cvs**格式文件中,从而用其他应用程序打开,或者导入到数据库中。你可以用该命令来[监控cpu内存和网络状态随着时间的变化][1]。
![](http://blog.linoxide.com/wp-content/uploads/2014/10/dstat.png)
#### 2. atop - 相比top更好的ASCII码体验 ####
这个使用**ASCII**码显示方式的命令行工具是一个显示所有进程活动的性能监控工具。它可以展示每日的系统日志以进行长期的进程活动分析并高亮显示过载的系统使用资源。它包含了CPU内存交换空间磁盘和网络层的度量指标。所有这些功能只需在终端运行**atop**即可。
# atop
当然你也可以使用[交互界面来显示][2]数据并进行排序。
![](http://blog.linoxide.com/wp-content/uploads/2014/10/atop1.jpg)
#### 3. Nmon - 类Unix系统的性能监控 ####
Nmon是**Nigel's Monitor**缩写,它最早开发用来作为**AIX**的系统监控工具。如果使用**在线模式**,可以使用光标键在屏幕上操作实时显示在终端上的监控信息。使用**捕捉模式**能够将数据保存为**CSV**格式,方便进一步的处理和图形化展示。
![](http://blog.linoxide.com/wp-content/uploads/2014/10/nmon_interface.png)
更多的信息参考我们的[nmon性能监控文章][3]。
#### 4. slabtop - 显示内核slab缓存信息 ####
这个应用能够显示**缓存分配器**是如何管理Linux内核中缓存的不同类型的对象。这个命令类似于top命令区别是它的重点是实时显示内核slab缓存信息。它能够显示按照不同排序条件来排序显示缓存列表。它同时也能够显示一个slab层信息的统计信息的题头。举例如下
# slabtop --sort=a
# slabtop -s b
# slabtop -s c
# slabtop -s l
# slabtop -s v
# slabtop -s n
# slabtop -s o
**更多信息参阅**[内核slab缓存文章][4]。
#### 5. sar - 性能监控和瓶颈检查 ####
**sar** 命令可以将操作系统上所选的累积活动计数器内容信息输出到标准输出上。其基于计数值和时间间隔参数的**审计系统**会按照指定的时间间隔输出指定次数的监控信息。如果时间间隔参数为设置为0那么[sar命令将会显示系统从开机到当时时刻的平均统计信息][5]。有用的命令如下:
# sar -u 2 3
# sar -u -f /var/log/sa/sa05
# sar -P ALL 1 1
# sar -r 1 3
# sar -W 1 3
#### 6. Saidar - 简单的统计监控工具 ####
Saidar是一个**简单**且**轻量**的系统信息监控工具。虽然它无法提供大多性能报表,但是它能够通过一个简单明了的方式显示最有用的系统运行状况数据。你可以很容易地看到[运行时间、平均负载、CPU、内存、进程、磁盘和网络接口][6]统计信息。
Usage: saidar [-d delay] [-c] [-v] [-h]
-d 设置更新时间(秒)
-c 彩色显示
-v 显示版本号
-h 显示本帮助
![](http://blog.linoxide.com/wp-content/uploads/2014/10/saidar-e1413370985588.png)
#### 7. top - 经典的Linux任务管理工具 ####
作为一个广为人知的**Linux**工具,**top**是大多数的类Unix操作系统任务管理器。它可以显示当前正在运行的进程的列表用户可以按照不同的条件对该列表进行排序。它主要显示了系统进程对**CPU**和内存的使用状况。top可以快速检查是哪个或哪几个进程挂起了你的系统。你可以在[这里][7]看到top使用的例子。 你可以在终端输入top来运行它并进入到交互模式
交互模式的一些快捷操作:
全局命令: <回车/空格> ?, =, A, B, d, G, h, I, k, q, r, s, W, Z
统计区的命令: l, m, t, 1
任务区的命令:
外观: b, x, y, z 内容: c, f, H, o, S, u 大小: #, i, n 排序: <, >, F, O, R
色彩方案: <Ret>, a, B, b, H, M, q, S, T, w, z, 0 - 7
窗口命令: -, _, =, +, A, a, G, g, w
![](http://blog.linoxide.com/wp-content/uploads/2014/10/top.png)
#### 8. Sysdig - 系统进程的高级视图 ####
**Sysdig**是一个能够让系统管理员和开发人员以前所未有方式洞察其系统行为的监控工具。其开发团队希望改善系统级的监控方式,通过提供关于**存储,进程,网络和内存**子系统的**统一有序**以及**粒度可见**的方式来进行错误排查,并可以创建系统活动记录文件以便你可以在任何时间轻松分析。
简单例子:
# sysdig proc.name=vim
# sysdig -p"%proc.name %fd.name" "evt.type=accept and proc.name!=httpd"
# sysdig evt.type=chdir and user.name=root
# sysdig -l
# sysdig -L
# sysdig -c topprocs_net
# sysdig -c fdcount_by fd.sport "evt.type=accept"
# sysdig -p"%proc.name %fd.name" "evt.type=accept and proc.name!=httpd"
# sysdig -c topprocs_file
# sysdig -c fdcount_by proc.name "fd.type=file"
# sysdig -p "%12user.name %6proc.pid %12proc.name %3fd.num %fd.typechar %fd.name" evt.type=open
# sysdig -c topprocs_cpu
# sysdig -c topprocs_cpu evt.cpu=0
# sysdig -p"%evt.arg.path" "evt.type=chdir and user.name=root"
# sysdig evt.type=open and fd.name contains /etc
![](http://blog.linoxide.com/wp-content/uploads/2014/10/sysdig.jpg)
**更多信息** 可以在 [如何利用sysdig改善系统层次的监控和错误排查][8]
#### 9. netstat - 显示开放的端口和连接 ####
它是**Linux管理员**使用来显示各种网络信息的工具,如查看什么端口开放和什么网络连接已经建立以及何种进程运行在该连接之上。同时它也显示了不同程序间打开的**Unix套接字**的信息。作为大多数Linux发行版本的一部分netstat的许多命令在 [netstat和它的不同输出][9]中有详细的描述。最为常用的如下:
$ netstat | head -20
$ netstat -r
$ netstat -rC
$ netstat -i
$ netstat -ie
$ netstat -s
$ netstat -g
$ netstat -tapn
#### 10. tcpdump - 洞察网络封包 ####
**tcpdump**可以用来查看**网络连接**的**封包**内容。它显示了传输过程中封包内容的各种信息。为了使得输出信息更为有用,它允许使用者通过不同的过滤器获取自己想要的信息。可以参照的例子如下:
# tcpdump -i eth0 not port 22
# tcpdump -c 10 -i eth0
# tcpdump -ni eth0 -c 10 not port 22
# tcpdump -w aloft.cap -s 0
# tcpdump -r aloft.cap
# tcpdump -i eth0 dst port 80
你可以在文章“[tcpdump和捕捉包][10]”中找到详细描述。
#### 11. vmstat - 虚拟内存统计信息 ####
**vmstat**是虚拟内存(**virtual memory** statistics)的缩写,作为一个**内存监控**工具,它收集和显示关于**内存****进程****终端**和**分页**和**I/O阻塞**的概括信息。作为一个开源程序它可以在大部分Linux发行版本中找到包括Solaris和FreeBSD。它用来诊断大部分的内存性能问题和其他相关问题。
![](http://blog.linoxide.com/wp-content/uploads/2014/10/vmstat_delay_5.png)
**更多信息** 参考 [vmstat命令文章][11]。
#### 12. free - 内存统计信息 ####
free是另一个能够在终端中显示内存和交换空间使用的命令行工具。由于它的简易它经常用于快速查看内存使用或者是应用于不同的脚本和应用程序中。在这里你可以看到[这个小程序的许多应用][12]。几乎所有的系统管理员日常都会用这个工具。:-)
![](http://blog.linoxide.com/wp-content/uploads/2014/10/free_hs3.png)
#### 13. Htop - 更加友好的top ####
**Htop**基本上是一个top改善版本它能够以更加多彩的方式显示更多的统计信息同时允许你采用不同的方式进行排序它提供了一个**用户友好**的接口。
![](http://blog.linoxide.com/wp-content/uploads/2014/10/htop.png)
你可以在文章“[关于htop和top的比较][13]”中找到**更多的信息** 。
#### 14. ss - 网络管理的现代替代品 ####
**ss**是**iproute2**包的一部分。iproute2是用来替代一整套标准的**Unix网络**工具组件,它曾经用来完成[网络接口配置路由表和管理ARP表][14]任务。ss工具用来记录套接字统计信息它可以显示类似netstat一样的信息同时也能显示更多TCP和状态信息。一些例子如下
# ss -tnap
# ss -tnap6
# ss -tnap
# ss -s
# ss -tn -o state established -p
#### 15. lsof - 列表显示打开的文件 ####
**lsof**命令,意为“**list open files**”, 用于在许多类Unix系统中显示所有打开的文件及打开它们的进程。在大部分Linux发行版和其他类Linux操作系统中系统管理员用它来检查不同的进程打开了哪些文件。
# lsof +p process_id
# lsof | less
# lsof -u username
# lsof /etc/passwd
# lsof -i TCP:ftp
# lsof -i TCP:80
你可以在[lsof 文章][15]中找到**更多例子**。
#### 16. iftop - 类似top的网络连接工具 ####
**iftop**是另一个基于网络信息的类似top的程序。它能够显示当前时刻按照**带宽使用**量或者上传或者下载量排序的**网络连接**状况。它同时提供了下载文件的预估完成时间。
![](http://blog.linoxide.com/wp-content/uploads/2014/10/iftop.png)
**更多信息**可以参考[网络流量iftop文章][16]
#### 17. iperf - 网络性能工具 ####
**iperf**是一个**网络测试**工具,能够创建**TCP**和**UDP**数据连接并在网络上测量它们的**传输性能**。它支持调节关于时间,协议和缓冲等不同的参数。对于每一个测试,它会报告带宽,丢包和其他的一些参数。
![](http://blog.linoxide.com/wp-content/uploads/2014/10/iperf-e1413378331696.png)
如果你想用使用这个工具,可以参考这篇文章: [如何安装和使用iperf][17]
#### 18. Smem - 高级内存报表工具 ####
**Smem**是最先进的**Linux**命令行工具之一,它提供关于系统中已经使用的和共享的实际内存大小,试图提供一个更为可靠的当前**内存**使用数据。
$ smem -m
$ smem -m -p | grep firefox
$ smem -u -p
$ smem -w -p
参考我们的文章:[Smem更多的例子][18]
### 图形化或基于Web的性能工具 ###
#### 19. Icinga - Nagios的社区分支版本 ####
**Icinga**是一个**开源免费**的网络监控程序作为Nagios的分支它继承了前者现有的大部分功能同时基于这些功能又增加了社区用户要求已久的功能和补丁。
![](http://blog.linoxide.com/wp-content/uploads/2014/10/Icinga-e1413377995731.png)
**更多信息**请参考[安装和配置Icinga文章][19]。
#### 20. Nagios - 最为流行的监控工具. ####
作为在Linux上使用最为广泛和最为流行的**监控方案**,它有一个守护程序用来收集不同进程和远程主机的信息,这些收集到的信息都通过功能强大**的web界面**进行呈现。
![](http://blog.linoxide.com/wp-content/uploads/2014/10/nagios-e1413305858732.png)
你可以在文章“[如何安装nagios][20]”里面**找到更多的信息**
#### 21. Linux process explorer - Linux下的procexp ####
**Linux process explorer**是一个Linux下的图形化进程浏览工具。它能够显示不同的进程信息如进程数TCP/IP连接和每一个进程的性能指标。它是**Windows**下由**Sysinternals**开发的**procexp**在Linux上的替代品其目标是比**top**和**ps**提供更好的用户体验。
![](http://a.fsdn.com/con/app/proj/procexp/screenshots/tcpipview.png)
查看 [linux process explorer 文章][21]获取更多信息。
#### 22. Collectl - 性能监控工具 ####
你既可以通过交互的方式使用这个**性能监控**工具,也可以用它把**报表**写到磁盘上并通过web服务器来访问。它以一种**易读易管理**的格式,显示了**CPU磁盘内存网络网络文件系统进程slabs**等统计信息。
![](http://blog.linoxide.com/wp-content/uploads/2014/10/collectl.png)
**更多** 关于[Collectl的文章][22]。
#### 23. MRTG - 经典网络流量监控图形工具 ####
这是一个采用**rrdtool**的生成图形的流量监控工具。作为**最早**的提供**图形化界面**的流量监控工具它被广泛应用在类Unix的操作系统中。查看我们关于[如何使用MRTG][23]的文章获取更多关于安装和配置的信息。
![](http://blog.linoxide.com/wp-content/uploads/2014/10/mrtg.png)
#### 24. Monit - 简单易用的监控工具 ####
**Monit**是一个用来**监控进程****系统加载****文件系统**和**目录文件**等的开源的Linux工具。你能够让它自动化维护和修复也能够在运行错误的情景下执行特定动作或者发邮件报告提醒系统管理员。如果你想要用这个工具你可以查看[如何使用Monit的文章][24]。
![](http://blog.linoxide.com/wp-content/uploads/2014/10/monit.png)
#### 25. Munin - 为服务器提供监控和提醒服务 ####
作为一个网络资源监控工具,**Munin**能够帮助分析**资源趋势**和**查看薄弱环节**以及导致产生**性能问题**的原因。开发此软件的团队希望它能够易用和用户体验友好。该软件是用Perl开发的并采用**rrdtool**来绘制图形,使用了**web界面**进行呈现。开发人员推广此应用时声称当前已有500多个监控插件可以“**即插即用**”。
![](http://blog.linoxide.com/wp-content/uploads/2014/10/Ubuntu-2014-10-13-10-37-34-e1413185930801.png)
**更多信息**可以在[关于Munin的文章][25]中找到。
--------------------------------------------------------------------------------
via: http://linoxide.com/monitoring-2/linux-performance-monitoring-tools/
作者:[Adrian Dinu][a]
译者:[andyxue](https://github.com/andyxue)
校对:[wxy](https://github.com/wxy)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://linoxide.com/author/adriand/
[1]:http://linux.cn/article-3215-1.html
[2]:http://linoxide.com/monitoring-2/guide-using-linux-atop/
[3]:http://linoxide.com/monitoring-2/install-nmon-monitor-linux-performance/
[4]:http://linux.cn/article-3702-1.html
[5]:http://linoxide.com/linux-command/linux-system-performance-monitoring-using-sar-command/
[6]:http://linoxide.com/monitoring-2/monitor-linux-saidar-tool/
[7]:http://linux.cn/article-2352-1.html
[8]:http://linux.cn/article-4341-1.html
[9]:http://linux.cn/article-2434-1.html
[10]:http://linoxide.com/linux-how-to/network-traffic-capture-tcp-dump-command/
[11]:http://linux.cn/article-2472-1.html
[12]:http://linux.cn/article-2443-1.html
[13]:http://linux.cn/article-3141-1.html
[14]:http://linux.cn/article-4372-1.html
[15]:http://linux.cn/article-4099-1.html
[16]:http://linux.cn/article-1843-1.html
[17]:http://linoxide.com/monitoring-2/install-iperf-test-network-speed-bandwidth/
[18]:http://linoxide.com/tools/memory-usage-reporting-smem/
[19]:http://linoxide.com/monitoring-2/install-configure-icinga-linux/
[20]:http://linux.cn/article-2436-1.html
[21]:http://sourceforge.net/projects/procexp/
[22]:http://linux.cn/article-3154-1.html
[23]:http://linoxide.com/tools/multi-router-traffic-grapher/
[24]:http://linoxide.com/monitoring-2/monit-linux/
[25]:http://linoxide.com/ubuntu-how-to/install-munin/

View File

@ -27,7 +27,7 @@
### eCryptFS基础 ###
eCrypFS是一个基于FUSE的用户空间加密文件系统在Linux内核2.6.19及更高版本中可用作为encryptfs模块。eCryptFS加密的伪文件系统挂载到当前文件系统顶部。它可以很好地工作在EXT文件系统家族和其它文件系统如JFS、XFS、ReiserFS、Btrfs甚至是NFS/CIFS共享文件系统上。Ubuntu使用eCryptFS作为加密其家目录的默认方法ChromeOS也是。在eCryptFS底层默认使用的是AES算法但是它也支持其它算法如blowfish、des3、cast5、cast6。如果你是通过手工创建eCryptFS设置你可以选择其中一种算法。
eCrypFS是一个基于FUSE的用户空间加密文件系统在Linux内核2.6.19及更高版本中可用作为encryptfs模块。eCryptFS加密的伪文件系统挂载到当前文件系统顶部。它可以很好地工作在EXT文件系统家族和其它文件系统如JFS、XFS、ReiserFS、Btrfs甚至是NFS/CIFS共享文件系统上。Ubuntu使用eCryptFS作为加密其家目录的默认方法ChromeOS也是。在eCryptFS底层默认使用的是AES算法但是它也支持其它算法如blowfish、des3、cast5、cast6。如果你是通过手工创建eCryptFS设置你可以选择其中一种算法。
就像我所说的Ubuntu让我们在安装过程中选择是否加密/home目录。好吧这是使用eCryptFS的最简单的一种方法。
@ -63,13 +63,13 @@ Arch Linux
![](https://farm6.staticflickr.com/5608/15453440890_3b4be6d5a7_z.jpg)
它会要求你输入登录密码和挂载密码。登录密码和你常规登录的密码一样,而挂载密码用于派生一个文件加密主密钥。留空来生成一个,这样会更安全。登出然后重新登录。
它会要求你输入登录密码和挂载密码。登录密码和你常规登录的密码一样,而挂载密码用于派生一个文件加密主密钥。这里留空可以生成一个(复杂的),这样会更安全。登出然后重新登录。
你会注意到eCryptFS默认在你的家目录中创建了两个目录Private和.Private。~/.Private目录包含有加密的数据而你可以在~/Private目录中访问到相应的解密后的数据。在你登录时~/.Private目录会自动解密并映射到~/Private目录因此你可以访问它。当你登出时~/Private目录会自动卸载而~/Private目录中的内容会加密回到~/.Private目录。
eCryptFS怎么会知道你拥有~/.Private目录并自动将其解密到~/Private目录而不需要我们输入密码呢这就是eCryptFS的PAM模块捣的鬼它为我们提供了这项便利服务。
如果你不想~/Private目录在登录时自动挂载只需要在运行ecryptfs-setup-private工具时添加“--noautomount”选项。同样如果你不想要~/Private目录在登出后自动卸载也可以自动“--noautoumount”选项。但是那样后你需要自己手工挂载或卸载~/Private目录
如果你不想~/Private目录在登录时自动挂载只需要在运行ecryptfs-setup-private工具时添加“--noautomount”选项。同样如果你不想要~/Private目录在登出后自动卸载也可以添加“--noautoumount”选项。但是那样的话你需要自己手工挂载或卸载~/Private目录。
$ ecryptfs-mount-private ~/.Private ~/Private
$ ecryptfs-umount-private ~/Private
@ -94,7 +94,7 @@ via: http://xmodulo.com/encrypt-files-directories-ecryptfs-linux.html
作者:[Christopher Valerio][a]
译者:[GOLinux](https://github.com/GOLinux)
校对:[校对者ID](https://github.com/校对者ID)
校对:[wxy](https://github.com/wxy)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出

View File

@ -51,7 +51,7 @@ Shell 脚本 - 使用 if 语句进行条件检测
echo "Number is smaller"
fi
### If..elif..else..fi 语句 (Short for else if) ###
### If..elif..else..fi 语句 (简写的 else if) ###
Bourne Shell 的 if 语句语法中else 语句里的代码块会在 if 条件为假时执行。我们还可以将 if 语句嵌套到一起,来实现多重条件的检测。我们可以使用 elif 语句else if 的缩写)来构建多重条件的检测。
@ -94,7 +94,7 @@ Bourne Shell 的 if 语句语法中else 语句里的代码块会在 if 条件
If 和 else 语句可以在一个 bash 脚本里相互嵌套。关键词 “fi” 表示里层 if 语句的结束,所有 if 语句必须使用 关键词 “fi” 来结束。
基本 if 语句的 **嵌套语法**
基本 if 语句的**嵌套语法**
if [ 判断条件1 ]
then
@ -139,7 +139,7 @@ via: http://www.linuxtechi.com/shell-scripting-checking-conditions-with-if/
作者:[Pradeep Kumar][a]
译者:[ThomazL](https://github.com/ThomazL)
校对:[校对者ID](https://github.com/校对者ID)
校对:[wxy](https://github.com/wxy)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出

View File

@ -1,10 +1,10 @@
Linux中使用rsync——文件和目录排除列表
================================================================================
**rsync**是一个十分有用而且十分流行的linux工具。它用于备份和恢复文件也用于对比和同步文件。我们已经在前面的文章讲述了[Linux中rsync命令的使用实例][1]而今天我们将增加一些更为有用的rsync使用技巧。
**rsync**是一个十分有用而且十分流行的linux工具。它用于备份和恢复文件也用于对比和同步文件。我们已经在前面的文章讲述了[如何在Linux下使用rsync][1]而今天我们将增加一些更为有用的rsync使用技巧。
### 排除文件和目录列表 ###
有时候,当我们做大量同步的时候,我们可能想要从同步的文件和目录中排除一个文件和目录的列表。一般来说,像不能被同步的设备文件和某些系统文件,或者像临时文件或者缓存文件这类占据不必要磁盘空间的文件,这类文件时我们需要排除的。
有时候,当我们做大量同步的时候,我们可能想要从同步的文件和目录中排除一个文件和目录的列表。一般来说,像设备文件和某些系统文件,或者像临时文件或者缓存文件这类占据不必要磁盘空间的文件是不合适同步的,这类文件是我们需要排除的。
首先让我们创建一个名为“excluded”的文件当然你想取什么名都可以然后将我们想要排除的文件夹或文件写入该文件一行一个。在我们的例子中如果你想要对根分区进行完整的备份你应该排除一些在启动时创建的设备目录和放置临时文件的目录列表看起来像下面这样
@ -19,7 +19,8 @@ Linux中使用rsync——文件和目录排除列表
### 从命令行排除文件 ###
你也可以从命令行直接排除文件该方法在你要排除的文件数量较少并且在你想要将它写成脚本或加到crontab中又不想脚本或cron依赖于另外一个文件运行时十分有用。
For example if you wish to sync /var to a backup directory but you don't wish to include cache and tmp folder that usualy don't hold important content between restarts you can use the following command:
例如,如果你想要同步/var到一个备份目录但是你不想要包含cache和tmp这些通常不会有重要内容的文件夹你可以使用以下命令
$ sudo rsync -aAXhv --exclude={"/var/cache","/var/tmp"} /var /home/adrian/var
@ -34,9 +35,9 @@ via: http://linoxide.com/linux-command/exclude-files-rsync-examples/
作者:[Adrian Dinu][a]
译者:[GOLinux](https://github.com/GOLinux)
校对:[校对者ID](https://github.com/校对者ID)
校对:[wxy](https://github.com/wxy)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://linoxide.com/author/adriand/
[1]:http://linoxide.com/how-tos/rsync-copy/
[1]:http://linux.cn/article-4503-1.html

View File

@ -1,25 +1,25 @@
Pitivi 发布 0.94 版本,使用 GTK HeaderBar修复无数 Bugs
Pitivi 0.94 切换到 GTK HeaderBar修复无数 Bugs
=====================================
** 我是 [Pitivi 视频编辑器][1] 的狂热爱好者。Pitivi 可能不是至少现在不是Linux 上可用的最拉风的,功能完善的非线性视频编辑器,但是它绝对是最可靠的一个。 **
** 我是 [Pitivi 视频编辑器][1] 的狂热爱好者。Pitivi 可能不是至少现在不是Linux 上可用的、最拉风的、功能完善的、非线性视频编辑器,但是它绝对是最可靠的一个。 **
![](http://www.omgubuntu.co.uk/wp-content/uploads/2014/11/pitivi-tile.jpg)
自然而然地,我一直在期待这个开源视频编辑器在 [这周末][2] 发布的新的 beta 测试版。
自然而然地,我一直在期待这个开源视频编辑器[这次][2]发布的新的 beta 测试版。
Pitivi 0.94 是基于新的 “GStreamer Editing Service”GES的第四个发行版本。
开发组成员 Jean-François Fortin Tam,称号 “Nekohayo” 将本次升级描述为 “** ...主要作为一个维护版本发布,但是除了对 Bug 的修复之外,还是增加了几个有意思的改进和功能。 **
开发组成员 Jean-François Fortin Tam“Nekohayo”将本次升级描述为 “**...主要作为一个维护版本发布,但是除了对 Bug 的修复之外,还是增加了几个有意思的改进和功能。**”
## 有什么新改进? ##
### 有什么新改进? ###
有不少有意思的改进!作为 Pitivi 0.94 版本中最明显的变化Pitivi 添加了如同 GNOME 客户端一般的 GTK HeaderBar 装饰。HeaderBar 整合了桌面窗口栏,标题栏以及工具栏,节省了大块浪费的垂直以及水平的占用空间。
“*当你用过一次后,你就在也回不来了,*” Fortin Tam 介绍说。欣赏一下下面这张截图,你肯定会同意的。
“*当你用过一次后,你就再也不会走了*” Fortin Tam 介绍说。欣赏一下下面这张截图,你肯定会同意的。
![Pitivi now uses GTK HeaderBar and menu button (image: Nekohayo)](http://www.omgubuntu.co.uk/wp-content/uploads/2014/11/pitivi-0.94-headerbar.jpeg)
Pitivi 现在使用了 GTK HeaderBar 以及菜单键。image: Nekohayo
*Pitivi 现在使用了 GTK HeaderBar 以及菜单键。image: Nekohayo*
那么应用菜单又怎么样呢?别担心,应用菜单遵循了 GNOME 交互界面的标准,看一下自己机器上的应用菜单确认一下吧。
@ -49,13 +49,11 @@ Pitivi 现在使用了 GTK HeaderBar 以及菜单键。image: Nekohayo
上面这些信息听起来都很不错吧?下一次更新会更好!这不只是一个通常的来自开发者的夸张,如同 Jean François 解释的一般:
> “下一次更新0.95)会运行在难以置信的强大的后端上。感谢 Mathieu [Duponchelle] 和 Thibault [Saunier] 在用 NLE新的为了 GES 的非线性引擎)替代 GNonLin 并修复问题等工作中做出的努力。”
> “下一次更新0.95)会运行在令人难以置信的强大的后端上。感谢 Mathieu [Duponchelle] 和 Thibault [Saunier] 在用 NLE新的为了 GES 的非线性引擎)替代 GNonLin 并修复问题等工作中做出的努力。”
Ubuntu 14.10 带有老的(更容易崩溃)的软件中心,进入 Pitivi 官网¹下载 [安装包][5] 来体验最新杰作。
Ubuntu 14.10 带有老的(更容易崩溃)的软件中心,进入 Pitivi 官网下载 [安装包][5] 来体验最新杰作。
** Pitivi 基金会酬了将近 €20,000使我们能够向着约定的 1.0 版本迈出一大步。如果你也想早点看到 1.0 版本的到来的话,省下你在星巴克买的格郎德香草奶油咖啡,捐赠我们! **
*¹目前 0.94 安装包还没发布,你可以下载 nightly tar*
**Pitivi 基金会筹了将近 €20,000使我们能够向着约定的 1.0 版本迈出一大步。如果你也想早点看到 1.0 版本的到来的话,省下你在星巴克买的格郎德香草奶油咖啡,捐赠我们!**
--------------------------------------------------------------------------------
@ -64,7 +62,7 @@ via: http://www.omgubuntu.co.uk/2014/11/pitivi-0-94-header-bar-more-features
作者:[Joey-Elijah Sneddon][a]
译者:[ThomazL](https://github.com/ThomazL)
校对:[校对者ID](https://github.com/校对者ID)
校对:[wxy](https://github.com/wxy)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出

View File

@ -1,45 +1,36 @@
如何从Ubuntu的声音菜单中移除音乐播放器
================================================================================
![](http://www.omgubuntu.co.uk/wp-content/uploads/2014/11/music-players.jpg)
**自从2010年首次出现Ubuntu 的声音菜单已经被证明是Unity 桌面上的最流行的独有特性之一。**
**自从2010年的介绍一来Ubuntu声音菜单已经被证明是最流行和个性的统一桌面之一.**
把音乐播放器与音量控制程序集成到一个标准的界面里是一种看起来很聪明的做法,这样就不用到处找声音相关的各种程序。人们不禁要问,为什么其它操作系统没有效仿这种做法!
随着音乐播放器与音量程序合成小体积的应用程序-即集成,其中一个希望找到与声音相关的蠢事-通过标准接口的灵感。人们不禁要问,为什么其它操作系统没有效仿这种做法!
#### 冗长的 ####
尽管它看起来很方便但是这个小应用当前存在一个问题相当多的东西集在一起看起来想一个MP3,是否真正的把想要的东西都放在里面了。虽然有用,但是一个无所不再的应用程序清单已经安装了,这让一些不经常适用的人看着很累赘和反感。
我将要打赌上面的截图看起来一定很熟悉,你们中的很多人一定阅读过吧!不要害怕,**dconf-editor **就在这里。
#### 臃肿 ####
尽管它看起来很方便,但是这个小应用当前存在一个问题:很多播放器都堆在一起,像一个组合音响一样。也许你用得着,但是你安装的所有的媒体播放器都挤在这里,这会让人看着很累赘和反感。
我将要打赌,当你读到这里时,一定发现上面的截图看起来很熟悉!不要担心,**dconf-editor**可以解决它。
### 从Ubuntu 声音菜单中移除播放器 ###
#### 第一部分: 基础知识 ####
最快速和最简单地从声音菜单中移除播放器的方法就是卸载相关的应用程序。但这是极端的方式,我的意思是指你也许想要保留应用程序,但是不需要它集成。
最快速和最简单地从声音菜单中移除播放器的方法就是卸载相关的应用程序。但这是极端的方式,我的意思是指你也许想要保留应用程序,但是不需要它集成到菜单里面
只删除播放器但是保留我们需要的应用程序我们用到一个看起来令人惊讶的工具叫“dconf-editor”.
只删除播放器但是保留我们需要的应用程序我们用到一个看起来令人惊讶的工具叫“dconf-editor”
你可能已经安装了如果没有安装的话那么你从Ubuntu软件中心找出。
- [在Ubuntu中点击安装Dconf-Editor][1]
一旦安装完毕找到Unity Dash并打开。打开的时候不要惊慌你不会再回到2002年了它确实是这样子的。
一旦安装完毕找到Unity Dash并打开。打开的时候不要惊慌你没有到2002年它确实是这种古老的样子。
使用右侧菜单栏,你需要导航到 com > canonical > indicator > sound。下面的面板将会出现。
![](http://www.omgubuntu.co.uk/wp-content/uploads/2014/11/dconf-editor.jpg)
双击靠近interested-media-players的比括号并删除你希望从声音菜单里移除掉的播放器但需要保留在方括号中且不要删除任何你想保留逗号或者撇号。
双击“interested-media-players”旁的闭括号并删除你希望从声音菜单里移除掉的播放器但需要保留方括号中且不要删除任何需要保留的逗号或者单引号。
举个例子,我移除掉这些
@ -55,9 +46,9 @@
#### 第二部分:黑名单 ####
等等还不能关闭dconf-editor。尽管上面的步骤看起来把事情处理得干净利落但是一些播放器在打开时会立即重新加载到声音菜单。为了避免重复这个过程将它们添加到**媒体播放器黑名单**中。
等等还不能关闭dconf-editor。尽管上面的步骤看起来把事情处理得干净利落但是一些播放器在打开时会立即重新加载到声音菜单。为了避免重复这个过程将它们添加到**blacklisted-media-player**中。
记得每个在括号里的播放器都用逗号分隔多个条目。他们也必须在方括号内,所以在退出之前请务必仔细检查。
记得每个在括号里的播放器都用逗号分隔多个条目。他们也必须在方括号内,所以在退出之前请务必仔细检查。
最终结果如下:
@ -69,7 +60,7 @@ via: http://www.omgubuntu.co.uk/2014/11/remove-players-ubuntu-sound-menu
作者:[Joey-Elijah Sneddon][a]
译者:[disylee](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
校对:[wxy](https://github.com/wxy)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出

View File

@ -1,4 +1,4 @@
运行级别与服务管理命令systemd简介
systemd的运行级别与服务管理命令简介
================================================================================
![](http://www.linux.com/images/stories/41373/Linux_kernel_unified_hierarchy_cgroups_and_systemd.svg.png)
@ -6,20 +6,21 @@
在开始介绍systemd命令前让我们先简单的回顾一下历史。在Linux世界里有一个很奇怪的现象一方面Linux和自由软件FOSS在不断的向前推进另一方面人们对这些变化却不断的抱怨。这就是为什么我要在此稍稍提及那些反对systemd所引起的争论的原因因为我依然记得历史上有不少类似的争论
- 软件包Pacakge是邪恶的因为真的Linux用户会从源码构建他所想要的的一切并严格的管理系统中安装的软件。
- 解析依赖关系的包管理器是邪恶的,真的Linux用户会手动解决这些该死的依赖关系。
- 软件包Pacakge是邪恶的因为真的Linux用户会从源码构建他所想要的的一切并严格的管理系统中安装的软件。
- 解析依赖关系的包管理器是邪恶的,真的Linux用户会手动解决这些该死的依赖关系。
- apt-get总能把事情干好所以只有Yum是邪恶的。
- Red Hat简直就是Linux中的微软。
- 好样的Ubuntu
- 滚蛋吧Ubuntu
诸如此类...就像我之前常常说的一样变化总是让人沮丧。这些该死的变化搅乱了我的工作流程这可不是一件小事情任何业务流程的中断都会直接影响到生产力。但是我们现在还处于计算机发展的婴儿期在未来的很长的一段时间内将会持续有快速的变化和发展。想必大家应该都认识一些因循守旧的人在他们的心里商品一旦买回家以后就是恒久不变的就像是买了一把扳手、一套家具或是一个粉红色的火烈鸟草坪装饰品。就是这些人仍然在坚持使用Windows Vista甚至还有人在使用运行Windows95的老破烂机器和CRT显示器。他们不能理解为什么要去换一台新机器。老的还能用啊不是么
诸如此类...就像我之前常常说的一样变化总是让人沮丧。这些该死的变化搅乱了我的工作流程这可不是一件小事情任何业务流程的中断都会直接影响到生产力。但是我们现在还处于计算机发展的婴儿期在未来的很长的一段时间内将会持续有快速的变化和发展。想必大家应该都认识一些因循守旧的人在他们的心里商品一旦买回家以后就是恒久不变的就像是买了一把扳手、一套家具或是一个粉红色的火烈鸟草坪装饰品。就是这些人仍然在坚持使用Windows Vista甚至还有人在使用运行Windows 95的老破烂机器和CRT显示器。他们不能理解为什么要去换一台新机器。老的还能用啊不是么
这让我回忆起了我在维护老电脑上的一项伟大的成就那台破电脑真的早就该淘汰掉。从前我有个朋友有一台286的老机器安装了一个极其老的MS-DOS版本。她使用这台电脑来处理一些简单的任务比如说约会、日记、记账等我还用BASIC给她写了一个简单的记账软件。她不用关注任何安全更新是这样么因为它压根都没有联网。所以我会时不时给她维修一下电脑更换电阻、电容、电源或者是CMOS电池什么的。它竟然还一直能用。它那袖珍的琥珀CRT显示器变得越来越暗在使用了20多年后终于退出了历史舞台。现在我的这位朋友换了一台运行Linux的老Thinkpad来干同样的活。
前面的话题有点偏题了下面抓紧时间开始介绍systemd。
###运行级别 vs. 状态###
SysVInit使用静态的运行级别来构建不同的启动状态大部分发布版本中提供了以下5个运行级别
- 单用户模式Single-user mode
@ -28,7 +29,7 @@ SysVInit使用静态的运行级别来构建不同的启动状态大部分发
- 系统关机System shutdown
- 系统重启System reboot
对于我来说,使用多个运行级别并没有太大的好处,但它们却一直在系统中存在着。 不同于运行级别systemd可以创建不同的状态状态提供了灵活的机制来设置启动时的配置项。这些状态是由多个unit文件组成的状态又叫做启动目标target。启动目标有一个漂亮的描述性命名而不是像运行级别那样使用数字。unit文件可以控制服务、设备、套接字和挂载点。参考/usr/lib/systemd/system/graphical.target这是CentOS 7默认的启动目标
对于我来说,使用多个运行级别并没有太大的好处,但它们却一直在系统中存在着。 不同于运行级别systemd可以创建不同的状态状态提供了灵活的机制来设置启动时的配置项。这些状态是由多个unit文件组成的状态又叫做启动目标target。启动目标有一个清晰的描述性命名而不是像运行级别那样使用数字。unit文件可以控制服务、设备、套接字和挂载点。参考/usr/lib/systemd/system/graphical.target这是CentOS 7默认的启动目标
[Unit]
Description=Graphical Interface
@ -71,15 +72,16 @@ SysVInit使用静态的运行级别来构建不同的启动状态大部分发
DIR_SUFFIX="${APACHE_CONFDIR##/etc/apache2-}"
else
DIR_SUFFIX=
整个文件一共有410行。
你可以检查unit件的依赖关系我常常被这些复杂的依赖关系给吓到
你可以检查unit件的依赖关系,我常常被这些复杂的依赖关系给吓到:
$ systemctl list-dependencies httpd.service
### cgroups ###
cgroups或者叫控制组在Linux内核里已经出现好几年了但直到systemd的出现才被真正使用起来。[The kernel documentation][1]中是这样描述cgroups的“控制组提供层次化的机制来管理任务组使用它可以聚合和拆分任务组并管理任务组后续产生的子任务。”换句话说它提供了多种有效的方式来控制、限制和分配资源。systemd使用了cgroups你可以便捷查看它使用下面的命令可以展示你系统中的整个cgroup树
cgroups或者叫控制组在Linux内核里已经出现好几年了但直到systemd的出现才被真正使用起来。[The kernel documentation][1]中是这样描述cgroups的“控制组提供层次化的机制来管理任务组使用它可以聚合和拆分任务组并管理任务组后续产生的子任务。”换句话说它提供了多种有效的方式来控制、限制和分配资源。systemd使用了cgroups你可以便捷查看它使用下面的命令可以展示你系统中的整个cgroup树
$ systemd-cgls
@ -115,7 +117,7 @@ via: http://www.linux.com/learn/tutorials/794615-systemd-runlevels-and-service-m
作者:[Carla Schroder][a]
译者:[coloka](https://github.com/coloka)
校对:[校对者ID](https://github.com/校对者ID)
校对:[wxy](https://github.com/wxy)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出

View File

@ -1,10 +1,10 @@
5最佳开源的浏览器安全应用
5最佳开源的浏览器安全应用
================================================================================
浏览器是现在各种在线服务的入口。电脑安全问题迄今仍未得到解决,技术进步为恶意软件提供了新的途径,感染我们的设备入侵商业网络。例如,智能手机与平板为恶意软件--及其同伙“[恶意广告][1]”--带来一片全新天地,它们在其中腾挪作乱。
浏览器是现在各种在线服务的入口。电脑安全问题迄今仍未得到解决,技术进步为恶意软件提供了新的途径,感染我们的设备入侵商业网络。例如,智能手机与平板为恶意软件--及其同伙“[恶意广告][1]”--带来一片全新天地,它们在其中腾挪作乱。
恶意广告在合法广告与合法网络中注入恶意软件。当然你可能会认为“合法”广告与网络与非法广告与网络之间仅有一线之隔。但是请不要偏题哦。隐私与安全天生就是一对兄弟,保护隐私也就是保护你的安全。
Firefox, Chrome, 以及 Opera当仁不让属最棒的浏览器性能最佳、兼容性最好、以及安全性最优。以下五个开源安全应用安装于浏览器后会助你抵御种种威胁。
Firefox, Chrome, 以及 Opera 当仁不让属最棒的浏览器:性能最佳、兼容性最好、以及安全性最优。以下五个开源安全应用安装于浏览器后会助你抵御种种威胁。
### 保护隐私: 开源浏览器安全应用 ###
@ -12,11 +12,11 @@ Firefox, Chrome, 以及 Opera当仁不让属最棒的浏览器性能最佳、
广告网络为恶意软件提供了肥沃的土壤。一个广告网络可以覆盖数千站点因此攻陷一个广告网络就相当于攻陷数千台机器。AdBlock及其衍生品—[AdBlock Plus][2], [AdBlock Pro][3], 与 [AdBlock Edge][4]--都是屏蔽广告的优秀工具,可以让那些充斥烦人广告的网站重新还你一片清静。
当然,凡事都有两面性:上述做法损害了依靠广告收入的站点的利益。这些工具一键式白名单功能,对于那些你希望支持的网站,你可以通过白名单功能关闭这些网站的广告屏蔽。(真的,我亲爱的站长们,如果你不希望网站访问者屏蔽你的广告,那么就适可而止,不要让人反感。)
当然,凡事都有两面性:上述做法损害了依靠广告收入的站点的利益。这些工具一键式白名单功能,对于那些你希望支持的网站,你可以通过白名单功能关闭这些网站的广告屏蔽。(真的,我亲爱的站长们,如果你不希望网站访问者屏蔽你的广告,那么就适可而止,不要让人反感。当然,作为粉丝,也请您支持您喜爱的站点,将它们放到白名单吧。
![](http://www.smallbusinesscomputing.com/imagesvr_ce/5731/fig-1-easylist_1.jpg)
图1:在Ad Blocker中添加其它过滤规则。
*图1:在Ad Blocker中添加其它过滤规则。*
Ad Blocker们不仅能屏蔽广告它们还能屏蔽网站跟踪爬虫与恶意域名。要打开额外过滤规则点击ad blocker图标 > 点击**首选项**,转至**过滤规则订阅**标签。点击按纽**添加订阅过滤规则**,然后加入**Easy Privacy + EasyList**规则。加入恶意域名过滤也是个不错的选择它会屏蔽那些供恶意软件与间谍软件寄生的域名。Adblock可在Firefox, Chrome, Opera, Safari, IE, 以及Android平台下工作。
@ -24,7 +24,7 @@ Ad Blocker们不仅能屏蔽广告它们还能屏蔽网站跟踪爬虫与恶
浏览器扩展HTTPS Everywhere可确保在网站HTTPS可用的时候总是以HTTPS方式连接到站点。HTTPS意味着你的连接是以SSL安全套接层方式加密的SSL协议通常用于加密网站与电子邮件连接。HTTPS Everywhere可在Firefox, Chrome, 及Opera下使用。
安装了HTTPS Everywhere之后它会询问你是否希望启用SSL检测程序。点击是因为SSL检测程序会提供额外保护防止中间人攻击与虚假SSL证书攻击。HTTPS Everywhere可在Firefox, Chrome, Opera, Safari, IE, 以及Android平台下工作。
安装了HTTPS Everywhere之后它会询问你是否希望启用SSL检测程序。点击因为SSL检测程序会提供额外保护防止中间人攻击与虚假SSL证书攻击。HTTPS Everywhere可在Firefox, Chrome, Opera, Safari, IE, 以及Android平台下工作。
#### 3. [Social Fixer][6] ####
@ -37,7 +37,9 @@ Social Fixer本身不是安全工具但它具有两个重要的安全特性
![](http://www.smallbusinesscomputing.com/imagesvr_ce/2858/fig-2-socialfixer_1.jpg)
图2: 使用Social Fixer匿名化Facebook网面。
*图2: 使用Social Fixer匿名化Facebook网面。*
LCTT 译注:好吧,这个应用和我等无关~~
#### 4. [Privacy Badger][7] ####
@ -47,7 +49,7 @@ AdBlock也能拦截这些乌七八糟的东西不过Privacy Badger在此方
![](http://www.smallbusinesscomputing.com/imagesvr_ce/9256/fig-3-privacybadger_1.jpg)
图3: Privacy Badger拦截跟踪站点。
*图3: Privacy Badger拦截跟踪站点。*
Privacy Badger装好后就能使用了。点击图标看看它对你浏览的网页都拦截了哪些东西。你可以试试访问Huffingtonpost.com这是一家不在每一个页面塞满第三方组件誓不罢休的网站图3
@ -63,15 +65,15 @@ Disconnect还有安全搜索功能可以阻止搜索引擎爱挖数据的癖
想象一下,网页上所有东西都腾空而出,奔你而去。当然这一切都是抽象的且在幕后悄然发生,不象有人正在猛击窗户试图进入你家那么明显罢了。但是,威胁倒是实实在在的,而且数不胜数,所以你必须采取预防措施,来保护自己。
Carla Schroder著有The Book of Audacity, Linux Cookbook, Linux Networking Cookbook等书并撰写了上百篇Linux指南文章。她曾担任Linux Planet与Linux Today网站总编。
本文作者 Carla Schroder 著有The Book of Audacity, Linux Cookbook, Linux Networking Cookbook等书并撰写了上百篇Linux指南文章。她曾担任Linux Planet与Linux Today网站总编。
--------------------------------------------------------------------------------
via: http://www.smallbusinesscomputing.com/biztools/5-best-open-source-web-browser-security-apps.html
作者:[Carla Schroder][a]
译者:[译者ID](https://github.com/yupmoon)
校对:[校对者ID](https://github.com/校对者ID)
译者:[yupmoon](https://github.com/yupmoon)
校对:[wxy](https://github.com/wxy)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出

View File

@ -1,14 +1,16 @@
图形化显示Linux内存使用情况
使用 smem 可视化显示Linux内存使用情况
================================================================================
物理内存不足对Linux桌面系统和服务器系统的性能影响都很大。当你的电脑变慢时,要做的第一件事就是释放内存。尤其是在多用户环境以及执行关键任务的服务器环境下,内存消耗会变得更加关键,因为多个用户和应用线程会同时争更多的内存空间。
物理内存不足对Linux桌面系统和服务器系统的性能影响都很大。当你的计算机变慢时,要做的第一件事就是释放内存。尤其是在多用户环境以及执行关键任务的服务器环境下,内存消耗会变得更加关键,因为多个用户和应用线程会同时争夺更多的内存空间。
如果要监测系统内各种资源的使用情况比如说CPU或内存图形化显示是一种高效的方法通过图形界面可以快速分析各用户和进程的资源消耗情况。本教程将给大家介绍**在linux下图形化分析内存使用情况**的方法,使用到命令行工具是[smem][1].
### 物理内存使用情况: RSS vs. PSS vs. USS ###
### 物理内存使用情况: RSS 、 PSS 和 USS ###
由于Linux使用到了虚拟内存virtual memory因此要准确的计算一个进程实际使用的物理内存就不是那么简单。 只知道进程的虚拟内存大小也并没有太大的用处,因为还是无法获取到实际分配的物理内存大小。
**RSSResident set size**使用top命令可以查询到是最常用的内存指标表示进程占用的物理内存大小。但是将各进程的RSS值相加通常会超出整个系统的内存消耗这是因为RSS中包含了各进程间共享的内存。**PSSProportional set size**会更准确一些,它将共享内存的大小进行平均后,再分摊到各进程上去。**USS(Unique set size )**是PSS的自己它只计算了进程独自占用的内存大小不包含任何共享的部分。
- **RSSResident set size**使用top命令可以查询到是最常用的内存指标表示进程占用的物理内存大小。但是将各进程的RSS值相加通常会超出整个系统的内存消耗这是因为RSS中包含了各进程间共享的内存。
- **PSSProportional set size**会更准确一些,它将共享内存的大小进行平均后,再分摊到各进程上去。
- **USS(Unique set size )**是PSS中自己的部分它只计算了进程独自占用的内存大小不包含任何共享的部分。
### 安装Smem ###
@ -20,7 +22,7 @@ smem是一个能够生成多种内存耗用报告的命令行工具它从/pro
#### 在Fedora 或 CentOS/RHEL上安装Smem ####
在CentOS/RHEL上你首先得[使能][2]EPEL仓
在CentOS/RHEL上你首先得[启用][2]EPEL仓库
$ sudo yum install smem python-matplotlib
@ -44,17 +46,17 @@ smem是一个能够生成多种内存耗用报告的命令行工具它从/pro
![](https://farm9.staticflickr.com/8543/15798375491_510698d98f_z.jpg)
smem提供了以下选项来对输出结果进行筛选支持按映射方式mapping,进程和用户三个维度的筛选:
smem提供了以下选项来对输出结果进行筛选支持按映射方式mapping进程和用户三个维度的筛选:
- -M <mapping-filtering-regular-expression>
- -P <process-filtering-regular-expression>
- -U <user-filtering-regular-expression>
- -M <正则表达式>
- -P <正则表达式>
- -U <正则表达式>
想了解smem更多的使用方式可以查询用户手册man page
### 使用smem图形化显示内存使用情况 ###
图形化的报告使用起来会更加方便快捷。smem支持支持两种格式的图形显示方式:直方图和饼图。
图形化的报告使用起来会更加方便快捷。smem支持两种格式的图形显示方式直方图和饼图。
下面是一些图形化显示的实例。
@ -78,7 +80,7 @@ via: http://xmodulo.com/visualize-memory-usage-linux.html
作者:[Dan Nanni][a]
译者:[coloka](https://github.com/coloka)
校对:[校对者ID](https://github.com/校对者ID)
校对:[wxy](https://github.com/wxy)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出

View File

@ -1,4 +1,4 @@
Postfix提示和故障排除命令
Postfix 技巧和故障排除命令
================================================================================
这里是一些我每天用的命令当然其他的email管理员也会使用因此我写下来以防我忘记。
@ -16,7 +16,7 @@ Postfix提示和故障排除命令
# postqueue -f
立即交付所有某domain.com域名的所有邮件
立即投递某domain.com域名的所有邮件
# postqueue -s domain.com
@ -39,7 +39,7 @@ Postfix提示和故障排除命令
你也可以查看下面的连接这个连接有很多例子和不错的可用的解释文档可以用来配置postfix.
[Postfix Configuration - ][1]
[Postfix Configuration][1]
--------------------------------------------------------------------------------
@ -47,7 +47,7 @@ via: http://techarena51.com/index.php/postfix-configuration-and-explanation-of-p
作者:[Leo G][a]
译者:[Vic020](http://www.vicyu.net)
校对:[校对者ID](https://github.com/校对者ID)
校对:[wxy](https://github.com/wxy)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出

View File

@ -0,0 +1,163 @@
一些关于Java的句子
================================================================================
本文并没有什么新鲜的。我只是收集了一些不太重要的语句,但这些语句可能对初级程序员来说很重要。也就是些无聊的旧东西。
如果以下的这些你都知道的话那么你对Java的了解已经超过了一个普通的家庭主妇。我不清楚知道所有的这些是否有意义。即使不知道其中的一些特性你照样也可以成为一个相当不错的Java程序员。然而本文中许多的新信息可能表明你还有很大的发展空间。
### Java中有四种不同的访问类型(而不是三种) ###
这四种类型包括:`private`、package private包访问权限无修饰符又叫default译者注、`protected` 和 `public`。如果你在类中定义一个元素时并不加任何访问类型修饰符它将被默认设置为包访问权限package private而不是`public`或者`protected`。
![Java中有四种级别的访问类型](http://a3ab771892fd198a96736e50.javacodegeeks.netdna-cdn.com/wp-content/uploads/2014/11/four-levels-of-protection.png)
*Java有四个级别的访问类型。*
从另一方面来说,如果在接口中,你不指定方法的访问修饰符,那么它将是`public`类型的。你也可以显式地指定它为`public`类型, 但这并不符合SONAR一个开源代码质量管理平台译者注的代码质量管理思想。
![访问类型是传递的](http://a3ab771892fd198a96736e50.javacodegeeks.netdna-cdn.com/wp-content/uploads/2014/11/protection-is-transitive.png)
*访问类型是传递的*
> 我的“在Java中允许选择性的在接口的方法中写`public`”的观点是一个技术错误。
同样你也可在接口的字段前写`final`,甚至是`static`。这说明这些字段可以是非静态或非final吗不是的接口中的字段总是final和static的。
### Protected和package private是不一样的 ###
Package private或者default访问类型可以使得相同包(package)下其他类能够访问这些字段或方法。保护类型(`protected`)的方法和字段可以被相同包下的类使用(这和package private是一样的),同时它也可以被其他类使用,只要那个类继承了这个包含这些`protected`方法或字段的类。
### Protected是可传递的 ###
如果有三个包a、b、c每个包都分别包含A、B、C类而且B继承AC继承B那么C可以访问A中的protected字段和方法。
package a;
public class A {
protected void a() {
}
}
package b;
import a.A;
public class B extends A {
protected void b() {
a();
}
}
package c;
import b.B;
public class C extends B {
protected void c() {
a();
}
}
### 接口不能定义protected方法 ###
很多人认为可以在接口中定义`protected`方法。如果你这么做的话,编译器很快就会毫不留情地给你报错。顺便说下,这也就是我为什么认为允许`public`关键字在接口中是一个技术错误,它会让人觉得还可以写其他访问类型似的。
![Private is the new public](http://a3ab771892fd198a96736e50.javacodegeeks.netdna-cdn.com/wp-content/uploads/2014/11/private-is-the-new-public.png)
*private是一种新的public*
如果你还想在一个接口中声明protected方法,你可能还不理解封装的含义。
### 此private非彼private ###
私有变量和方法在编译单元内是可见的。如果这听起来太神秘的话换种说法几乎就是在同一个Java文件中。这比“在它们被定义的类中”听起来好理解些。它们在同一编译单元的类和接口中也是可见的。嵌套类可以看到类中封装的私有字段和方法。然而当前封闭类也可以看到该类下任何深度下类中的私有方法和字段。
package a;
class Private {
private class PrivateInPrivate {
private Object object;
}
Object m() {
return new PrivateInPrivate().object;
}
}
后者并不广为人知,事实上也很少有用到。
### Private是类的访问级别而不是对象 ###
如果你可以访问一个变量或方法,那么不管它属于哪个对象你都可以访问它。如果`this.a`可以访问到,那`another.a`也可以访问到,只要它们是同一个类的实例。同一个类的实例对象可以随意调用其他实例的变量或方法。不过这样的代码一般都没有意义。现实中的一个例外是`equals()`(由Eclipse生成 15 - 18行)
package a;
public class PrivateIsClass {
private Object object;
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
PrivateIsClass other = (PrivateIsClass) obj;
if (object == null) {
if (other.object != null)
return false;
} else if (!object.equals(other.object))
return false;
return true;
}
}
### 静态(static)类可能有很多实例 ###
![Protection is not object level. It is class level.](http://a3ab771892fd198a96736e50.javacodegeeks.netdna-cdn.com/wp-content/uploads/2014/11/protection-is-class-feature.png)
*访问类型不是对象级别的而是类级别的。*
那些不希望有任何实例的类通常被称为实用工具类。它们只包含静态字段和静态方法以及唯一一个不被该类的任何静态方法调用的私有构造函数。在Java 8中这样的“怪物”也可以用接口来实现因为Java 8的接口可以有静态方法。我不觉得我们应该用这个特性来替代实用工具类。我也不完全确信我们应该使用实用工具类。
静态类总是定义在另一个类或接口中,它们是嵌套类。它们是静态的,就像静态方法不能访问类的实例方法和字段一样,静态嵌套类也不能访问嵌入类的实例方法和字段。这是因为静态嵌套类没有嵌入类实例的引用(或者说是指针,如果你喜欢这么叫的话)。而内部类(也即非静态嵌套类,译者注)则不同,没有嵌入类的一个实例,它是无法被创建的。每个内部类的实例都具有嵌入类实例的一个引用,因此一个内部类可以访问嵌入类的实例方法和字段。
因为这个原因,要是没有外部类的一个实例,你就不能创建一个内部类。当然,如果是当前对象,也就是`this`的话,你就可以不需要指定它。在这种情况下你可以直接使用`new`,它是`this.new`的简式。在一个静态的环境中例如从一个静态方法你必须指定内部类应该创建哪个封闭类的实例。见第10行
package a;
class Nesting {
static class Nested {}
class Inner {}
void method(){
Inner inner = new Inner();
}
static void staticMethod(){
Inner inner = new Nesting().new Inner();
}
}
### 匿名类只能访问final变量 ###
![Variable has to be effective final](http://a3ab771892fd198a96736e50.javacodegeeks.netdna-cdn.com/wp-content/uploads/2014/11/effective-final.png)
*变量必须是有效的final*
当一个匿名类被定义在一个方法中时,如果局部变量是`final`的,它就可以访问这些局部变量。但这样说有点模糊。过去这些变量必须显式声明成final而在Java 8中它们只需是有效effectivefinal即可。也就是说你不需要声明这些变量为`final`型,但它们仍然必须是有效的`final`。
![Java 8 does not require final, only effective final](http://a3ab771892fd198a96736e50.javacodegeeks.netdna-cdn.com/wp-content/uploads/2014/11/java_ee_-_javabeantester_src_main_java_com_javax0_jbt_blog_java_-_eclipse_-__users_verhasp_github_javax_blog.png)
*Java 8并不要求`final`只要求有效final。*
为什么你需要对一些东西声明`final`,当它被检查必须是这样的。就像方法的参数。它们也必须是`final`的。你说这不是Java所必须的吗?嗯,你是对的。这只是一个良好的编程风格所必须的。
--------------------------------------------------------------------------------
via: http://www.javacodegeeks.com/2014/11/some-sentences-about-java.html
作者:[Peter Verhas][a]
译者:[a598799539](https://github.com/a598799539)
校对:[wxy](https://github.com/wxy)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://www.javacodegeeks.com/author/peter-verhas/

View File

@ -1,35 +1,23 @@
如何不使用DBCA在Oracle 11中删除数据库
================================================================================
本文简短的教程将会向你展示如何不使用DBCA数据库配置助手在Oracle 11中删除数据
本文简短的教程将会向你展示如何不使用DBCA数据库配置助手在Oracle 11中删除数据库。
#### 1- 导入数据库的SID如果没有定义的话 ####
命令:
export ORACLE_SID=database
#### 2- 以操作系统认证连接数据库 ####
命令:
[oracle@Oracle11 ~]$ sqlplus / as sysdba
提示:
----------
SQL*Plus: Release 11.2.0.1.0 Production on Mon Dec 1 17:38:02 2014
----------
Copyright (c) 1982, 2009, Oracle. All rights reserved.
----------
Connected to an idle instance.
#### 3- 启动数据库实例 ####
命令:
SQL> startup
提示:
ORACLE instance started.
Total System Global Area 3340451840 bytes
Fixed Size 2217952 bytes
@ -41,22 +29,18 @@
#### 4- 关闭数据库 ####
命令:
SQL> shutdown immediate;
提示:
Database closed.
Database dismounted.
ORACLE instance shut down.
#### 5- 启动独占模式 ####
命令:
SQL> startup mount exclusive restrict
提示:
ORACLE instance started.
----------
Total System Global Area 3340451840 bytes
Fixed Size 2217952 bytes
Variable Size 1828718624 bytes
@ -66,19 +50,14 @@
#### 6- 删除数据库 ####
命令:
SQL> drop database;
提示:
----------
Database dropped.
----------
Disconnected from Oracle Database 11g Enterprise Edition Release 11.2.0.1.0 - 64bit Production
With the Partitioning, OLAP, Data Mining and Real Application Testing options
SQL>
完成!
@ -88,7 +67,7 @@ via: http://www.unixmen.com/drop-database-oracle-11-without-using-dcba/
作者:[M.el Khamlichi][a]
译者:[VicYu/Vic020](http://vicyu.net/)
校对:[校对者ID](https://github.com/校对者ID)
校对:[wxy](https://github.com/wxy)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出

View File

@ -0,0 +1,548 @@
Linux Journal杂志2014读者选择奖
================================================================================
又到了Linux Journal杂志刊发2014读者选择奖的时候了鉴于去年的形式好评如潮因此我们仍沿续旧年格式让你的意见再次得到回响。虽然有些地方我们会稍加评论不过基本上还是以报道结果为主。以下敬请欣赏本年度读者选择奖名单
我们希望读者选择奖一年好似一年。如果你对新分类有任何建议,或者有任何评价与反馈,都可以通过以下方式联系我们:[http://www.linuxjournal.com/contact][1]
如欲了解完整获奖名单请查阅本杂志2014年12月刊。
### 最佳Linux发行版 ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f1.jpg)
虽然本年度基于Debian/Ubuntu的发行版获得最多票数但“最佳Linux发行版”分类有点类似于“最好吃的比萨”--就算得票垫底但它仍算是比萨呀选择Linux不会错的而投票之五花八门恰恰呈现出开源世界里的选择多样性。
- Ubuntu 16.5%
- Debian 16.4%
- Linux Mint 11%
- Arch Linux 8.5%
- Fedora 8.3%
- CentOS 6%
- openSUSE 5.3%
- Kubuntu 4.1%
- Gentoo 2.9%
- Slackware 2.7%
- Xubuntu 2.5%
- 其它 2.3%
- Red Hat Enterprise Linux 1.6%
- NixOS 1.4%
- elementary OS 1.3%
- Lubuntu 1.2%
- CrunchBang 1%
- Mageia .7%
- LXLE .4%
- Tails .4%
- Android-x86 .3%
- Bodhi Linux .3%
- Chakra .3%
- Kali Linux .3%
- PCLinuxOS .3%
- SolydK .3%
- Mandriva .1%
- Oracle Linux .1%
### 最佳Linux移动系统 ###
安卓在移动领域是如此的举足轻重,所以我们决定让安卓的各种版本独立参与投票。因此,尽管以下系统本质上属于安卓,但我们仍沿用其名而不改称安卓,因为这样更加一目了然。
- Stock Android 37.1%
- Sailfish OS 27.6%
- CyanogenMod 20.2%
- 其它 3%
- Ubuntu Phone 3%
- Amazon Fire OS 1.5%
- Ubuntu for Android 1.4%
- Replicant .8%
- Tizen .8%
### 最佳Linux智能手机厂商 ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f3.jpg)
- Samsung 29%
- Jolla 26.7%
- Nexus 16.5%
- 其它 7.1%*
- HTC 7%
- LG 5.3%
- Sony 3.7%
- Nokia 1.8%
- Huawei 1.4%
- GeeksPhone 1%
- Amazon .6%
*在"其它"当中,摩托罗拉获得最多提名,其次是一加。
### 最佳Linux平板 ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f4.jpg)
- Google Nexus 7 35.3%
- Google Nexus 10 14.8%
- Samsung Galaxy Tab 14%
- Samsung Galaxy Note 9.8%
- ASUS Transformer Pad 8.4%
- 其它 6.4%
- Kindle Fire HD 4.7%
- ASUS MeMO Pad 2%
- Dell Venue 1.6%
- Acer Iconia One 1.4%
- Samsung Galaxy Note Edge .9%
- Ekoore Python S3 .7%
### 最佳基于Linux的其它配件不含智能手机或平板###
我们是一群树莓派粉如假包换不过说真的这怎么能怪我们呢树莓派又出了新款B+,让原本就美妙绝伦的树莓派愈发的标致可人。并非我有未卜先知之功,但我对明年的冠军早就心中有数了。
- Raspberry Pi 71.4%
- BeagleBone Black 8.1%
- 其它 4.3%*
- Lego Mindstorms Ev3 3.7%
- Moto 360 3.4%
- Cubieboard 1.7%
- Parrot A.R Drone 1.7%
- Samsung Gear S 1.4%
- Yamaha Motif XF8 1.1%
- Nvidia Jetson-K1 Development System .8%
- Cloudsto EVO Ubuntu Linux Mini PC .5%
- VoCore Open Hardware Computer .5%
- LG G Watch .4%
- RaZberry .4%
- VolksPC .4%
- IFC6410 Pico-ITX Board .2%
- JetBox 5300 .1%
*在“其它”当中提名最多是Odroid与CuBox。
### 最佳笔记本厂商 ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/lenovo.jpg)
本分类原本用于评价哪个厂商对Linux最花心思不过谢天谢地如今大多数笔记本运行起Linux来还是相当不错的。因此无需我们将重点放在“嗯这台能运行Linux”这种问题上面而可以切切实实地看到精华之作。把眼光放长远些。
- Lenovo 32%
- ASUS 19.3%
- Dell 18.5%
- System76 10.6%
- 其它 7.9%*
- Acer 4.5%
- ThinkPenguin 1.9%
- LinuxCertified 1.8%
- ZaReason 1.6%
- EmperorLinux 1.5%
- CyberPower .3%
- Eurocom .1%
*在“其它”当中提名最多的依次是运行Linux的苹果、惠普、东芝以及三星。
### 最佳内容管理系统 ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f6.jpg)
- WordPress 34.7%
- Drupal 25.3%
- Joomla! 11.1%
- MediaWiki 10.5%
- 其它 10%*
- Alfresco 4.3%
- WebGUI 1.3%
- ikiwiki 1.1%
- eZ publish .7%
- Wolf CMS .4%
- Elgg .3%
- Blosxom .2%
*在“其它”当中提名最多的依次是DokuWiki, Plone, Django 以及 Typo3。
### 最佳对Linux友好的虚拟主机公司 ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/question.jpg)
提到虚拟主机这年头要找到不对Linux友好的公司那是相当之难。事实上要找到一家提供Windows的主机服务商才是一种挑战。这一类别的冠军“其它”就显而易见的说明了这一问题或许设一个“最差虚拟主机”分类更加有用
- 其它 22.8%*
- Amazon 22.5%
- Rackspace 13.1%
- Linode 10.4%
- GoDaddy.com 6.5%
- OVH 5.6%
- DreamHost 5.4%
- 1&1 4.8%
- LAMP Host 2.9%
- Hurricane Electric 2.6%
- Liquid Web .6%
- RimuHosting .6%
- Host Media .5%
- Savvis .5%
- Blacknight Solutions .4%
- Netfirms .4%
- Prgmr .4%
*在“其它”当中提名最多的依次是Digital Ocean (压倒性优势), Hetzner, BlueHost 以及 WebFaction。
### 最佳浏览器 ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f7.jpg)
Firefox以显著优势拔得今年的头筹。即使以Chrome加Chromium计算Firefox仍位居榜首。我们曾经担心Firefox死忠会悄然流失不过还好Firefox依然宝刀未老仍是一款快速、可行以及兼容度极佳的浏览器。
- Firefox 53.8%
- Chrome 26.9%
- Chromium 8.1%
- Iceweasel 4%
- Opera 3%
- 其它 2%
- SeaMonkey .8%
- rekonq .5%
- dwb .4%
- QupZill .4%
- Dillo .2%
### 最佳电邮客户端###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f8.jpg)
如果我没有亲身了解到身边有多少铁杆极客粉的话我也许会指责Kyle Rankin投票有作弊嫌疑。他的最爱--Mutt电邮客户端并未登顶但是对于一个没有图形界面的程序来说获得第三名也算是个比较骄人的成绩了。
- Mozilla Thunderbird 44.4%
- Gmail 24.7%
- Mutt 6.8%
- Evolution 5.5%
- KMail 5.3%
- 其它 3.2%
- Claws Mail 2.2%
- Zimbra 2%
- Alpine 1.8%
- Geary 1.7%
- SeaMonkey 1%
- Opera Mail .9%
- Sylpheed .4%
### 最佳音频编辑工具###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f9.jpg)
- Audacity 69.1%
- FFmpeg 10.8%
- VLC 9.7%
- Ardour 4.9%
- 其它 1.9%
- SoX 1.3%
- Mixxx 1.1%
- LMMS .7%
- Format Junkie .5%
### 最佳音频播放器 ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f10.jpg)
VLC登上视频播放器分类的榜首见下文应该是毫无悬念的但让人大跌眼镜的是它居然在音频播放器分类中也有不俗的成绩。或许它可以考虑成为一站式媒体播放器。不管怎样我们都乐见其取得好成绩。
- VLC 25.2%
- Amarok 15.3%
- Rhythmbox 10.4%
- Clementine 8.6%
- MPlayer 6.1%
- Spotify 5.9%
- Audacious 5.5%
- Banshee 4.6%
- 其它 4%*
- XBMC 3.1%
- foobar2000 3%
- Xmms 2.4%
- DeaDBeeF 1.2%
- MOC .9%
- cmus .8%
- Ncmpcpp .8%
- Guayadeque .6%
- Mixxx .4%
- MPC-HC .4%
- Subsonic .4%
- Nightingale .3%
- Decibel Audio Player .2%
*在"其它"当中Quod Libet获得最多提名。
### 最佳视频播放器 ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f10_0.jpg)
- VLC 64.7%
- MPlayer 14.5%
- XBMC 6.4%
- Totem 2.7%
- 其它 2.7%*
- Plex 2%
- Kaffeine 1.9%
- mpv 1.6%
- MythTV 1.6%
- Amarok 1.4%
- Xmms .3%
- Daum Potplayer .2%
- Clementine .1%
*在“其它”当中提名最多是SMPlayer。
### 最佳视频编辑器 ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f10_1.jpg)
再次证明了我们的读者群深具极客色彩。我们未指定“非线性编辑器”因此就转码技术而言VLC在视频编辑类别中勉强获胜。干得好VLC干得好
- VLC 17.5%
- Kdenlive 16.4%
- Blender 15.1%
- Avidemux 13.2%
- OpenShot 13.2%
- Cinelerra 7.5%
- PiTiVi 4.9%
- LightWorks 4.8%
- 其它 4.7%
- LiVES 1.4%
- Shotcut .6%
- Jahshaka .4%
- Flowblade .4%
### 最佳云存储 ###
[](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f11.jpg)
- Dropbox 30.5%
- ownCloud 23.6%
- Google Drive 16%
- rsync 8.3%
- 其它 7.5%*
- Amazon S3 6.6%
- SpiderOak 4.4%
- Box 1.8%
- Copy 1%
- AjaXplorer .3%
Dropbox在这一领域曾经独步天下几无对手虽然这次仍为头魁但优势已经不那么明显了。Dropbox的方便与稳定无可否认但是将你的宝贵数据托管在ownCloud上可管可控也让ownCloud登上第二名的宝座。
*在“其它”当中,提名最多是 Younited 与 MEGA。当然很多人可能会说“非万不得已时不会选择云存储/我的文件都是存在本地”。
### 最佳Linux游戏 ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/CIVILIZATION-V-FRONT-OF-BOX.jpg)
我很少玩游戏所以每年我都特期待这一类别排名希望可以从中找到最受欢迎的游戏以供闲暇之需。看到NetHack排名这么靠前我倒觉得挺开心的尤其是在联想到竞争对手后更是心满意足。徘徊在让我们这些老派的龙与地下城玩家痴迷的随机通道确实有点意思。
- Civilization 5 26.5%
- 其它 23.5%*
- Team Fortress 2 8.7%
- NetHack 8.4%
- X-Plane 10 7.1%
- Dota 6.1%
- Bastion 5.4%
- Scorched 3D 3.7%
- Destiny 3.6%
- Ultima IV 1.9%
- FreeCol 1.8%
- Kpat 1.4%
- FreeOrion 1.1%
- Ryzom .9%
*在“其它”当中提名最多的依次是Minecraft, 0 A.D., Frozen Bubble, Battle for Wesnoth, Portal 以及 Counter Strike。
### 最佳虚拟方案 ###
我认为与Vagrant的关系大大带动了Oracle旗下VirtualBox的普及。当然Vagrant也与其它虚拟平台合作但自从其与VirtualBox无缝结合后我认为对VirtualBox是极大提升。虚拟化实现系统是如此的高效与可靠从裸机开始重构系统的方案几近历史。
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/Virtualbox_logo_0.jpg)
- Oracle VM VirtualBox 33.4%
- VMware 22.3%
- KVM 21.1%
- XEN 5.7%
- QEMU 5.3%
- OpenStack 4.9%
- 其它 4.2%*
- OpenVZ 1.7%
- Linux-VServer 1.3%
- Symantec Workspace Virtualization .1%
*在“其它”当中提名最多的依次是Docker, ProxMox 与 LXC。
### 最佳监控应用 ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/Nagios-Core-4.0.8.png)
- Nagios 27.1%
- Wireshark 20.7%
- htop 12.3%
- Zabbix 10.5%
- 其它 8.6%*
- Zenoss 6.2%
- Munin 3.4%
- PC Monitor 2.8%
- New Relic 1.9%
- Opsview 1.2%
- SaltStack 1%
- NTM (Network Traffic Monitor) .7%
- xosview .7%
- Manage Engine .5%
- FlowViewer .3%
- Circonus .2%
- SysPeek .2%
*在“其它”当中提名最多是Icinga 与 OpenNMS。
### 最佳开发运维配置管理工具###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/Git-Logo-2Color.jpg)
Git能拿到本类别第一名倒是蛮有趣的虽然针对配置文件使用标准版本控制工具当然无可厚非但我总觉得它应该配合Chef或Puppet一起使用。至少开发运维DevOps让我们这些执拗的老派系统管理员像对待代码一样处理配置文件。版本控制真令人难以置信这一点似乎绝大多数读者均无异议。
- Git 39.4%
- Puppet 17.2%
- Ansible 8.9%
- cron jobs 8.8%
- Subversion 7.6%
- Chef 5%
- SaltStack 5.4%
- 其它 4.6%*
- CFEngine 3%
*在“其它”当中,提名最多是 NixOps。
### 最佳编程语言 ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f13.jpg)
- Python 30.2%
- C++ 17.8%
- C 16.7%
- Perl 7.1%
- Java 6.9%
- 其它 4.6%
- Ruby 4.3%
- Go 2.4%
- JavaScript 2.4%
- QML 2.2%
- Fortran 1.4%
- Haskell 1.4%
- Lisp 1.2%
- Erlang .6%
- Rust .6%
- D .4%
- Hack .1%
*在“其它”当中提名最多的依次是Scala, PHP 以及 Clojure。
### 最佳脚本语言 ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f13_0.jpg)
Python强悍无比无论在脚本及编程分类都有大量拥趸。对于像我这样懂Bash以及一点PHP皮毛的人来说很明显在我一头扎进开发过程中我需要重点突破。敢说空格无用我--空格来也!
- Python 37.1%
- Bash/Shell scripts 27%
- Perl 11.8%
- PHP 8.4%
- JavaScript 6.7%
- Ruby 4.9%
- 其它 2.1%
- Lua 2%
### 最佳Linux/开源新产品/新项目 ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f14.jpg)
Docker无疑是大赢家当之无愧--游戏规则改变者嘛。 Jolla/Sailfish也小受欢迎真是令人欣慰。我们爱安卓不过多个选择不正是我们作为开源鼓手所提倡的一个重要方面吗。
- Docker 28%
- Jolla and Sailfish OS 19%
- LibreOffice 7%
- ownCloud 5%
- Steam 5%
- Zenoss Control Center 5%
- Raspberry Pi 4%
- Git 4%
- Apache Cordova/OpenOffice/Spark/Tika 3%
- Ansible 2%
- Elementary OS 2%
- OpenStack 2%
- Zabbix 2%
- CoreOS 2%
- Firefox OS 2%
- KDE Connect 1%
- NixOS and NixOps 1%
- Open Media Vault 1%
###你用Linux做过的最酷的事情 ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/tux_cruise.png)
这是读者选择奖里我最钟爱的新分类。想象一下你参加某次Linux会议期间询问人们他们用Linux做过的最酷的事情。这里所做的与之大同小异这里我们仅列出部分我们比较喜欢的如欲了解完整列表请访问[http://www.linuxjournal.com/rc2014/coolest][2]。
注:最常见的答案是:“使用它”;“挽救数据/照片/导致Windows 机器罢工的任何东西”;“说服朋友/家人/业务转向使用Linux”“学习”“讲授”“获得工作”“家庭自动化”“构建家庭媒体服务器”。下表是我们选出的并非最常见的答案而是一些比较具体与有个性的答案。
- 在上世纪90年代中期建立procmail垃圾邮件预过滤规则。
- 450-节点计算集群。
- 7.1 通道前置放大器集成Mopidy音乐播放器
- Linux机器人 参加Eurobot年度比赛
- 无意间打印到错误的大陆。
- 视频同步时增加音频通道。
- 使用自已编写的代码分析NASA卫星数据。
- 远程逗着猫玩。
- 通过声音以及移动应用自动控制家里整个灯光设置。
- 窗台植物自动浇水系统。
- 浴室收音机。
- 配制啤酒。
- 创建了一个运行在国际空间站的应用。
- 为某大型收费高速公路系统建立一套实时收费系统。
- 自己装配智能手机。
- 使用树莓派建立基于网络的家庭报警系统。
- 树莓派集群破解加密的办公文档。
- 控制我的Parrot无人机。
- 控制186台风力涡轮机的通信。
- 在Linux下使用Stellarium控制我的米德望远镜。
- 用一台十几年的老笔记本转换卡带式家庭视频的格式。
- 在靠近北极地区创建网状网络。
- 使用无线数据发射器创建海洋环境下的传感器浮标。
- 发现新行星。
- 修复位于美国丹佛的jabber服务器 而我当时却身在约旦安曼一家酒店大堂。
- 得到一张Linus亲笔签名的Red Hat 5.0 CD。
- 入侵我的咖啡机,在咖啡做好后给我一条消息。
- 给我女儿介绍乐高机器人EV3。
- 监控酒窖温度与湿度,过热或过湿时开门。
- 用树莓派代替温泉浴缸上的控制器。
- 使用脚本连续四天每隔15秒开关一次同事的CD托盘。
- 使用LFS系统为一家全国性石油公司迁移ACH自动转帐系统。
- 身在其它城市冲我家里的马桶。
- 远程控制鸡舍门。
- 使用树莓派为16个站点部署基于网络的洒水器控制器并控制水池与庭院灯光
- 链接SSH通道通过三级跳连接家与工作因网络设置方面的限制
- 建立一套系统,监控可再生能源的安装部分:两套固定的太阳能电池阵,一套两轴太阳跟踪太阳能电池阵,以及一台风力涡轮机。生产以及天气数据实时显示在网络站点。
- 还是在“猫”时代,我用电脑每天早上叫醒我女朋友去上班。
- 使用一个Wii摇控器通过蓝牙将我的笔记本作为红外摄像机侦测我女儿的旋转木马的运动以及控制视频游戏。
--------------------------------------------------------------------------------
via: http://www.linuxjournal.com/rc2014
作者:[Shawn Powers][a]
译者:[yupmoon](https://github.com/yupmoon)
校对:[wxy](https://github.com/wxy)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://www.linuxjournal.com/users/shawn-powers
[1]:http://www.linuxjournal.com/contact
[2]:http://www.linuxjournal.com/rc2014/coolest

View File

@ -1,27 +1,26 @@
、Linux 3.18 内核发布了,下面的是更新的内容
Linux 3.18 新内核带来了什么新东西?
================================================================================
![](http://www.omgubuntu.co.uk/wp-content/uploads/2011/07/Tux-psd3894.jpg)
新的一月意味着新的稳定版Linux内核的发布今天Linus Torvalds[宣布Linux 3.18 很快就会发布了][1]。
新的一月意味着新的稳定版Linux内核的发布前一段时间,Linus Torvalds[宣布Linux 3.18 很快就会发布了][1]。
Torvalds在Linux内核邮件列表中解释到由于在3.17中还存在几个令一小部分用户烦心的问题,‘**绝不可以在一些人积极解决老问题时其他人无所事事。**
Torvalds在Linux内核邮件列表中解释到由于在3.17中还存在几个令一小部分用户烦心的问题,但是**绝不可以在一些人积极解决老问题时其他人无所事事。**
### Linux 3.18中有什么新的? ###
Linux 3.18内核主要致力于硬件支持、电源效率、bug修复和可靠性。
如往常一样,这些内容跨很大,容易让人迷惑 。比如:加密层多重缓冲操作 - 到气冲感知, 就像对雷蛇游戏手柄的支持。
如往常一样,这些内容跨度很大,容易让人迷惑。比如:从加密层多重缓冲操作,到气流感知,再到对雷蛇游戏手柄的支持。
下面我们收集了这个版本的重要的改变。这远远不是所有的,只是选取了一些更相关的内容。
- Nouveau (免费 Nvidia GPU 驱动) 现在支持基础 DisplayPort 音频
- Nouveau (开源的 Nvidia GPU 驱动) 现在支持基础 DisplayPort 音频
- 对雷蛇游戏手柄的支持用在Xbox 360上
- Xilinx USB2 外设
- 对Microchip AR1021 i2c、PenMount 6000 touch的触摸屏支持
- 音频编码: Cirrus Logic CS35L32、 Everest ES8328and Freescale ES8328
- 音频支持: 通用飞思卡尔声卡, A模拟SSM4567音频放大器
- 不同的文件系统提升, 包括 Btrfs 和 F2FS
- 对Microchip AR1021 i2c、PenMount 6000 touch的触摸屏支持
- 音频编码: Cirrus Logic CS35L32、 Everest ES8328 Freescale ES8328
- 音频支持: 通用飞思卡尔声卡, Analog Devices SSM4567音频放大器
- 几个文件系统提升, 包括 Btrfs 和 F2FS
- 现在支持了DCTCP拥塞控制算法
- JIT 编译64位 eBPF程序
- “Tinification” 帮助开发人员编译更精简更小的内核
@ -34,7 +33,7 @@ Linux 3.18内核主要致力于硬件支持、电源效率、bug修复和可靠
- [下载Linux内核源码包][2]
有一个由Canonical维护的最新Linux内核归档。尽管你可能在其他地方看到过但是请注意这不是针对终端用户的。没有任何保证与支持你自己承担风险。
这里有一个由Canonical维护的最新Linux内核归档。尽管你可能在其他地方看到过但是请注意这不是针对终端用户的。没有任何保证与支持你自己承担风险。
- [访问Ubuntu内核主线归档][3]
@ -44,7 +43,7 @@ via: http://www.omgubuntu.co.uk/2014/12/linux-kernel-3-18-released-whats-new
作者:[Joey-Elijah Sneddon][a]
译者:[geekpi](https://github.com/geekpi)
校对:[校对者ID](https://github.com/校对者ID)
校对:[wxy](https://github.com/wxy)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出

View File

@ -0,0 +1,112 @@
如何在Linux下使用rsync
================================================================================
对于各种组织和公司数据对他们是最重要的即使对于电子商务数据也是同样重要的。Rsync是一款通过网络备份重要数据的工具/软件。它同样是一个在类Unix和Windows系统上通过网络在系统间同步文件夹和文件的网络协议。Rsync可以复制或者显示目录并复制文件。Rsync默认监听TCP 873端口通过远程shell如rsh和ssh复制文件。Rsync必须在远程和本地系统上都安装。
rsync的主要好处是
**速度**:最初会在本地和远程之间拷贝所有内容。下次,只会传输发生改变的块或者字节。
**安全**传输可以通过ssh协议加密数据。
**低带宽**rsync可以在两端压缩和解压数据块。
语法:
#rsync [options] source path destination path
### 示例: 1 - 启用压缩 ###
[root@localhost /]# rsync -zvr /home/aloft/ /backuphomedir
building file list ... done
.bash_logout
.bash_profile
.bashrc
sent 472 bytes received 86 bytes 1116.00 bytes/sec
total size is 324 speedup is 0.58
上面的rsync命令使用了-z来启用压缩-v是可视化-r是递归。上面在本地的/home/aloft/和/backuphomedir之间同步。
### 示例: 2 - 保留文件和文件夹的属性 ###
[root@localhost /]# rsync -azvr /home/aloft/ /backuphomedir
building file list ... done
./
.bash_logout
.bash_profile
.bashrc
sent 514 bytes received 92 bytes 1212.00 bytes/sec
total size is 324 speedup is 0.53
上面我们使用了-a选项它保留了文件的所有者和所属组、时间戳、软链接、权限并以递归模式运行。
### 示例: 3 - 同步本地到远程主机 ###
root@localhost /]# rsync -avz /home/aloft/ azmath@192.168.1.4:/share/rsysnctest/
Password:
building file list ... done
./
.bash_logout
.bash_profile
.bashrc
sent 514 bytes received 92 bytes 1212.00 bytes/sec
total size is 324 speedup is 0.53
上面的命令允许你在本地和远程机器之间同步。你可以看到在同步文件到另一个系统时提示你输入密码。在做远程同步时你需要指定远程系统的用户名和IP或者主机名。
### 示例: 4 - 远程同步到本地 ###
[root@localhost /]# rsync -avz azmath@192.168.1.4:/share/rsysnctest/ /home/aloft/
Password:
building file list ... done
./
.bash_logout
.bash_profile
.bashrc
sent 514 bytes received 92 bytes 1212.00 bytes/sec
total size is 324 speedup is 0.53
上面的命令同步远程文件到本地。
### 示例: 5 - 找出文件间的不同 ###
[root@localhost backuphomedir]# rsync -avzi /backuphomedir /home/aloft/
building file list ... done
cd+++++++ backuphomedir/
>f+++++++ backuphomedir/.bash_logout
>f+++++++ backuphomedir/.bash_profile
>f+++++++ backuphomedir/.bashrc
>f+++++++ backuphomedir/abc
>f+++++++ backuphomedir/xyz
sent 650 bytes received 136 bytes 1572.00 bytes/sec
total size is 324 speedup is 0.41
上面的命令帮助你找出源地址和目标地址之间文件或者目录的不同。
### 示例: 6 - 备份 ###
rsync命令可以用来备份linux。
你可以在cron中使用rsync安排备份。
0 0 * * * /usr/local/sbin/bkpscript &> /dev/null
----------
vi /usr/local/sbin/bkpscript
rsync -avz -e ssh -p2093 /home/test/ root@192.168.1.150:/oracle/data/
--------------------------------------------------------------------------------
via: http://linoxide.com/how-tos/rsync-copy/
作者:[Bobbin Zachariah][a]
译者:[geekpi](https://github.com/geekpi)
校对:[wxy](https://github.com/wxy)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://linoxide.com/author/bobbin/

View File

@ -1,68 +1,68 @@
The history of Android
安卓编年史5
================================================================================
![闹钟主屏幕,设置一个闹钟,计算器,以及计算器高级功能。](http://cdn.arstechnica.net/wp-content/uploads/2013/12/calclockonpresszx.png)
闹钟主屏幕,设置一个闹钟,计算器,以及计算器高级功能。
Ron Amadeo供图
安卓0.9第一次给我们展现了闹钟和计算器应用程序。闹钟应用的特征是有个扁平的模拟时钟,下方是一排设置的闹钟的滚动列表。不同于其它种类的开关,闹钟使用一个复选框来设置。闹钟可以设置为每周特定几天重复,以及它还有一整个列表的可选的,独特的闹钟铃声。
*闹钟主屏幕,设置一个闹钟,计算器,以及计算器高级功能* [Ron Amadeo供图]
安卓0.9第一次给我们展现了闹钟和计算器应用程序。闹钟应用的特征是有个扁平的模拟时钟,下方是一排设置的闹钟的滚动列表。不同于其它种类的开关,闹钟使用一个复选框来设置。闹钟可以设置为每周特定几天重复,以及它还有一整个列表的可选的、独特的闹钟铃声。
计算器是一个全黑色的应用带有有光泽的圆形按钮。通过菜单可以打开带有高级功能的附加面板。再次强调一致性不是谷歌的强项所在。按键中的Pi键按下的高亮是红色的——在安卓0.9的其它地方,按键按下的高亮通常是橙色的。实际上,计算器中用到的所有东西是仅用于计算器的百分百定制设计。
![打开菜单的谷歌地图和新路线界面。](http://cdn.arstechnica.net/wp-content/uploads/2013/12/mps092.png)
打开菜单的谷歌地图和新路线界面。
Ron Amadeo供图
*打开菜单的谷歌地图和新路线界面* [Ron Amadeo供图]
谷歌地图在安卓0.9中真正能够运行——客户端能够连接到谷歌地图服务器并下载地图块。给予我们地图图像——要记住谷歌地图是个基于云的应用。连最老旧的版本也会下载更为现代的地图块所以忽略实际的地图块的样子吧。地图的菜单获得了和浏览器菜单相同的全灰设计待遇缩放控件也和浏览器的相同。最重要的“我的位置”按钮最终来到了安卓0.9这意味着该版本的地图支持GPS定位。
路线界面得到了改进。奇怪的聊天气泡附加不对齐的按钮已经被去除换为更具交互性的书签图标切换地点按钮移动到了左边“go”按钮的现在被标记为“获取路线(Route)”。
![谷歌地图图层选择,搜索历史,新加入的街景视图。](http://cdn.arstechnica.net/wp-content/uploads/2013/12/maps3.png)
谷歌地图图层选择,搜索历史,新加入的街景视图。
Ron Amadeo供图
*谷歌地图图层选择,搜索历史,新加入的街景视图* [Ron Amadeo供图]
“图层(Layers)”被重命名为“地图模式(Map Mode)”并且变成一个单选列表。一次只能选择一个地图类型——举个例子,你在卫星地图视图下不能查看交通状况。埋藏在菜单中的还有被匆忙放到一起的搜索记录界面。搜索历史看起来只是个概念验证,带着巨大的,模糊的搜索图标填充的搜索项被放置于半透明的背景之上。
街景曾经是个单独的应用尽管它从没提供给公众但在0.9中它被作为一个地图模式内置于谷歌地图之中。你可以拖拽小Pegman街景小人到地图上它会显示一个弹出气泡来展示街景的快照。点击快照会启动那个位置的街景。这时街景除了可滚动的360度影像之外不会显示任何东西——在显示界面上根本就没有用户界面UI
![我们第一次见到谷歌地图搜索界面。这些截图展示了搜索栏,搜索结果列表,显示在地图上的搜索结果,以及一个商业页面。 ](http://cdn.arstechnica.net/wp-content/uploads/2013/12/manystarbucks.png)
我们第一次见到谷歌地图搜索界面。这些截图展示了搜索栏,搜索结果列表,显示在地图上的搜索结果,以及一个商业页面。
Ron Amadeo供图
*我们第一次见到谷歌地图搜索界面。这些截图展示了搜索栏,搜索结果列表,显示在地图上的搜索结果,以及一个商业页面* [Ron Amadeo供图]
安卓0.9同样第一次给我们展示了信息应用,称为“信息”(Messaging)。就像一些早期的安卓设计,信息并不确定它应该是一个暗色系应用还是亮色系应用。第一眼可以看到的屏幕是信息列表,一个极力避免空白的质朴黑色界面,看起来像是建立在设置界面的设计之上。但点击“新信息”或已存在的会话后,你会被带到一个白色以及蓝色的文本信息的滚动列表这里。这两个相连的界面真是没法再更不一样一点了。
![信息应用的会话窗口,附件窗口,会话列表,以及设置。](http://cdn.arstechnica.net/wp-content/uploads/2014/03/sms09.png)
信息应用的会话窗口,附件窗口,会话列表,以及设置。
Ron Amadeo供图
*信息应用的会话窗口,附件窗口,会话列表,以及设置* [Ron Amadeo供图]
信息支持一定范围的附件你可以附上图片声音或者一个幻灯片到你的信息之中。图片和声音可以实时录制或是从手机存储中拉取。另一个奇怪的UI选择是对于附件菜单中的每一项安卓基本都已经有现成的图标可用但信息却全部使用了另外定制的设计。
信息是最先带有自己设置界面的应用之一。用户可以请求已读以及送达报告以及设置下载偏好。
![幻灯片制作器。右边图片显示了菜单选项。](http://cdn.arstechnica.net/wp-content/uploads/2013/12/slideshow.png)
幻灯片制作器。右边图片显示了菜单选项。
Ron Amadeo供图
附件选项中的“幻灯片”选项实际上是以一个全功能的幻灯片制作器的形式到来的。你可以添加图片,选择幻灯顺序,添加音乐,修改每张幻灯片的显示时间,以及添加文字。这已经复杂到足够给它一个自己的应用图标了,但令人惊奇的是它被隐藏在信息应用的菜单之中。在纵向模式下这是为数不多的完全无用的安卓应用之一——唯一的看图片方式以及控制是在横向显示之中。奇怪的是,纵向模式它仍然能够旋转,但显示输出变得一团糟。
*幻灯片制作器。右边图片显示了菜单选项* [Ron Amadeo供图]
附件选项中的“幻灯片”选项实际上是以一个全功能的幻灯片制作器的形式到来的。你可以添加图片,选择幻灯顺序,添加音乐,修改每张幻灯片的显示时间,以及添加文字。这已经复杂到足够给它一个自己的应用图标了,但令人惊奇的是它被隐藏在信息应用的菜单之中。在纵向模式下这是为数不多的完全无法使用的安卓应用之一——唯一的看图片方式以及控制是在横向显示之中。奇怪的是,纵向模式它仍然能够旋转,但显示输出变得一团糟。
![音乐播放器的主导航页面,歌曲列表,专辑列表,以及“正在播放”界面。](http://cdn.arstechnica.net/wp-content/uploads/2013/12/music09.png)
音乐播放器的主导航页面,歌曲列表,专辑列表,以及“正在播放”界面。
Ron Amadeo供图
*音乐播放器的主导航页面,歌曲列表,专辑列表,以及“正在播放”界面* [Ron Amadeo供图]
安卓0.9第一次将音乐应用带进了安卓。首屏基本上只是几个将你带到各个功能视图的巨大的,矮胖的导航按钮。在应用底部是一个“正在播放”栏,仅仅包含了音轨名,艺术家,以及一个播放/暂停按钮。歌曲列表仅仅有个最简的无修饰界面,仅仅显示了歌曲名,艺术家,专辑以及时长。艺术家专辑是这个应用中唯一有希望看到色彩的地方。它在专辑视图里显示为一个小快照,在正在播放界面显示为巨大的,四分之一屏的图片。
正如安卓在这个时期的系统绝大多数部分,音乐应用的界面可能没什么好多看几眼的,但功能已经基本齐全。正在播放界面有一个让你拖动歌曲的播放列表按钮,随机播放,重复播放,搜索,以及选择背景声音按钮。
正如安卓在这个时期的系统绝大多数部分,音乐应用的界面可能没什么值得看的,但功能已经基本齐全。正在播放界面有一个让你拖动歌曲的播放列表按钮,随机播放,重复播放,搜索,以及选择背景声音按钮。
![“相册”的所有相册视图,单个相册视图,以及单张图片视图。](http://cdn.arstechnica.net/wp-content/uploads/2013/12/pictures09.png)
“相册”的所有相册视图,单个相册视图,以及单张图片视图。
Ron Amadeo供图
*“相册”的所有相册视图,单个相册视图,以及单张图片视图* [Ron Amadeo供图]
相册被简单地称为“图片”。初始视图显示你的所有相册。两个默认的相册是“相机”和巨大的合集相册叫做“全部图片”。每个相册的快照由2x2的图片组成每张图片有个白色的粗边框。
单个相册视图的样子大概是你所希望的:一个可滚动的图片方阵。你不能在单个图片大小的范围内向左右滑动来移动图片,而是应该轻点图片来移动图片。相册同样没有双指捏合缩放,你只能使用按钮来缩放图片。
![图片编缉!这些截图显示了一个打开的菜单,“更多”菜单,截取,以及设置。](http://cdn.arstechnica.net/wp-content/uploads/2013/12/pics209.png)
图片编缉!这些截图显示了一个打开的菜单,“更多”菜单,截取,以及设置。
Ron Amadeo供图
*图片编缉!这些截图显示了一个打开的菜单,“更多”菜单,截取,以及设置* [Ron Amadeo供图]
“图片”看起来十分简单,直到你点击菜单按钮并突然看到无数的选项。图片可以截取,旋转,删除,或设置壁纸或联系人图标。就像浏览器一样,所有的这一切通过一个笨拙的二级菜单系统完成。但是,我们为何又将看起来完全不同的菜单联系到一起?
@ -81,7 +81,7 @@ Ron Amadeo供图
via: http://arstechnica.com/gadgets/2014/06/building-android-a-40000-word-history-of-googles-mobile-os/5/
译者:[alim0x](https://github.com/alim0x) 校对:[校对者ID](https://github.com/校对者ID)
译者:[alim0x](https://github.com/alim0x) 校对:[wxy](https://github.com/wxy)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出

View File

@ -1,38 +0,0 @@
Translating By instdio
"Fork Debian" Project Aims to Put Pressure on Debian Community and Systemd Adoption
================================================================================
> There is still a great deal of resistance in the Debian community towards the upcoming adoption of systemd
**The Debian project decided to adopt systemd a while ago and ditch the upstart counterpart. The decision was very controversial and it's still contested by some users. Now, a new proposition has been made, to fork Debian into something that doesn't have systemd.**
![](http://i1-news.softpedia-static.com/images/news2/Fork-Debian-Project-Started-to-Put-Pressure-on-Debian-Community-and-Systemd-Adoption-462598-2.jpg)
systemd is the replacement for the init system and it's the daemon that starts right after the Linux kernel. It's responsible for initiating all the other components in a system and it's also responsible for shutting them down in the correct order, so you might imagine why people think this is an important piece of software.
The discussions in the Debian community have been very heated, but systemd prevailed and it looked like the end of it. Linux distros based on it have already started to make the changes. For example, Ubuntu is already preparing to adopt systemd, although it's still pretty far off.
### Forking Debian, not really a solution ###
Developers have already forked systemd, but the projects resulted don't have a lot of support from the community. As you can imagine, systemd also has a big following and people are not giving up so easily. Now, someone has made a website called debianfork.org to advocate for a Debian without systemd, in an effort to put pressure on the developers.
"We are Veteran Unix Admins and we are concerned about what is happening to Debian GNU/Linux to the point of considering a fork of the project. Some of us are upstream developers, some professional sysadmins: we are all concerned peers interacting with Debian and derivatives on a daily basis. We don't want to be forced to use systemd in substitution to the traditional UNIX sysvinit init, because systemd betrays the UNIX philosophy."
"We contemplate adopting more recent alternatives to sysvinit, but not those undermining the basic design principles of 'do one thing and do it well' with a complex collection of dozens of tightly coupled binaries and opaque logs," reads the [website][1], among a lot of other things.
Basically, the new website is not actually about a Debian fork, but more like a form of pressure for the [upcoming vote][2] that will be taken for the "Re-Proposal - preserve freedom of choice of init systems." This is a general resolution made by Ian Jackson and he hopes to get enough support in order to turn back the decision made by the Technical Committee regarding systemd.
It's clear that the debate is still not over in the Debian community, but it remains to be seen if the decisions already made can be overturned.
--------------------------------------------------------------------------------
via: http://news.softpedia.com/news/Fork-Debian-Project-Started-to-Put-Pressure-on-Debian-Community-and-Systemd-Adoption-462598.shtml
作者:[Silviu Stahie][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://news.softpedia.com/editors/browse/silviu-stahie
[1]:http://debianfork.org/
[2]:https://lists.debian.org/debian-vote/2014/10/msg00001.html

View File

@ -1,30 +0,0 @@
Red Hat acquires FeedHenry to get mobile app chops
================================================================================
Red Hat wants a piece of the enterprise mobile app market, so it has acquired Irish company FeedHenry for approximately $82 million.
The growing popularity of mobile devices has put pressure on enterprise IT departments to make existing apps available from smartphones and tablets -- a trend that Red Hat is getting in on with the FeedHenry acquisition.
The mobile app segment is one of the fastest growing in the enterprise software market, and organizations are looking for better tools to build mobile applications that extend and enhance traditional enterprise applications, according to Red Hat.
"Mobile computing for the enterprise is different than Angry Birds. Enterprise mobile applications need a backend platform that enables the mobile user to access data, build backend logic, and access corporate APIs, all in a scalable, secure manner," Craig Muzilla, senior vice president for Red Hat's Application Platform Business, said in a [blog post][1].
FeedHenry provides a cloud-based platform that lets users develop and deploy applications for mobile devices that meet those demands. Developers can create native apps for Android, iOS, Windows Phone and BlackBerry as well as HTML5 apps, or a mixture of native and Web apps.
A key building block is Node.js, an increasingly popular platform based on Chrome's JavaScript runtime for building fast and scalable applications.
From Red Hat's point of view, FeedHenry is a natural fit with the company's strengths in enterprise middleware and PaaS (platform-as-a-service). It adds better mobile capabilities to the JBoss Middleware portfolio and OpenShift PaaS offerings, Red Hat said.
Red Hat plans to continue to sell and support FeedHenry's products, and will continue to honor client contracts. For the most part, it will be business as usual, according to Red Hat. The transaction is expected to close in the third quarter of its fiscal 2015.
--------------------------------------------------------------------------------
via: http://www.computerworld.com/article/2685286/red-hat-acquires-feedhenry-to-get-mobile-app-chops.html
作者:[Mikael Ricknäs][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://www.computerworld.com/author/Mikael-Rickn%C3%A4s/
[1]:http://www.redhat.com/en/about/blog/its-time-go-mobile

View File

@ -1,58 +0,0 @@
Suse enterprise Linux can take your system back in time
================================================================================
> Suse Linux Enterprise Server 12 features a new system snapshot and rollback capability
The newest enterprise edition of the Suse Linux distribution allows administrators to go back in time, for instance, to immediately before they made that fatal system-crippling mistake.
Suse Linux Enterprise Server 12 (SLES 12) features a system snapshot and rollback capability that allows the user to boot the system to an earlier configuration, should the latest one unexpectedly fail.
Such a capability can be handy for undoing a system configuration change that did not turn out as expected. For instance, an administrator might have the SLES computer in a perfectly fine running state, but then install a botched software update, or make a change that destroys the kernel. Typically, Unix systems have been unforgiving about such mistakes, forcing the administrator to reinstall the system software from scratch, should they not know how to undo the unfortunate change.
"This stuff happens, for whatever reason," said Matthias Eckermann, Suse senior product manager. "So the admin has an emergency exit, so to speak."
Users of Microsoft Windows and Apple Macintosh systems have long enjoyed rollback functionality within their respective OSes, but this capability had been missing in Unix-based systems such as Linux, at least as a native function of the OS.
For this functionality, the Suse team used the [Btrfs][1] file system (B-tree file system, often pronounced as "Butter FS"), an open-source file system developed by Oracle engineer Chris Mason ([now at Facebook][2]). Mason created Btrfs to address emerging enterprise requirements such as the ability to make snapshots and to scale across multiple storage nodes.
Although Btrfs is supported in the mainline Linux kernel, SLES is the first major Linux distribution to use Btrfs as the default file system. "Over the last five years, we specifically focused on making Btrfs enterprise-ready," Eckermann said.
The rollback capability also relies on the open-source tool [Snapper][3], first developed by Suse, to manage the snapshots.
The Suse team integrated Snapper with SLES so that users now have the ability, when the OS is first being loaded, to boot into an earlier snapshot of the system. "Whoever installs SLES 12 gets this capability by default," Eckermann said.
SLES also integrated Btrfs with the [Samba Windows file server][4], which makes Linux files accessible to Windows machines. For Windows users, SLES can now make multiple snapshots of a file appear as different versions of a file, which are all accessible.
Initially, Enterprise Suse supports rollbacks for only system changes, though users can also deploy it to handle changes in a user's home directory, in which data is typically kept. "We already have it running, but it is not supported," Eckermann said. Users can continue to use ext3, ext4 or some other traditional Linux file system as their default.
SLES 12, released Monday, comes with a number of other features as well. Like other distributions, SLES has [caught the fever for Docker containers][5] and now comes with a built-in framework to run this virtualization technology. For the first time, the package also provides geo-clustering, which allows the user to build replicate clusters across different geographic regions.
An organization could use geo-clustering, for instance, to set up multiple copies of a single cluster in data centers around the world, so if one or more regions go offline, the others can continue operations unabated, Eckermann said.
Suse [is among the world's most widely used distributions][6] of Linux, along with Ubuntu/Debian, and Red Hat Enterprise Linux. A free version is available under OpenSuse and Suse Linux offers a commercial edition packaged for enterprise usage.
Suse Linux's parent company, Attachmate, is in the process of merging with Micro Focus. Eckermann expects no major changes in the operations of Suse Linux resulting from the new ownership.
SLES 12 is [offered at an annual subscription][7] of US$349 per server. A free 60-day trial is also available.
![](http://images.techhive.com/images/article/2014/10/sle_12_installed_system_08_snapper_gui-2-100527225-large.idge.png)
Through the combined powers of the Btrfs file system and the Snapper utility, SUSE Enterprise Linux can now take snapshots of the system, and roll back to an earlier configuration if necessary.
--------------------------------------------------------------------------------
via: http://www.computerworld.com/article/2838950/suse-enterprise-linux-can-take-your-system-back-in-time.html
作者:[Joab Jackson][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://www.computerworld.com/author/Joab-Jackson/
[1]:https://btrfs.wiki.kernel.org/index.php/Main_Page
[2]:http://www.phoronix.com/scan.php?page=news_item&px=MTUzNTE
[3]:http://snapper.io/
[4]:http://www.samba.org/
[5]:http://www.pcworld.com/article/2838452/canonical-celebrates-cloud-freedoms-with-new-ubuntu.html
[6]:http://distrowatch.com/table.php?distribution=suse
[7]:https://www.suse.com/products/server/how-to-buy/

View File

@ -1,41 +0,0 @@
Mozilla to Launch Brand New Developer Web Browser Next Week
================================================================================
**When you woke up this morning you probably didnt expect to come online and see the words Mozilla, New, and Web Browser writ large across the web. **
But that my bed-headed compadre is precisely what youre looking at.
youtube 地址,发布的时候不行做个链接吧
<iframe width="750" height="422" frameborder="0" allowfullscreen="" src="https://www.youtube.com/embed/Ehv5u-5rE8c?feature=oembed"></iframe>
### Mozilla Pushing Boundaries ###
Mozilla has always been at the forefront of pushing open source, open standards and open access. They steer one of the most popular desktop browsers in the world. Their open-source Linux mobile OS [is sold on 12 smartphones from 13 operators in 24 countries][1]. Theyre [even taking on the Google Chromecast][2]!
Their desire to democratise the web shows no sign of abating. In a teaser posted on the Mozilla Blog this morning the company has announced a new effort to push boundaries further — this time for developers rather than users.
Teased as something “unique but familiar”, the company intend to release a brand new browser based on Firefox but designed by developers, for developers. Mozilla say it integrates “powerful new tools like [WebIDE][3] and the [Firefox Tools Adapter][4]”.
> “When building for the Web, developers tend to use a myriad of different tools which often dont work well together. This means you end up switching between different tools, platforms and browsers which can slow you down and make you less productive.”
### #Fx10 ###
The “Firefox Developer Browser” is being touted for an initial release date of November 10. Its not yet known what platforms it will target but since this is a) Mozilla and b) aimed at developers itd be a huge shock if Linux builds werent readily available on day dot.
Mozilla say those interested should sign up for their [Hacks newsletter][5] to receive notification when the browser is released.
--------------------------------------------------------------------------------
via: http://www.omgubuntu.co.uk/2014/11/mozilla-launch-brand-new-developer-focused-web-browser
作者:[ Joey-Elijah Sneddon][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:https://plus.google.com/117485690627814051450/?rel=author
[1]:https://twitter.com/firefox/status/522175938952716289
[2]:http://www.omgchrome.com/mozillas-chromecast-rival-leaks-online/
[3]:https://hacks.mozilla.org/2014/06/webide-lands-in-nightly/
[4]:https://hacks.mozilla.org/2014/09/firefox-tools-adapter/
[5]:https://hacks.mozilla.org/newsletter/

View File

@ -1,36 +0,0 @@
Ubuntu Touch RTM Gets Major Update Video Tour
================================================================================
**A new Ubuntu Touch RTM version has been released and the developers have made a number of important fixes, not to mention all the improvements that have been made to the backend.**
The Ubuntu Touch RTM stable images don't arrive all that often. Only six have been launched so far and each new version is sensibly better than the previous one. The current release is no exception, although it seems to have a longer and more complex changelog than the previous one.
Long gone are the days when a Mir update would break Ubuntu, but now all sorts of smaller problems are cropping up. In fact, all landings have been suspended before this new update was released, in an effort to track down and correct all the major bugs. Some problems still remain, but none of them should be an inconvenience.
### This is just the RTM branch, not the final version ###
Ubuntu Touch is still a work in progress, but, from the looks of it, the developers are homing in on the final version. It shouldn't take too long now and we might get it in a month or so. That would be a fair assessment, if Meizu's plans to launch an Ubuntu phone in December hold.
"Good news! As per earlier announcement we promoted a new image to the ubuntu-rtm/14.09 channel! Please enjoy image #6 (previously known as #140 for krillin, #118 for mako and #112 for x86). Because of those we plan on promoting another image as soon as possible if those issues get fixed. But no worries - no freezes required this time. The landing gates will remain opened until the next serious promotion! Once again big thanks to everyone involved!" [said][1] Canonical's Łukasz Zemczak.
There are still a few minor issues, but they will be corrected very soon. For example, the user metrics that could be found on the lock screen are no longer changing with a double tap, the media hub might strain the CPU in certain situations, and the Unity 8 environment might crash from time to time.
On the upside, Ubuntu Touch should be much more stable now, the video playback now works properly in landscape mode, the Unity 8 desktop has been updated, and a lot of critical changes have been made.
Users can test Ubuntu Touch RTM on Nexus 4 and Nexus 7 devices, and the official website has a comprehensive [wiki][2] that details the installation.
youtu.be链接地址[http://youtu.be/_DtNvz_WVu8][3]
--------------------------------------------------------------------------------
via: http://news.softpedia.com/news/Ubuntu-Touch-RTM-Gets-Major-Update-Video-Tour-464075.shtml
作者:[Silviu Stahie][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://news.softpedia.com/editors/browse/silviu-stahie
[1]:https://lists.launchpad.net/ubuntu-phone/msg10368.html
[2]:https://wiki.ubuntu.com/Touch/DualBootInstallation
[3]:http://youtu.be/_DtNvz_WVu8

View File

@ -1,39 +0,0 @@
Canonical Launches LXD Open Source Virtualization Container
================================================================================
> Canonical is launching a new container-based virtualization hypervisor for its open source Ubuntu Linux operating system, called LXD. How will it get along with Docker?
As open source container-based virtualization explodes in popularity, perhaps it was only a matter of time before [Canonical][1] announced its own, homegrown virtualization container system to contend with [Docker][2]. That's what the company has now done with the launch of [LXD][3] for [Ubuntu Linux][4].
Canonical announced the hypervisor— which the company is pronouncing "lex-dee," the better, I suppose, to avoid confusion with the Schedule 1 drug of similar nomenclature—Nov. 4. The pitch for the tool, which is basically a server for the [LXC][5] virtualized container system built into the Linux kernel, goes like this:
> Imagine you could launch a new machine in under a second, and that you could launch hundreds of them on a single server. Hundreds! Now, imagine that you have hardware-guaranteed security to ensure that those machines cant pry or spy on one another. Imagine you can connect them separately and securely to networks. And imagine that you can run that on a single node or a million, live migrate machines between those nodes, and talk to all of it through a clean, extensible REST API. That's what LXD sets out to deliver.
LXD will also feature tight integration with OpenStack—in fact, it's part of Canonical's [OpenStack][6] Juno for Ubuntu—as well as hardware-level security protections, according to the company, which said it is working with chip manufacturers (it hasn't indicated which ones) on the latter technology.
This is all pretty cool. If Canonical fully implements these features, LXD could go a long way toward making LXC a truly enterprise-ready containerized virtualization platform.
But to do that, Canonical needs to siphon off some of the momentum Docker is currently enjoying and reorient part of the open source container-based virtualization world toward LXD. So far, Canonical appears eager to position LXD as a technology that can complement and enhance Docker, not compete directly with it. That makes sense to a degree, since LXD and Docker are somewhat different sorts of beasts, at least for now. But Canonical has stated its ambition "to bring much of the awesome security and isolation of LXD to docker [sic] as well," an idea that may not sit well with the Docker community, especially if LXD remains closely intertwined with Ubuntu rather than being distribution-agnostic.
It doesn't help that what Canonical is doing with LXD is very similar to what it has already done with technologies including [Unity][7], the desktop interface it designed for Ubuntu. Like LXD, Unity was a way for Canonical to replace a major part of the Ubuntu software stack—specifically, the [GNOME][8] desktop environment—with a homegrown alternative, providing the company more control over Ubuntu, yet also making Ubuntu less readily compatible with many open source apps that were not designed for Ubuntu and Unity. The move engendered more than a little ill-will among the Ubuntu user base, although most of that sentiment has long since dissipated.
It's hard to imagine Canonical marginalizing Docker in the same way it has GNOME, and even harder to imagine many people feeling emotional about this in the way they did when Unity replaced GNOME. But time will tell.
--------------------------------------------------------------------------------
via: http://thevarguy.com/ubuntu/110514/canonical-launches-lxd-open-source-virtualization-container
作者:[Christopher Tozzi][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://thevarguy.com/author/christopher-tozzi
[1]:http://canonical.com/
[2]:http://docker.com/
[3]:http://www.ubuntu.com/cloud/tools/lxd
[4]:http://ubuntu.com/
[5]:https://linuxcontainers.org/
[6]:http://openstack.org/
[7]:http://unity.ubuntu.com/
[8]:http://www.gnome.org/

View File

@ -1,43 +0,0 @@
Massive 20% Improvement to Land in Intel's Mesa Driver Thanks to Valve's Efforts
================================================================================
> A group of devs from LunarG found a bottleneck in the driver
**Intel users should see a major improvement with their hardware after a group of developers from LunarG found out that there was a bottleneck in the DRM driver.**
![](http://i1-news.softpedia-static.com/images/news2/Massive-20-Improvement-to-Land-in-Intel-s-Mesa-Driver-Thanks-to-Valve-s-Efforts-464233-2.jpg)
The drivers on the Linux platform are not stellar, and most of the time, pieces of hardware work better on other operating systems, like Windows, for example. It might be strange that the same game, on the same hardware, works better on one platform than it does on the other, but things have been like this forever and no one expects any kind of big breakthroughs.
To be fair, the drivers from AMD, NVIDIA, and Intel have been improving in the last couple of years, especially after Steam for Linux was released. Actually, LunarG works with Valve to improve the state of the Intel drivers and to find ways to boost the performance on Linux. They had a big breakthrough and a kernel update should arrive very soon.
### Users with Intel-powered machines should be very happy ###
Valve tasked LunarG with improving the Intel drivers, which are lagging a little bit behind the competition, at least in terms of graphics. Some of the latest Intel processors are pretty powerful and you would expect them to be able to perform much better, at least as well as on Windows, but there was a problem.
The guys from LunarG worked on a piece of software called GlassyMesa, which drastically improved Intel's shader compiler stack. They also made a number of improvements in the past few months, but none of these changes was reflected in the driver's performance. This led them to believe that there had to be a bottleneck somewhere along the line.
"We started to suspect there was a bigger bottleneck masking the improvements, and sure enough we were able to generate a test program that showed a huge performance issue with how the hardware samplers were working as compared to the OpenGL driver running under windows. Something was slowing down the samplers on Linux, and we were determined to find out what," wrote the devs on their blog.
They did all sorts of testing, but they don't have access to the way the hardware is set up. Therefore, they sent the test program to Intel and the engineers found the problem and corrected it. As you can imagine, the people at Intel didn't say anything about what they actually corrected.
### 20% increase in performance is no small matter ###
In any case, LunarG also published some of the improvements they saw, and one of them is a 20% increase in game framerate.
- Left4Dead2 with frames that have hordes of zombies we've seen an increase of 17-25%
- Counter-Strike GO: 16-20%
- Lightsmark increased on a GT2 by 60% (HD4600) 4770
A kernel patch is required to make all these improvements available to users. It's not clear whether it will be available in Linux kernel 3.18 or 3.19, but it's coming. It also means that the kernel patch will be backported to the SteamOS kernel as well.
--------------------------------------------------------------------------------
via: http://news.softpedia.com/news/Massive-20-Improvement-to-Land-in-Intel-s-Mesa-Driver-Thanks-to-Valve-s-Efforts-464233.shtml
作者:[Silviu Stahie][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://news.softpedia.com/editors/browse/silviu-stahie

View File

@ -1,48 +0,0 @@
Prizes Ahoy! Ubuntu Scope Showdown Kicks Off
================================================================================
![](http://www.omgubuntu.co.uk/wp-content/uploads/2014/10/scope.jpg)
**SDK at the ready: Ubuntu has launched a new development competition for its mobile platform, with some swish prizes up for grabs for the winners.**
The [Ubuntu Scope Showdown][1] is the third such initiative to be held by the project and the second pitched squarely at mobile.
But this time around amateur and l33t developers alike are being tasked with a new brief: creating custom home screen experiences — [Scopes][2] — for Ubuntu on phones.
### Er, What Is a Scope? ###
We often refer to Scopes as mini search engines, little portals that help you find content from a specific web site, service or topic — think eBay, Cat gifs, or Restaurants Nearby — from the home screen, no need to open an app.
![](http://www.omgubuntu.co.uk/wp-content/uploads/2014/07/scopes-customization.jpg)
Thanks to the rich variety of result layouts content can be surfaced and previewed in interesting, intuitive ways. And when youre happy with what youve found you can (typically) click through to open it up in an app or a new tab in the Ubuntu web browser.
Scopes can be added, removed, re-ordered and favourited for easy access. Some Scopes can search multiple sources, others just the one.
It's this multifariousness that makes the lack of a traditional home screen as we know it from Android, iOS and other mobile platforms (pages and pages of scopes) less of a negative. There's no desktop; no custom wallpaper you can cover with icons, folders, shortcuts and widgets, but there is, quite literally, a world of information at your fingertips.
### The Competition ###
The Ubuntu Scope Showdown runs for five weeks (October 30 – December 3), giving participants just about enough time to take a project from concept to completion using the Ubuntu SDK and submit it to the Ubuntu Store.
The overall winner (decided by a judging panel of which, disclaimer ahoy, I am part) will bag a brand new Dell XPS 13 Laptop (Developer Edition) preloaded with Ubuntu.
Runners up nab a Logitech UE Boom Bluetooth speaker, a Nexus 7 (2013) running Ubuntu, or one of two bundles of Ubuntu merchandise.
Interested in taking part? Youll find more details on the entry requirements plus links to all the documentation you can eat on the [developer.ubuntu.com mini-site][3].
Will you be taking part?
--------------------------------------------------------------------------------
via: http://www.omgubuntu.co.uk/2014/11/ubuntu-scope-showdown-competition-launched
作者:[Joey-Elijah Sneddon][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:https://plus.google.com/117485690627814051450/?rel=author
[1]:http://developer.ubuntu.com/2014/10/scope-development-competition/
[2]:http://developer.ubuntu.com/scopes/
[3]:http://developer.ubuntu.com/

View File

@ -1,36 +0,0 @@
Ubuntu's Click Packages Might End the Linux Packaging Nightmare
================================================================================
> It's time to have one type of package for all distros
**The new Click packages that are already used on the Ubuntu Touch platform by Canonical are also coming to the desktop and they might be able to change the Linux packaging paradigm.**
![](http://i1-news.softpedia-static.com/images/news2/Ubuntu-s-Click-Packages-Might-End-the-Linux-Packaging-Nightmare-464271-3.jpg)
Ubuntu is the most used Linux operating system, so it's very likely that, if something really catches on with users of this distribution, it will probably shake things up in the Linux ecosystem as well. For now, the app packaging for Linux operating systems is a mess. It has improved over the years, but it still poses many problems.
There isn't any kind of unification and different distros use different packages. Debian-based distros use .deb and Fedora-based ones use .rpm, but you can also find packages with .sh or .run. The main problem is that they depend very much on the libraries that are already installed or available in the repos. Even if you have a .deb file for your Ubuntu system, it's not a guarantee that it will work. It might very well depend on a library that's not available for that particular version.
### One package to rule them all ###
For now, only the Ubuntu SDK can make Click packages, but they present some advantages over regular ones. For example, they are much safer than regular packages, mostly because there are no maintainer scripts that can run as root. In conjunction with the Ubuntu Software Center and Apparmor, the Click packages are pretty safe.
One of the best features of Click packages is that they have no external dependencies, which means that you can basically run them on any system, regardless of the available libraries installed or in the repositories. Martin Albisetti from Canonical explains this feature in more detail on his [blog][1].
"Clicks, by design, can't express any external dependencies other than a base system (called a 'framework'). That means that if your app depends on a fancy library that isn't shipped by default, you just bundle it into the Click package and you're set. You get to update it whenever it suits you as a developer, and have predictability over how it will run on a user's computer (or device!). That opens up the possibility of shipping newer versions of a library, or just sticking with one that works for you."
Another cool feature is that Click packages for different versions of the same app can be run on the same system. There are numerous applications out there that need to be alone on the system, otherwise they create problems for users, but the confinement provided by Click packages solves this issue.
These are just a few of the features that are already implemented. It will take a while until they reach the desktop, however. They will land along with Unity 8, but they are coming nonetheless. We can only hope that other distros will adopt this kind of format and not do their own similar thing, which would preserve the current packaging problems.
--------------------------------------------------------------------------------
via: http://news.softpedia.com/news/Ubuntu-s-Click-Packages-Might-End-the-Linux-Packaging-Nightmare-464271.shtml
作者:[Silviu Stahie][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://news.softpedia.com/editors/browse/silviu-stahie
[1]:http://beuno.com.ar/archives/334

View File

@ -1,54 +0,0 @@
Open-Source Vs Groupon: GNOME Battle To Protect Their Trademark
================================================================================
![](http://www.omgubuntu.co.uk/wp-content/uploads/2014/09/gnome-blank.jpg)
**GNOME is a name synonymous with open-source software, but if the billion-dollar company Groupon has its way it could soon mean something different.**
[Groupon][1], famed for its deal-of-the-day website, recently unveiled a “tablet-based platform“ called GNOME, and has filed requisite trademark filings — 10 so far — seeking ownership of the name.
Naturally, this has the GNOME Foundation concerned. GNOME is a [registered trademark][2] of the foundation, and has been since 2006. This mark was issued under a number of sections, including operating system which the Chicago-based Groupon is also claiming against.
Could it just be that theyve never heard of GNOME before? Highly unlikely.
![Groupons POS system. Ahem.](http://www.omgubuntu.co.uk/wp-content/uploads/2014/11/Gnome-Stand.jpg)
Groupons POS system. Ahem.
Even the most Saul Goodman-y of lawyers would first check existing trademarks and investigate the company(s) owning or contesting. Even assuming that lapse in professionalism, most would have at least given the name a quick Google. Damningly, the company has previously [claimed to be fuelled by open-source][3].
Groupon clearly knows of GNOME, knows what it does, what it stands for and how long its been around yet considers itself better placed to “own” the name for its brand of hokey in-store point-of-sale terminals.
*Hrm.*
### Campaign to Protect GNOME ###
Ask not what GNOME can do for you, but what you can do for GNOME. This morning the GNOME Foundation [launched a campaign][4] to raise (an estimated) US$80,000 to battle the first round of marks Groupon has applied to register.
“**We must not let a billion-dollar-company take the well-established name of one of the biggest Free Software communities,**” says Tobias Mueller, a GNOME Foundation director.
**“If you want to help GNOME defend its trademark and promote Free Software, visit the campaigns page, share the link, and let Groupon know that they behaved terribly”.**
Lucas Nussbaum, **Debian Project Leader**, sums the whole situation up succinctly:
“**This legal defense is not just about protecting GNOMEs trademark; it is about asserting to the corporate world that FLOSS trademarks can and will be guarded. Not just by the project in question, but by the community as a whole. As a result, all FLOSS trademarks will be strengthened at once.**”
More details can be found on the GNOME Groupon Campaign page.
- [GNOME vs Groupon Campaign Page][5]
--------------------------------------------------------------------------------
via: http://www.omgubuntu.co.uk/2014/11/gnome-groupon-trademark-battle
作者:[Joey-Elijah Sneddon][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:https://plus.google.com/117485690627814051450/?rel=author
[1]:http://groupon.com/
[2]:http://tsdr.uspto.gov/#caseNumber=76368848&caseType=SERIAL_NO&searchType=statusSearch
[3]:https://engineering.groupon.com/2014/open-source/sharing-is-caring-open-source-at-groupon/
[4]:https://mail.gnome.org/archives/foundation-list/2014-November/msg00020.html
[5]:http://www.gnome.org/groupon/

View File

@ -1,56 +0,0 @@
Chakra Linux 2014.11 Brings a Custom and Cool KDE 4.14.2 Desktop Gallery
================================================================================
> A new version of Chakra Linux has been released
**Chakra Linux, a distribution specially built to take advantage of KDE Software Compilation and the Plasma desktop, has been upgraded to version 2014.11 and is now ready for download.**
The developers of this distribution usually choose names of famous scientists. The current iteration of the Chakra Linux, which is actually the second version in the branch, has been dubbed Euler, after Swiss mathematician and physicist Leonhard Euler, who refined calculus and graph theory. Because it follows the KDE releases, it means that we will probably get another version in a few months.
Surprisingly, if you already have Chakra Linux installed, it won't be enough just to keep your system up to date. Upgrading the OS with the provided ISO is quite easy, but if you're doing it manually, then you'll have to follow a rather intricate tutorial on how to do it properly. It's not unusual for developers to make such big changes that result in the usual updating process not working, but sometimes it's necessary.
### The latest Chakra Linux is using KDE 4.14.2 ###
The Chakra Linux developers are following the latest KDE branch very closely, but not the latest version. Case in point, KDE 4.14.3 was released yesterday, but Chakra features KDE 4.14.2. On the other hand, the developers go to great lengths to customize the KDE desktop so that it's unique to this particular distribution.
"The Chakra team is happy to announce the second release of the Chakra Euler series, which follows the KDE Applications and Platform 4.14 releases. The main reason for providing this new ISO, in addition to providing a new KDE release, is that Chakra has now implemented the /usr merge changes. If you already have Chakra installed on your system manual intervention is needed, so please follow the [instructions][1] on how to properly update. For new installations using this ISO, this is of course not needed."
"The extra repository, which is disabled by default, provides the must-have GTK-based applications and their dependencies. Kapudan, our desktop greeter which runs after the first boot, will allow you to enable it. Please have in mind that our installer, Tribe, does not currently officially support UEFI, RAID, LVM and GPT, although you might find some workarounds in our forums," [reads][2] the official website.
The developers also say that the Linux kernel has been updated to version 3.16.4, the systemd component has been updated to version 216, and all of the video drivers, free or proprietary, have been updated as well.
A complete list of new features and updates can be found in the official announcement.
Download Chakra Linux 2014.11:
- [Chakra GNU/Linux 2014.11 (ISO) 64-bitFile size: 1.7 GB][3]
- [MD5][4]
![](http://i1-news.softpedia-static.com/images/news2/Chakra-Linux-2014-11-Brings-a-Custom-and-Cool-KDE-4-14-2-Desktop-Gallery-464889-1.jpg)
![](http://i1-news.softpedia-static.com/images/news2/Chakra-Linux-2014-11-Brings-a-Custom-and-Cool-KDE-4-14-2-Desktop-Gallery-464889-2.jpg)
![](http://i1-news.softpedia-static.com/images/news2/Chakra-Linux-2014-11-Brings-a-Custom-and-Cool-KDE-4-14-2-Desktop-Gallery-464889-3.jpg)
![](http://i1-news.softpedia-static.com/images/news2/Chakra-Linux-2014-11-Brings-a-Custom-and-Cool-KDE-4-14-2-Desktop-Gallery-464889-4.jpg)
![](http://i1-news.softpedia-static.com/images/news2/Chakra-Linux-2014-11-Brings-a-Custom-and-Cool-KDE-4-14-2-Desktop-Gallery-464889-5.jpg)
![](http://i1-news.softpedia-static.com/images/news2/Chakra-Linux-2014-11-Brings-a-Custom-and-Cool-KDE-4-14-2-Desktop-Gallery-464889-6.jpg)
![](http://i1-news.softpedia-static.com/images/news2/Chakra-Linux-2014-11-Brings-a-Custom-and-Cool-KDE-4-14-2-Desktop-Gallery-464889-7.jpg)
--------------------------------------------------------------------------------
via: http://news.softpedia.com/news/Chakra-Linux-2014-11-Brings-a-Custom-and-Cool-KDE-4-14-2-Desktop-Gallery-464889.shtml
作者:[Silviu Stahie][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://news.softpedia.com/editors/browse/silviu-stahie
[1]:http://chakraos.org/news/index.php?/archives/134-Warning-Manual-intervention-needed-on-your-next-major-update.html
[2]:http://chakraos.org/news/index.php?/archives/135-Chakra-2014.11-Euler-released.html
[3]:http://sourceforge.net/projects/chakra/files/2014.11/chakra-2014.11-euler-x86_64.iso
[4]:http://chakra-project.org/get/checksums.txt

View File

@ -1,50 +0,0 @@
GNOME 3.14.2 Officially Released, Finally Drops SSLv3
================================================================================
> Users will find the new version in the repositories
![](http://i1-news.softpedia-static.com/images/news2/GNOME-3-14-2-Officially-Released-Finally-Drops-SSLv3-464903-2.jpg)
**The GNOME development team has released the second update for the GNOME 3.14.x branch and it brings a large number of fixes and improvements for a lot of the packages from the stack.**
GNOME 3.14 was initially released a few weeks ago and the developers are still ironing out a few issues. The new version has been received very well by the community and it's been adopted already by numerous Linux distributions. It's very likely that GNOME 3.14.2 will be integrated in most of the big repositories, as soon as possible.
The GNOME project managed to stay on track and the new release has arrived on time. Not all of the packages in the stack have been updated, but there are more than enough to get the users interested. It's a good idea to upgrade your desktop environment as soon as possible in order to get all of these enhancements.
### GNOME 3.14.2 gets a ton of improvements ###
Just like the previous iteration, the 3.14.2 release does have a few things that really stand out. For example, the NetworkManager dependency of GNOME Shell has been removed, the queued up notifications are now summarized, the handling of multi-day events has been improved, the GtkMenu use has been refined, various fixes for Mutter have been added, and the SSLv3 use has been disabled.
"Here comes our second update to GNOME 3.14, it has many fixes, various improvements, documentation and translation updates, we hope you'll enjoy it. Individual modules may get new stable 3.14 releases but our focus is now on the development branches, we released a first snapshot as 3.15.1 two weeks ago and will get another one by the end of the month.," [says][1] GNOME developer Frederic Peters.
GNOME 3.14.2 comes with updates for these core apps: Adwaita Icon Theme, Eye of GNOME, Epiphany, evolution-data-server, Glib, GNOME Calculator, GNOME Contacts, GNOME Desktop, GNOME Shell, GNOME Terminal, Mutter, Nautilus, Tracker, and more.
The apps that receive upgrades in the 3.14.2 branch include Aisleriot, Bijiben, Brasero, Cheese, Evolution, File Roller, Gedit, Four in a Row, GNOME Boxes, GNOME Maps, GNOME Music, Hitori, Orca, Rygel, Vinagre, and more.
We [detailed the GNOME 3.14.x release][2] when it was made available and you can find more details in the original report.
Download the GNOME 3.14.2 stack
- [GNOME 3.14.2 Stable Sources][3]
- [GNOME 3.14.2 Stable Modules][4]
- [GNOME 3.15.1 Unstable Sources][5]
- [GNOME 3.15.1 Unstable Modules][6]
But keep in mind that these are the source packages. If you want an easy upgrade or install, be sure to check the repositories.
--------------------------------------------------------------------------------
via: http://news.softpedia.com/news/GNOME-3-14-2-Officially-Released-Finally-Drops-SSLv3-464903.shtml
作者:[Silviu Stahie][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://news.softpedia.com/editors/browse/silviu-stahie
[1]:http://permalink.gmane.org/gmane.comp.gnome.devel.announce/397
[2]:http://news.softpedia.com/news/GNOME-3-14-Officially-Released-Screenshot-Tour-and-Video-459865.shtml
[3]:https://download.gnome.org/core/3.14/3.14.2/sources/
[4]:https://download.gnome.org/teams/releng/3.14.2/
[5]:https://download.gnome.org/core/3.15/3.15.1/sources/
[6]:https://download.gnome.org/teams/releng/3.15.1/

View File

@ -1,100 +0,0 @@
Budgie Desktop v8 Released With Improved Menu, Panel
================================================================================
![](http://www.omgubuntu.co.uk/wp-content/uploads/2014/11/budgie-desktop.jpg)
**A new version of [Evolve OS][1]s simple [Budgie Desktop Environment][2] has been released, and the improvements under its wing are impressive.**
Made up of 78 commits, the lightweight desktop lands with a host of new options and applets to play with. Its plumage has also benefitted from a bit of TLC, with key parts of the shell feeling fresher and looking more refined.
But will the changes ruffle the feathers of the Budgie flock or leave them squawking in awe? Lets take a closer look.
### Budgie v8 ###
#### Menu Changes ####
The **Budgie Menu** now uses a narrower compact layout by default. This style lists the applications in categories (as previously) but sorted by usage rather than name.
Software that you open most often sits nearer the top of each category header. Its an efficacious decision that should help save time for those who hunt n scroll for apps rather than use the handy search filter.
![The Menu uses compact mode by default](http://www.omgubuntu.co.uk/wp-content/uploads/2014/11/budgie-desktop-8.jpg)
The Menu uses compact mode by default
The old two-pane setup that featured in earlier builds remains available; you can toggle it back on in Budgies preferences (**right click on the menu applet > Preferences**).
The power option menu that previously resided in the main menu has been moved over to the System Tray applet (i.e., volume). Additionally, you can now access System Settings entries from the menu itself — no more scratching of heads!
#### Panel Changes ####
![Quicklist support in Budgie 0.8](http://www.omgubuntu.co.uk/wp-content/uploads/2014/11/quicklist-support-in-budgie.jpg)
Quicklist support in Budgie 0.8
The Budgie Panel and task list applet both benefit from a raft of improvements, including new auto-hide options, dynamic theming support and a new GNOME 2-style menu bar option.
- Auto-hide (optional)
- Quicklist support
- Dark theme support
- Application pinning
- App attention hint
- GNOME Panel theming
- Old-school Menu Bar applet (optional)
![](http://www.omgubuntu.co.uk/wp-content/uploads/2014/11/classic-menu.jpg)
#### Elsewhere ####
Other changes include support for GNOME 3.10 and up; improved animations when changing wallpapers; and the run dialog has been hugely improved in design, sporting an almost Alfred/GNOME-Do-esque design. Mmmhm!
![Run, Run, Run](http://www.omgubuntu.co.uk/wp-content/uploads/2014/11/budgie-run-dialog.jpg)
Run, Run, Run
### Install Budgie Desktop on Ubuntu ###
Budgie 0.8 is, as with previous releases, available to install in Ubuntu 14.04 LTS and Ubuntu 14.10 by way of an official PPA. The desktop can be installed alongside Unity, GNOME Shell and Cinnamon without much (if any) issue.
To install, open a new Terminal window and enter the following commands. Enter your password where prompted.
sudo add-apt-repository ppa:evolve-os/ppa
sudo apt-get update && sudo apt-get install budgie-desktop
After the install has completed you will need to log out of Unity (or whichever desktop youre currently using). At the Unity Greeter click the Ubuntu logo emblem, select the Budgie session from the session list, and then log in as normal. The Budgie desktop will load.
![](http://www.omgubuntu.co.uk/wp-content/uploads/2014/11/select-budgie.jpg)
#### Notes for Ubuntu Users ####
![Expect Odd Theming Issues in Ubuntu](http://www.omgubuntu.co.uk/wp-content/uploads/2014/11/not-all-perfect.jpg)
Expect Odd Theming Issues in Ubuntu
While Budgie is now easy for Ubuntu users to install it is not designed for it specifically (the Evolve OS distribution is the best way to experience it).
Naturally, you might not fancy upheaving to another OS. Thats fine, but if you plan on keeping Budgie caged in Ubuntu youll need to note the following caveats (lest you end up bird-brained).
First up, **Budgie is under active development**. Several key features remain missing, including native network management support. An applet can be added to the panel that supports Ubuntus Indicator Applets, but its a little rough around the edges.
You should also expect some theming issues when using the shell with Ambiance/Radiance. The Adwaita theme (and other GNOME themes) work best. You should also disable Ubuntus Overlay Scrollbars.
Finally, logout (volume > power button) **does not work under Ubuntu**. To log out you should use the run dialog (Alt+F2) and the following command:
gnome-session-quit
If all of that sounds like fun rather than faff, theres plenty to enjoy in Budgie and not just its minimal system footprint! Let us know your own thoughts on it, what youd like to see it add next, etc. in the comments below.
--------------------------------------------------------------------------------
via: http://www.omgubuntu.co.uk/2014/11/budgie-desktop-0-8-released-big-changes
作者:[Joey-Elijah Sneddon][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:https://plus.google.com/117485690627814051450/?rel=author
[1]:http://evolve-os.com/
[2]:http://www.omgubuntu.co.uk/2014/07/install-budgie-evolve-os-desktop-ubuntu-14-04

View File

@ -1,51 +0,0 @@
Ubuntu 15.04 Gets Tentative Release Date of April 23, 2015
================================================================================
![](http://i.imgur.com/FfX14E9.jpg)
**Doing anything special on April 23 next year? You might well be — its the tentative release date being given for Ubuntu 15.04 Vivid Vervet.**
The date, along with those of various other development milestones, is listed as part of a [draft release schedule][1] on the Ubuntu Wiki page for the V update. As of writing all dates are subject to approval from the Ubuntu release team and are therefore **not final**.
Ubuntus previous spring release, 14.04 LTS, went live on April 17, 2014.
### Veracity Potential is Void ###
![Dates not yet ready to be inked in](http://www.omgubuntu.co.uk/wp-content/uploads/2014/05/california-calendar.jpg)
Dates not yet ready to be inked in
Draft means just that, but having covered some 10 Ubuntu releases over five years I do know that the proposed dates dont tend to differ too wildly from those that go final (famous last words, Im sure!).
Even so, take the proposals with a pinch of optimism for now. Ill be keeping both this page and the fancy-schmancy graphic updated as, if or when anything changes.
### Key Ubuntu 15.04 Release Dates ###
As with all releases post-13.04, Ubuntu proper only makes fleeting appearances in select milestone releases, specifically the final beta and the release candidate stages.
Ubuntus family of flavours, which may include Ubuntu MATE this cycle, take full advantage of the testing opportunities at hand.
- **Alpha 1** December 18th (for flavours)
- **Alpha 2** January 22nd (for flavours)
- *Feature Freeze* — February 19th
- **Beta 1** February 26th (for flavours)
- *UI Freeze* — March 12th
- **Final Beta** March 26th
- *Kernel Freeze* — April 9th
- **Release Candidate** April 16th
The final release of the Vivid Vervet in all its vivacious glory is pencilled in for release on:
- **Ubuntu 15.04 Final** April 23rd
--------------------------------------------------------------------------------
via: http://www.omgubuntu.co.uk/2014/11/ubuntu-15-04-release-schedule-date-vivid-vervet
作者:[Joey-Elijah Sneddon][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:https://plus.google.com/117485690627814051450/?rel=author
[1]:https://wiki.ubuntu.com/VividVervet/ReleaseSchedule

View File

@ -1,42 +0,0 @@
Systemd fallout: Two Debian technical panel members resign
================================================================================
![](http://www.itwire.com/media/k2/items/cache/985881530be9dfdb268b3ae49be9a710_XL.jpg)
**Two well-known and experienced Debian developers, both members of the project's technical committee, have announced they will be leaving the committee.**
The resignations of [Colin Watson][1] and [Russ Allberry][2] from the panel come soon after senior developer Joey Hess [resigned][3] from the project altogether.
There has been much acrimony recently over the adoption of the systemd init system as the default for Jessie, the next release of Debian, which is expected to come out in the next few months.
The Debian Technical Committee [decided][4] back in February, via the casting vote of panel chief Bdale Garbee, to adopt systemd as the default. This decision came after months of discussion.
Recently, there has been [another push][5] for reconsideration led by another technical committee member, Ian Jackson, and [a general resolution][6] was put up for vote. It is open for voting until midnight on November 18, UTC (10am on Wednesday AEST). There are a few options proposed by others, including one from the Debian Project leader Lucas Nussbaum, besides the main resolution.
In the initial vote back in February, Allberry supported systemd as the default, while Watson, an employee of Canonical, the company that creates the Ubuntu GNU/Linux distribution, expressed a preference for upstart. Jackson also backed upstart.
In [a post][7] explaining his decision, Watson, one of the first batch to join Canonical, attributed it to a general move on his part to start spending his Debian time on things he found enjoyable. Late last month, [he asked][8] to be moved from the Ubuntu Foundations team to the Launchpad engineering team. Watson has given the Debian Technical Committee time to appoint someone in his place before he moves on.
In contrast, Allberry's [resignation post][9] said he wanted to leave immediately, though he later added that he would stay on for a while if needed.
His frustration was clear: "If any part of this doesn't make sense, or if any of it feels like an attack or a reaction to any single person or event, I'm happy to clarify. I would appreciate it if people would ask for clarification rather than making assumptions, as assumptions about other people's motives are one of the things that I find the most demoralising about the Debian project right now."
--------------------------------------------------------------------------------
via: http://www.itwire.com/business-it-news/open-source/66153-systemd-fallout-two-debian-technical-panel-members-resign
作者:[Sam Varghese][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://www.itwire.com/business-it-news/open-source/itemlist/user/902-samvarghese
[1]:https://lists.debian.org/debian-ctte/2014/11/msg00052.html
[2]:https://lists.debian.org/debian-ctte/2014/11/msg00071.html
[3]:http://www.itwire.com/business-it-news/open-source/66014-systemd-fallout-joey-hess-quits-debian-project
[4]:http://www.itwire.com/business-it-news/open-source/63121-garbees-casting-vote-means-systemd-is-debian-init
[5]:http://www.itwire.com/business-it-news/open-source/65781-pushback-against-systemd-in-debian-gathers-steam
[6]:https://www.debian.org/vote/2014/vote_003
[7]:https://lists.debian.org/debian-ctte/2014/11/msg00052.html
[8]:http://www.chiark.greenend.org.uk/ucgi/~cjwatson/blosxom/ubuntu/2014-10-26-moving-on-but-not-too-far.html
[9]:https://lists.debian.org/debian-ctte/2014/11/msg00071.html

View File

@ -1,64 +0,0 @@
After an 18 Month Gap, Opera for Linux Returns With New Stable Release
================================================================================
**The first stable release of Opera for Linux in more than 18 months is now available for download.**
![Hello again, Opera!](http://www.omgubuntu.co.uk/wp-content/uploads/2014/06/iopera.jpg)
Hello again, Opera!
Opera for Computers for Linux 26 (no really, thats its name) features a complete top-to-bottom overhaul, new features, and better performance thanks to its Aura and Blink underpinnings (yes, its no longer based on the proprietary Presto web engine).
#### Features ####
The browser [wiggled its toes in tux-friendly waters back in June with the launch of a developer preview][1], but if you last tried Opera when it looked like this, youll want to grab some smelling salts: things have changed.
youtube 视频,发布时可换成链接地址
<iframe width="750" height="422" src="https://www.youtube.com/embed/-kS10C2BUOs?feature=oembed" frameborder="0" allowfullscreen></iframe>
Alongside an impressive new look and blazing fast, standards-compliant rendering engine come many new and improved features.
- **Discover** — Shows articles from around the web in a range of categories
- **Speed Dial** — Supports interactive widgets, folders, and themes
- **Tab Peek** — Preview the content of an open tab without switching back to it
- **Opera Turbo** — Data-saving mode ideal for patchy connections
- **Rich bookmarking** — including new sharing functionality
- **Add-ons** — compatible with Chrome extensions, too
- **Support for HiDPI displays on Linux**
### Download Opera for Linux 26 ###
Opera say those running Opera 12.6 on a 64-bit version of Ubuntu still supported by Canonical will automatically receive this new update through the Ubuntu Software Center.
But in all honesty Im not sure anyone is in that boat! So, helpfully, a Debian installer can be downloaded from the Opera website. This will also add the Opera repository to your Software Sources to enable you to receive future updates in a timely fashion.
- [Download Opera for Computers for Linux 26][2]
Feel free to kit your new browser out with our nifty Opera Add-On, too:
- [Install OMG! Ubuntu! Opera Extension][3]
#### Important Notice about Linux Support ####
**Opera for Linux is 64-bit only**. The company say this decision was made based on what most Linux desktop users have installed. While annoying it is part of a larger overall trend away from 32-bit software, with Opera for Mac also being 64-bit exclusive, too.
In another case of “spending limited resources wisely”, this release is only being officially supported on Ubuntu (and Ubuntu-based derivatives, including Linux Mint).
Users on other distributions, from Arch to openSUSE, can still install Opera for Linux but will need to [use a (fairly simple) workaround][4] or hunt down an unofficial repository.
**If you give it a spin let us know what you make of it in the comments below.**
--------------------------------------------------------------------------------
via: http://www.omgubuntu.co.uk/2014/12/new-opera-for-linux-goes-stable-download-now
作者:[Joey-Elijah Sneddon][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:https://plus.google.com/117485690627814051450/?rel=author
[1]:http://www.omgubuntu.co.uk/2014/06/opera-linux-chromium-download-released
[2]:http://opera.com/computer/linux
[3]:https://addons.opera.com/en/extensions/details/omg-ubuntu-for-opera/?display=en
[4]:https://gist.github.com/ruario/99522c94838d0680633c#file-manual-install-of-opera-md

View File

@ -1,61 +0,0 @@
Firefox 34 Arrives with Plugin-Free Video Calling and Powerful WebIDE
================================================================================
![](http://www.omgubuntu.co.uk/wp-content/uploads/2014/09/firefox-banner.jpg)
**Its been a busy few months for Mozilla, what with agreeing to a deal to switch its default search provider to Yahoo!, launching a custom version of its browser packed full of developer goodies, and launching Firefox OS handsets in new territories.**
Today, Mozilla has released Firefox 34 for Windows, Mac and Linux desktops, the first stable release since last months security n bug fix update.
### Headline Feature ###
Despite the rapid release cycle Mozilla once again manages to deliver some great new features.
Making its first appearance in a stable release is **Firefox Hello**, Mozillas WebRTC feature.
Though not enabled for all (you can manually turn it on via about:config), the feature brings plugin-free video and voice calls to the browser. No Skype, no add-ons, no hassle. You simply click the Firefox Hello icon, send your share link to the recipient to initiate a connection (assuming theyre also using a WebRTC-enabled browser, like Google Chrome or Opera).
![The Hello Firefox Popup](http://www.omgubuntu.co.uk/wp-content/uploads/2014/12/hello-firefox.jpg)
The Hello Firefox Popup
Signing in with a Firefox account will give you more features, including a contacts book with one-click calling (no need to share links).
#### Other Changes ####
Version 34 also makes it easier to **switch themes** (formerly known as personas), with live previews and a switcher menu now available on the **Customising canvas**:
![Ad-hoc theme switching](http://www.omgubuntu.co.uk/wp-content/uploads/2014/12/firefox-theme-switcher.jpg)
Ad-hoc theme switching
The first major search engine change arrives in this release, with Yandex shipping as default for Belarusian, Kazakh, and Russian locales. Yahoo! will be enabled for US users in the near future. But remember: [this does not affect the version of Firefox provided in Ubuntu][1].
US users get secure **HTTPS** Wikipedia searching from the search box:
![Secure Wikipedia Searches for English US Users](http://www.omgubuntu.co.uk/wp-content/uploads/2014/12/firefox-https-search-for-wikipedia.jpg)
Secure Wikipedia Searches for English US Users
In addition to improved HTML5 support (largely around WebCrypto features) a [**new WebIDE tool**][2] ships in this release, and is packed full of great tools for developers.
From Android connectivity and an in-app editor to support for deploying and testing apps in a Firefox OS simulator. If you havent tried Firefox OS in a while, v2.2 (unstable) has plenty to play with including edge swiping, new home screen arranging features, and some new APIs.
![](http://www.omgubuntu.co.uk/wp-content/uploads/2014/12/firefox-webide.jpg)
### Download Firefox 34 ###
Canonical will roll out Firefox 34 to users of Ubuntu 12.04, 14.04 and 14.10 in the next 24 hours or so, so keep an eye out. If youre super impatient the release can also be downloaded from Mozilla servers directly.
--------------------------------------------------------------------------------
via: http://www.omgubuntu.co.uk/2014/12/firefox-34-changes-include-hello-html5-webide
作者:[Joey-Elijah Sneddon ][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:https://plus.google.com/117485690627814051450/?rel=author
[1]:http://www.omgubuntu.co.uk/2014/11/firefox-set-yahoo-default-search-engine-ubuntu-not-affected
[2]:https://developer.mozilla.org/en-US/docs/Tools/WebIDE

View File

@ -1,78 +0,0 @@
From Mint to Trisquel: The Top Linux Distro Releases in November 2014
================================================================================
**November wasnt heavy on new Linux distribution releases, but still had more than enough to keep distro-hoppers bouncing from download server to ISO mirror and back again.**
From the free software ethic of **Trisquel** to the nostalgic glow of **Ubuntu MATE**, lets take a look at the major Linux distribution releases made in November 2014.
![](http://www.omgubuntu.co.uk/wp-content/uploads/2014/11/linux-mint-17.jpg)
### Linux Mint 17.1 ###
Linux Mint 17.1 Rebecca is the big hitter on this list, going stable just in time to make it.
Based on Ubuntu 14.04 and using Linux kernel 3.13, the update also comes loaded with the **latest [Cinnamon 2.4][1] desktop environment, a customisable version of the Nemo file manager**, and improvements to the Update Manager to make package upgrades safer, saner and swifter.
Other changes see the **Background**, **Login** and **Theme** settings panes redesigned, and **Privacy and Notification sections** added. The default **system font has been switched to Noto Sans**, while fans of customisation will enjoy new colors added to the Mint-X theme package.
Linux Mint 17.1 delivers a set of solid, well thought out changes and performance improvements, important for an LTS release supported until 2019.
More information and those all important downloads can be found on the official project website.
- [Visit the Linux Mint Website][2]
### Ubuntu Mate 14.04 LTS ###
![](http://www.omgubuntu.co.uk/wp-content/uploads/2014/11/ubuntu-mate-lts.jpg)
It may have [arrived after the release of Ubuntu MATE 14.10][3] (**timey-wimey**), but as the first Long Term Support release of the flavor Ubuntu MATE 14.04 was welcomed with warm arms, especially by those who love to bask in the green-hued glow of GNOME 2 nostalgia.
Packed with security updates, MATE 1.8.1, and new software included out of the box, Ubuntu MATE 14.04 LTS is a notable update with plenty to tempt those on the newer (but older) 14.10 release.
For full hardware requirements, support information and download links, head on over to the official project website.
- [Download Ubuntu MATE 14.04 LTS][4]
### Trisquel 7.0 ###
![](http://www.omgubuntu.co.uk/wp-content/uploads/2014/11/trisquel-7-300x224.jpg)
The [latest stable release of Trisquel][5], an Ubuntu-based distribution endorsed by the Free Software Foundation (FSF), arrived in the middle of November — and was met by **a lot** of interest.
The free (as in freedom) distribution is built on Ubuntu 14.04 LTS but ships without any of the proprietary bits and pieces. Its a “pure” Linux experience that may require some workarounds, but serves to flag up the areas where more attention is needed in FOSS hardware support and app alternatives.
The Libre Linux 3.13 Kernel, GNOME 3.12 Flashback desktop and the Firefox-based Abrowser 33 are among the changes to be found in Trisquel 7.
- [Download Trisquel 7][6]
### Other Notable Releases ###
![](http://www.omgubuntu.co.uk/wp-content/uploads/2014/11/opensuse-desktop-kde.jpg)
Outside of the Ubuntu-based bubble November 2014 saw releases of other popular Linux distributions, including beta milestones of Mageia 5 and Fedora 21, and a new stable release of Scientific Linux 6.6.
Joining them is openSUSE 13.2 (stable) — the first release to follow a change in the way openSUSE development takes place, the first to adopt the new openSUSE design guidelines and the first to ship with a streamlined (if still unwieldy) installer.
The release has been getting great reviews from the geek press, who gave particular praise for the GNOME 3.14 implementation.
Coming from Ubuntu, where “everything just works”, the cultural and technical gulf can be daunting at first. But if you have some free time, like the color green and relish a challenge, the official openSUSE 13.2 [release announcement][7] should be your starting point.
**Have you tried any of these releases above? Let us know what you made of them in the space down below.**
--------------------------------------------------------------------------------
via: http://www.omgubuntu.co.uk/2014/11/linux-distro-releases-round-november-2014
作者:[Joey-Elijah Sneddon][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:https://plus.google.com/117485690627814051450/?rel=author
[1]:http://www.omgubuntu.co.uk/2014/11/install-cinnamon-2-4-ubuntu-14-04-lts
[2]:http://www.linuxmint.com/download.php
[3]:http://www.omgubuntu.co.uk/2014/11/ubuntu-mate-14-04-download-released
[4]:https://ubuntu-mate.org/longterm/
[5]:http://www.omgubuntu.co.uk/2014/11/download-trisquel-7-0-kernel-3-13
[6]:https://trisquel.info/en/download
[7]:https://news.opensuse.org/2014/11/04/opensuse-13-2-green-light-to-freedom/

View File

@ -1,540 +0,0 @@
translating by yupmoon
Readers' Choice Awards 2014--Linux Journal
================================================================================
It's time for another Readers' Choice issue of Linux Journal! The format last year was well received, so we've followed suit making your voices heard loud again. I couldn't help but add some commentary in a few places, but for the most part, we just reported results. Please enjoy this year's Readers' Choice Awards!
We'd like to make Readers' Choice Awards even better next year. Please send ideas for new categories and any comments or feedback via [http://www.linuxjournal.com/contact][1].
Please see the December 2014 issue of Linux Journal for the complete list of winners.
### Best Linux Distribution ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f1.jpg)
Although this year the Debian/Ubuntu-based distros took the lion's share of the votes, the "Best Linux Distribution" category is a bit like "Best Kind of Pizza"—even the bottom of the list is still pizza. It's hard to go wrong with Linux, and the wide variety of votes only proves how many different choices exist in our wonderful Open Source world.
- Ubuntu 16.5%
- Debian 16.4%
- Linux Mint 11%
- Arch Linux 8.5%
- Fedora 8.3%
- CentOS 6%
- openSUSE 5.3%
- Kubuntu 4.1%
- Gentoo 2.9%
- Slackware 2.7%
- Xubuntu 2.5%
- Other 2.3%
- Red Hat Enterprise Linux 1.6%
- NixOS 1.4%
- elementary OS 1.3%
- Lubuntu 1.2%
- CrunchBang 1%
- Mageia .7%
- LXLE .4%
- Tails .4%
- Android-x86 .3%
- Bodhi Linux .3%
- Chakra .3%
- Kali Linux .3%
- PCLinuxOS .3%
- SolydK .3%
- Mandriva .1%
- Oracle Linux .1%
### Best Mobile Linux OS ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f2.jpg)
Android is such a dominant force in the mobile world, we decided to allow Android variants to be counted separately. So although the underlying system on some of these is indeed Android, it seems far more informative this way.
- Stock Android 37.1%
- Sailfish OS 27.6%
- CyanogenMod 20.2%
- Other 3%
- Ubuntu Phone 3%
- Amazon Fire OS 1.5%
- Ubuntu for Android 1.4%
- Replicant .8%
- Tizen .8%
### Best Linux Smartphone Manufacturer ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f3.jpg)
- Samsung 29%
- Jolla 26.7%
- Nexus 16.5%
- Other 7.1%*
- HTC 7%
- LG 5.3%
- Sony 3.7%
- Nokia 1.8%
- Huawei 1.4%
- GeeksPhone 1%
- Amazon .6%
*Under "Other", Motorola got many write-ins, followed by OnePlus.
### Best Linux Tablet ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f4.jpg)
- Google Nexus 7 35.3%
- Google Nexus 10 14.8%
- Samsung Galaxy Tab 14%
- Samsung Galaxy Note 9.8%
- ASUS Transformer Pad 8.4%
- Other 6.4%
- Kindle Fire HD 4.7%
- ASUS MeMO Pad 2%
- Dell Venue 1.6%
- Acer Iconia One 1.4%
- Samsung Galaxy Note Edge .9%
- Ekoore Python S3 .7%
### Best Other Linux-Based Gadget (not including smartphones or tablets) ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f5.jpg)
We are a Raspberry Pi-loving bunch, that's for sure! But really, who can blame us? With the new B+ model, the already awesome RPi is getting sleeker and more useful. I'm no fortune teller, but I suspect I know next year's winner already.
- Raspberry Pi 71.4%
- BeagleBone Black 8.1%
- Other 4.3%*
- Lego Mindstorms Ev3 3.7%
- Moto 360 3.4%
- Cubieboard 1.7%
- Parrot A.R Drone 1.7%
- Samsung Gear S 1.4%
- Yamaha Motif XF8 1.1%
- Nvidia Jetson-K1 Development System .8%
- Cloudsto EVO Ubuntu Linux Mini PC .5%
- VoCore Open Hardware Computer .5%
- LG G Watch .4%
- RaZberry .4%
- VolksPC .4%
- IFC6410 Pico-ITX Board .2%
- JetBox 5300 .1%
*Under "Other", the most popular write-ins were Odroid and CuBox.
### Best Laptop Vendor ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/lenovo.jpg)
This category used to be a rating of which vendors worked the best with Linux, but thankfully, now most laptops work fairly well. So, we truly get to see the cream rise to the top and focus on things other than "it works with Linux". It's awesome living in the future.
- Lenovo 32%
- ASUS 19.3%
- Dell 18.5%
- System76 10.6%
- Other 7.9%*
- Acer 4.5%
- ThinkPenguin 1.9%
- LinuxCertified 1.8%
- ZaReason 1.6%
- EmperorLinux 1.5%
- CyberPower .3%
- Eurocom .1%
*Under "Other", the most popular write-ins were (in this order) Apple running Linux, HP, Toshiba and Samsung.
### Best Content Management System ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f6.jpg)
- WordPress 34.7%
- Drupal 25.3%
- Joomla! 11.1%
- MediaWiki 10.5%
- Other 10%*
- Alfresco 4.3%
- WebGUI 1.3%
- ikiwiki 1.1%
- eZ publish .7%
- Wolf CMS .4%
- Elgg .3%
- Blosxom .2%
*Under "Other", the most popular write-ins were (in this order) DokuWiki, Plone, Django and Typo3.
### Best Linux-Friendly Web Hosting Company ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/question.jpg)
When it comes to Web hosting, it's hard to find a company that isn't Linux-friendly these days. In fact, finding a hosting provider running Windows is more of a challenge. As is obvious by our winner ("Other"), the options are amazing. Perhaps a "Worst Web Hosting" category would be more useful!
- Other 22.8%*
- Amazon 22.5%
- Rackspace 13.1%
- Linode 10.4%
- GoDaddy.com 6.5%
- OVH 5.6%
- DreamHost 5.4%
- 1&1 4.8%
- LAMP Host 2.9%
- Hurricane Electric 2.6%
- Liquid Web .6%
- RimuHosting .6%
- Host Media .5%
- Savvis .5%
- Blacknight Solutions .4%
- Netfirms .4%
- Prgmr .4%
*Under "Other", the most write-ins went to (in this order) Digital Ocean (by a landslide), followed by Hetzner, BlueHost and WebFaction.
### Best Web Browser ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f7.jpg)
Firefox takes the gold this year by a significant margin. Even if you combine Chrome and Chromium, Firefox still takes the top spot. There was a time when we worried that the faithful Firefox would fade away, but thankfully, it's remained strong and continues to be a fast, viable, compatible browser.
- Firefox 53.8%
- Chrome 26.9%
- Chromium 8.1%
- Iceweasel 4%
- Opera 3%
- Other 2%
- SeaMonkey .8%
- rekonq .5%
- dwb .4%
- QupZilla .4%
- Dillo .2%
### Best E-mail Client ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f8.jpg)
If I didn't know firsthand how many hard-core geeks live among us, I might accuse Kyle Rankin of voting fraud. His beloved Mutt e-mail client doesn't take top spot, but for a program without any graphical interface, third place is impressive!
- Mozilla Thunderbird 44.4%
- Gmail 24.7%
- Mutt 6.8%
- Evolution 5.5%
- KMail 5.3%
- Other 3.2%
- Claws Mail 2.2%
- Zimbra 2%
- Alpine 1.8%
- Geary 1.7%
- SeaMonkey 1%
- Opera Mail .9%
- Sylpheed .4%
### Best Audio Editing Tool ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f9.jpg)
- Audacity 69.1%
- FFmpeg 10.8%
- VLC 9.7%
- Ardour 4.9%
- Other 1.9%
- SoX 1.3%
- Mixxx 1.1%
- LMMS .7%
- Format Junkie .5%
### Best Audio Player ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f10.jpg)
We figured VLC would take top spot in the video player category (see below), but it was a bit of a surprise to see how many folks prefer it as an audio player as well. Perhaps it's become the one-stop shop for media playback. Either way, we're thrilled to see VLC on the top.
- VLC 25.2%
- Amarok 15.3%
- Rhythmbox 10.4%
- Clementine 8.6%
- MPlayer 6.1%
- Spotify 5.9%
- Audacious 5.5%
- Banshee 4.6%
- Other 4%*
- XBMC 3.1%
- foobar2000 3%
- Xmms 2.4%
- DeaDBeeF 1.2%
- MOC .9%
- cmus .8%
- Ncmpcpp .8%
- Guayadeque .6%
- Mixxx .4%
- MPC-HC .4%
- Subsonic .4%
- Nightingale .3%
- Decibel Audio Player .2%
*Under "Other", Quod Libet had the most write-ins.
### Best Video Player ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f10_0.jpg)
- VLC 64.7%
- MPlayer 14.5%
- XBMC 6.4%
- Totem 2.7%
- Other 2.7%*
- Plex 2%
- Kaffeine 1.9%
- mpv 1.6%
- MythTV 1.6%
- Amarok 1.4%
- Xmms .3%
- Daum Potplayer .2%
- Clementine .1%
*Under "Other", most write-ins were for SMPlayer.
### Best Video Editor ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f10_1.jpg)
This is another testament to the geek factor when it comes to our readers. We didn't specify "non-linear editor", so by a transcoding technicality, VLC eked out a win in the video editing category. Well played, VLC, well played.
- VLC 17.5%
- Kdenlive 16.4%
- Blender 15.1%
- Avidemux 13.2%
- OpenShot 13.2%
- Cinelerra 7.5%
- PiTiVi 4.9%
- LightWorks 4.8%
- Other 4.7%
- LiVES 1.4%
- Shotcut .6%
- Jahshaka .4%
- Flowblade .4%
### Best Cloud-Based File Storage ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f11.jpg)
In a category that used to have few options, Dropbox still takes top spot, but the margin is closing. It's hard to argue against Dropbox's convenience and stability, but hosting your own data on ownCloud gives it quite a boost into the second-place spot.
- Dropbox 30.5%
- ownCloud 23.6%
- Google Drive 16%
- rsync 8.3%
- Other 7.5%*
- Amazon S3 6.6%
- SpiderOak 4.4%
- Box 1.8%
- Copy 1%
- AjaXplorer .3%
*Under "Other", the most write-ins went to Younited and MEGA. Many also said things like "no cloud is the best choice/my files stay on my storage/local only".
### Best Linux Game ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/CIVILIZATION-V-FRONT-OF-BOX.jpg)
I rarely play games, so every year I look forward to this category to find the most popular options for those few times I do. I'm personally tickled to see NetHack so high on the list, especially considering the opposition. There's just something about wandering around random tunnels that appeals to the old-school DnD player in all of us.
- Civilization 5 26.5%
- Other 23.5%*
- Team Fortress 2 8.7%
- NetHack 8.4%
- X-Plane 10 7.1%
- Dota 6.1%
- Bastion 5.4%
- Scorched 3D 3.7%
- Destiny 3.6%
- Ultima IV 1.9%
- FreeCol 1.8%
- Kpat 1.4%
- FreeOrion 1.1%
- Ryzom .9%
*Under "Other", the most write-ins were (in this order) Minecraft, 0 A.D., Frozen Bubble, Battle for Wesnoth, Portal and Counter Strike.
### Best Virtualization Solution ###
I think the relationship with Vagrant has helped Oracle's VirtualBox significantly in popularity. Yes, Vagrant works with other virtualization platforms, but since it so seamlessly integrates with VirtualBox, I think it gets quite a boost. Virtualization is such an efficient and reliable way to implement systems, bare-metal solutions are almost a thing of the past!
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/Virtualbox_logo_0.jpg)
- Oracle VM VirtualBox 33.4%
- VMware 22.3%
- KVM 21.1%
- XEN 5.7%
- QEMU 5.3%
- OpenStack 4.9%
- Other 4.2%*
- OpenVZ 1.7%
- Linux-VServer 1.3%
- Symantec Workspace Virtualization .1%
*Under "Other", the most write-ins went to Docker, ProxMox and LXC, in that order.
### Best Monitoring Application ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/Nagios-Core-4.0.8.png)
- Nagios 27.1%
- Wireshark 20.7%
- htop 12.3%
- Zabbix 10.5%
- Other 8.6%*
- Zenoss 6.2%
- Munin 3.4%
- PC Monitor 2.8%
- New Relic 1.9%
- Opsview 1.2%
- SaltStack 1%
- NTM (Network Traffic Monitor) .7%
- xosview .7%
- Manage Engine .5%
- FlowViewer .3%
- Circonus .2%
- SysPeek .2%
*Under "Other", most write-ins went to Icinga and OpenNMS.
### Best DevOps Configuration Management Tool ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/Git-Logo-2Color.jpg)
It was interesting to see Git take top spot in this category, because although it certainly would work to use standard version control on configuration files, I always assumed it would be used alongside tools like Chef or Puppet. If nothing else, the DevOps movement has taught crusty old system administrators like myself to treat configuration files like code. Version control is incredible, and it seems as though most readers agree.
- Git 39.4%
- Puppet 17.2%
- Ansible 8.9%
- cron jobs 8.8%
- Subversion 7.6%
- Chef 5%
- SaltStack 5.4%
- Other 4.6%*
- CFEngine 3%
*Under "Other", most write-ins went to NixOps.
### Best Programming Language ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f13.jpg)
- Python 30.2%
- C++ 17.8%
- C 16.7%
- Perl 7.1%
- Java 6.9%
- Other 4.6%*
- Ruby 4.3%
- Go 2.4%
- JavaScript 2.4%
- QML 2.2%
- Fortran 1.4%
- Haskell 1.4%
- Lisp 1.2%
- Erlang .6%
- Rust .6%
- D .4%
- Hack .1%
*Under "Other", most write-ins went to Scala, PHP and Clojure (in that order).
### Best Scripting Language ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f13_0.jpg)
Python is incredibly powerful, and it appears to be a favorite in both the scripting and programming categories. As someone who knows Bash and a little PHP, I think it's clear what I need to focus on as I delve into the world of development. Meaningful whitespace, here I come!
- Python 37.1%
- Bash/Shell scripts 27%
- Perl 11.8%
- PHP 8.4%
- JavaScript 6.7%
- Ruby 4.9%
- Other 2.1%
- Lua 2%
### Best New Linux/Open-Source Product/Project ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f14.jpg)
Docker is clearly our winner here, and rightly so—what a game-changing technology. It's nice to see Jolla/Sailfish get some love as well. We love Android, but having a choice is a vital part of who we are as Open Source advocates.
- Docker 28%
- Jolla and Sailfish OS 19%
- LibreOffice 7%
- ownCloud 5%
- Steam 5%
- Zenoss Control Center 5%
- Raspberry Pi 4%
- Git 4%
- Apache Cordova/OpenOffice/Spark/Tika 3%
- Ansible 2%
- Elementary OS 2%
- OpenStack 2%
- Zabbix 2%
- CoreOS 2%
- Firefox OS 2%
- KDE Connect 1%
- NixOS and NixOps 1%
- Open Media Vault 1%
### Coolest Thing You've Ever Done with Linux ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/tux_cruise.png)
This is my favorite new category for the Readers' Choice Awards. Imagine attending a Linux conference and asking everyone the coolest thing they've done with Linux. That's basically what happened here! We've listed a handful of our favorites, but for the entire list, check out: [http://www.linuxjournal.com/rc2014/coolest][2].
Note: the most common answers were "use it"; "rescue data/photos/whatever off broken Windows machines"; "convert friends/family/businesses to Linux"; "learn"; "teach"; "get a job"; "home automation"; and "build a home media server". The following list is of our favorite more-specific and unique answers, not the most common ones.
- Building my procmail pre-spam spam filter back in the mid-late 1990s.
- 450-node compute cluster.
- 7.1 channel preamp with integrated mopidy music player.
- A robot running Linux (for the Eurobot annual competition).
- Accidentally printing on the wrong continent.
- Adding an audio channel to a video while also syncing it.
- Analyzed NASA satellite data with self-written code.
- Annoyed the cat remotely.
- Automated my entire lighting setup in my house to respond to voice and my mobile apps.
- Automatic window plant irrigation system.
- Bathroom radio.
- Brewing beer.
- Built an application that runs on the International Space Station.
- Built a system for real-time toll collection for a major toll highway system.
- Built our own smartphone.
- Built Web-based home alarm system on Raspberry Pi.
- Cluster of Raspberry Pis to crack encrypted office documents.
- Controlled my Parrot drone.
- Controlled the comms for 186 Wind turbines.
- Controlling my Meade Telescope with Stellarium under Linux.
- Converted my old VHS family videos, using a laptop more than ten years old.
- Created a mesh network in the subarctic.
- Created an ocean environmental sensor buoy with radio data transmitter.
- Discovered new planets.
- Fixed a jabber server in Denver, USA, while in a hotel lobby in Amman, Jordan.
- Got Linus' autograph on a Red Hat 5.0 CD.
- Hacked my coffee machine to send me a text message when the coffee is ready.
- Introduced my daughter to Lego Mindstorm EV3.
- Monitor the temp and humidity of my wine cellar and open the doors when too hot or humid.
- Replaced the controller in my hot tub with a Raspberry Pi.
- Scripted opening and closing of a co-worker's CD tray every 15 seconds for four days.
- Used an LFS system to move ACH transfers for a national gas company.
- Flushed my toilet from another city.
- Remote chicken door.
- Web-based sprinkler controller for 16 stations on a Raspberry PI (also control the pool and yard lights).
- Chaining SSH tunnels together to get from work to home via three hops due to restrictive network settings.
- Built a system that monitors a renewable energy installation with two fixed solar arrays, a two axis sun tracking solar array and a wind turbine. Production and weather data are displayed on a Web site in real time.
- Back in the days of modems, I had my computer call up my girlfriend every morning, so she would wake up and go to work.
- Used a Wii controller, through Bluetooth with my Linux computer as an Infrared Camera, to detect the movement of my daughter's Fisher Price Sit and Spin Pony, and to control a video game.
--------------------------------------------------------------------------------
via: http://www.linuxjournal.com/rc2014
作者:[Shawn Powers][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://www.linuxjournal.com/users/shawn-powers
[1]:http://www.linuxjournal.com/contact
[2]:http://www.linuxjournal.com/rc2014/coolest

View File

@ -1,38 +0,0 @@
Linus Torvalds Thanks Microsoft for a Great Black Friday Monitor Deal
================================================================================
![Linus Torvalds](http://i1-news.softpedia-static.com/images/news2/Linus-Torvalds-Thanks-Microsoft-for-a-Great-Black-Friday-Monitor-Deal-466599-2.jpg)
> The creator of the Linux kernel now has a UHD display
**Linus Torvalds is the creator of the Linux Kernel, he advocated for years against Microsoft's practices and he often talked about Windows. These are just some of the reasons why it's funny to see him thank Microsoft, even if it's probably done sarcastically.**
The rhetoric regarding the Linux vs. Windows subject has subsided a great deal in the last few years. There have been some issues with UEFI and other similar problems, but for the most part things have quieted down.
There is no one left at the Redmond campus to call Linux a cancer and no one is making fun of Windows for crashing all the time. In fact, there has been some sort of reconciliation between the two sides, which seems to benefit everyone.
It's not like Microsoft is ready to adopt the Linux kernel for their operating system, but the new management of the company talks about Linux as a friend, especially in the cloud.
They can no longer ignore it, even if they want to. The same happened with Linus Torvalds who hasn't said anything bad about Microsoft and Windows for a long time, and that is a good thing.
### Linus Torvalds saying "thanks" to Microsoft is not something you see every day ###
The creator of the Linux kernel talked about a great Black Friday deal he got from the Microsoft store, for a UHD monitor. He shared this piece of info on Google+ and some of the users also found it amusing to read that he's giving sincere thanks to Microsoft for their great deal.
"Whee. Just installed a new monitor. 3840x2160 resolution - it's the Dell 28" UHD panel - for $299 (€241) thanks to Microsoft's black Friday deal. Thanks MS! Ok, I have to admit that it's not actually a great panel: very clear color shifts off-center, 30Hz refresh etc. But still - I'm a nut for resolution, and at $299 (€241) I decided that this will carry me over until better panels start showing up at good prices," wrote Linus on [Google+][1].
In the meantime, he is also working on the latest kernel branch, 3.18, which will probably be released sometime at the end of this week. It's not clear how things will evolve after that, especially given the fact that the holidays are approaching fast, and devs might be a little sluggish when it comes to pushing patches and new features for the next 3.19 branch.
--------------------------------------------------------------------------------
via: http://news.softpedia.com/news/Linus-Torvalds-Thanks-Microsoft-for-a-Great-Black-Friday-Monitor-Deal-466599.shtml
作者:[Silviu Stahie][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://news.softpedia.com/editors/browse/silviu-stahie
[1]:https://plus.google.com/+LinusTorvalds/posts/4MwQKZhGkEr

View File

@ -1,42 +0,0 @@
Apparently This Trojan Virus May Have Infected Linux Systems For Years
================================================================================
![](http://itsfoss.itsfoss.netdna-cdn.com/wp-content/uploads/2014/12/trojan-word-cloud.jpg)
One of the first few arguments in [why should you switch to Linux][1] is that Linux is secure and virus free. It is widely perceived by most of the Linux users that Linux is immune to viruses, which is true to an extent but not entirely.
Like any other OS, Linux too is not immune to malware, trojan, rootkit, virus etc. There have been several [famous Linux viruses][2]. But if you compare those to that of Windows, the number is infinitesimal. So, why am I talking about Linux viruses today then? Because a new trojan has been detected in market which might be impacting Linux systems.
### Turla infects Linux systems as well ###
Few months back a sophisticated cyber espionage program, nicknamed [Turla][3], was detected. It was supposed to be originated in Russia, allegedly with Russian government backing. The spyware program was targeting government organizations in Europe and the United States for four years.
In a recent report, researchers at [Kaspersky][4] has found that Turla was not only affecting Windows system but also Linux operating system. Kaspersky researchers have termed it the missing piece of Turla puzzle. As per the report:
> “This newly found Turla component supports Linux for broader system support at victim sites. The attack tool takes us further into the set alongside the Snake rootkit and components first associated with this actor a couple years ago. We suspect that this component was running for years at a victim site, but do not have concrete data to support that statement just yet.”
### What is this Linux module of Turla and how dangerous it is? ###
Going by the Kaspersky report,
> The Linux Turla module is a C/C++ executable statically linked against multiple libraries, greatly increasing its file size. It was stripped of symbol information, more likely intended to increase analysis effort than to decrease file size. Its functionality includes hidden network communications, arbitrary remote command execution, and remote management. Much of its code is based on public sources.
Report also mentions that this trojan doesn't require elevated privileges (read root) while running arbitrary remote commands and it cannot be discovered by commonly used administrative tools. Personally, I doubt their claims.
So, as a Linux desktop user, should you be scared? In my opinion, it is too early to go into panic mode as we experienced with [ShellShock Linux bug][5]. Turla was originally intended for government organizations, not common users. Let's wait and watch for more concrete news. I'll keep on updating this article. Till then enjoy Linux.
--------------------------------------------------------------------------------
via: http://itsfoss.com/apparently-trojan-virus-infected-linux-systems-years/
作者:[Abhishek][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://itsfoss.com/author/Abhishek/
[1]:http://itsfoss.com/reasons-switch-linux-windows-xp/
[2]:http://www.unixmen.com/meet-linux-viruses/
[3]:http://www.reuters.com/article/2014/03/07/us-russia-cyberespionage-insight-idUSBREA260YI20140307
[4]:https://securelist.com/blog/research/67962/the-penquin-turla-2/
[5]:http://itsfoss.com/linux-shellshock-check-fix/

View File

@ -32,4 +32,4 @@ via: http://www.computerworld.com/article/2857129/turla-espionage-operation-infe
[a]:http://www.computerworld.com/author/Lucian-Constantin/
[1]:http://news.techworld.com/security/3505688/invisible-russian-cyberweapon-stalked-us-and-ukraine-since-2005-new-research-reveals/
[2]:https://securelist.com/blog/research/67962/the-penquin-turla-2/
[2]:https://securelist.com/blog/research/67962/the-penquin-turla-2/

View File

@ -0,0 +1,25 @@
Git 2.2.1 Released To Fix Critical Security Issue
================================================================================
![](http://www.phoronix.com/assets/categories/freesoftware.jpg)
Git 2.2.1 was released this afternoon to fix a critical security vulnerability in Git clients. Fortunately, the vulnerability doesn't plague Unix/Linux users but rather OS X and Windows.
Today's Git vulnerability affects those using the Git client on case-insensitive file-systems. On case-insensitive platforms like Windows and OS X, committing to .Git/config could overwrite the user's .git/config and could lead to arbitrary code execution. Fortunately with most Phoronix readers out there running Linux, this isn't an issue thanks to case-sensitive file-systems.
Besides the attack vector from case insensitive file-systems, Windows and OS X's HFS+ would map some strings back to .git too if certain characters are present, which could lead to overwriting the Git config file. Git 2.2.1 addresses these issues.
More details via the [Git 2.2.1 release announcement][1] and [GitHub has additional details][2].
--------------------------------------------------------------------------------
via: http://www.phoronix.com/scan.php?page=news_item&px=MTg2ODA
作者:[Michael Larabel][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://www.michaellarabel.com/
[1]:http://article.gmane.org/gmane.linux.kernel/1853266
[2]:https://github.com/blog/1938-git-client-vulnerability-announced

View File

@ -0,0 +1,43 @@
Google Cloud offers streamlined Ubuntu for Docker use
================================================================================
> Ubuntu Core provides a minimal Lightweight Linux environment for running containers
Google has adopted for use in its cloud a streamlined version of the Canonical Ubuntu Linux distribution tweaked to run Docker and other containers.
Ubuntu Core was designed to provide only the essential components for running Linux workloads in the cloud. An [early preview edition][1] of it, which Canonical calls "Snappy," was released last week. The new edition jettisoned many of the libraries and programs usually found in general use Linux distributions that were unnecessary for cloud use.
[ [Get started with Docker][2] using this step-by-step guide to the red-hot open source framework. | Get the latest insight on the tech news that matters from [InfoWorld's Tech Watch blog][3]. ]
The Google Compute Engine (GCE) [joins Microsoft Azure][4] in supporting the fresh distribution.
According to Canonical, Ubuntu Core should provide users with an easy way to deploy Docker, an [increasingly lightweight virtualization container][4] that allows users to quickly spin up workloads and easily move them around, even across different cloud providers.
Google has been an ardent supporter of Docker and container-based virtualization itself. In June, the company [released as open source its software for managing containers][5], called Kubernetes.
The design of Ubuntu Core is similar to another Linux distribution, CoreOS, [first released a year ago][7].
Developed in part by two ex-Rackspace engineers, [CoreOS][8] is a lightweight Linux distribution designed to work in clustered, highly scalable environments favored by companies that do much or all of their business on the Web.
CoreOS was quickly adopted by many cloud providers, including Microsoft Azure, Amazon Web Services, DigitalOcean and Google Compute Engine.
Like CoreOS, Ubuntu Core offers an expedited process for updating components, reducing the amount of time that an administrator would need to manually manage them.
--------------------------------------------------------------------------------
via: http://www.infoworld.com/article/2860401/cloud-computing/google-cloud-offers-streamlined-ubuntu-for-docker-use.html
作者:[Joab Jackson][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://www.infoworld.com/author/Joab-Jackson/
[1]:http://www.ubuntu.com/cloud/tools/snappy
[2]:http://www.infoworld.com/article/2607941/linux/how-to--get-started-with-docker.html
[3]:http://www.infoworld.com/blog/infoworld-tech-watch/
[4]:http://www.ubuntu.com/cloud/tools/snappy
[5]:http://www.itworld.com/article/2695383/open-source-tools/docker-all-geared-up-for-the-enterprise.html
[6]:http://www.itworld.com/article/2695501/cloud-computing/google-unleashes-docker-management-tools.html
[7]:http://www.itworld.com/article/2696116/open-source-tools/coreos-linux-does-away-with-the-upgrade-cycle.html
[8]:https://coreos.com/using-coreos/

View File

@ -0,0 +1,28 @@
New 64-bit Linux Kernel Vulnerabilities Disclosed This Week
================================================================================
![](http://www.phoronix.com/assets/categories/linuxkernel.jpg)
For those that didn't hear the news yet, multiple Linux x86_64 vulnerabilities were made public this week.
With CVE-2014-9322 that's now public, there's a local privilege escalation issue affecting all kernel versions prior to Linux 3.17.5. CVE-2014-9322 is described as "privilege escalation due to incorrect handling of a #SS fault caused
by an IRET instruction. In particular, if IRET executes on a writeable kernel stack (this was always the case before 3.16 and is sometimes the case on 3.16 and newer), the assembly function general_protection will execute with the user's gsbase and the kernel's gsbase swapped. This is likely to be easy to exploit for privilege escalation, except on systems with SMAP or UDEREF. On those systems, assuming that the mitigation works correctly, the impact of this bug may be limited to massive memory corruption and an eventual crash or reboot."
Fortunately, it's fixed [in Linux kernel Git since late November][1]. CVE-2014-9322 is linked to CVE-2014-9090, which is also corrected by the fixes in Git.
There's also two x86_64 kernel bugs related to espfix. "The next two bugs are related to espfix. The IRET instruction has IMO a blatant design flaw: IRET to a 16-bit user stack segment will leak bits 31:16 of the kernel stack pointer. This flaw exists on 32-bit and 64-bit systems. 32-bit Linux kernels have mitigated this leak for a long time, and 64-bit Linux kernels have mitigated this leak since 3.16. The mitigation is called espfix."
Fixes for CVE-2014-8133 and CVE-2014-8134 are in KVM and Linux kernel Git as of a few days ago. More details on these x86_64 vulnerabilities via [this oss-sec posting][2]. These issues were uncovered by Andy Lutomirski at AMA Capital Management.
--------------------------------------------------------------------------------
via: http://www.phoronix.com/scan.php?page=news_item&px=MTg2NzY
作者:[Michael Larabel][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://www.michaellarabel.com/
[1]:https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/arch/x86/kernel/entry_64.S?id=6f442be2fb22be02cafa606f1769fa1e6f894441
[2]:http://seclists.org/oss-sec/2014/q4/1052

View File

@ -0,0 +1,44 @@
Linus Torvalds Launches Linux Kernel 3.19 RC1, One of the Biggest So Far
================================================================================
> new development cycle for Linux kernel has started
![](http://i1-news.softpedia-static.com/images/news2/Linus-Torvalds-Launches-Linux-kernel-3-19-RC1-One-of-the-Biggest-So-Far-468043-2.jpg)
**The first Linux kernel Release Candidate has been made available in the 3.19 branch and it looks like it's one of the biggest ones so far. Linus Torvalds surprised everyone with an early launch, but it's easy to understand why.**
The Linux kernel development cycle has been refreshed with a new release, 3.19. Given the fact that the 3.18 branch reached stable status just a couple of weeks ago, today's release was not completely unexpected. The holidays are coming and many of the developers and maintainers will probably take a break. Usually, a new RC is launched on a weekly basis, but users might see a slight delay this time.
There is no mention of the regression problem that was identified in Linux kernel 3.18, but it's pretty certain that they are still working to fix it. On the other hand, Linus did say that this is a very large release, in fact it's one of the biggest ones made until now. It's likely that many devs wanted to push their patches before the holidays, so the next RC should be smaller.
### Linux kernel 3.19 RC1 marks the start of a new cycle ###
The size of the releases has been increasing, along with the frequency. The development cycle for the kernel usually takes about 8 to 10 weeks and it seldom happens to be more than that, which brings a nice predictability for the project.
"That said, maybe there aren't any real stragglers - and judging by the size of rc1, there really can't have been much. Not only do I think there are more commits than there were in linux-next, this is one of the bigger rc1's (at least by commits) historically. We've had bigger ones (3.10 and 3.15 both had large merge windows leading up to them), but this was definitely not a small merge window."
"In the 'big picture', this looks like a fairly normal release. About two thirds driver updates, with about half of the rest being architecture updates (and no, the new nios2 patches are not at all dominant, it's about half ARM, with the new nios2 support being less than 10% of the arch updates by lines overall)," [reads][1] the announcement made by Linus Torvalds.
More details about this RC can be found on the official mailing list.
#### Download Linux kernel 3.19 RC1 source package: ####
- [tar.xz (3.18.1 Stable)][3] File size: 77.2 MB
- [tar.xz (3.19 RC1 Unstable)][4]
If you want to test it, you will need to compile it yourself, although it's advisable to not use a production machine.
--------------------------------------------------------------------------------
via: http://news.softpedia.com/news/Linus-Torvalds-Launches-Linux-kernel-3-19-RC1-One-of-the-Biggest-So-Far-468043.shtml
作者:[Silviu Stahie ][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://news.softpedia.com/editors/browse/silviu-stahie
[1]:http://lkml.iu.edu/hypermail/linux/kernel/1412.2/02480.html
[2]:http://linux.softpedia.com/get/System/Operating-Systems/Kernels/Linux-Kernel-Development-8069.shtml
[3]:https://www.kernel.org/pub/linux/kernel/v3.x/linux-3.18.1.tar.xz
[4]:https://www.kernel.org/pub/linux/kernel/v3.x/testing/linux-3.19-rc1.tar.xz

View File

@ -1,5 +1,3 @@
barney-ro translating
5 Awesome Open Source Backup Software For Linux and Unix-like Systems
================================================================================
A good backup plan is essential in order to have the ability to recover from

View File

@ -1,4 +1,3 @@
mdjsjdqe translating...
11 Useful Utilities To Supercharge Your Ubuntu Experience
================================================================================
**Whether youre a relative novice or a seasoned pro, we all want to get the most from our operating system. Ubuntu, like most modern OSes, has more to offer than what is presented at first blush.**

View File

@ -1,104 +0,0 @@
[Translating by Stevearzh]
NetHack
================================================================================
## The best game of all time? ##
**Its tremendously addictive. It takes a lifetime to master. And people play it for decades without completing it. Welcome to the strange world of NetHack…**
Believe it or not, its possible to be terrified by the sight of the letter D. Or ecstatic about the sight of a % character. (And the less said about ^, the better.) But before you assume weve gone totally loopy and close the tab, bear with us for a moment: those characters represent dragons, food rations and traps respectively. Welcome to NetHack, where your imagination needs to play a big role in the gameplay.
You see, NetHack is a text-mode game: it just uses the standard terminal character set to portray the player, enemies, items and surroundings. Graphical versions of the game exist, but NetHack purists tend to avoid them, and whats the point of a game if you cant play it when youre SSHed into your revived Amiga 3000 running NetBSD? In some ways, NetHack is a lot like Vi it has been ported to nigh-on every operating system in existence, and its requirements are absolutely minimal.
Now, given that it looks like utter pants when compared to modern games, what makes NetHack so appealing? Well, this dungeon exploring masterpiece is incredibly rich and detailed. There are so many items to discover, spells to cast, monsters to fight and tricks to learn and the dungeons are generated randomly. Theres so much to explore, and no two games are ever the same. People play NetHack for years and decades without completing it, still discovering new secrets each time.
Here well show you how NetHack came about, give you a guided tour of the dungeons, and show you some tricks. Note: by reading this feature, you agree to not sue us when you become addicted to NetHack and your real-life productivity is obliterated.
![The NetHack interface](http://www.linuxvoice.com/wp-content/uploads/2014/12/nh_annotated.png)
The NetHack interface
### Possibly the oldest still-developed game ###
Despite its name, NetHack isnt an online game. Its based on an earlier dungeon-exploring romp called Hack, which in turn was a descendant of an 1980 game called Rogue. NetHacks first release arrived in 1987, and although no new features have been added since version 3.4.3 in 2003, various patches, add-ons and spin-offs are still doing the rounds on the web. This makes it arguably the oldest game thats still being hacked on and played by a sizeable group of people. Go to [www.reddit.com/r/nethack][1] to see what we mean long-time NetHack players are still discussing new strategies, discoveries and tricks. Occasionally youll see gleeful messages from old timers who have finally, after many years, completed the game.
But how do you complete it? Well, NetHack is set in a large and deep dungeon. You start at the top level 1 and your goal is to keep going down until you find a hugely valuable item called the Amulet of Yendor. This is typically in level 20 or lower, but it can vary. As you traverse through and down the dungeon, youll meet all manner of monsters, traps and human characters; some will try to kill you, some will stay out of your way, and some…. well, you dont know until you get close to them.
> Theres so much to learn, and many items only work best when combined with others.
What makes NetHack so compelling is the vast range of items crammed into the game. Weapons, armour, spell books, rings, gems theres so much to learn, and many items only work best when combined with others. Monsters often drop useful items when you kill them, although some items can have very negative effects if you dont use them correctly. Youll find shops in the dungeon that are packed with potentially useful bits of kit, but dont expect the shopkeeper to give you great descriptions. Youve got to learn from experience. Some items arent much use at all, and the game is packed with humour you can even throw a cream pie in your own face.
But before you even set foot in the dungeon, NetHack asks you what kind of player you want to be. You can take your journey as a knight, a monk, a wizard or even a humble tourist, amongst many other player types. They all have their own strengths and weaknesses, and NetHack addicts love to try completing the game with the weaker types. You know, to show off to other players.
> ## Spoilers dont spoil the fun ##
> In NetHack parlance, “spoilers” provide information on monsters, items, weapons and armour. Its technically possible to complete the game without using them, but very few players ever achieve this, as the game is monumentally complex. Consequently its not regarded as bad form to use spoilers but its still more fun to try to work things out yourself first, and only consult the spoilers when you really need them.
> A great source is [www.statslab.cam.ac.uk/~eva/nethack/spoilerlist.html][2] which separates spoilers into categories. For things that happen randomly in the game, such as the effects from drinking from fountains, it gives you the odds of a certain thing happening.
### Your first dungeon crawl ###
NetHack is available for almost every major OS and Linux distribution in the world, so you should be able to grab it with “apt-get install nethack” or “yum install nethack” or whatever is appropriate for your distro. Then run it in a terminal window by just typing “nethack”. The game will ask if it should pick a player type for you but as a newcomer, its best if you choose one of the tougher characters first. So hit “n” and then hit “v” to choose the Valkyrie type, and “d” to be a dwarf.
Then NetHack will give you some plot blurb, explaining that your god seeks the Amulet of Yendor, so your goal is to retrieve it and present it to him. Hit space when youre done reading the text (and any other time you see “More” on the screen). And here we go youre in the dungeon!
As described earlier, your character is represented by a @ sign. You can see the walls of a room around you, and the dot characters depict empty space in the room. First of all, get used to the movement keys: h, j, k and l. (Yes, its just like Vim, as covered in issue 3 of Linux Voice!) These move you left, down, up and right respectively. You can also move diagonally with y, u, b and n. So walk around the room until you get used to the controls.
NetHack is turn-based, so if youre not moving or performing an action, the game stays still. This lets you plan your moves in advance. You will see a “d” or “f” character moving around the room as well: this is your pet dog or cat, which (normally) wont harm you and can assist you in killing monsters. Pets can be annoying though they occasionally eat food rations and tasty corpses before you get to them.
![Hit “i” to bring up an inventory of your currently carried items](http://www.linuxvoice.com/wp-content/uploads/2014/12/nh_inventory.png)
Hit “i” to bring up an inventory of your currently carried items
### Whats behind the door? ###
Now, lets go out of the room. There will be gaps around the edge, and possibly “+” signs. That “+” is a closed door, so go up to it and hit “o” to open. You will be asked for a direction, so if the door is to the left of you, press “h”. (And if the door is stuck, try opening it a few times.) Youll then end up in a corridor, marked by “#” symbols, so walk around it until you find another room.
On your travels youll see various items. Some, such as money (denoted by a “$” symbol) are picked up automatically; for other items, you have to press the comma key whilst standing on them. If there are multiple items, youll be given a menu, so press the appropriate keys shown in the menu and then Enter to choose what you want. At any time you can hit “i” to bring up your inventory list see the screenshot.
What happens if you see a monster? At these early stages of the game, the monsters youre likely to come across will be represented by “d”, “x” and “:” characters. To attack, simply walk into them. The game will tell you if your attacks are successful using the messages along the top and also how the monster is responding. These early monsters are simple to kill, so you shouldnt have any trouble defeating them, but keep an eye on your HP in the status line at the bottom.
> Early monsters are simple to kill, but keep an eye on your HP.
If a monster leaves behind a corpse (“%”), you can hit comma to take it and then press “e” to eat it. (Whenever youre prompted to choose an item, you can press its corresponding key from the inventory list, or “?” to bring up a mini list.) Warning! Some corpses are poisonous, and these are things youll learn on your travels.
If youre exploring a corridor and appear to come to a dead end, you can hit “s” to search until you find a door. This can take ages, however, so you can speed things up a bit: type “10” and then “s” and you will perform 10 searches in a row. This takes up 10 moves in game time, however, so if youre hungry you could get close to starvation!
Common items youll find in the top levels of the dungeon are “{” (fountains) and “!” (potions). For the former, you can stand on it and hit q to “quaff” from it the effects can vary from useful to deadly. For potions, pick them up and then use “q” to drink them. If you find a shop, you can pick up items and then hit “p” to pay before leaving. Use “d” to drop something.
![Souped-up versions of NetHack with fancy graphics are available, such as Falcons Eye](http://www.linuxvoice.com/wp-content/uploads/2014/12/falcon.jpg)
Souped-up versions of NetHack with fancy graphics are available, such as Falcons Eye
> ## Stupid ways to die ##
> A popular acronym amongst NetHack players is “YASD” Yet Another Stupid Death. It describes a situation where the player buys the farm due to his/her own silliness or lack of concentration. Weve had many of these, but our favourite goes as follows:
> We were browsing a shop, inspecting items, when a snake suddenly jumped out from behind a potion. After killing the snake, a message popped up saying that we were getting hungry, so we opted to eat the snakes corpse. Bad idea! This made us blind, so we couldnt see other characters or items in the shop. We tried to get to the exit, but instead bumped into the shopkeeper and accidentally attacked him. This made him furious; he started firing magic missiles at us. We just about managed to get into the corridor outside the shop, but died from the onslaught.
> If you come to any equally silly ends, let us know on our forums. And dont worry nobody will judge you. Dying like this is all part of growing up in the NetHack world.
### Equip yourself ###
On your travels, and especially after you kill monsters, youll find weapons and armour. Again, use comma to pick these up, and then “w” (lowercase) to wield a weapon or “W” (uppercase) to wear a piece of armour. You can use “T” to remove armour and “t” to throw weapons often handy if youre in a very sticky situation.
Sometimes its useful to examine things from a distance before getting close to them. Hit “;” (semicolon) and “Pick an object” will appear at the top of the screen. Use the movement keys until your view lands on the thing you want to inspect, and then hit “:” (colon). A description will appear at the top.
As your goal is to go further down the dungeon until you find the Amulet of Yendor, keep an eye out for “<” and “>” signs. These are stairs up and down respectively, and you can use the same keys to climb them. Note! Make sure your pet is standing in an adjacent square if you want it to follow you into the next level. If you need a break, use “S” (capital s) to save, and type #quit to exit. Next time you run NetHack, your game will be resumed.
We wont spoil whats ahead, as many of the dungeon levels have amazing designs, characters and secrets. So well leave you with three tips: if you come across an item that completely baffles you, try searching for it on the NetHack wiki at [http://nethack.wikia.com][3]. Youll also find an excellent (albeit very long) guidebook at [www.nethack.org/v343/Guidebook.html][4]. Happy exploring!
--------------------------------------------------------------------------------
via: http://www.linuxvoice.com/nethack/
作者:[Mike Saunders][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://www.linuxvoice.com/author/mike/
[1]:http://www.reddit.com/r/nethack
[2]:http://www.statslab.cam.ac.uk/~eva/nethack/spoilerlist.html
[3]:http://nethack.wikia.com/
[4]:http://www.nethack.org/v343/Guidebook.html

View File

@ -0,0 +1,70 @@
Calife A lightweight alternative to sudo
================================================================================
Calife requests user's own password for becoming login (or root, if no login is provided), and switches to that user and group ID after verifying proper rights to do so. A shell is then executed. If calife is executed by root, no password is requested and a shell with the appropriate user ID is executed.
The invoked shell is the user's own except when a shell is specified in the configuration file calife.auth.
If "-" is specified on the command line, user's profile files are read as if it was a login shell.
This is not the traditional behavior of su.
Only users specified in calife.auth can use calife to become another one with this method.
calife.auth is installed as /etc/calife.auth
### Calife Features ###
Here is an extensive list of features:
you keep your environment variables and shell aliases intact
it has start and end of session logging
you can have a list of all permitted logins for each calife user. That way, you can give a user newsmasters rights without giving out the root password
you can specify a group in the configuration file instead of the logins of all administrators: Just use @staff or %staff and all members of the staff group will have access to calife
calife can also be used to become users even if they have no home directory or even no shell. Thats very practical if you want to become uucp or even bin
you can make calife run a specific system-wide script at the end of the session (to send a mail about what was done as root for example)
### Install calife in ubuntu ###
Open the terminal and run the following command
sudo apt-get install calife
### Using Calife ###
### Syntax ###
calife [-] [login]
Check calife manpage for more details
--------------------------------------------------------------------------------
via: http://www.ubuntugeek.com/calife-a-lightweight-alternative-to-sudo.html
作者:[ruchi][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://www.ubuntugeek.com/author/ubuntufix
[1]:
[2]:
[3]:
[4]:
[5]:
[6]:
[7]:
[8]:
[9]:
[10]:
[11]:
[12]:
[13]:
[14]:
[15]:
[16]:
[17]:
[18]:
[19]:
[20]:

View File

@ -1,5 +1,3 @@
[felixonmars translating...]
Upstream and Downstream: why packaging takes time
================================================================================
Here in the KDE office in Barcelona some people spend their time on purely upstream KDE projects and some of us are primarily interested in making distros work which mean our users can get all the stuff we make. I've been asked why we don't just automate the packaging and go and do more productive things. One view of making on a distro like Kubuntu is that its just a way to package up the hard work done by others to take all the credit. I don't deny that, but there's quite a lot to the packaging of all that hard work, for a start there's a lot of it these days.

View File

@ -1,103 +0,0 @@
Why Is Huffington Post Running A Multi-Part Series To Promote The Lies Of A Guy Who Pretended To Invent Email?
================================================================================
**from the that's-just-wrong dept**
I thought this story had ended a few years ago. Back in 2012, we wrote about how The Washington Post and some other big name media outlets were claiming that a guy named V.A. Shiva Ayyadurai had "invented email" in 1978. The problem was that [it wasn't even close to true][1] and relied on a number of total misconceptions about email, software and copyright law. Ayyadurai and some of his friends have continued to play up the claim that he "invented" email, but it simply was never true, and it's reaching a level that seems truly bizarre. Ayyadurai may have done some interesting things, but his continued false insistence that he invented email is reaching really questionable levels. And, now it's gone absolutely nutty, with the Huffington Post running [a multi-part series][2] (up to five separate articles so far -- all done in the past 10 days) all playing up misleading claims saying that Ayyadurai invented email, even though even a basic understanding of the history shows he did not.
Let's take care of the basics first, and then we'll dig in on what's going on here, because it's really quite ridiculous. First off, no one denies that V.A. Shiva Ayyadurai -- an apparently very bright 14-year-old at the time -- wrote an email software program for the University of Medicine and Dentistry of New Jersey (UMDNJ) in 1978. By all accounts, it was a perfectly decent email system that allowed the UMDNJ staff to send electronic messages. Further, no one doubts that, in 1981, Ayyadurai registered the copyright on his program, which was called EMAIL. The problems are that (1) email was invented long before 1978, (2) the copyright is merely on the specific software code, not the idea of email, and (3) while Ayyadurai may have independently recreated the basics of email (and even added a nice feature), none of his work was even remotely related to what later became the standards of email. What's most sickening about this is that as part of this new PR campaign, Ayyadurai is ridiculously arguing that the reason no one believes him isn't because he's simply wrong, but because they can't stand to believe that "a dark-skinned immigrant kid, 14 years old," invented email, and that it was done in "one of the poorest cities in the US" rather than at a famous university.
Again, that might make for a nice story line if there were some factual basis behind it, but there isn't. The history of email [is well-documented][3] from [multiple sources][4] and it began way, way before 1978. And while early versions were somewhat crude, by 1978 they had basically everything that Ayyadurai claims to have invented (it is entirely believable that Ayyadurai, as a bright kid, independently came up with the same ideas, but he was hardly the first). There was a messaging system called MAILBOX at MIT in 1965. You can read [all the details of it here][5], including source code. Ray Tomlinson is frequently credited with inventing the modern concept of email for the internet by establishing the @ symbol (in 1972) as a way of determining both the user and which computer to send the email to. By 1975, there were things like email folders (invented by Larry Roberts) and some other basic email apps. As is noted, by 1976 -- two years before Ayyadurai wrote his app -- email was *75% of all ARPANET traffic*.
So, let's get to the Huffington Post trying to whitewash all of this factual history out of existence.
It started on August 20th, with an article by Larry Weber, CEO of Racepoint Global, kicking off a supposed "series" called "The History of Email." Except that the series has little to do with the history of email at all. It's just about Ayyadurai writing his particular email program in 1978. Great story. Smart kid done good. Has nothing to do with the invention of email. Weber, though, calls it [The Boy Who Invented Email][6]. At this point, it should be worth questioning why Weber suddenly decided this was such an interesting story. If you don't know, Weber is one of PR's [biggest names][7], having built one of the most successful PR companies in history. It seems odd that he "just happened" to come across Ayyadurai's fake story and decided to help create a 5-part series about it. I have reached out to both Weber and the Huffington Post to ask if Weber has any financial relationship with Ayyadurai. As I publish this, neither has responded. The post will be updated if I hear from either. None of the posts in the series disclose any such relationship. Nor does the Huffington Post indicate that this is a "sponsored" post as far as I can tell.
The [second][8] and [third][9] articles in the series are both written by Leslie Michelson, the Director of High Performance and Research Computing at Rutgers Medical School (which took over UMDNJ a while back). More importantly, in 1978 he was the Director of the Laboratory Computer Network at UMDNJ, and apparently "challenged" Ayyadurai to create an electronic interoffice mail system. The [fourth article][10] in the series is by Robert Field, a technologist at Rutgers Medical School and, in 1978, a colleague of Ayyadurai at UMDNJ. See a pattern? Huffington Post also [interviewed Ayyadurai][11] for HuffPost Live in which he mostly attacks anyone who challenges his story, comparing himself to Philo T. Farnsworth -- except in that case, Farnsworth actually invented TV before anyone else. Ayyadurai did not do that with email. Apparently there are two more in this series that are still to come.
When you look at the collection of articles, they all repeat the same basic things: Ayyadurai did create an email system and "it was recognized by the federal government." This is misleading in the extreme. It's amusing how they all use the exact same language. Larry Weber claims:
> On August 30, 1982, **the US government officially recognized V.A. Shiva Ayyadurai as the inventor of email** by awarding him the first US Copyright for "Email," "Computer Program for Electronic Mail System," for his 1978 invention. This was awarded at a time when Copyright was the only way to protect software inventions.
Leslie Michaelson says:
> On August 30,1982, V.A. Shiva Ayyadurai **received official recognition as the inventor of email from the U.S. government**, for the work he had done in 1978.
Every article in the series includes this image of his copyright registration:
[![](https://i.imgur.com/AscOfQh.png)][12]
Except, if you know anything about copyright, you know that what they're claiming is not at all true. The registration of copyrights is about as close to a rubber-stamping process as is possible. It has nothing to do with "inventions" at all, but is rather a copyright for the specific software program. Ayyadurai received a copyright on his email program and that's it. It has absolutely nothing to do with him being the inventor of email.
Microsoft holds a copyright on Windows, but no one claims it "invented" the glass things you look outside your building with. Hell, no one even claims that Microsoft invented windowing user interfaces, because it did not. The name of the program and the fact that you can copyright it does not make you the "inventor" of the concept behind it.
Weber, Ayyadurai and his friends try to counter the "it's a copyright, not a patent" claim with an incredibly misleading response. Here's Michelson:
> On August 30, 1982, Shiva was issued the first Copyright for "Email", "Computer Program for Electronic Mail System." At that time, Copyright was the equivalent of a patent, as there was no other way to protect software inventions. Only in 1980 was the Copyright Act of 1976 amended to protect software. Patent law had not even caught up to software in 1980
Copyright was not, and has never been "the equivalent of a patent." Copyright and patents are two very different things. Copyright protects specific expression. Patents protect inventions. That's why copyright protected only the specific code that Ayyadurai wrote, rather than the concept of email. While it's true that software wasn't considered patentable by many at the time, that doesn't, in any way, mean that a copyright on a particular piece of software was the equivalent in any way, to a patent at the time.
To further their argument, both Weber and Michelson include nearly identical, but slightly different, infographics on the history of email, which (of course) start in 1978 with Ayyadurai's work. According to those charts, email was barely even a thing outside of UMDNJ until 1985 when offline email readers come about. The infographic is the work of the impressive sounding International Center for Integrative Systems. What's left out is that the "[Founder and Chairman][13]" of the International Center for Integrative Systems happens to be... a guy named V.A. Shiva Ayyadurai. The same infographic tosses in a "milestone" in email in 1995, when "Echomail" launched. Doesn't sound familiar? Echomail was a company started by... V.A. Shiva Ayyadurai.
The rest of the articles seem to just focus on attacking those who actually were involved in the invention of email and who dared to speak out against Ayyadurai's claims. The story, which includes no actual support, is that the folks at BBN decided in the early 80s that email security was a big business opportunity and rewrote history. Whether or not BBN played up their role in the history of email is debatable, but none of that changes the fact that they (and many others) were using email, and had email software, long before Ayyadurai did anything. At no point do any of them address the long history of email systems long before Ayyadurai arrived on the scene. Instead, they just talk about this grand conspiracy theory, claiming (ridiculously) that if BBN were outed as not being the inventor of email (even though no one really claims the company was the inventor of email) it would harm its business. That makes no sense at all. First of all, BBN's history of work related to the internet is long and well-detailed (there's even a [fantastic book][14] about it). Even if it had nothing to do with email, it's other work is much more impressive. Second, the company is currently owned by defense contracting giant Raytheon. Does anyone honestly think Raytheon cares one way or the other who "invented email"?
All of their "debunking" claims rest entirely on a RAND report written by David Crocker in 1977, where they take two sentences totally out of context. Here's what Ayyadurai, Weber and their friends claim Crocker said:
> "At this time, no attempt is being made to emulate a full-scale, inter-organizational mail system. The fact that the system is intended for use in various organizational contexts and by users of differing expertise makes it almost impossible to build a system which responds to all users' needs."
It's telling that Ayyadurai and his friends never actually tell you the name of the report or link to it. Because actually reading what Crocker wrote would undermine their argument. The report is called "Framework and Functions of the 'MS' Personal Message System" and you can read it here. Not only do Ayyadurai and his friends take Crocker entirely out of context, the two sentences above are not even contiguous sentences. They're not even on the same page. The first sentence is on page 18 of the paper. And it just says that this particular implementation (the program called MS) is focused on certain facets, and for MS "no attempt is being made to emulate a full-scale inter-organization mail system" even though the entire point of the paper is how various email implementations are clearly replicating inter-organizational mail systems. The second sentence comes on page 21 (with lots in between) and just focuses on the fact that lots of users have very different requests and desires, and it's impossible to satisfy everyone -- and that it, alone, is beyond the scope of this project. He's not, as Ayyadurai implies, claiming that building an interoffice email system is impossible. He's claiming that creating a full system that satisfies absolutely everyone is impossible. However, he does make it clear that other components are being worked on, and when combined could create a more functional email system. Here's that part, back in context:
> To construct a fully-detailed and monolithic message processing environment requires a much larger effort than has been possible with MS. In addition, the fact that the system is intended for use in various organizational contexts and by users of differing expertise makes it almost impossible to build a system which responds to all users' needs. Consequently, important segments of a full message environment have received little or no attention and decisions have been made with the expectation that other Unix capabilities will be used to augment MS. For example, MS has fairly primitive data-base management (filing and cataloging) facilities and message folders have been implemented in a way which allows them to be modified by programs, such as text editors, which access them directly, rather than through the message system.
From the actual source documents (which, again, Ayyadurai and his friends fail to link to and totally misrepresent), it's clear that all Crocker is saying is that no single system will satisfy everyone's current interests. He's not saying it's impossible to create an interoffice email system. He's just saying that lots of different people have lots of different needs for an interoffice email system, and for the team building MS, it would be too difficult to satisfy everyone's exact requests, so they're focusing on certain features, knowing others will add other components later. And, given that people are still working to improve upon email today, it seems that's still basically true.
Back to the rest of the paper, which actually does a tremendous job undermining basically all of Ayyadurai's claims (again, which suggests why no one names or links to the full paper) -- in the very first paragraph (again, this is prior to Ayyadurai doing anything) it talks about research for "computer software" for "electronic mail." Ooops. It goes on:
> This report describes the design of one such program--the "MS" message system. Early electronic mail systems have existed on the larger computers. MS incorporates and expands upon many of the functions and concepts of such systems within an integrated package...
In other words, the very paper that Ayyadurai and his friends insist prove that there was no email prior to 1978 talks in depth about a variety of email programs. Again, remember that this was written in 1977. This is not historical revisionism. It goes on:
> One of the earliest and most popular applications of the ARPANET computer communications network has been the transfer of text messages between people using different computers. This "electronic mail" capability was originally grafted onto existing informal facilities; however, they proved inadequate. A large network greatly expands the base of potential communicators; when coupled with the communication convenience of a message system, there results a considerable expansion to the list of features desired by users. Systems which have responded to these increased user needs have resided on medium- and large-scaled computers.
In other words, lots of folks are working on email systems. Ayyadurai tries to brush all those aside by saying that his actually included things like "folders." But again, Crocker's paper notes:
> Messages reside in file "folders" and may contain any number of fields, or "components."
It actually has a whole section on folders. It also shows some sample messages at the time, showing "to," "from," "cc," "subject," and "message" fields, showing that the very basics of interoffice mail (such as "cc" -- standing for carbon copy, which was a standard bit of interoffice mail) had already moved into email. Here's a screenshot (which you can click for a larger version):
[![](https://i.imgur.com/KJW7BnAm.png)][15]
Ayyadurai has built up his entire reputation around the (entirely false) claim that he "invented" email. His bio, his Twitter feed and his website all position him as having invented email. He didn't. It looks like he wrote an implementation of an email system in 1978, long after others were working on similar things. He may have added some nice features, including the "blind carbon copy/bcc" concept (Update: Nope, bcc was in a [1977 RFC][16]). He also appears to have potentially been ahead of others in making a full address book be a part of the email system. He may, in fact, be the first person who shortened "electronic mail" to "email" which is cool enough, and he'd have an interesting claim if that's all he claimed. Unfortunately, he's claiming much, much more than that. He's set up [an entire website][17] in which he accuses lots of folks, including Techdirt, of unfairly "attacking" him. He apparently believes that some of the attacks on him are [because][18] he spoke out against corruption in India. Or because people think only rich white people can invent stuff. None of that is accurate. There's a simple fact, and it's that Ayyadurai did not invent email.
He does not even attempt to counter any of the actual facts. The documents that are presented are misleading or out of context. He misrepresents what a copyright registration means. And his main "smoking gun," in support of his claim that people are trying to unfairly write him out of history, is presented in a misleading way, out of context, with two entirely separate sentences pushed together to pretend they say something they didn't.
He's clearly quite proud of the email software he wrote in 1978, and that's great. He should be. It may have made some incremental improvements on what else was already out there, but it is not inventing email. It's also entirely possible that he was wholly unaware of everything else that was out there. And, again, that's great. We've talked many times in the past about multiple people coming up with the same ideas around the same time. Ayyadurai should be quite proud of what he's done. But he's simply not telling the truth when he claims to have invented email. His website is full of accolades from the past, including his Westinghouse award (which is a prestigious award for high schoolers), his copyrights and his later patents. There are local newspaper clippings. That's all great. It reminds me of the folder my mother has on all the nice things that happened to me as a kid. But none of it means he invented email.
It's unclear why Huffington Post is publishing this ludicrous and disproven narrative. It's unclear why one of the biggest names in PR is involved in all of this, though you can take some guesses. But there are facts, and they include that "electronic mail" existed long before V.A. Shiva Ayyadurai wrote his program as a precocious teenager. Huffington Post is either not disclosing a paid-for series of posts (which would be a massive ethical breach) or they've been taken for a ride. Neither option speaks well of HuffPo and its journalistic integrity.
--------------------------------------------------------------------------------
via: https://www.techdirt.com/articles/20140901/07280928386/huffpo-publishes-bizarre-misleading-factually-incorrect-multi-part-series-pretending-guy-invented-email-even-though-he-didnt.shtml
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[1]:https://www.techdirt.com/articles/20120222/11132917842/how-guy-who-didnt-invent-email-got-memorialized-press-smithsonian-as-inventor-email.shtml
[2]:http://www.huffingtonpost.com/news/the-history-of-email/
[3]:http://www.nethistory.info/History%20of%20the%20Internet/email.html
[4]:http://www.ir.bbn.com/~craig/email.pdf
[5]:http://www.multicians.org/thvv/mail-history.html
[6]:http://www.huffingtonpost.com/larry-weber/the-history-of-email-boy-who-invented-email_b_5690783.html
[7]:http://en.wikipedia.org/wiki/Larry_Weber
[8]:http://www.huffingtonpost.com/leslie-p-michelson/the-history-of-email-invention-of-email_b_5707913.html
[9]:http://www.huffingtonpost.com/leslie-p-michelson/history-of-email-introduction_b_5726018.html
[10]:http://www.huffingtonpost.com/robert-field/history-of-email-first-email-system_b_5722000.html
[11]:http://www.huffingtonpost.com/2014/08/28/email-shiva-ayyadurai_n_5731606.html
[12]:https://imgur.com/AscOfQh
[13]:http://integrativesystems.org/board.asp
[14]:http://www.amazon.com/gp/product/0684832674/ref=as_li_tl?ie=UTF8&camp=1789&creative=390957&creativeASIN=0684832674&linkCode=as2&tag=techdirtcom-20&linkId=OSP5B7BVSLAG5XNX
[15]:https://imgur.com/KJW7BnA
[16]:http://tools.ietf.org/html/rfc733
[17]:http://www.inventorofemail.com/index.asp
[18]:http://gizmodo.com/5888702/corruption-lies-and-death-threats-the-crazy-story-of-the-man-who-pretended-to-invent-email

View File

@ -1,48 +0,0 @@
CoreOS breaks with Docker
================================================================================
> Summary: CoreOS, a new enterprise Linux company and a Docker partner, is now proposing its own alternative to Docker's container technology.
[Docker][1] exploded out of nowhere in 2014 to make container technology white hot in cloud and datacenter technical circles. Even [Microsoft joined its open-source virtualization revolution][2]. Now, however, early Docker supporter [CoreOS][3], a new large-scale Linux distributor vendor, is turning its back on it and developing its own container technology: [Rocket][4].
![](http://cdn-static.zdnet.com/i/r/story/70/00/036331/coreos-200x77.jpg?hash=MTAvMJZ3MJ&upscale=1)
While [CoreOS][5] is relatively unknown outside of Linux circles and Silicon Valley, it's seen by those in the know as an up and coming Linux distribution for datacenters and clouds. It's not an insignificant company crying foul, because [Docker's take on virtualization has proven to be so popular][6]. Indeed, CoreOS currently requires Docker to work well, and Brandon Philips, CoreOS' co-founder and CTO, has been a top Docker contributor and was serving on the Docker governance board.
So, why is CoreOS breaking with Docker? First, because "We believe strongly in the Unix philosophy: Tools should be independently useful, but have clean integration points." However, it also said that "Docker now is building tools for launching cloud servers, systems for clustering, and a wide range of functions: Building images, running images, uploading, downloading, and eventually even overlay networking, all compiled into one monolithic binary running primarily as root on your server."
In short, instead of Docker being a Unix-style, simple reusable component, CoreOS sees Docker becoming a platform. And CoreOS has no interest in that.
Instead, with Rocket, they propose going back to the [original Docker proposal][7] for what a container should be.
CoreOS spells out that Rocket will be:
- **Composable**: All tools for downloading, installing, and running containers should be well integrated, but independent and composable.
- **Secure**: Isolation should be pluggable, and the crypto primitives for strong trust, image auditing, and application identity should exist from day one.
- **Image distribution**: Discovery of container images should be simple and facilitate a federated namespace and distributed retrieval. This opens the possibility of alternative protocols, such as BitTorrent, and deployments to private environments without the requirement of a registry.
- **Open**: The format and runtime should be well specified and developed by a community. We want independent implementations of tools to be able to run the same container consistently.
To do this, CoreOS is not forking Docker. Alex Polvi, CoreOS' CEO, wrote, "From a security and composability perspective, the Docker process model — where everything runs through a central daemon — is fundamentally flawed. To 'fix' Docker would essentially mean a rewrite of the project, while inheriting all the baggage of the existing implementation."
CoreOS already has an [alpha version of Rocket on GitHub][8], but it's still open to other ideas on how to build a Docker alternative. At the same time, however, CoreOS states that it won't be leaving Docker behind. "We will continue to make sure CoreOS is the best place to run Docker ... [and] expect Docker to continue to be fully integrated with CoreOS as it is today."
While I can understand CoreOS' concerns, I find it hard to imagine that its attempt to come up with a successful alternative to Docker will come to anything. Docker certainly isn't perfect, but in a matter of mere months, it gathered support from almost everyone in the enterprise operating system business. The only way I can see CoreOS' Rocket launching successfully will be if Docker falls flat on its face, and I just don't see that happening.
--------------------------------------------------------------------------------
via: http://www.zdnet.com/coreos-breaks-with-docker-7000036331/#ftag=RSS06bb67b
作者:[Steven J. Vaughan-Nichols][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://www.zdnet.com/meet-the-team/us/steven-j-vaughan-nichols/
[1]:https://www.docker.com/
[2]:http://www.zdnet.com/docker-container-support-coming-to-microsofts-next-windows-server-release-7000034708
[3]:https://coreos.com/
[4]:https://coreos.com/blog/rocket
[5]:http://www.zdnet.com/coreos-linux-for-the-cloud-and-the-datacenter-7000031137/
[6]:http://www.zdnet.com/what-is-docker-and-why-is-it-so-darn-popular-7000032269/
[7]:https://github.com/docker/docker/commit/0db56e6c519b19ec16c6fbd12e3cee7dfa6018c5
[8]:https://github.com/coreos/rocket

View File

@ -1,49 +0,0 @@
CoreOS Team Develops Rocket, Breaks with Docker
================================================================================
![](https://farm8.staticflickr.com/7297/12199695124_53d5323167_t.jpg)
[Docker][1] has easily emerged as one of the top open source stories of the year, and has helped many organizations [benefit from container technology][2]. As weve reported, even Google is [working closely][3] with it, and Microsoft is as well.
However, the folks behind CoreOS, a very popular Linux flavor for use in cloud deployments, are developing their own container technology, [dubbed Rocket][4], which will actually compete with Docker. Here are the details.
Rocket is a new container runtime, designed for composability, security, and speed, according to the CoreOS team. The group has released a [prototype version on GitHub][5] to begin getting community feedback.
“When Docker was first introduced to us in early 2013, the idea of a “standard container” was striking and immediately attractive: a simple component, a composable unit, that could be used in a variety of systems. The Docker repository [included a manifesto][6] of what a standard container should be. This was a rally cry to the industry, and we quickly followed. We thought Docker would become a simple unit that we can all agree on.”
“Unfortunately, a simple re-usable component is not how things are playing out. Docker now is building tools for launching cloud servers, systems for clustering, and a wide range of functions: building images, running images, uploading, downloading, and eventually even overlay networking, all compiled into one monolithic binary running primarily as root on your server. The standard container manifesto [was removed][7]. We should stop talking about Docker containers, and start talking about the Docker Platform.”
“We still believe in the original premise of containers that Docker introduced, so we are doing something about it. Rocket is a command line tool, rkt, for running App Containers. An App Container is the specification of an image format, container runtime, and a discovery mechanism.”
There is a specification coming for App Container Images (ACI). Anyone can [Read about and contribute to the ACI draft][8].
The Register also [notes this interesting aspect][9] of Rocket:
“Significantly, all of CoreOS's tools for working with App Container will be integrated, yet independent from one another. Rocket can run as a standalone tool on any flavor of Linux, not just CoreOS.”
In a [blog post][10], Docker CEO Ben Golub voiced disagreement with CoreOS's move, and he writes:
“There are technical or philosophical differences, which appears to be the case with the recent announcement regarding Rocket. We hope to address some of the technical arguments posed by the Rocket project in a subsequent post.”
It sounds like a standards skirmish is going to come of all this, but, as is often the case with standards confrontations, users may benefit from the competition.
--------------------------------------------------------------------------------
via: http://ostatic.com/blog/coreos-team-develops-rocket-breaks-with-docker
作者:[Sam Dean][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://ostatic.com/member/samdean
[1]:https://www.docker.com/
[2]:http://ostatic.com/blog/linux-containers-with-docker
[3]:http://ostatic.com/blog/google-sets-sights-squarely-on-docker-with-new-container-engine
[4]:https://coreos.com/blog/rocket/
[5]:https://github.com/coreos/rocket
[6]:https://github.com/docker/docker/commit/0db56e6c519b19ec16c6fbd12e3cee7dfa6018c5
[7]:https://github.com/docker/docker/commit/eed00a4afd1e8e8e35f8ca640c94d9c9e9babaf7
[8]:https://github.com/coreos/rocket/blob/master/app-container/SPEC.md#app-container-image
[9]:http://www.theregister.co.uk/2014/12/01/coreos_rocket_announcement/
[10]:http://blog.docker.com/2014/12/initial-thoughts-on-the-rocket-announcement/

View File

@ -1,57 +0,0 @@
Interview: Apache Software Foundation Elevates Drill to Top-Level Project
================================================================================
![](http://i1311.photobucket.com/albums/s669/webworkerdaily/tomer_zps5e1225aa.png)
The Apache Software Foundation (ASF) has [announced][1] that [Apache Drill][2] has graduated from the Apache Incubator to become a Top-Level Project (TLP).
Apache Drill is billed as the world's first schema-free SQL query engine that delivers real-time insights by removing the constraint of building and maintaining schemas before data can be analyzed.
Drill enables rapid application development on Apache Hadoop and also allows enterprise BI analysts to access Hadoop in a self-service fashion. OStatic caught up with Tomer Shiran (shown here), a member of the Drill Project Management Committee, to get his thoughts. Here they are in an interview.
**Can you provide a brief overview of what Drill is and what kinds of users it can make a difference for?**
Drill is the world's first distributed, schema-free SQL engine. Analysts and developers can use Drill to interactively explore data in Hadoop and other NoSQL databases, such as HBase and MongoDB. There's no need to explicitly define and maintain schemas, as Drill can automatically leverage the structure that's embedded in the data.
This enables self-service data exploration, which is not possible with traditional data warehouses or SQL-on-Hadoop solutions like Hive and Impala, in which DBAs must manage schemas and transform the data before it can be analyzed.
**What level of community involvement with Drill already exists?**
Drill is an Apache project, so it's not owned by any vendor. Developers in the community can contribute to Drill. MapR currently employs the largest number of contributors, but we're seeing an increasing number of contributions from other companies, and that trend has been accelerating in recent months.
For example, the MongoDB storage plugin (enabling queries on MongoDB) was contributed by developers at Intuit.
**Hadoop has a lot of momentum on the Big Data front. How can Drill help organizations leveraging Hadoop?**
Drill is the ideal interactive SQL engine for Hadoop. One of the main reasons organizations choose Hadoop is due to its flexibility and agility. Unlike traditional databases, getting data into Hadoop is easy, and users can load data in any shape or size on their own. Early attempts at SQL on Hadoop (eg, Hive, Impala) force schemas to be created and maintained even for self-describing data like JSON, Parquet and HBase tables.
These systems also require data to be transformed before it can be queried. Drill is the only SQL engine for Hadoop that doesn't force schemas to be defined before data can be queried, and doesn't require any data transformations. In other words, Drill maintains the flexibility and agility paradigms that made Hadoop popular, thus making it the natural technology for data exploration and BI on Hadoop.
**What does Drill's status as a top-level project at Apache mean for its development and future?**
Drill's graduation to a top-level project is an indication that Drill has established a strong community of users and developers. Graduation is a decision made by the Apache Software Foundation (ASF) board, and it provides confidence to Drill's potential users and contributors that the project has a strong foundation. From a governance standpoint, a top-level project has its own board (also known as PMC). The PMC Chair (Jacques Nadeau) is a VP at Apache.
**How do you think Drill will evolve over the next several years?**
Drill has a large and growing community of contributors. Drill 1.0 will be out in Q1'15. We'll see many new features over the next several years. Here are just a few examples of initiatives that are currently under way:
Drill currently supports HDFS, HBase and MongoDB. Additional data sources are being added, including Cassandra and RDBMS (all JDBC-enabled databases, including Oracle and MySQL). A single query can incorporate/join data from different sources. In the next year, Drill will become the standard SQL engine for modern datastores (which are all schema-free in nature): Hadoop, NoSQL databases - HBase/MongoDB/Cassandra, and search - Elasticsearch/Solr.
A single enterprise or cloud provider will be able to serve multiple groups/departments/organizations, each having its own workloads and SLA requirements. For example, Drill 1.0 will support user impersonation, meaning that a query can only access the data that the user is authorized to access, and this will work with all supported data sources (Hadoop, HBase, MongoDB, etc.)
Drill will support not only SELECT and CREATE TABLE ... AS SELECT (CTAS) queries, but also INSERT/UPDATE/DELETE, enabling Drill to be used for operational applications (in addition to data exploration and analytics). Drill will also support the ultra-low latency and high concurrency required for such use cases.
Full TPC-DS support. Unlike other SQL-on-Hadoop technologies, Drill is designed to support the ANSI SQL standard as opposed to a SQL-like language. This provides better support for BI and other tools. Drill will be able to run TPC-DS, unmodified, in 2015.
--------------------------------------------------------------------------------
via: http://ostatic.com/blog/interview-apache-software-foundation-elevates-drill-to-top-level-project
作者:[Sam Dean][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://ostatic.com/member/samdean
[1]:https://blogs.apache.org/foundation/entry/the_apache_software_foundation_announces66
[2]:http://drill.apache.org/

View File

@ -1,3 +1,6 @@
////translating by yupmoon
Open source all over the world
================================================================================
![](https://opensource.com/sites/default/files/styles/image-full-size/public/images/business/BUS_OpenSourceExperience_520x292_cm.png)
@ -145,4 +148,4 @@ via: https://opensource.com/business/14/12/jim-whitehurst-inspiration-open-sourc
[44]:http://jobs.redhat.com/life-at-red-hat/our-culture/
[45]:http://www.gutenberg.org/ebooks/4300
[46]:https://twitter.com/philshapiro
[47]:http://libreoffice.org/
[47]:http://libreoffice.org/

View File

@ -0,0 +1,81 @@
Docker and the Integrated Open Source Company
================================================================================
Its been a long time since an open source project has gotten as much buzz and attention as Docker. The easiest way to explain the concept is, well, to look at the logo of the eponymous company that created and manages the project:
![](http://2yj23r14cytosbxol4cavq337g.wpengine.netdna-cdn.com/wp-content/uploads/2014/12/docker.png)
The reference in the logo is to shipping containers, one of the most important inventions of the 20th century. Actually, the word “invention” is not quite right: the idea of putting bulk goods into consistently-sized boxes goes back at least a few hundred years.[2][1] What changed the world was the standardization of containers by a trucking magnate named Malcom McLean and Keith Tantlinger, his head engineer. Tantlinger developed much of the technology undergirding the intermodal container, especially its corner casting and Twistlock mechanism that allowed the containers to be stacked on ships, transported by trucks, and moved by crane. More importantly, Tantlinger convinced McLean to release the patented design for anyone to copy without license, knowing that the technology would only be valuable if it were deployed in every port and on every transport ship in the world. Tantlinger, to put it in software terms, open-sourced the design.
Shipping containers really are a perfect metaphor for what Docker is building: standardized containers for applications.
- Just as the idea of a container wasnt invented by Tantlinger, Docker is building on a concept that has been around for quite a while. Companies like Oracle, HP, and IBM have used containers for many years, and Google especially has a very similar implementation to Docker that they use for internal projects. Docker, though, by being open source and [community-centric][2], offers the promise of standardization
- It doesnt matter what is inside of a shipping container; the container itself will fit on any ship, truck, or crane in the world. Similarly, it doesnt matter what app (and associated files, frameworks, dependencies, etc.) is inside of a docker container; the container will run on any Linux distribution and, more importantly, just about every cloud provider including AWS, Azure, Google Cloud Platform, Rackspace, etc.
- When you move abroad, you can literally have a container brought to your house, stick in your belongings, and then have the entire thing moved to a truck to a crane to a ship to your new country. Similarly, containers allow developers to build and test an application on their local machine and have confidence that the application will behave the exact same way when it is pushed out to a server. Because everything is self-contained, the developer does not need to worry about there being different frameworks, versions, and other dependencies in the various places the application might be run
The implications of this are far-reaching: not only do containers make it easier to manage the lifecycle of an application, they also (theoretically) commoditize cloud services through the age-old hope of “write once run anywhere.” More importantly, at least for now, docker containers offer the potential of being far more efficient than virtual machines. Relative to a container, using virtual machines is like using a car transport ship to move cargo: each unique entity on the ship is self-powered, which means a lot of wasted resources (those car engines arent very useful while crossing the ocean). Similarly, each virtual machine has to deal with the overhead of its own OS; containers, on the other hand, all share the same OS resulting in huge efficiency gains.[3][4]
In short, Docker is a really big deal from a technical perspective. What excites me, though, is that the company is also innovating when it comes to their business model.
----------
The problem with monetizing open source is self-evident: if the software is freely available, what exactly is worth paying for? And, unlike media, you cant exactly stick an advertisement next to some code!
For many years the default answer has been to “be like Red Hat.” Red Hat is the creator and maintainer of the Red Hat Enterprise Linux (RHEL) distribution, which, like all Linux distributions, is freely available.[4][5] Red Hat, however, makes money by offering support, training, a certification program, etc. for enterprises looking to use their software. It is very much a traditional enterprise model make money on support! just minus the up-front license fees.
This sort of business is certainly still viable; Hortonworks is [set to IPO][3] with a similar model based on Hadoop, albeit at a much lower valuation than it received during its last VC round. That doesnt surprise me: I dont think this is a particularly great model from a business perspective.
To understand why its useful to think about there being three distinct parts of any company that is based on open source: the open source project itself, any value-added software built on top of that project, and the actual means of making money:
![](http://2yj23r14cytosbxol4cavq337g.wpengine.netdna-cdn.com/wp-content/uploads/2014/12/opensourcepaper.jpg)
*There are three parts of an open source business: the project itself, the value-added software on top of that project, and the means of monetization*
The problem with the “Red Hat” model is the complete separation of all three of these parts: Red Hat doesnt control the core project (Linux), and their value-added software (RHEL) is free, leaving their money-making support program to stand alone. To the companys credit they have pulled this model off, but I think a big reason is because utilizing Linux was so much more of a challenge back in the 90s.[5][11] I highly doubt Red Hat could successfully build a similar business from scratch today.
![](http://2yj23r14cytosbxol4cavq337g.wpengine.netdna-cdn.com/wp-content/uploads/2014/12/redhatpaper.jpg)
*The three parts of Red Hats business are separate and more difficult for the company to control and monetize*
GitHub, the repository hosting service, is exploring what is to my mind a more compelling model. GitHubs value-added software is a hosting service based on Git, an open-source project designed by Linux creator Linus Torvalds. Crucially, GitHub is seeking to monetize that hosting service directly, both through a SaaS model and through an on-premise enterprise offering[6][6]. This means that, in comparison to Red Hat, there is one less place to disintermediate GitHub: you cant get their value-added software (for private projects public is free) unless youre willing to pay.
![](http://2yj23r14cytosbxol4cavq337g.wpengine.netdna-cdn.com/wp-content/uploads/2014/12/githubpaper.jpg)
*While GitHub does not control Git, their value-added software and means of monetization are unified, making the latter much easier and more sustainable*
Docker takes the GitHub model a step further: the company controls everything from the open source project itself to the value-added software (DockerHub) built on top of that, and, just last week, [announced a monetization model][7] that is very similar to GitHubs enterprise offering. Presuming Docker continues its present momentum and finds success with this enterprise offering, they have the potential to be a fully integrated open source software company: project, value-added software, and monetization all rolled into one.
![](http://2yj23r14cytosbxol4cavq337g.wpengine.netdna-cdn.com/wp-content/uploads/2014/12/dockerpaper.jpg)
*Docker controls all the parts of their business: they are a fully integrated open source company.*
This is exciting, and, to be honest, a little scary. What is exciting is that very few movements have had such a profound effect as open source software, and not just on the tech industry. Open source products are responsible for end user products like this blog; more importantly, open source technologies have enabled exponentially more startups to get off the ground with minimal investment, vastly accelerating the rate of innovation and iteration in tech.[7][8] The ongoing challenge for any open source project, though, is funding, and Dockers business model is a potentially sustainable solution not just for Docker but for future open source technologies.
That said, if Docker is successful, over the long run commercial incentives will steer the Docker open source project in a way that benefits Docker the company, which may not be what is best for the community broadly. That is what is scary about this: might open source in the long run be subtly corrupted by this business model? The makers of CoreOS, a stripped-down Linux distribution that is a perfect complement for Docker, [argued that was the case][9] last week:
> We thought Docker would become a simple unit that we can all agree on. Unfortunately, a simple re-usable component is not how things are playing out. Docker now is building tools for launching cloud servers, systems for clustering, and a wide range of functions: building images, running images, uploading, downloading, and eventually even overlay networking, all compiled into one monolithic binary running primarily as root on your server. The standard container manifesto was removed. We should stop talking about Docker containers, and start talking about the Docker Platform. It is not becoming the simple composable building block we had envisioned.
This, I suppose, is the beauty of open source: if you disagree, fork, which is essentially what CoreOS did, launching their own “Rocket” container.[8][10] It also shows that Dockers business model and any business model that contains open source will never be completely defensible: there will always be a disintermediation point. I suspect, though, that Rocket will fail and Dockers momentum will continue: the logic of there being one true container is inexorable, and Docker has already built up quite a bit of infrastructure and just maybe a business model to make it sustainable.
--------------------------------------------------------------------------------
via: http://stratechery.com/2014/docker-integrated-open-source-company/
作者:[Ben Thompson][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://stratechery.com/category/about/
[1]:http://stratechery.com/2014/docker-integrated-open-source-company/#fn:1:1300
[2]:https://github.com/docker/docker
[3]:http://blogs.wsj.com/digits/2014/12/01/ipo-bound-hortonworks-drops-out-of-billion-dollar-startup-club/
[4]:http://stratechery.com/2014/docker-integrated-open-source-company/#fn:2:1300
[5]:http://stratechery.com/2014/docker-integrated-open-source-company/#fn:3:1300
[6]:http://stratechery.com/2014/docker-integrated-open-source-company/#fn:5:1300
[7]:http://blog.docker.com/2014/12/docker-announces-docker-hub-enterprise/
[8]:http://stratechery.com/2014/docker-integrated-open-source-company/#fn:6:1300
[9]:https://coreos.com/blog/rocket/
[10]:http://stratechery.com/2014/docker-integrated-open-source-company/#fn:7:1300
[11]:http://stratechery.com/2014/docker-integrated-open-source-company/#fn:4:1300

View File

@ -0,0 +1,74 @@
2015 will be the year Linux takes over the enterprise (and other predictions)
================================================================================
> Jack Wallen removes his rose-colored glasses and peers into the crystal ball to predict what 2015 has in store for Linux.
![](http://tr1.cbsistatic.com/hub/i/r/2014/12/15/f79d21fe-f1d1-416d-ba22-7e757dfcdb31/resize/620x485/52a10d26d34c3fc4201c5daa8ff277ff/linux2015hero.jpg)
The crystal ball has been vague and fuzzy for quite some time. Every pundit and voice has opined on what the upcoming year will mean to whatever topic it is they hold dear to their heart. In my case, we're talking Linux and open source.
In previous years, I'd don the rose-colored glasses and make predictions that would shine a fantastic light over the Linux landscape and proclaim 20** will be the year of Linux on the _____ (name your platform). Many times, those predictions were wrong, and Linux would wind up grinding on in the background.
This coming year, however, there are some fairly bold predictions to be made, some of which are sure things. Read on and see if you agree.
### Linux takes over big data ###
This should come as no surprise, considering the advancements Linux and open source has made over the previous few years. With the help of SuSE, Red Hat, and SAP Hana, Linux will hold powerful sway over big data in 2015. In-memory computing and live kernel patching will be the thing that catapults big data into realms of uptime and reliability never before known. SuSE will lead this charge like a warrior rushing into a battle it cannot possibly lose.
This rise of Linux in the world of big data will have serious trickle down over the rest of the business world. We already know how fond enterprise businesses are of Linux and big data. What we don't know is how this relationship will alter the course of Linux with regards to the rest of the business world.
My prediction is that the success of Linux with big data will skyrocket the popularity of Linux throughout the business landscape. More contracts for SuSE and Red Hat will equate to more deployments of Linux servers that handle more tasks within the business world. This will especially apply to the cloud, where OpenStack should easily become an overwhelming leader.
As 2015 draws to a close, Linux will continue its takeover of more backend services, which may include the likes of collaboration servers, security, and much more.
### Smart machines ###
Linux is already leading the trend for making homes and autos more intelligent. With improvements in the likes of Nest (which currently uses an embedded Linux), the open source platform is poised to take over your machines. Because 2015 should see a massive rise in smart machines, it goes without saying that Linux will be a huge part of that growth. I firmly believe more homes and businesses will take advantage of such smart controls, and that will lead to more innovations (all of which will be built on Linux).
One of the issues facing Nest, however, is that it was purchased by Google. What does this mean for the thermostat controller? Will Google continue using the Linux platform -- or will it opt to scrap that in favor of Android? Of course, a switch would set the Nest platform back a bit.
The upcoming year will see Linux lead the rise in popularity of home automation. Wink, Iris, Q Station, Staples Connect, and more (similar) systems will help to bridge Linux and home users together.
### The desktop ###
The big question, as always, is one that tends to hang over the heads of the Linux community like a dark cloud. That question is in relation to the desktop. Unfortunately, my predictions here aren't nearly as positive. I believe that the year 2015 will remain quite stagnant for Linux on the desktop. That complacency will center around Ubuntu.
As much as I love Ubuntu (and the Unity desktop), this particular distribution will continue to drag the Linux desktop down. Why?
Convergence... or the lack thereof.
Canonical has been so headstrong about converging the desktop and mobile experience that they are neglecting the current state of the desktop. The last two releases of Ubuntu (one being an LTS release) have been stagnant (at best). The past year saw two of the most unexciting releases of Ubuntu that I can recall. The reason? Because the developers of Ubuntu are desperately trying to make Unity 8/Mir and the ubiquitous Ubuntu Phone a reality. The vaporware that is the Ubuntu Phone will continue on through 2015, and Unity 8/Mir may or may not be released.
When the new iteration of the Ubuntu Unity desktop is finally released, it will suffer a serious setback, because there will be so little hardware available to truly show it off. [System76][1] will sell their outstanding [Sable Touch][2], which will probably become the flagship system for Unity 8/Mir. As for the Ubuntu Phone? How many reports have you read that proclaimed "Ubuntu Phone will ship this year"?
I'm now going on the record to predict that the Ubuntu Phone will not ship in 2015. Why? Canonical created partnerships with two OEMs over a year ago. Those partnerships have yet to produce a single shippable product. The closest thing to a shippable product is the Meizu MX4 phone. The "Pro" version of that phone was supposed to have a formal launch of Sept 25. Like everything associated with the Ubuntu Phone, it didn't happen.
Unless Canonical stops putting all of its eggs in one vaporware basket, desktop Linux will take a major hit in 2015. Ubuntu needs to release something major -- something to make heads turn -- otherwise, 2015 will be just another year where we all look back and think "we could have done something special."
Outside of Ubuntu, I do believe there are some outside chances that Linux could still make some noise on the desktop. I think two distributions, in particular, will bring something rather special to the table:
- [Evolve OS][3] -- a ChromeOS-like Linux distribution
- [Quantum OS][4] -- a Linux distribution that uses Android's Material Design specs
Both of these projects are quite exciting and offer unique, user-friendly takes on the Linux desktop. This is quickly becoming a necessity in a landscape being dragged down by out-of-date design standards (think the likes of Cinnamon, Mate, XFCE, LXCE -- all desperately clinging to the past).
This is not to say that Linux on the desktop doesn't have a chance in 2015. It does. In order to grasp the reins of that chance, it will have to move beyond the past and drop the anchors that prevent it from moving out to deeper, more viable waters.
Linux stands to make more waves in 2015 than it has in a very long time. From enterprise to home automation -- the world could be the oyster that Linux uses as a springboard to the desktop and beyond.
What are your predictions for Linux and open source in 2015? Share your thoughts in the discussion thread below.
--------------------------------------------------------------------------------
via: http://www.techrepublic.com/article/2015-will-be-the-year-linux-takes-over-the-enterprise-and-other-predictions/
作者:[Jack Wallen][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://www.techrepublic.com/search/?a=jack+wallen
[1]:https://system76.com/
[2]:https://system76.com/desktops/sable
[3]:https://evolve-os.com/
[4]:http://quantum-os.github.io/

View File

@ -0,0 +1,91 @@
A brief history of Linux malware
================================================================================
A look at some of the worms and viruses and Trojans that have plagued Linux throughout the years.
### Nobodys immune ###
![Image courtesy Shutterstock](http://images.techhive.com/images/article/2014/12/121114-linux-malware-1-100535381-orig.jpg)
Although not as common as malware targeting Windows or even OS X, security threats to Linux have become both more numerous and more severe in recent years. There are a couple of reasons for that the mobile explosion has meant that Android (which is Linux-based) is among the most attractive targets for malicious hackers, and the use of Linux as a server OS for and in the data center has also grown but Linux malware has been around in some form since well before the turn of the century. Have a look.
### Staog (1996) ###
![](http://images.techhive.com/images/article/2014/12/121114-stago-100535400-orig.gif)
The first recognized piece of Linux malware was Staog, a rudimentary virus that tried to attach itself to running executables and gain root access. It didnt spread very well, and it was quickly patched out in any case, but the concept of the Linux virus had been proved.
### Bliss (1997) ###
![](http://images.techhive.com/images/article/2014/12/121114-3new-100535402-orig.gif)
If Staog was the first, however, Bliss was the first to grab the headlines though it was a similarly mild-mannered infection, trying to grab permissions via compromised executables, and it could be deactivated with a simple shell switch. It even kept a neat little log, [according to online documentation from Ubuntu][1].
### Ramen/Cheese (2001) ###
![](http://images.techhive.com/images/article/2014/12/121114-ramen-100535404-orig.jpg)
Cheese is the malware you actually want to get certain Linux worms, like Cheese, may actually have been beneficial, patching the vulnerabilities the earlier Ramen worm used to infect computers in the first place. (Ramen was so named because it replaced web server homepages with a goofy image saying that “hackers looooove noodles.”)
### Slapper (2002) ###
![Image courtesy Wikimedia CommonsCC LicenseKevin Collins](http://images.techhive.com/images/article/2014/12/121114-linux-malware-5-100535389-orig.jpg)
The Slapper worm struck in 2002, infecting servers via an SSL bug in Apache. That predates Heartbleed by 12 years, if youre keeping score at home.
### Badbunny (2007) ###
![Image courtesy Shutterstock](http://images.techhive.com/images/article/2014/12/121114-linux-malware-6-100535384-orig.jpg)
Badbunny was an OpenOffice macro worm that carried a sophisticated script payload that worked on multiple platforms even though the only effect of a successful infection was to download a raunchy pic of a guy in a bunny suit, er, doing what bunnies are known to do.
### Snakso (2012) ###
![](http://images.techhive.com/images/article/2014/12/121114-linux-malware-7-100535385-orig.jpg)
Image courtesy [TechWorld UK][2]
The Snakso rootkit targeted specific versions of the Linux kernel to directly mess with TCP packets, injecting iFrames into traffic generated by the infected machine and pushing drive-by downloads.
### Hand of Thief (2013) ###
![](http://images.techhive.com/images/article/2014/12/121114-thief-100535405-orig.jpg)
Hand of Thief is a commercial (sold on Russian hacker forums) Linux Trojan creator that made quite a splash when it was introduced last year. RSA researchers, however, discovered soon after that [it wasnt quite as dangerous as initially thought][3].
### Windigo (2014) ###
![](http://images.techhive.com/images/article/2014/12/121114-linux-malware-9-100535390-orig.jpg)
Image courtesy [freezelight][4]
Windigo is a complex, large-scale cybercrime operation that targeted tens of thousands of Linux servers, causing them to produce spam and serve drive-by malware and redirect links. Its still out there, according to ESET security, [so admins should tread carefully][5].
### Shellshock/Mayhem (2014) ###
![Shellshock/Mayhem (2014)](http://images.techhive.com/images/article/2014/12/121114-malware-mayhem-100535406-orig.gif)
Striking at the terminal strikes at the heart of Linux, which is why the recent Mayhem attacks which targeted the so-called Shellshock vulnerabilities in Linuxs Bash command-line interpreter using a specially crafted ELF library were so noteworthy. Researchers at Yandex said that the network [had snared 1,400 victims as of July][6].
### Turla (2014) ###
![Image courtesy CW](http://images.techhive.com/images/article/2014/12/121114-linux-malware-11-100535391-orig.jpg)
A large-scale campaign of cyberespionage emanating from Russia, called Epic Turla by researchers, was found to have a new Linux-focused component earlier this week. Its apparently [based on a backdoor access program from all the way back in 2000 called cd00r][7].
--------------------------------------------------------------------------------
via: http://www.networkworld.com/article/2858742/linux/a-brief-history-of-linux-malware.html
作者:[Jon Gold][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://www.networkworld.com/author/Jon-Gold/
[1]:https://help.ubuntu.com/community/Linuxvirus
[2]:http://news.techworld.com/security/3412075/linux-users-targeted-by-mystery-drive-by-rootkit/
[3]:http://www.networkworld.com/article/2168938/network-security/dangerous-linux-trojan-could-be-sign-of-things-to-come.html
[4]:https://www.flickr.com/photos/63056612@N00/155554663
[5]:http://www.welivesecurity.com/2014/04/10/windigo-not-windigone-linux-ebury-updated/
[6]:http://www.pcworld.com/article/2825032/linux-botnet-mayhem-spreads-through-shellshock-exploits.html
[7]:http://www.computerworld.com/article/2857129/turla-espionage-operation-infects-linux-systems-with-malware.html

View File

@ -0,0 +1,143 @@
20 Linux Commands Interview Questions & Answers
================================================================================
**Q:1 How to check current run level of a linux server ?**
Ans: who -r & runlevel commands are used to check the current runlevel of a linux box.
**Q:2 How to check the default gatway in linux ?**
Ans: Using the commands “route -n” and “netstat -nr” , we can check default gateway. Apart from the default gateway info , these commands also display the current routing tables .
**Q:3 How to rebuild initrd image file on Linux ?**
Ans: In case of CentOS 5.X / RHEL 5.X , mkinitrd command is used to create initrd file , example is shown below :
# mkinitrd -f -v /boot/initrd-$(uname -r).img $(uname -r)
If you want to create initrd for a specific kernel version , then replace uname -r with desired kernel
In Case of CentOS 6.X / RHEL 6.X , dracut command is used to create initrd file example is shown below :
# dracut -f
Above command will create the initrd file for the current version. To rebuild the initrd file for a specific kernel , use below command :
# dracut -f initramfs-2.x.xx-xx.el6.x86_64.img 2.x.xx-xx.el6.x86_64
**Q:4 What is cpio command ?**
Ans: cpio stands for Copy in and copy out. Cpio copies files, lists and extract files to and from a archive ( or a single file).
**Q:5 What is patch command and where to use it ?**
Ans: As the name suggest patch command is used to apply changes ( or patches) to the text file. Patch command generally accept output from the diff and convert older version of files into newer versions. For example Linux kernel source code consists of number of files with millions of lines , so whenever any contributor contribute the changes , then he/she will be send the only changes instead of sending the whole source code. Then the receiver will apply the changes with patch command to its original source code.
Create a diff file for use with patch,
# diff -Naur old_file new_file > diff_file
Where old_file and new_file are either single files or directories containing files. The -r option supports recursion of a directory tree.
Once the diff file has been created, we can apply it to patch the old file into the new file:
# patch < diff_file
**Q:6 What is use of aspell ?**
Ans: As the name suggest aspell is an interactive spelling checker in linux operating system. The aspell command is the successor to an earlier program named ispell, and can be used, for the most part, as a drop-in replacement. While the aspell program is mostly used by other programs that require spell-checking capability, it can also be used very effectively as a stand-alone tool from the command line.
**Q:7 How to check the SPF record of domain from command line ?**
Ans: We can check SPF record of a domain using dig command. Example is shown below :
linuxtechi@localhost:~$ dig -t TXT google.com
**Q:8 How to identify which package the specified file (/etc/fstab) is associated with in linux ?**
Ans: # rpm -qf /etc/fstab
Above command will list the package which provides file “/etc/fstab”
**Q:9 Which command is used to check the status of bond0 ?**
Ans: cat /proc/net/bonding/bond0
**Q:10 What is the use of /proc file system in linux ?**
Ans: The /proc file system is a RAM based file system which maintains information about the current state of the running kernel including details on CPU, memory, partitioning, interrupts, I/O addresses, DMA channels, and running processes. This file system is represented by various files which do not actually store the information, they point to the information in the memory. The /proc file system is maintained automatically by the system.
**Q:11 How to find files larger than 10MB in size in /usr directory ?**
Ans: # find /usr -size +10M
**Q:12 How to find files in the /home directory that were modified more than 120 days ago ?**
Ans: # find /home -mtime +120
**Q:13 How to find files in the /var directory that have not been accessed in the last 90 days ?**
Ans: # find /var -atime -90
**Q:14 Search for core files in the entire directory tree and delete them as found without prompting for confirmation**
Ans: # find / -name core -exec rm {} \;
**Q:15 What is the purpose of strings command ?**
Ans: The strings command is used to extract and display the legible contents of a non-text file.
**Q:16 What is the use tee filter ?**
Ans: The tee filter is used to send an output to more than one destination. It can send one copy of the output to a file and another to the screen (or some other program) if used with pipe.
linuxtechi@localhost:~$ ll /etc | nl | tee /tmp/ll.out
In the above example, the output from ll is numbered and captured in /tmp/ll.out file. The output is also displayed on the screen.
**Q:17 What would the command export PS1="$LOGNAME@`hostname`:\$PWD: " do ?**
Ans: The export command provided will change the login prompt to display username, hostname, and the current working directory.
**Q:18 What would the command ll | awk '{print $3,"owns",$9}' do ?**
Ans: The ll command provided will display file names and their owners.
**Q:19 What is the use of at command in linux ?**
Ans: The at command is used to schedule a one-time execution of a program in the future. All submitted jobs are spooled in the /var/spool/at directory and executed by the atd daemon when the scheduled time arrives.
**Q:20 What is the role of lspci command in linux ?**
Ans: The lspci command displays information about PCI buses and the devices attached to your system. Specify -v, -vv, or -vvv for detailed output. With the -m option, the command produces more legible output.
--------------------------------------------------------------------------------
via: http://www.linuxtechi.com/20-linux-commands-interview-questions-answers/
作者:[Pradeep Kumar][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://www.linuxtechi.com/author/pradeep/
[1]:
[2]:
[3]:
[4]:
[5]:
[6]:
[7]:
[8]:
[9]:
[10]:
[11]:
[12]:
[13]:
[14]:
[15]:
[16]:
[17]:
[18]:
[19]:
[20]:

View File

@ -0,0 +1,125 @@
Defending the Free Linux World
================================================================================
![](http://www.linuxinsider.com/ai/908455/open-invention-network.jpg)
**Co-opetition is a part of open source. The Open Invention Network model allows companies to decide where they will compete and where they will collaborate, explained OIN CEO Keith Bergelt. As open source evolved, "we had to create channels for collaboration. Otherwise, we would have hundreds of entities spending billions of dollars on the same technology."**
The [Open Invention Network][1], or OIN, is waging a global campaign to keep Linux out of harm's way in patent litigation. Its efforts have resulted in more than 1,000 companies joining forces to become the largest defense patent management organization in history.
The Open Invention Network was created in 2005 as a white hat organization to protect Linux from license assaults. It has considerable financial backing from original board members that include Google, IBM, NEC, Novell, Philips, [Red Hat][2] and Sony. Organizations worldwide have joined the OIN community by signing the free OIN license.
Organizers founded the Open Invention Network as a bold endeavor to leverage intellectual property to protect Linux. Its business model was difficult to comprehend. It asked its members to take a royalty-free license and forever forgo the chance to sue other members over their Linux-oriented intellectual property.
However, the surge in Linux adoptions since then -- think server and cloud platforms -- has made protecting Linux intellectual property a critically necessary strategy.
Over the past year or so, there has been a shift in the Linux landscape. OIN is doing a lot less talking to people about what the organization is and a lot less explaining why Linux needs protection. There is now a global awareness of the centrality of Linux, according to Keith Bergelt, CEO of OIN.
"We have seen a culture shift to recognizing how OIN benefits collaboration," he told LinuxInsider.
### How It Works ###
The Open Invention Network uses patents to create a collaborative environment. This approach helps ensure the continuation of innovation that has benefited software vendors, customers, emerging markets and investors.
Patents owned by Open Invention Network are available royalty-free to any company, institution or individual. All that is required to qualify is the signer's agreement not to assert its patents against the Linux system.
OIN ensures the openness of the Linux source code. This allows programmers, equipment vendors, independent software vendors and institutions to invest in and use Linux without excessive worry about intellectual property issues. This makes it more economical for companies to repackage, embed and use Linux.
"With the diffusion of copyright licenses, the need for OIN licenses becomes more acute. People are now looking for a simpler or more utilitarian solution," said Bergelt.
OIN legal defenses are free of charge to members. Members commit to not initiating patent litigation against the software in OIN's list. They also agree to offer their own patents in defense of that software. Ultimately, these commitments result in access to hundreds of thousands of patents cross-licensed by the network, Bergelt explained.
### Closing the Legal Loopholes ###
"What OIN is doing is very essential. It offers another layer of IP protection, said Greg R. Vetter, associate professor of law at the [University of Houston Law Center][3].
Version 2 of the GPL license is thought by some to provide an implied patent license, but lawyers always feel better with an explicit license, he told LinuxInsider.
What OIN provides is something that bridges that gap. It also provides explicit coverage of the Linux kernel. An explicit patent license is not necessarily part of the GPLv2, but it was added in GPLv3, according to Vetter.
Take the case of a code writer who produces 10,000 lines of code under GPLv3, for example. Over time, other code writers contribute many more lines of code, which adds to the IP. The software patent license provisions in GPLv3 would protect the use of the entire code base under all of the participating contributors' patents, Vetter said.
### Not Quite the Same ###
Patents and licenses are overlapping legal constructs. Figuring out how the two entities work with open source software can be like traversing a minefield.
"Licenses are legal constructs granting additional rights based on, typically, patent and copyright laws. Licenses are thought to give a permission to do something that might otherwise be infringement of someone else's IP rights," Vetter said.
Many free and open source licenses (such as the Mozilla Public License, the GNU GPLv3, and the Apache Software License) incorporate some form of reciprocal patent rights clearance. Older licenses like BSD and MIT do not mention patents, Vetter pointed out.
A software license gives someone else certain rights to use the code the programmer created. Copyright to establish ownership is automatic, as soon as someone writes or draws something original. However, copyright covers only that particular expression and derivative works. It does not cover code functionality or ideas for use.
Patents cover functionality. Patent rights also can be licensed. A copyright may not protect how someone independently developed implementation of another's code, but a patent fills this niche, Vetter explained.
### Looking for Safe Passage ###
The mixing of license and patent legalities can appear threatening to open source developers. For some, even the GPL qualifies as threatening, according to William Hurley, cofounder of [Chaotic Moon Studios][4] and [IEEE][5] Computer Society member.
"Way back in the day, open source was a different world. Driven by mutual respect and a view of code as art, not property, things were far more open than they are today. I believe that many efforts set upon with the best of intentions almost always end up bearing unintended consequences," Hurley told LinuxInsider.
Surpassing the 1,000-member mark might carry a mixed message about the significance of intellectual property right protection, he suggested. It might just continue to muddy the already murky waters of today's open source ecosystem.
"At the end of the day, this shows some of the common misconceptions around intellectual property. Having thousands of developers does not decrease risk -- it increases it. The more developers licensing the patents, the more valuable they appear to be," Hurley said. "The more valuable they appear to be, the more likely someone with similar patents or other intellectual property will try to take advantage and extract value for their own financial gain."
### Sharing While Competing ###
Co-opetition is a part of open source. The OIN model allows companies to decide where they will compete and where they will collaborate, explained Bergelt.
"Many of the changes in the evolution of open source in terms of process have moved us into a different direction. We had to create channels for collaboration. Otherwise, we would have hundreds of entities spending billions of dollars on the same technology," he said.
A glaring example of this is the early evolution of the cellphone industry. Multiple standards were put forward by multiple companies. There was no sharing and no collaboration, noted Bergelt.
"That damaged our ability to access technology by seven to 10 years in the U.S. Our experience with devices was far behind what everybody else in the world had. We were complacent with GSM (Global System for Mobile Communications) while we were waiting for CDMA (Code Division Multiple Access)," he said.
### Changing Landscape ###
OIN experienced a growth surge of 400 new licensees in the last year. That is indicative of a new trend involving open source.
"The marketplace reached a critical mass where finally people within organizations recognized the need to explicitly collaborate and to compete. The result is doing both at the same time. This can be messy and taxing," Bergelt said.
However, it is a sustainable transformation driven by a cultural shift in how people think about collaboration and competition. It is also a shift in how people are embracing open source -- and Linux in particular -- as the lead project in the open source community, he explained.
One indication is that most significant new projects are not being developed under the GPLv3 license.
### Two Better Than One ###
"The GPL is incredibly important, but the reality is there are a number of licensing models being used. The relative addressability of patent issues is generally far lower in Eclipse and Apache and Berkeley licenses that it is in GPLv3," said Bergelt.
GPLv3 is a natural complement for addressing patent issues -- but the GPL is not sufficient on its own to address the issues of potential conflicts around the use of patents. So OIN is designed as a complement to copyright licenses, he added.
However, the overlap of patent and license may not do much good. In the end, patents are for offensive purposes -- not defensive -- in almost every case, Bergelt suggested.
"If you are not prepared to take legal action against others, then a patent may not be the best form of legal protection for your intellectual properties," he said. "We now live in a world where the misconceptions around software, both open and proprietary, combined with an ill-conceived and outdated patent system, leave us floundering as an industry and stifling innovation on a daily basis," he said.
### Court of Last Resort ###
It would be nice to think the presence of OIN has dampened a flood of litigation, Bergelt said, or at the very least, that OIN's presence is neutralizing specific threats.
"We are getting people to lay down their arms, so to say. At the same time, we are creating a new cultural norm. Once you buy into patent nonaggression in this model, the correlative effect is to encourage collaboration," he observed.
If you are committed to collaboration, you tend not to rush to litigation as a first response. Instead, you think in terms of how can we enable you to use what we have and make some money out of it while we use what you have, Bergelt explained.
"OIN is a multilateral solution. It encourages signers to create bilateral agreements," he said. "That makes litigation the last course of action. That is where it should be."
### Bottom Line ###
OIN is working to prevent Linux patent challenges, Bergelt is convinced. There has not been litigation in this space involving Linux.
The only thing that comes close are the mobile wars with Microsoft, which focus on elements high in the stack. Those legal challenges may be designed to raise the cost of ownership involving the use of Linux products, Bergelt noted.
Still, "these are not Linux-related law suits," he said. "They do not focus on what is core to Linux. They focus on what is in the Linux system."
--------------------------------------------------------------------------------
via: http://www.linuxinsider.com/story/Defending-the-Free-Linux-World-81512.html
作者:Jack M. Germain
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[1]:http://www.openinventionnetwork.com/
[2]:http://www.redhat.com/
[3]:http://www.law.uh.edu/
[4]:http://www.chaoticmoon.com/
[5]:http://www.ieee.org/

View File

@ -0,0 +1,102 @@
Docker CTO Solomon Hykes to Devs: Have It Your Way
================================================================================
![](http://www.linuxinsider.com/ai/845971/docker-cloud.jpg)
**"We made a very conscious effort with Docker to insert the technology into an existing toolbox. We did not want to turn the developer's world upside down on the first day. ... We showed them incremental improvements so that over time the developers discovered more things they could do with Docker. So the developers could transition into the new architecture using the new tools at their own pace."**
[Docker][1] in the last two years has moved from an obscure Linux project to one of the most popular open source technologies in cloud computing.
Project developers have witnessed millions of Docker Engine downloads. Hundreds of Docker groups have formed in 40 countries. Many more companies are announcing Docker integration. Even Microsoft will ship Windows 10 with Docker preinstalled.
![](http://www.linuxinsider.com/article_images/2014/81504_330x260.jpg)
Solomon Hykes
Founder and CTO of Docker
"That caught a lot of people by surprise," Docker founder and CTO Solomon Hykes told LinuxInsider.
Docker is an open platform for developers and sysadmins to build, ship and run distributed applications. It uses a Docker engine along with a portable, lightweight runtime and packaging tool. It also needs the Docker Hub and a cloud service for sharing applications and automating workflows.
Docker provides a vehicle for developers to quickly assemble their applications from components. It eliminates the friction between development, quality assurance and production environments. Thus, IT can ship applications faster and run them unchanged on laptops, on data center virtual machines, and in any cloud.
In this exclusive interview, LinuxInsider discusses with Solomon Hykes why Docker is revitalizing Linux and the cloud.
**LinuxInsider: You have said that Docker's success is more the result of being in the right place at the right time for a trend that's much bigger than Docker. Why is that important to users?**
**Solomon Hykes**: There is always an element of being in the right place at the right time. We worked on this concept for a long time. Until recently, the market was not ready for this kind of technology. Then it was, and we were there. Also, we were very deliberate to make the technology flexible and very easy to get started using.
**LI: Is Docker a new cloud technology or merely a new way to do cloud storage?**
**Hykes**: Containers in themselves are just an enabler. The really big story is how it changes the software model enormously. Developers are creating new kinds of applications. They are building applications that do not run on only one machine. There is a need for completely new architecture. At the heart of that is independence from the machine.
The problem for the developer is to create the kind of software that can run independently on any kind of machine. You need to package it up so it can be moved around. You need to cross that line. That is what containers do.
**LI: How analogous is the software technology to traditional cargo shipping in containers?**
**Hykes**: That is a very apt example. It is the same thing for shipping containers. The innovation is not in the box. It is in how the automation handles millions of those boxes moving around. That is what is important.
**LI: How is Docker affecting the way developers build their applications?**
**Hykes**: The biggest way is it helps them structure their applications for a better distributive system. Another distributive application is Gmail. It does not run on just one application. It is distributive. Developers can package the application as a series of services. That is their style of reasoning when they design. It brings the tooling up to the level of design.
**LI: What led you to this different architecture approach?**
**Hykes**: What is interesting about this process is that we did not invent this model. It was there. If you look around, you see this trend where developers are increasingly building distributive applications where the tooling is inadequate. Many people have tried to deal with the existing tooling level. This is a new architecture. When you come up with tools that support this new model, the logical thing to do is tell the developer that the tools are out of date and are inadequate. So throw away the old tools and here are the new tools.
**LI: How much friction did you encounter from developers not wanting to throw away their old tools?**
**Hykes**: That approach sounds perfectly reasonable and logical. But in fact it is very hard to get developers to throw away their tools. And for IT departments the same thing is very true. They have legacy performance to support. So most of these attempts to move into next-generation tools have failed. They ask too much of the developers from day one.
**LI: How did you combat that reaction from developers?**
**Hykes**: We made a very conscious effort with Docker to insert the technology into an existing toolbox. We did not want to turn the developer's world upside down on the first day. Instead, we showed them incremental improvements so that over time the developers discovered more things they could do with Docker. So the developers could transition into the new architecture using the new tools at their own pace. That makes all the difference in the world.
**LI: What reaction are you seeing from this strategy?**
**Hykes**: When I ask people using Docker today how revolutionary it is, some say they are not using it in a revolutionary way. It is just a little improvement in my toolbox. That is the point. Others say that they jumped all in on the first day. Both responses are OK. Everyone can take their time moving toward that new model.
**LI: So is it a case of integrating Docker into existing platforms, or is a complete swap of technology required to get the full benefit?**
**Hykes**: Developers can go either way. There is a lot of demand for Docker native. But there is a whole ecosystem of new tools and companies competing to build brand new platforms entirely built on top of Docker. Over time the world is trending towards Docker native, but there is no rush. We totally support the idea of developers using bits and pieces of Docker in their existing platform forever. We encourage that.
**LI: What about Docker's shared Linux kernel architecture?**
**Hykes**: There are two steps involved in answering that question. What Docker does is become a layer on top of the Linux kernel. It exposes an abstraction function. It takes advantage of the underlying system. It has access to all of the Linux features. It also takes advantage of the networking stack and the storage subsystem. It uses the abstraction feature to map what developers need.
**LI: How detailed a process is this for developers?**
**Hykes**: As a developer, when I make an application I need a run-time that can run my application in a sandbox environment. I need a packaging system that makes it easy to move it around to other machines. I need a networking model that allows my application to talk to the outside world. I need storage, etc. We abstract ... the gritty details of whatever the kernel does right now.
**LI: Why does this benefit the developer?**
**Hykes**: There are two really big advantages to that. The first is simplicity. Developers can actually be productive now because that abstraction is easier for them to comprehend and is designed for that. The system APIs are designed for the system. What the developer needs is a consistent abstraction that works everywhere.
The second advantage is that over time you can support more systems. For example, early on Docker could only work on a single distribution of Linux under very narrow versions of the kernel. Over time, we expanded the surface area for the number of systems out there that Docker supports natively. So now you can run Docker on every major Linux distribution and in combination with many more networking and storage features.
**LI: Does this functionality trickle down to nondevelopers, or is the benefit solely targeting developers?**
**Hykes**: Every time we expand that surface area, every single developer that uses the Docker abstraction benefits from that too. So every application running Docker gets the added functionality every time the Docker community adds to the expansion. That is the thing that benefits all users. Without that universal expansion, every single developer would not have time to invest to update. There is just too much to support.
**LI: What about Microsoft's recent announcement that it was shipping Docker support with Windows?**
**Hykes**: If you think of Docker as a very narrow and very simple tool, then why would you roll out support for Windows? The whole point is that over time, you can expand the reach of that abstraction. Windows works very differently, obviously. But now that Microsoft has committed to adding features to Windows 10, it exposes the functionality required to run Docker. That is real exciting.
Docker still has to be ported to Windows, but Microsoft has committed to contributing in a major way to the port. Realize how far Microsoft has come in doing this. Microsoft is doing this fully upstream in a completely native, open source way. Everyone installing Windows 10 will get Docker preinstalled.
**LI: What lies ahead for growing Docker's feature set and user base?**
**Hykes**: The community has a lot of features on the drawing board. Most of them have to do with more improved tools for developers to build better distributive applications. A toolkit implies having a series of tools with each tool designed for one job.
In each of these subsystems, there is a need for new tools. In each of these areas, you will see an enormous amount of activity in the community in terms of contributions and designs. In that regard, the Docker project is enormously ambitious. The ability to address each of these areas will ensure that developers have a huge array of choices without fragmentation.
--------------------------------------------------------------------------------
via: http://www.linuxinsider.com/story/Docker-CTO-Solomon-Hykes-to-Devs-Have-It-Your-Way-81504.html
作者Jack M. Germain
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[1]:https://www.docker.com/

View File

@ -0,0 +1,120 @@
The Curious Case of the Disappearing Distros
================================================================================
![](http://www.linuxinsider.com/ai/828896/linux-distros.jpg)
"Linux is a big game now, with billions of dollars of profit, and it's the best thing since sliced bread, but corporations are taking control, and slowly but systematically, community distros are being killed," said Google+ blogger Alessandro Ebersol. "Linux is slowly becoming just like BSD, where companies use and abuse it and give very little in return."
Well the holidays are pretty much upon us at last here in the Linux blogosphere, and there's nowhere left to hide. The next two weeks or so promise little more than a blur of forced social occasions and too-large meals, punctuated only -- for the luckier ones among us -- by occasional respite down at the Broken Windows Lounge.
Perhaps that's why Linux bloggers seized with such glee upon the good old-fashioned mystery that came up recently -- delivered in the nick of time, as if on cue.
"Why is the Number of Linux Distros Declining?" is the [question][1] posed over at Datamation, and it's just the distraction so many FOSS fans have been needing.
"Until about 2011, the number of active distributions slowly increased by a few each year," wrote author Bruce Byfield. "By contrast, the last three years have seen a 12 percent decline -- a decrease too high to be likely to be coincidence.
"So what's happening?" Byfield wondered.
It would be difficult to imagine a more thought-provoking question with which to spend the Northern hemisphere's shortest days.
### 'There Are Too Many Distros' ###
![](http://www.linuxinsider.com/images/article_images/linuxgirl_bg_pinkswirl_150x245.jpg)
"That's an easy question," began blogger [Robert Pogson][2]. "There are too many distros."
After all, "if a fanatic like me can enjoy life having sampled only a dozen distros, why have any more?" Pogson explained. "If someone has a concept different from the dozen or so most common distros, that concept can likely be demonstrated by documenting the tweaks and package-lists and, perhaps, some code."
Trying to compete with some 40,000 package repositories like Debian's, however, is "just silly," he said.
"No startup can compete with such a distro," Pogson asserted. "Why try? Just use it to do what you want and tell the world about it."
### 'I Don't Distro-Hop Anymore' ###
The major existing distros are doing a good job, so "we don't need so many derivative works," Google+ blogger Kevin O'Brien agreed.
"I know I don't 'distro-hop' anymore, and my focus is on using my computer to get work done," O'Brien added.
"If my apps run fine every day, that is all that I need," he said. "Right now I am sticking with Ubuntu LTS 14.04, and probably will until 2016."
### 'The More Distros, the Better' ###
It stands to reason that "as distros get better, there will be less reasons to roll your own," concurred [Linux Rants][3] blogger Mike Stone.
"I think the modern Linux distros cover the bases of a larger portion of the Linux-using crowd, so fewer and fewer people are starting their own distribution to compensate for something that the others aren't satisfying," he explained. "Add to that the fact that corporations are more heavily involved in the development of Linux now than they ever have been, and they're going to focus their resources."
So, the decline isn't necessarily a bad thing, as it only points to the strength of the current offerings, he asserted.
At the same time, "I do think there are some negative consequences as well," Stone added. "Variation in the distros is a way that Linux grows and evolves, and with a narrower field, we're seeing less opportunity to put new ideas out there. In my mind, the more distros, the better -- hopefully the trend reverses soon."
### 'I Hope Some Diversity Survives' ###
Indeed, "the era of novelty and experimentation is over," Google+ blogger Gonzalo Velasco C. told Linux Girl.
"Linux is 20+ years old and got professional," he noted. "There is always room for experimentation, but the top 20 are here since more than a decade ago.
"Godspeed GNU/Linux," he added. "I hope some diversity survives -- especially distros without Systemd; on the other hand, some standards are reached through consensus."
### A Question of Package Managers ###
There are two trends at work here, suggested consultant and [Slashdot][4] blogger Gerhard Mack.
First, "there are fewer reasons to start a new distro," he said. "The basic nuts and bolts are mostly done, installation is pretty easy across most distros, and it's not difficult on most hardware to get a working system without having to resort to using the command line."
The second thing is that "we are seeing a reduction of distros with inferior package managers," Mack suggested. "It is clear that .deb-based distros had fewer losses and ended up with a larger overall share."
### Survival of the Fittest ###
It's like survival of the fittest, suggested consultant Rodolfo Saenz, who is certified in Linux, IBM Tivoli Storage Manager and Microsoft Active Directory.
"I prefer to see a strong Linux with less distros," Saenz added. "Too many distros dilutes development efforts and can confuse potential future users."
Fewer distros, on the other hand, "focuses development efforts into the stronger distros and also attracts new potential users with clear choices for their needs," he said.
### All About the Money ###
Google+ blogger Alessandro Ebersol also saw survival of the fittest at play, but he took a darker view.
"Linux is a big game now, with billions of dollars of profit, and it's the best thing since sliced bread," Ebersol began. "But corporations are taking control, and slowly but systematically, community distros are being killed."
It's difficult for community distros to keep pace with the ever-changing field, and cash is a necessity, he conceded.
Still, "Linux is slowly becoming just like BSD, where companies use and abuse it and give very little in return," Ebersol said. "It saddens me, but GNU/Linux's best days were 10 years ago, circa 2002 to 2004. Now, it's the survival of the fittest -- and of course, the ones with more money will prevail."
### 'Fewer Devs Care' ###
SoylentNews blogger hairyfeet focused on today's altered computing landscape.
"The reason there are fewer distros is simple: With everybody moving to the Google Playwall of Android, and Windows 10 looking to be the next XP, fewer devs care," hairyfeet said.
"Why should they?" he went on. "The desktop wars are over, MSFT won, and the mobile wars are gonna be proprietary Google, proprietary Apple and proprietary MSFT. The money is in apps and services, and with a slow economy, there just isn't time for pulling a Taco Bell and rerolling yet another distro.
"For the few that care about Linux desktops you have Ubuntu, Mint and Cent, and that is plenty," hairyfeet said.
### 'No Less Diversity' ###
Last but not least, Chris Travers, a [blogger][5] who works on the [LedgerSMB][6] project, took an optimistic view.
"Ever since I have been around Linux, there have been a few main families -- [SuSE][7], [Red Hat][8], Debian, Gentoo, Slackware -- and a number of forks of these," Travers said. "The number of major families of distros has been declining for some time -- Mandrake and Connectiva merging, for example, Caldera disappearing -- but each of these families is ending up with fewer members as well.
"I think this is a good thing," he concluded.
"The big community distros -- Debian, Slackware, Gentoo, Fedora -- are going strong and picking up a lot of the niche users that other distros catered to," he pointed out. "Many of these distros are making it easier to come up with customized variants for niche markets. So what you have is a greater connectedness within the big distros, and no less diversity."
--------------------------------------------------------------------------------
via: http://www.linuxinsider.com/story/The-Curious-Case-of-the-Disappearing-Distros-81518.html
作者Katherine Noyes
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[1]:http://www.datamation.com/open-source/why-is-the-number-of-linux-distros-declining.html
[2]:http://mrpogson.com/
[3]:http://linuxrants.com/
[4]:http://slashdot.org/
[5]:http://ledgersmbdev.blogspot.com/
[6]:http://www.ledgersmb.org/
[7]:http://www.novell.com/linux
[8]:http://www.redhat.com/

View File

@ -1,88 +0,0 @@
The history of Android
================================================================================
youtube视频地址
<iframe width="640" height="360" frameborder="0" src="http://www.youtube-nocookie.com/embed/e52TSXwj774?start=0&amp;wmode=transparent" type="text/html" style="display:block"></iframe>
### Android 2.0, Éclair—blowing up the GPS industry ###
Forty-one days—that was how much time passed between Android 1.6 and 2.0. The first big version number bump for Android launched in October 2009 [on the Motorola Droid][1], the first "second generation" Android device. The Droid offered huge hardware upgrades over the G1, starting with the massive (at the time) 3.7 inch, 854×480 LCD. It brought a lot more power, too: a (still single-core) 600MHz TI OMAP Cortex A8 with 256MB of RAM.
![The Motorola Droid stares into your soul.](http://cdn.arstechnica.net/wp-content/uploads/2014/03/2181.jpg)
The Motorola Droid stares into your soul.
The most important part of the Droid, though, was the large advertising campaign around it. The Droid was the flagship device for Verizon Wireless in the US, and with that title came a ton of ad money from America's biggest carrier. Verizon licensed the word "droid" from Lucasfilm and started up the ["Droid Does" campaign][2]—a shouty, explosion-filled set of commercials that positioned the device (and by extension, Android) as the violent, ass-kicking alternative to the iPhone. The press frequently declared the T-Mobile G1 as trying to be an “iPhone Killer," but the Droid came out and owned it.
Like the G1, the Droid had a hardware keyboard that slid out from the side of the phone. The trackball was gone, but some kind of d-pad was still mandatory, so Motorola placed a five-way d-pad on the right side of the keyboard. On the front, the Droid switched from hardware buttons to capacitive touch buttons, which were just paint on the glass touchscreen. Android 2.0 also finally allowed devices to do away with the “Call" and “End" buttons. So together with the demotion of the d-pad to the keyboard tray, the front buttons could all fit in a nice, neat strip. The result of all this streamlining was the best-looking Android device yet. The T-Mobile G1 looked like a Fisher-Price toy, but the Motorola Droid looked like an industrial tool that you could cut someone with.
![The lock and home screens from 2.0 and 1.6.](http://cdn.arstechnica.net/wp-content/uploads/2014/01/intro202.png)
The lock and home screens from 2.0 and 1.6.
Photo by Ron Amadeo
Some of Verizon's grungy ad campaign leaked over to the software, where the default wallpaper was changed from a calm, watery vista to a picture of dirty concrete. The boot animation used a pulsing, red, Hal 9000 eyeball and the default notification tone shouted "[DRRRRROOOOIIIIDDDD][3]" every time you received an e-mail. Éclair was Android's angsty teenager phase.
One of the first things Android 2.0 presented to the user was a new lock screen. Slide-to-unlock was patented by Apple, so Google went with a rotary-phone-inspired arc unlock gesture. Putting your finger on the lock icon and sliding right would unlock the device, and sliding left from the volume icon would silence the phone. A thumb naturally moves in an arc, so this felt like an even more natural gesture than sliding in a straight line.
The default homescreen layout scrapped the redundant analog clock widget and introduced what is now an Android staple: a search bar at the top of the home screen. SMS Messaging and the Android Market were also given top billing in the new layout. The app drawer tab was given a sharp redesign, too.
![The app drawers and pictures of the “Add to Home" menus.](http://cdn.arstechnica.net/wp-content/uploads/2014/01/icons.png)
The app drawers and pictures of the “Add to Home" menus.
Photo by Ron Amadeo
Android was developed at such a breakneck pace in the early days that the Android Team could never really plan for future devices when making interface art. The Motorola Droid—with its 854×480 LCD—was a huge bump up in resolution over the 320×480 G1-era devices. Nearly everything needed to be redrawn. Starting from scratch with interface art would pretty much be the main theme of Android 2.0.
Google took this opportunity to redesign almost every icon in Android, going from a cartoony look with an isometric perspective to straight-on icons done in a more serious style. The only set of icons that weren't redrawn were the status bar icons, which now look very out of place compared to the rest of the OS. These icons would hang around from Android 0.9 until 2.3.
There were a few changes to the app lineup as well. Camcorder was merged into the camera, the IM app was killed, and two new Google-made apps were added: Car Home, a launcher with big buttons designed for use while driving, and Corporate Calendar, which is identical to the regular calendar except it supports Exchange instead of Google Calendar. Weirdly, Google also included two third-party apps out of the box: Facebook and Verizon's Visual VM app. (Neither works today.) The second set of pictures displays the “Add to Home screen" menu, and it received all new art, too.
![A Places page, showing the “Navigate" option, the Navigation disclaimer, the actual Navigation screen, and the traffic info screen.](http://cdn.arstechnica.net/wp-content/uploads/2014/01/nav2.png)
A Places page, showing the “Navigate" option, the Navigation disclaimer, the actual Navigation screen, and the traffic info screen.
Photo by Ron Amadeo
Beyond a redesign, the clear headline feature of Android 2.0 was Google Maps Navigation. Google updated Maps to allow for free turn-by-turn navigation, complete with a point of interest search and text to speech, which could read the names of streets aloud just like a standalone GPS unit. Turning GPS navigation from a separate product into a free smartphone feature pretty much [destroyed][4] the standalone GPS market overnight. TomTom's stock dropped almost 40 percent during the week of Android 2.0's launch.
But navigation was pretty hard to get to at first. You had to open the search box, type in a place or address, and tap on the search result. Next, after tapping on the "Navigate" button, Google showed a warning stating that Navigation was in beta and should not be trusted. After tapping on "accept," you could jump in a car, and a harsh-sounding robot voice would guide you to your destination. Hidden behind the menu button was an option to check out the traffic and accidents for the entire route. This design of Navigation hung around forever. Even when the main Google Maps interface was updated in Android 4.0, the Android 2.0 stylings in the Navigation section hung around until almost Android 4.3.
Maps would also show a route overview, which contained traffic data for your route. At first it was just licensed by the usual traffic data provider, but later, Google would use information from Android and iOS phones running Google Maps to [crowd source traffic data][5]. It was the first step in Google's dominance of the mobile map game. After all, real-time traffic monitoring is really just a matter of how many points of data you have. Today, with hundreds of millions of Google Maps users across iOS and Android, Google has become the best provider of traffic data in the world.
With Maps Navigation, Android finally found its killer app. Google was offering something no one else could. There was finally an answer to the "Why should I buy this over an iPhone?" question. Google Maps didn't require PC-based updating like many GPS units did, either. It was always up-to-date thanks to the cloud, and all of those updates were free. The only downside was that you needed an Internet connection to use Google Maps.
As was greatly publicized during the [Apple Maps fiasco][6], accurate maps have become one of the most important features of a smartphone, even if no one really appreciates them when they work. Mapping the world is really only solvable with tons of person power, and today, Google's "Geo" division is the largest in the company with more than [7,000 employees][7]. For most of these people, their job is to literally drive down every road in the world with the company's camera-filled Street View cars. After eight years of data collection, Google has more than [five million miles][8] of 360-degree Street View imagery, and Google Maps is one of the biggest, most untouchable pillars of the company.
![The Car Home screen, and, because we have room, a horizontal version of Navigation.](http://cdn.arstechnica.net/wp-content/uploads/2014/01/carhome1.png)
The Car Home screen, and, because we have room, a horizontal version of Navigation.
Photo by Ron Amadeo
Along with Google Maps Navigation came "Car Home," a large-buttoned home screen designed to help you use your phone while driving. It wasn't customizable, and each button was just a shortcut to a standard app. The Motorola Droid and its official [car dock accessory][9] had special magnets that would automatically trigger Car Home. While docked, pressing the hardware home button on the Droid would open Car Home instead of the normal home screen, and an on-screen home button led to the normal home screen.
Car Home, while useful, didn't last long—it was cut in Android 3.0 and never came back. GPS systems are almost entirely used in cars while driving, but encouraging users to do so with options like "search," which would bring up a keyboard, is something that Google's lawyers probably weren't very fond of. With [Apple's CarPlay][10] and Google's [Open Automotive Alliance][11], car computers are seeing a resurgence these days. This time, though, there is more of a focus on safety, and government organizations like the National Highway Traffic Safety Administration are on board to help out.
----------
![Ron Amadeo](http://cdn.arstechnica.net/wp-content//uploads/authors/ron-amadeo-sq.jpg)
[Ron Amadeo][a] / Ron is the Reviews Editor at Ars Technica, where he specializes in Android OS and Google products. He is always on the hunt for a new gadget and loves to rip things apart to see how they work.
[@RonAmadeo][t]
--------------------------------------------------------------------------------
via: http://arstechnica.com/gadgets/2014/06/building-android-a-40000-word-history-of-googles-mobile-os/10/
译者:[译者ID](https://github.com/译者ID) 校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[1]:http://arstechnica.com/gadgets/2009/12/review-of-the-motorola-droid/
[2]:http://www.youtube.com/watch?v=e52TSXwj774
[3]:http://www.youtube.com/watch?v=UBL47tHrvMA
[4]:http://techcrunch.com/2009/10/28/googles-new-mobile-app-cuts-gps-nav-companies-at-the-knees/
[5]:http://googleblog.blogspot.com/2009/08/bright-side-of-sitting-in-traffic.html
[6]:http://arstechnica.com/apple/2012/09/apple-ceo-tim-cook-apologizes-for-ios-6-maps-promises-improvements/
[7]:http://www.businessinsider.com/apple-has-7000-fewer-people-working-on-maps-than-google-2012-9
[8]:https://developers.google.com/events/io/sessions/383278298
[9]:http://www.amazon.com/Motorola-Generation-Vehicle-Charger-Packaging/dp/B002Y3BYQA
[10]:http://arstechnica.com/apple/2014/03/ios-in-the-car-becomes-carplay-coming-to-select-dashboards-this-year/
[11]:http://arstechnica.com/information-technology/2014/01/open-automotive-alliance-aims-to-bring-android-inside-the-car/
[a]:http://arstechnica.com/author/ronamadeo
[t]:https://twitter.com/RonAmadeo

View File

@ -1,4 +1,3 @@
惊现译者CHINAANSHE 翻译!!
How to configure HTTP load balancer with HAProxy on Linux
================================================================================
Increased demand on web based applications and services are putting more and more weight on the shoulders of IT administrators. When faced with unexpected traffic spikes, organic traffic growth, or internal challenges such as hardware failures and urgent maintenance, your web application must remain available, no matter what. Even modern devops and continuous delivery practices can threaten the reliability and consistent performance of your web service.

View File

@ -1,143 +0,0 @@
7 Things to Do After Installing Ubuntu 14.10 Utopic Unicorn
================================================================================
After youve installed or [upgraded to Ubuntu 14.10][1], known by its codename Utopic Unicorn, there are a few things you should do to get it up and running in tip-top shape.
Whether youve performed a fresh install or upgraded an existing version, heres our biannual checklist of post-install tasks to get started with.
### 1. Get Acquainted ###
![The Ubuntu Browser](http://www.omgubuntu.co.uk/wp-content/uploads/2014/10/Screen-Shot-2014-10-23-at-20.02.54.png)
The Ubuntu Browser
The majority of changes rocking up in Ubuntu 14.10 arent immediately visible (save for some new wallpapers). That said, there are a bunch of freshly updated apps to get familiar with.
Preinstalled are the latest versions of workhorse staples **Mozilla Firefox**, **Thunderbird**, and **LibreOffice**. Dig a little deeper and you'll also find Evince 3.14, and a brand new version of the “Ubuntu Web Browser” app, used for handling web-apps.
While youre getting familiar, be sure to fire up the Software Updater tool to **check for any impromptu issues Ubuntu has found and fixed** post-release. Yes, I know: you only just upgraded. But, even so — bugs dont adhere to deadlines like developers do!
### 2. Personalise The Desktop ###
![New wallpapers in 14.10](http://www.omgubuntu.co.uk/wp-content/uploads/2014/10/wallpapers-new-in-14.10.jpg)
New wallpapers in 14.10
Its your desktop PC, so dont put off making it look, feel and behave how you like.
Your first port of call might be changing the desktop wallpaper to one of the [twelve stunning new backgrounds][2] included in 14.10, ranging from retro record player to illustrated unicorn.
Wallpapers and a host of other theme and layout options are accessible from the **Appearance Settings** pane of the System Settings app. From here you can:
- Switch to a different theme
- Adjust launcher size & behaviour
- Enable workspaces & desktop icons
- Put app menus back into app windows
For some nifty new themes be sure to check out our **themes & icons category** here on the site.
### 3. Install Graphics Card Drivers ###
![](http://www.omgubuntu.co.uk/wp-content/uploads/2014/10/additional-drivers.jpg)
If you plan on playing the [latest Steam games][3], watching high-definition video or working with graphically intensive software youll want to enable the latest Linux graphics drivers available for your hardware.
Ubuntu makes this easy:
- Open up the Software & Updates tool from the Unity Dash
- Click the Additional Drivers tab
- Follow any on-screen prompts to check, install and apply changes
### 4. Enable Music & Video Codecs ###
![](http://www.omgubuntu.co.uk/wp-content/uploads/2014/10/msuci.jpg)
Games sorted, now to make **music and video files work just as well**.
Most popular formats, .mp3, .m4a, .mov, etc., will work fine in Ubuntu — after a little cajoling. Patent-encumbered codecs cannot ship in Ubuntu for legal reasons, leaving you unable to play popular audio and video formats out of the (invisible) box.
Dont panic. To play music or watch video you can install all of the codecs you need quickly, and through the Ubuntu Software Center.
- [Install Third-Party Codecs][4]
### 5. Pimp Your Privacy ###
![](http://www.omgubuntu.co.uk/wp-content/uploads/2014/10/privacy-in-ubuntu-settingd.jpg)
The Unity Dash is a great one-stop hub for finding stuff, be it a PDF file lurking on your computer or the current weather forecast in Stockholm, Sweden.
But the diversity of data surfaced through the Dash in just a few keystrokes doesnt suit everyones needs. So you may want to dial down the noise and restrict what shows up.
To stop certain files and folders from being searched in the Dash and/or to disable all online results returned for a query, head to the **Privacy & Security** section in System Settings.
Here youll find all the tools, options and configuration switches you need, including options to:
- Choose what apps & files can be searched from the Dash
- Whether to require a password on waking from suspend
- Disable sending error reports to Canonical
- Turn off all online features of the Dash
### 6. Swap The Default Apps For Your Faves ###
![Make it yours](http://www.omgubuntu.co.uk/wp-content/uploads/2014/10/more-apps.jpg)
Make it yours
Ubuntu comes preloaded with a tonne of apps, including a web browser (Mozilla Firefox), e-mail client (Thunderbird), music player (Rhythmbox), office suite (LibreOffice) and instant messenger (Empathy Instant Messenger).
All well and good, theyre not everyones cup of tea. The Ubuntu Software Center is home to a slew of app alternatives, including:
- VLC Versatile media player
- Steam Games distribution platform
- [Geary — Easy-to-use desktop e-mail app][5]
- GIMP Advanced image editor similar to Photoshop
- Clementine — Stylish, fully-featured music player
- Chromium open-source version of Google Chrome (without Flash)
The Ubuntu Software Center plays host to a huge range of other apps, many of which you might not have heard of before. Since most apps are free, dont be scared to try things out!
### 7. Grab The Essentials ###
![Netflix in Chrome on Ubuntu](http://www.omgubuntu.co.uk/wp-content/uploads/2014/10/netflix-linux-working-in-chrome.jpg)
Netflix in Chrome on Ubuntu
Software Center apps aside, you may also wish to grab big-name apps like Skype, Spotify and Dropbox.
Google Chrome is also a must if you wish to watch Netflix natively on Ubuntu or benefit from the latest, safest version of Flash.
Most of these apps are available to download directly from their respective websites and can be installed on Ubuntu with a couple of clicks.
- [Download Skype for Linux][6]
- [Download Google Chrome for Linux][7]
- [Download Dropbox for Linux][8]
- [How to Install Spotify in Ubuntu][9]
Talking of Google Chrome — did you know you can (unofficially) [install and run Android apps through it?][9] Oh yes ;)
#### Finally… ####
The items above are not the only ones applicable post-upgrade. Read through and follow the ones that chime with you, and feel free to ignore those that dont.
Secondly, this is a list for those who've upgraded to or installed Ubuntu 14.10. We're not going to walk you through carving it up into something that isn't Ubuntu. If Unity isn't your thing that's fine, but be logical about it; save yourself some time and install one of the official flavours or offshoots instead.
--------------------------------------------------------------------------------
via: http://www.omgubuntu.co.uk/2014/10/7-things-to-do-after-installing-ubuntu-14-10-utopic-unicorn
作者:[Joey-Elijah Sneddon][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:https://plus.google.com/117485690627814051450/?rel=author
[1]:http://www.omgubuntu.co.uk/2014/10/ubuntu-14-10-release-download-now
[2]:http://www.omgubuntu.co.uk/2014/09/ubuntu-14-10-wallpaper-contest-winners
[3]:http://www.omgubuntu.co.uk/category/gaming
[4]:https://apps.ubuntu.com/cat/applications/ubuntu-restricted-extras/
[5]:http://www.omgubuntu.co.uk/2014/09/new-shotwell-geary-stable-release-available-to-downed
[6]:http://www.skype.com/en/download-skype/skype-for-linux/
[7]:http://www.google.com/chrome
[8]:https://www.dropbox.com/install?os=lnx
[9]:http://www.omgubuntu.co.uk/2013/01/how-to-install-spotify-in-ubuntu-12-04-12-10
[10]:http://www.omgubuntu.co.uk/2014/09/install-android-apps-ubuntu-archon

View File

@ -1,5 +1,3 @@
[felixonmars translating...]
“ntpq -p” output
================================================================================
The [Gentoo][1] (and others?) [incomplete man pages for “ntpq -p”][2] merely give the description: “*Print a list of the peers known to the server as well as a summary of their state.*”

View File

@ -1,4 +1,3 @@
[felixonmars translating...]
How To Use Emoji Anywhere With Twitter's Open Source Library
================================================================================
> Embed them in webpages and other projects via GitHub.

View File

@ -1,189 +0,0 @@
johnhoow translating...
Important 10 Linux ps command Practical Examples
================================================================================
As an Operating System which inspired from Unix, Linux has a built-in tool to capture current processes on the system. This tool is available in command line interface.
### What is PS Command ###
From its manual page, PS gives a snapshot of the current processes. It will “capture” the system condition at a single point in time. If you want repetitive updates in real time, we can use the top command.
PS supports three (3) types of usage syntax styles.
1. UNIX style, which may be grouped and **must** be preceded by a dash
2. BSD style, which may be grouped and **must not be** used with a dash
3. GNU long options, which are preceded by two dash
We can mix those style, but conflicts can appear. In this article, will use UNIX style. Heres are some examples of PS command in a daily use.
### 1. Run ps without any options ###
This is a very basic **ps** usage. Just type ps on your console to see its result.
![ps with no options](http://blog.linoxide.com/wp-content/uploads/2014/10/ps_no_options.png)
By default, it will show us 4 columns of information.
- PID is a Process ID of the running command (CMD)
- TTY is a place where the running command runs
- TIME tell about how much time is used by CPU while running the command
- CMD is a command that run as current process
This information is displayed in unsorted result.
### 2. Show all current processes ###
To do this, we can use the **-a** option. As we can guess, **-a stands for “all”**, while x will show all processes, even those not associated with any TTY (terminal).
$ ps -ax
The result might be a long list. To make it easier to read, combine it with the less command.
$ ps -ax | less
![ps all information](http://blog.linoxide.com/wp-content/uploads/2014/10/ps_ax.png)
### 3. Filter processes by its user ###
For some situation we may want to filter processes by user. To do this, we can use **-u** option. Let say we want to see what processes which run by user pungki. So the command will be like below
$ ps -u pungki
![filter by user](http://blog.linoxide.com/wp-content/uploads/2014/10/ps__u.png)
### 4. Filter processes by CPU or memory usage ###
Another thing that you might want to do is filter the result by CPU or memory usage. With this, you can grab information about which processes consume your resources. To do this, we can use the **aux options**. Here's an example:
$ ps -aux | less
![show all information](http://blog.linoxide.com/wp-content/uploads/2014/10/ps_aux.png)
Since the result can be a long list, we can **pipe** the output of the ps command into the less command.
By default, the result will be in unsorted form. If we want to sort by particular column, we can add **--sort** option into ps command.
Sort by the highest **CPU utilization** in descending order
$ ps -aux --sort -pcpu | less
![sort by cpu usage](http://blog.linoxide.com/wp-content/uploads/2014/10/ps_aux_sort_cpu.png)
Sort by the highest **Memory utilization** in descending order
$ ps -aux --sort -pmem | less
![sort by memory usage](http://blog.linoxide.com/wp-content/uploads/2014/10/ps_aux_sort_mem.png)
Or we can combine it into a single command and display only the top ten results:
$ ps -aux --sort -pcpu,+pmem | head -n 10
### 5. Filter processes by its name or process ID ###
To do this, we can use the **-C option** followed by the keyword. Let's say we want to show processes named getty. We can type:
$ ps -C getty
![filter by its name or process ID](http://blog.linoxide.com/wp-content/uploads/2014/10/ps_C.png)
If we want to show more detail about the result, we can add the -f option to show it in a full format listing. The above command will then look like below:
$ ps -f -C getty
![filter by its name or process ID](http://blog.linoxide.com/wp-content/uploads/2014/10/ps_C_f.png)
### 6. Filter processes by thread of process ###
If we need to know the thread of a particular process, we can use **-L option** followed by its Process ID (PID). Heres an example of **-L option** in action :
$ ps -L 1213
![show processes in threaded view](http://blog.linoxide.com/wp-content/uploads/2014/10/ps_L.png)
As we can see, the PID remains the same value, but the LWP column, which shows the thread IDs, shows different values.
### 7. Show processes in hierarchy ###
Sometime we want to see the processes in hierarchical form. To do this, we can use **-axjf** options.
$ps -axjf
![show in hierarchy](http://blog.linoxide.com/wp-content/uploads/2014/10/ps_axjf.png)
Or, another command which we can use is pstree.
$ pstree
![show information in hierarchy](http://blog.linoxide.com/wp-content/uploads/2014/10/pstree.png)
### 8. Show security information ###
If we want to see who is currently logged on into your server, we can see it using the ps command. There are some options that we can use to fulfill our needs. Heres some examples :
$ ps -eo pid,user,args
**Option -e** will show you all processes while **-o option** will control the output. **Pid**, **User and Args** will show you the **Process ID**, **the User who run the application** and **the running application**.
![show security information](http://blog.linoxide.com/wp-content/uploads/2014/10/ps_security_1.png)
The keyword / user-defined format that can be used with **-e option** are **args, cmd, comm, command, fname, ucmd, ucomm, lstart, bsdstart and start**.
### 9. Show every process running as root (real & effective ID) in user format ###
System admin may want to see what processes are being run by root and other information related to it. Using ps command, we can do by this simple command :
$ ps -U root -u root u
The **-U parameter** will select by **real user ID (RUID)**. It selects the processes whose real user name or ID is in the userlist list. The real User ID identifies the user who created the process.
While the **-u parameter** will select by the effective user ID (EUID).
The last **u** parameter will display the output in a user-oriented format which contains the **User, PID, %CPU, %MEM, VSZ, RSS, TTY, STAT, START, TIME and COMMAND** columns.
Heres the output of the above command.
![show real and effective User ID](http://blog.linoxide.com/wp-content/uploads/2014/10/ps_root_real_effective_ID.png)
### 10. Use PS in a realtime process viewer ###
ps will display a report of what happens in your system. The result will be a static report.
Let's say we want to filter processes by CPU and memory usage as in point 4 above, and we want the report to be updated every second. We can do it by **combining the ps command with the watch command** on Linux.
Heres the command :
$ watch -n 1 ps -aux --sort -pmem,-pcpu
![combine ps with watch](http://blog.linoxide.com/wp-content/uploads/2014/10/ps_watch_1.png)
If you feel the report is too long, **we can limit it** to - let's say - the top 20 processes. We can add the **head** command to do it.
$ watch -n 1 'ps -aux --sort -pmem,-pcpu | head -n 20'
![combine ps with watch](http://blog.linoxide.com/wp-content/uploads/2014/10/ps_watch_2.png)
This live reporter **is not** like top or htop, of course. **But the advantage of using ps** to make a live report is that you can customize the fields. You can choose which fields you want to see.
For example, **if you need only the pungki user shown**, then you can change the command to become like this :
$ watch -n 1 'ps -aux -U pungki u --sort -pmem,-pcpu | head -n 20'
![combine ps with watch](http://blog.linoxide.com/wp-content/uploads/2014/10/ps_watch_3.png)
### Conclusion ###
You may use **ps** in your daily work to monitor what happens on your Linux system. But actually, you can generate various types of reports using the **ps** command with the appropriate parameters.
**Another ps advantage** is that **ps** is installed by default on any kind of Linux. So you can just start to use it.
Don't forget to see the **ps documentation** by typing **man ps** on your Linux console to explore more options.
--------------------------------------------------------------------------------
via: http://linoxide.com/how-tos/linux-ps-command-examples/
作者:[Pungki Arianto][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://linoxide.com/author/pungki/

View File

@ -1,5 +1,3 @@
forsil translating ...
Undelete Files on Linux Systems
================================================================================
Often times, a computer user will delete a needed file accidentally and not have an easy way to regain or recreate the file. Thankfully, files can be undeleted. When a user deletes a file, it is not gone, only hidden for some time. Here is how it all works. On a filesystem, the system has what is called a file allocation list. This list keeps track of what files are where on the storage unit (hard-drive, MicroSD card, flash-drive, etc.). When a file is deleted, the filesystem will perform one of two tasks on the allocation table. The file's entry on the file allocation table marked as "free space" or the file's entry on the list is erased and then the space is marked as free. Now, if a file needs to be placed on the storage unit, the operating system will put the file in the space marked as empty. After the new file is written to the "empty space", the deleted file is now gone forever. When a deleted file is to be recovered, the user must not manipulate any files because if the "empty space" is used, then the file can never be retrieved.

View File

@ -1,248 +0,0 @@
SPccman translating
How to create a custom backup plan for Debian with backupninja
================================================================================
Backupninja is a powerful and highly-configurable backup tool for Debian based distributions. In the [previous tutorial][1], we explored how to install backupninja and how to set up two backup actions for the program to perform. However, we should note that those examples were only "the tip of the iceberg," so to speak. In this post we will discuss how to leverage custom handlers and helpers that allow this program to be customized in order to accomplish almost any backup need that you can think of.
And believe me - that is not an overstatement, so let's begin.
### A Quick Review of Backupninja ###
One of backupninja's distinguishing features is the fact that you can just drop plain text configuration or action files in /etc/backup.d, and the program will take care of the rest. In addition, we can write custom scripts (aka "handlers") and place them in /usr/share/backupninja to handle each type of backup action. Furthermore, we can have these scripts be executed via ninjahelper's ncurses-based interactive menus (aka "helpers") to guide us to create the configuration files we mentioned earlier, minimizing the chances of human error.
### Creating a Custom Handler and Helper ###
Our goal in this case is to create a script to handle the backup of chosen home directories into a tarball with either **gzip** or **bzip2** compression, excluding music and video files. We will simply name this script home, and place it under /usr/share/backupninja.
Although you could achieve the same objective with the default tar handler (refer to /usr/share/backupninja/tar and /usr/share/backupninja/tar.helper), we will use this approach to show how to create a useful handler script and ncurses-based helper from scratch. You can then decide how to apply the same principles depending on your specific needs.
Note that since handlers are sourced from the main script, there is no need to start with #!/bin/bash at the top.
Our proposed handler (/usr/share/backupninja/home) is as follows. It is heavily commented for clarification. The getconf function is used to read the backup action's configuration file. If you specify a value for a variable here, it will override the corresponding value present in the configuration file:
# home handler script for backupninja
# Every backup file will identify the host by its FQDN
getconf backupname
# Directory to store backups
getconf backupdir
# Default compression
getconf compress
# Include /home directory
getconf includes
# Exclude files with *.mp3 and *.mp4 extensions
getconf excludes
# Default extension for the packaged backup file
getconf EXTENSION
# Absolute path to tar binary
getconf TAR `which tar`
# Absolute path to date binary
getconf DATE `which date`
# Chosen date format
DATEFORMAT="%Y-%m-%d"
# If backupdir does not exist, exit with fatal error
if [ ! -d "$backupdir" ]
then
mkdir -p "$backupdir" || fatal "Can not make directory $backupdir"
fi
# If backupdir is not writeable, exit with fatal error as well
if [ ! -w "$backupdir" ]
then
fatal "Directory $backupdir is not writable"
fi
# Set the right tar option as per the chosen compression format
case $compress in
"gzip")
compress_option="-z"
EXTENSION="tar.gz"
;;
"bzip")
compress_option="-j"
EXTENSION="tar.bz2"
;;
"none")
compress_option=""
;;
*)
warning "Unknown compress filter ($tar_compress)"
compress_option=""
EXTENSION="tar.gz"
;;
esac
# Exclude the following file types / directories
exclude_options=""
for i in $excludes
do
exclude_options="$exclude_options --exclude $i"
done
# Debugging messages, performing backup
debug "Running backup: " $TAR -c -p -v $compress_option $exclude_options \
-f "$backupdir/$backupname-"`$DATE "+$DATEFORMAT"`".$EXTENSION" \
$includes
# Redirect standard output to a file with .list extension
# and standard error to a file with .err extension
$TAR -c -p -v $compress_option $exclude_options \
-f "$backupdir/$backupname-"`$DATE "+$DATEFORMAT"`".$EXTENSION" \
$includes \
> "$backupdir/$backupname-"`$DATE "+$DATEFORMAT"`.list \
2> "$backupdir/$backupname-"`$DATE "+$DATEFORMAT"`.err
[ $? -ne 0 ] && fatal "Tar backup failed"
Next, we will create our helper file (/usr/share/backupninja/home.helper) so that our handlers shows up as a menu in **ninjahelper**:
# Backup action's description. Separate words with underscores.
HELPERS="$HELPERS home:backup_of_home_directories"
home_wizard() {
home_title="Home action wizard"
backupname=`hostname --fqdn`
# Specify default value for the time when this backup actions is supposed to run
inputBox "$home_title" "When to run this action?" "everyday at 01"
[ $? = 1 ] && return
home_when_run="when = $REPLY"
# Specify default value for backup file name
inputBox "$home_title" "\"Name\" of backups" "$backupname"
[ $? = 1 ] && return
home_backupname="backupname = $REPLY"
backupname="$REPLY"
# Specify default directory to store the backups
inputBox "$home_title" "Directory where to store the backups" "/var/backups/home"
[ $? = 1 ] && return
home_backupdir="backupdir = $REPLY"
# Specify default values for the radiobox
radioBox "$home_title" "Compression" \
"none" "No compression" off \
"gzip" "Compress with gzip" on \
"bzip" "Compress with bzip" off
[ $? = 1 ] && return;
result="$REPLY"
home_compress="compress = $REPLY "
REPLY=
while [ -z "$REPLY" ]; do
formBegin "$home_title: Includes"
formItem "Include:" /home/gacanepa
formDisplay
[ $? = 0 ] || return 1
home_includes="includes = "
for i in $REPLY; do
[ -n "$i" ] && home_includes="$home_includes $i"
done
done
REPLY=
while [ -z "$REPLY" ]; do
formBegin "$home_title: Excludes"
formItem "Exclude:" *.mp3
formItem "Exclude:" *.mp4
# Add as many “Exclude” text boxes as needed to specify other exclude options
formItem "Exclude:"
formItem "Exclude:"
formDisplay
[ $? = 0 ] || return 1
home_excludes="excludes = "
for i in $REPLY; do
[ -n "$i" ] && home_excludes="$home_excludes $i"
done
done
# Save the config
get_next_filename $configdirectory/10.home
cat > $next_filename <<EOF
$home_when_run
$home_backupname
$home_backupdir
$home_compress
$home_includes
$home_excludes
# tar binary - have to be GNU tar
TAR `which tar`
DATE `which date`
DATEFORMAT "%Y-%m-%d"
EXTENSION tar
EOF
# Backupninja requires that configuration files be chmoded to 600
chmod 600 $next_filename
}
### Running Ninjahelper ###
Once we have created our handler script named home and the corresponding helper named home.helper, let's run ninjahelper command to create a new backup action:
# ninjahelper
And choose create a new backup action.
![](https://farm8.staticflickr.com/7467/15322605273_90edaa5bc1_z.jpg)
We will now be presented with the available action types. Let's select "backup of home directories":
![](https://farm9.staticflickr.com/8636/15754955450_f3ef82217b_z.jpg)
The next screens will display the default values as set in the helper (only 3 of them are shown here). Feel free to edit the values in the text box. Particularly, refer to the scheduling section of the documentation for the right syntax for the when variable.
![](https://farm8.staticflickr.com/7508/15941578982_24b680e1c3_z.jpg)
![](https://farm8.staticflickr.com/7562/15916429476_6e84b307aa_z.jpg)
![](https://farm8.staticflickr.com/7528/15319968994_41705b7283_z.jpg)
When you are done creating the backup action, it will show in ninjahelper's initial menu:
![](https://farm8.staticflickr.com/7534/15942239225_bb66dbdb63.jpg)
Then you can press ENTER to show the options available for this action. Feel free to experiment with them, as their description is quite straightforward.
Particularly, "run this action now" will execute the backup action in debug mode immediately regardless of the scheduled time:
![](https://farm8.staticflickr.com/7508/15754955470_9af6251096_z.jpg)
Should the backup action fail for some reason, the debug will display an informative message to help you locate the error and correct it. Consider, for example, the following error messages that were displayed after running a backup action with bugs that have not been corrected yet:
![](https://farm9.staticflickr.com/8662/15754955480_487d040fcd_z.jpg)
The image above tells you that the connection needed to complete the backup action could not be completed because the remote host seems to be down. In addition, the destination directory specified in the helper file does not exist. Once you correct the problems, re-run the backup action.
A few things to remember:
- If you create a custom script in /usr/share/backupninja (e.g., foobar) to handle a specific backup action, you also need to write a corresponding helper (e.g., foobar.helper) in order to create, through ninjahelper, a file named 10.foobar (11 and onward for further actions as well) in /etc/backup.d, which is the actual configuration file for the backup action.
- You can execute your backups at any given time via ninjahelper as explained earlier, or have them run as per the specified frequency in the when variable.
### Summary ###
In this post we have discussed how to create our own backup actions from scratch and how to add a related menu in ninjahelper to facilitate the creation of configuration files. With the previous [backupninja article][2] and the present one I hope I've given you enough good reasons to go ahead and at least try it.
--------------------------------------------------------------------------------
via: http://xmodulo.com/create-custom-backup-plan-debian.html
作者:[ Gabriel Cánepa][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://xmodulo.com/author/gabriel
[1]:http://xmodulo.com/backup-debian-system-backupninja.html
[2]:http://xmodulo.com/backup-debian-system-backupninja.html

View File

@ -1,249 +0,0 @@
文章重复
How to create a custom backup plan for Debian with backupninja
================================================================================
Backupninja is a powerful and highly-configurable backup tool for Debian based distributions. In the [previous tutorial][1], we explored how to install backupninja and how to set up two backup actions for the program to perform. However, we should note that those examples were only "the tip of the iceberg," so to speak. In this post we will discuss how to leverage custom handlers and helpers that allow this program to be customized in order to accomplish almost any backup need that you can think of.
And believe me - that is not an overstatement, so let's begin.
### A Quick Review of Backupninja ###
One of backupninja's distinguishing features is the fact that you can just drop plain text configuration or action files in /etc/backup.d, and the program will take care of the rest. In addition, we can write custom scripts (aka "handlers") and place them in /usr/share/backupninja to handle each type of backup action. Furthermore, we can have these scripts be executed via ninjahelper's ncurses-based interactive menus (aka "helpers") to guide us to create the configuration files we mentioned earlier, minimizing the chances of human error.
### Creating a Custom Handler and Helper ###
Our goal in this case is to create a script to handle the backup of chosen home directories into a tarball with either gzip or bzip2 compression, excluding music and video files. We will simply name this script home, and place it under /usr/share/backupninja.
Although you could achieve the same objective with the default tar handler (refer to /usr/share/backupninja/tar and /usr/share/backupninja/tar.helper), we will use this approach to show how to create a useful handler script and ncurses-based helper from scratch. You can then decide how to apply the same principles depending on your specific needs.
Note that since handlers are sourced from the main script, there is no need to start with #!/bin/bash at the top.
Our proposed handler (/usr/share/backupninja/home) is as follows. It is heavily commented for clarification. The getconf function is used to read the backup action's configuration file. If you specify a value for a variable here, it will override the corresponding value present in the configuration file:
# home handler script for backupninja
# Every backup file will identify the host by its FQDN
getconf backupname
# Directory to store backups
getconf backupdir
# Default compression
getconf compress
# Include /home directory
getconf includes
# Exclude files with *.mp3 and *.mp4 extensions
getconf excludes
# Default extension for the packaged backup file
getconf EXTENSION
# Absolute path to tar binary
getconf TAR `which tar`
# Absolute path to date binary
getconf DATE `which date`
# Chosen date format
DATEFORMAT="%Y-%m-%d"
# If backupdir does not exist, try to create it; exit with fatal error on failure
if [ ! -d "$backupdir" ]
then
   mkdir -p "$backupdir" || fatal "Can not make directory $backupdir"
fi
# If backupdir is not writeable, exit with fatal error as well
if [ ! -w "$backupdir" ]
then
   fatal "Directory $backupdir is not writable"
fi
# Set the right tar option as per the chosen compression format
case $compress in
   "gzip")
      compress_option="-z"
      EXTENSION="tar.gz"
      ;;
   "bzip")
      compress_option="-j"
      EXTENSION="tar.bz2"
      ;;
   "none")
      compress_option=""
      ;;
   *)
      # NOTE: was "$tar_compress", which is never set anywhere in this
      # handler; report the variable that is actually read.
      warning "Unknown compress filter ($compress)"
      compress_option=""
      EXTENSION="tar.gz"
      ;;
esac
# Exclude the following file types / directories
exclude_options=""
for i in $excludes
do
   exclude_options="$exclude_options --exclude $i"
done
# Evaluate the date ONCE so the archive, .list and .err files share the same
# stamp (repeated `date` calls could straddle midnight and disagree).
DATESTAMP=`$DATE "+$DATEFORMAT"`
# Debugging messages, performing backup
debug "Running backup: " $TAR -c -p -v $compress_option $exclude_options \
   -f "$backupdir/$backupname-$DATESTAMP.$EXTENSION" \
   $includes
# Redirect standard output to a file with .list extension
# and standard error to a file with .err extension
$TAR -c -p -v $compress_option $exclude_options \
   -f "$backupdir/$backupname-$DATESTAMP.$EXTENSION" \
   $includes \
   > "$backupdir/$backupname-$DATESTAMP.list" \
   2> "$backupdir/$backupname-$DATESTAMP.err"
[ $? -ne 0 ] && fatal "Tar backup failed"
Next, we will create our helper file (/usr/share/backupninja/home.helper) so that our handler shows up as a menu in ninjahelper:
# Backup action's description. Separate words with underscores.
HELPERS="$HELPERS home:backup_of_home_directories"
home_wizard() {
   home_title="Home action wizard"
   backupname=`hostname --fqdn`
   # Default value for the time when this backup action is supposed to run
   inputBox "$home_title" "When to run this action?" "everyday at 01"
   [ $? = 1 ] && return
   home_when_run="when = $REPLY"
   # Default value for the backup file name
   inputBox "$home_title" "\"Name\" of backups" "$backupname"
   [ $? = 1 ] && return
   home_backupname="backupname = $REPLY"
   backupname="$REPLY"
   # Default directory to store the backups
   inputBox "$home_title" "Directory where to store the backups" "/var/backups/home"
   [ $? = 1 ] && return
   home_backupdir="backupdir = $REPLY"
   # Default values for the compression radiobox
   radioBox "$home_title" "Compression" \
      "none" "No compression" off \
      "gzip" "Compress with gzip" on \
      "bzip" "Compress with bzip" off
   [ $? = 1 ] && return;
   result="$REPLY"
   home_compress="compress = $REPLY "
   REPLY=
   while [ -z "$REPLY" ]; do
      formBegin "$home_title: Includes"
      formItem "Include:" /home/gacanepa
      formDisplay
      [ $? = 0 ] || return 1
      home_includes="includes = "
      for i in $REPLY; do
         [ -n "$i" ] && home_includes="$home_includes $i"
      done
   done
   REPLY=
   while [ -z "$REPLY" ]; do
      formBegin "$home_title: Excludes"
      # Quote the patterns: unquoted *.mp3 / *.mp4 would be glob-expanded
      # by the shell against files in the current directory before ever
      # reaching the dialog.
      formItem "Exclude:" "*.mp3"
      formItem "Exclude:" "*.mp4"
      # Add as many "Exclude" text boxes as needed to specify other exclude options
      formItem "Exclude:"
      formItem "Exclude:"
      formDisplay
      [ $? = 0 ] || return 1
      home_excludes="excludes = "
      for i in $REPLY; do
         [ -n "$i" ] && home_excludes="$home_excludes $i"
      done
   done
   # Save the config (quote the target path in case it contains spaces)
   get_next_filename $configdirectory/10.home
   cat > "$next_filename" <<EOF
$home_when_run
$home_backupname
$home_backupdir
$home_compress
$home_includes
$home_excludes
# tar binary - have to be GNU tar
TAR `which tar`
DATE `which date`
DATEFORMAT "%Y-%m-%d"
EXTENSION tar
EOF
   # Backupninja requires that configuration files be chmoded to 600
   chmod 600 "$next_filename"
}
### Running Ninjahelper ###
Once we have created our handler script named home and the corresponding helper named home.helper, let's run ninjahelper command to create a new backup action:
# ninjahelper
And choose create a new backup action.
![](https://farm8.staticflickr.com/7467/15322605273_90edaa5bc1_z.jpg)
We will now be presented with the available action types. Let's select "backup of home directories":
![](https://farm9.staticflickr.com/8636/15754955450_f3ef82217b_z.jpg)
The next screens will display the default values as set in the helper (only 3 of them are shown here). Feel free to edit the values in the text box. Particularly, refer to the scheduling section of the documentation for the right syntax for the when variable.
![](https://farm8.staticflickr.com/7508/15941578982_24b680e1c3_z.jpg)
![](https://farm8.staticflickr.com/7562/15916429476_6e84b307aa_z.jpg)
![](https://farm8.staticflickr.com/7528/15319968994_41705b7283_z.jpg)
When you are done creating the backup action, it will show in ninjahelper's initial menu:
![](https://farm8.staticflickr.com/7534/15942239225_bb66dbdb63.jpg)
Then you can press ENTER to show the options available for this action. Feel free to experiment with them, as their description is quite straightforward.
Particularly, "run this action now" will execute the backup action in debug mode immediately regardless of the scheduled time:
![](https://farm8.staticflickr.com/7508/15754955470_9af6251096_z.jpg)
Should the backup action fail for some reason, the debug will display an informative message to help you locate the error and correct it. Consider, for example, the following error messages that were displayed after running a backup action with bugs that have not been corrected yet:
![](https://farm9.staticflickr.com/8662/15754955480_487d040fcd_z.jpg)
The image above tells you that the connection needed to complete the backup action could not be completed because the remote host seems to be down. In addition, the destination directory specified in the helper file does not exist. Once you correct the problems, re-run the backup action.
A few things to remember:
- If you create a custom script in /usr/share/backupninja (e.g., foobar) to handle a specific backup action, you also need to write a corresponding helper (e.g., foobar.helper) in order to create, through ninjahelper, a file named 10.foobar (11 and onward for further actions as well) in /etc/backup.d, which is the actual configuration file for the backup action.
- You can execute your backups at any given time via ninjahelper as explained earlier, or have them run as per the specified frequency in the when variable.
### Summary ###
In this post we have discussed how to create our own backup actions from scratch and how to add a related menu in ninjahelper to facilitate the creation of configuration files. With the previous [backupninja article][2] and the present one I hope I've given you enough good reasons to go ahead and at least try it.
--------------------------------------------------------------------------------
via: http://xmodulo.com/create-custom-backup-plan-debian.html
作者:[Gabriel Cánepa][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://xmodulo.com/author/gabriel
[1]:http://xmodulo.com/backup-debian-system-backupninja.html
[2]:http://xmodulo.com/backup-debian-system-backupninja.html

View File

@ -0,0 +1,197 @@
Centralized Secure Storage (iSCSI) “Initiator Client” Setup on RHEL/CentOS/Fedora Part III
================================================================================
**iSCSI** initiators are the clients which authenticate with iSCSI target servers to access the LUNs shared from the target server. We can deploy any kind of operating system on those locally mounted disks; just a single package needs to be installed to get authenticated with the target server.
![Client Initiator Setup](http://www.tecmint.com/wp-content/uploads/2014/07/Client-Initiator-Setup.jpg)
Client Initiator Setup
#### Features ####
- Can handle any kind of file systems in locally mounted Disk.
- No need of restarting the system after partitioning using fdisk.
#### Requirements ####
- [Create Centralized Secure Storage using iSCSI Target Part 1][1]
- [Create LUNs using LVM in Target Server Part 2][2]
#### My Client Setup for Initiator ####
- Operating System CentOS release 6.5 (Final)
- iSCSI Target IP 192.168.0.50
- Ports Used : TCP 3260
**Warning**: Never stop the service while LUNs Mounted in Client machines (Initiator).
### Initiator Client Setup ###
**1.** In Client side, we need to install the package **iSCSI-initiator-utils**, search for the package using following command.
# yum search iscsi
**Sample Output**
============================= N/S Matched: iscsi ================================
iscsi-initiator-utils.x86_64 : iSCSI daemon and utility programs
iscsi-initiator-utils-devel.x86_64 : Development files for iscsi-initiator-utils
**2.** Once you locate the package, just install the initiator package using yum command as shown.
# yum install iscsi-initiator-utils.x86_64
**3.** After installing the package, we need to discover the share from **Target server**. The client side commands little hard to remember, so we can use man page to get the list of commands which required to run.
# man iscsiadm
![man iscsiadm](http://www.tecmint.com/wp-content/uploads/2014/07/man-iscsiadm.jpg)
man iscsiadm
**4.** Press **SHIFT+G** to Navigate to the Bottom of the man page and scroll little up to get the login example commands. We need to replace our **Target servers IP** address in below command Discover the Target.
# iscsiadm --mode discoverydb --type sendtargets --portal 192.168.0.200 --discover
**5.** Here we got the iSCSI (iqn) qualified name from above command execution.
192.168.0.200:3260,1 iqn.2014-07.com.tecmint:tgt1
![Discover Target](http://www.tecmint.com/wp-content/uploads/2014/07/Discover-Target.jpg)
Discover Target
**6.** To log-in use the below command to attach the LUN to our local System, this will authenticate with target server and allow us to log-in into LUN.
# iscsiadm --mode node --targetname iqn.2014-07.com.tecmint:tgt1 --portal 192.168.0.200:3260 --login
![Login To Target Server](http://www.tecmint.com/wp-content/uploads/2014/07/Login-To-Target-Server.jpg)
Login To Target Server
**Note**: Use the login command and replace login with logout at end of command.
# iscsiadm --mode node --targetname iqn.2014-07.com.tecmint:tgt1 --portal 192.168.0.200:3260 --logout
![Logout from Target Server](http://www.tecmint.com/wp-content/uploads/2014/07/Logout-from-Target-Server.jpg)
Logout from Target Server
**7.** After login to the LUN, list the records of Node using.
# iscsiadm --mode node
![List Node](http://www.tecmint.com/wp-content/uploads/2014/07/List-Node.jpg)
List Node
**8.** Display all data of a particular node.
# iscsiadm --mode node --targetname iqn.2014-07.com.tecmint:tgt1 --portal 192.168.0.200:3260
**Sample Output**
# BEGIN RECORD 6.2.0-873.10.el6
node.name = iqn.2014-07.com.tecmint:tgt1
node.tpgt = 1
node.startup = automatic
node.leading_login = No
iface.hwaddress = <empty>
iface.ipaddress = <empty>
iface.iscsi_ifacename = default
iface.net_ifacename = <empty>
iface.transport_name = tcp
iface.initiatorname = <empty>
iface.bootproto = <empty>
iface.subnet_mask = <empty>
iface.gateway = <empty>
iface.ipv6_autocfg = <empty>
iface.linklocal_autocfg = <empty>
....
**9.** Then list the drive using, fdisk will list every authenticated disks.
# fdisk -l /dev/sda
![List Disks](http://www.tecmint.com/wp-content/uploads/2014/07/List-Disks.jpg)
List Disks
**10.** Run fdisk to create a new partition.
# fdisk -cu /dev/sda
![Create New Partition](http://www.tecmint.com/wp-content/uploads/2014/07/Create-New-Partition.jpg)
Create New Partition
**Note**: After Creating a Partition using fdisk, we dont need to reboot, as we used to do in our local systems, Because this is a remote shared storage mounted locally.
**11.** Format the newly created partition.
# mkfs.ext4 /dev/sda1
![Format New Partition](http://www.tecmint.com/wp-content/uploads/2014/07/Format-New-Partition.jpg)
Format New Partition
**12.** Create a Directory and mount the formatted partition.
# mkdir /mnt/iscsi_share
# mount /dev/sda1 /mnt/iscsi_share/
# ls -l /mnt/iscsi_share/
![Mount New Partition](http://www.tecmint.com/wp-content/uploads/2014/07/Mount-New-Partition.jpg)
Mount New Partition
**13.** List the Mount Points.
# df -Th
- **-T** Prints files system types.
- **-h** Prints in human readable format eg : Megabyte or Gigabyte.
![List New Partition](http://www.tecmint.com/wp-content/uploads/2014/07/List-New-Partition.jpg)
List New Partition
**14.** If we need to permanently mount the Drive use fstab entry.
# vim /etc/fstab
**15.** Append the following Entry in fstab.
/dev/sda1 /mnt/iscsi_share/ ext4 defaults,_netdev 0 0
**Note:** Use _netdev in fstab, as this is a network device.
![Auto Mount Partition](http://www.tecmint.com/wp-content/uploads/2014/07/Auto-Mount-Partition.jpg)
Auto Mount Partition
**16.** Finally check whether our fstab entry have any error.
# mount -av
- **-a** all mount point
- **-v** Verbose
![Verify fstab Entries](http://www.tecmint.com/wp-content/uploads/2014/07/Verify-fstab-Entries.jpg)
Verify fstab Entries
We have Completed Our client side configuration Successfully. Start to use the drive as we use our local system disk.
--------------------------------------------------------------------------------
via: http://www.tecmint.com/iscsi-initiator-client-setup/
作者:[Babin Lonston][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://www.tecmint.com/author/babinlonston/
[1]:http://www.tecmint.com/create-centralized-secure-storage-using-iscsi-targetin-linux/
[2]:http://www.tecmint.com/create-luns-using-lvm-in-iscsi-target/

View File

@ -0,0 +1,148 @@
Create Centralized Secure Storage using iSCSI Target on RHEL/CentOS/Fedora Part -I
================================================================================
**iSCSI** is a block level Protocol for sharing **RAW Storage Devices** over TCP/IP Networks, Sharing and accessing Storage over iSCSI, can be used with existing IP and Ethernet networks such as NICs, Switched, Routers etc. iSCSI target is a remote hard disk presented from an remote iSCSI server (or) target.
![Install iSCSI Target in Linux](http://www.tecmint.com/wp-content/uploads/2014/07/Install-iSCSI-Target-in-Linux.jpg)
Install iSCSI Target in Linux
We dont need a high resource for stable connectivity and performance in Client sides. iSCSI Server called as Target, this shares the storage from server. iSCSI Clients called as Initiator, this will access the storage which shared from Target Server. There are iSCSI adapters available in market for Large Storage services such as SAN Storages.
**Why we need a iSCSI adapter for Large storage Area?**
Ethernet adapters (NIC) are designed to transfer packetized file level data among systems, servers and storage devices like NAS storages, they are not capable for transferring block level data over Internet.
### Features of iSCSI Target ###
- Possible to run several iSCSI targets on a single machine.
- A single machine making multiple iscsi target available on the iSCSI SAN
- The target is the Storage and makes it available for initiator (Client) over the network
- These Storages are Pooled together to make available to the network is iSCSI LUNs (Logical Unit Number).
- iSCSI supports multiple connections within the same session
- iSCSI initiator discover the targets in network then authenticating and login with LUNs, to get the remote storage locally.
- We can Install any Operating systems in those locally mounted LUNs as what we used to install in our Base systems.
### Why the need of iSCSI? ###
In Virtualization we need storage with high redundancy, stability, iSCSI provides those all in low cost. Creating a SAN Storage in low price while comparing to Fiber Channel SANs, We can use the standard equipments for building a SAN using existing hardware such as NIC, Ethernet Switched etc..
Let start to get install and configure the centralized Secure Storage using iSCSI Target. For this guide, Ive used following setups.
- We need separate 1 systems to Setup the iSCSI Target Server and Initiator (Client).
- Multiple numbers of Hard disk can be added in large storage environment, But we here using only 1 additional drive except Base installation disk.
- Here we using only 2 drives, One for Base server installation, Other one for Storage (LUNs) which we going to create in PART-II of this series.
#### Master Server Setup ####
- Operating System CentOS release 6.5 (Final)
- iSCSI Target IP 192.168.0.200
- Ports Used : TCP 860, 3260
- Configuration file : /etc/tgt/targets.conf
## Installing iSCSI Target ##
Open terminal and use yum command to search for the package name which need to get install for iscsi target.
# yum search iscsi
#### Sample Output ####
========================== N/S matched: iscsi =======================
iscsi-initiator-utils.x86_64 : iSCSI daemon and utility programs
iscsi-initiator-utils-devel.x86_64 : Development files for iscsi-initiator-utils
lsscsi.x86_64 : List SCSI devices (or hosts) and associated information
scsi-target-utils.x86_64 : The SCSI target daemon and utility programs
We got the search result as above, choose the **Target** package and install to play around.
# yum install scsi-target-utils -y
![Install iSCSI Utils](http://www.tecmint.com/wp-content/uploads/2014/07/Install-iSCSI-in-Linux.jpg)
Install iSCSI Utils
List the installed package to know the default config, service, and man page location.
# rpm -ql scsi-target-utils.x86_64
![List All iSCSI Files](http://www.tecmint.com/wp-content/uploads/2014/07/List-All-ISCSI-Files.jpg)
List All iSCSI Files
Lets start the iSCSI Service, and check the status of Service up and running, iSCSI service named as **tgtd**.
# /etc/init.d/tgtd start
# /etc/init.d/tgtd status
![Start iSCSI Service](http://www.tecmint.com/wp-content/uploads/2014/07/Start-iSCSI-Service.jpg)
Start iSCSI Service
Now we need to configure it to start Automatically while system start-up.
# chkconfig tgtd on
Next, verify that the run level configured correctly for the tgtd service.
# chkconfig --list tgtd
![Enable iSCSI on Startup](http://www.tecmint.com/wp-content/uploads/2014/07/Enable-iSCSI-on-Startup.jpg)
Enable iSCSI on Startup
Lets use **tgtadm** to list what targets and LUNS we currently got configured in our Server.
# tgtadm --mode target --op show
The **tgtd** installed up and running, but there is no **Output** from the above command because we have not yet defined the LUNs in Target Server. For manual page, Run **man** command.
# man tgtadm
![iSCSI Man Pages](http://www.tecmint.com/wp-content/uploads/2014/07/iSCSI-Man-Pages.jpg)
iSCSI Man Pages
Finally we need to add iptables rules for iSCSI if there is iptables deployed in your target Server. First, find the Port number of iscsi target using following netstat command, The target always listens on TCP port 3260.
# netstat -tulnp | grep tgtd
![Find iSCSI Port](http://www.tecmint.com/wp-content/uploads/2014/07/Find-iSCSI-Port.jpg)
Find iSCSI Port
Next add the following rules to allow iptables to Broadcast the iSCSI target discovery.
# iptables -A INPUT -i eth0 -p tcp --dport 860 -m state --state NEW,ESTABLISHED -j ACCEPT
# iptables -A INPUT -i eth0 -p tcp --dport 3260 -m state --state NEW,ESTABLISHED -j ACCEPT
![Open iSCSI Ports](http://www.tecmint.com/wp-content/uploads/2014/07/Open-iSCSI-Ports.jpg)
Open iSCSI Ports
![Add iSCSI Ports to Iptables](http://www.tecmint.com/wp-content/uploads/2014/07/Add-iSCSI-Ports-to-Iptables.jpg)
Add iSCSI Ports to Iptables
**Note**: Rule may vary according to your **Default CHAIN Policy**. Then save the Iptables and restart the iptables.
# iptables-save
# /etc/init.d/iptables restart
![Restart iptables](http://www.tecmint.com/wp-content/uploads/2014/07/Restart-iptables.jpg)
Restart iptables
Here we have deployed a target server to share LUNs to any initiator which authenticating with target over TCP/IP, This suitable for small to large scale production environments too.
In my next upcoming articles, I will show you how to [Create LUNs using LVM in Target Server][1] and how to share LUNs on Client machines, till then stay tuned to TecMint for more such updates and dont forget to give valuable comments.
--------------------------------------------------------------------------------
via: http://www.tecmint.com/create-centralized-secure-storage-using-iscsi-targetin-linux/
作者:[Babin Lonston][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://www.tecmint.com/author/babinlonston/
[1]:http://www.tecmint.com/create-luns-using-lvm-in-iscsi-target/

View File

@ -0,0 +1,230 @@
How to Create and Setup LUNs using LVM in “iSCSI Target Server” on RHEL/CentOS/Fedora Part II
================================================================================
LUN is a Logical Unit Number, which shared from the iSCSI Storage Server. The Physical drive of iSCSI target server shares its drive to initiator over TCP/IP network. A Collection of drives called LUNs to form a large storage as SAN (Storage Area Network). In real environment LUNs are defined in LVM, if so it can be expandable as per space requirements.
![Create LUNS using LVM in Target Server](http://www.tecmint.com/wp-content/uploads/2014/07/Create-LUNS-inLVM.png)
Create LUNS using LVM in Target Server
### Why LUNS are Used? ###
LUNS used for storage purpose, SAN Storages are build with mostly Groups of LUNS to become a pool, LUNs are Chunks of a Physical disk from target server. We can use LUNS as our systems Physical Disk to install Operating systems, LUNS are used in Clusters, Virtual servers, SAN etc. The main purpose of Using LUNS in Virtual servers for OS storage purpose. LUNS performance and reliability will be according to which kind of disk we using while creating a Target storage server.
### Requirements ###
To know about creating a ISCSI Target Server follow the below link.
- [Create Centralized Secure Storage using iSCSI Target Part I][1]
#### Master Server Setup ####
System informations and Network setup are same as iSCSI Target Server as shown in Part I, As we are defining LUNs in same server.
- Operating System CentOS release 6.5 (Final)
- iSCSI Target IP 192.168.0.200
- Ports Used : TCP 860, 3260
- Configuration file : /etc/tgt/targets.conf
## Creating LUNs using LVM in iSCSI Target Server ##
First, find out the list of drives using **fdisk -l** command, this will manipulate a long list of information of every partitions on the system.
# fdisk -l
The above command only gives the drive informations of base system. To get the storage device information, use the below command to get the list of storage devices.
# fdisk -l /dev/vda && fdisk -l /dev/sda
![List Storage Drives](http://www.tecmint.com/wp-content/uploads/2014/07/1.jpg)
List Storage Drives
**NOTE**: Here **vda** is virtual machines hard drive as Im using virtual machine for demonstration, **/dev/sda** is added additionally for storage.
### Step 1: Creating LVM Drive for LUNs ###
We going to use **/dev/sda** drive for creating a LVM.
# fdisk -l /dev/sda
![List LVM Drive](http://www.tecmint.com/wp-content/uploads/2014/07/2.jpg)
List LVM Drive
Now lets Partition the drive using fdisk command as shown below.
# fdisk -cu /dev/sda
- The option **-c** switch off the DOS compatible mode.
- The option **-u** is used to listing partition tables, give sizes in sectors instead of cylinders.
Choose **n** to create a New Partition.
Command (m for help): n
Choose **p** to create a Primary partition.
Command action
e extended
p primary partition (1-4)
Give a Partition number which we need to create.
Partition number (1-4): 1
As here, we are going to setup a LVM drive. So, we need to use the default settings to use full size of Drive.
First sector (2048-37748735, default 2048):
Using default value 2048
Last sector, +sectors or +size{K,M,G} (2048-37748735, default 37748735):
Using default value 37748735
Choose the type of partition, Here we need to setup a LVM so use **8e**. Use **l** option to see the list of type.
Command (m for help): t
Choose which partition want to change the type.
Selected partition 1
Hex code (type L to list codes): 8e
Changed system type of partition 1 to 8e (Linux LVM)
After changing the type, check the changes by print (**p**) option to list the partition table.
Command (m for help): p
Disk /dev/sda: 19.3 GB, 19327352832 bytes
255 heads, 63 sectors/track, 2349 cylinders, total 37748736 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk identifier: 0x9fae99c8
Device Boot Start End Blocks Id System
/dev/sda1 2048 37748735 18873344 8e Linux LVM
Write the changes using **w** to exit from fdisk utility, Restart the system to make changes.
For your reference, Ive attached screen shot below that will give you a clear idea about creating LVM drive.
![Create LVM Partition](http://www.tecmint.com/wp-content/uploads/2014/07/3.jpg)
Create LVM Partition
After system reboot, list the Partition table using the following fdisk command.
# fdisk -l /dev/sda
![Verify LVM Partition](http://www.tecmint.com/wp-content/uploads/2014/07/4.jpg)
Verify LVM Partition
### Step 2: Creating Logical Volumes for LUNs ###
Now here, we are going to create a Physical volume using the pvcreate command.
# pvcreate /dev/sda1
Create a Volume group with name of iSCSI to identify the group.
# vgcreate vg_iscsi /dev/sda1
Here Im defining 4 Logical Volumes, if so there will be 4 LUNs in our iSCSI Target server.
# lvcreate -L 4G -n lv_iscsi vg_iscsi
# lvcreate -L 4G -n lv_iscsi-1 vg_iscsi
# lvcreate -L 4G -n lv_iscsi-2 vg_iscsi
# lvcreate -L 4G -n lv_iscsi-3 vg_iscsi
List the Physical volume, Volume group, logical volumes to confirm.
# pvs && vgs && lvs
# lvs
For better understanding of the above command, for your reference Ive included a screen grab below.
![Creating LVM Logical Volumes](http://www.tecmint.com/wp-content/uploads/2014/07/5.jpg)
Creating LVM Logical Volumes
![Verify LVM Logical Volumes](http://www.tecmint.com/wp-content/uploads/2014/07/6.jpg)
Verify LVM Logical Volumes
### Step 3: Define LUNs in Target Server ###
We have created Logical Volumes and ready to use with LUN, here we to define the LUNs in target configuration, if so only it will be available for client machines (Initiators).
Open and edit Targer configuration file located at /etc/tgt/targets.conf with your choice of editor.
# vim /etc/tgt/targets.conf
Append the following volume definition in target conf file. Save and close the file.
<target iqn.2014-07.com.tecmint:tgt1>
backing-store /dev/vg_iscsi/lv_iscsi
</target>
<target iqn.2014-07.com.tecmint:tgt1>
backing-store /dev/vg_iscsi/lv_iscsi-1
</target>
<target iqn.2014-07.com.tecmint:tgt1>
backing-store /dev/vg_iscsi/lv_iscsi-2
</target>
<target iqn.2014-07.com.tecmint:tgt1>
backing-store /dev/vg_iscsi/lv_iscsi-3
</target>
![Configure LUNs in Target Server](http://www.tecmint.com/wp-content/uploads/2014/07/7.jpg)
Configure LUNs in Target Server
- iSCSI qualified name (iqn.2014-07.com.tecmint:tgt1).
- Use what ever as your wish.
- Identify using target, 1st target in this Server.
- 4. LVM Shared for particular LUN.
Next, reload the configuration by restarting the **tgtd** service as shown below.
# /etc/init.d/tgtd reload
![Reload Configuration](http://www.tecmint.com/wp-content/uploads/2014/07/8.jpg)
Reload Configuration
Next verify the available LUNs using the following command.
# tgtadm --mode target --op show
![List Available LUNs](http://www.tecmint.com/wp-content/uploads/2014/07/9.jpg)
List Available LUNs
![LUNs Information](http://www.tecmint.com/wp-content/uploads/2014/07/10.jpg)
LUNs Information
The above command will give long list of available LUNs with following information.
- iSCSI Qualified Name
- iSCSI is Ready to Use
- By Default LUN 0 will be reserved for Controller
- LUN 1, What we have Defined in the Target server
- Here i have defined 4 GB for a single LUN
- Online : Yes, Its ready to Use the LUN
Here we have defined the LUNs for target server using LVM, this can be expandable and support for many features such as snapshots. Let us see how to authenticate with Target server in PART-III and mount the remote Storage locally.
--------------------------------------------------------------------------------
via: http://www.tecmint.com/create-luns-using-lvm-in-iscsi-target/
作者:[Babin Lonston][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://www.tecmint.com/author/babinlonston/
[1]:http://www.tecmint.com/create-centralized-secure-storage-using-iscsi-targetin-linux/

View File

@ -0,0 +1,81 @@
What is good audio editing software on Linux
================================================================================
Whether you are an amateur musician or just a student recording his professor, you need to edit and work with audio recordings. If for a long time such task was exclusively attributed to Macintosh, this time is over, and Linux now has what it takes to do the job. In short, here is a non-exhaustive list of good audio editing software, fit for different tasks and needs.
### 1. Audacity ###
![](https://farm9.staticflickr.com/8572/15405018653_83ba3e718d_c.jpg)
Let's get started head on with my personal favorite. [Audacity][1] works on Windows, Mac, and Linux. It is open source. It is easy to use. You get it: Audacity is almost perfect. This program lets you manipulate the audio waveform from a clean interface. In short, you can overlay tracks, cut and edit them easily, apply effects, perform advanced sound analysis, and finally export to a plethora of formats. The reason I like it so much is that it combines both basic features with more complicated ones, but maintains an easy learning curve. However, it is not a fully optimized software for hardcore musicians, or people with professional knowledge.
### 2. Jokosher ###
![](https://farm8.staticflickr.com/7524/15998875136_82903a9b4a_c.jpg)
On a different level, [Jokosher][2] focuses more on the multi-track aspect for musical artists. Developed in Python and using the GTK+ interface with GStreamer for audio back-end, Jokosher really impressed me with its slick interface and its extensions. If the editing features are not the most advanced, the language is clear and directed to musicians. And I really like the association between tracks and instruments for example. In short, if you are starting as a musician, it might be a good place to get some experience before moving on to more complex suites.
### 3. Ardour ###
![](https://farm9.staticflickr.com/8577/16024644385_d8cd8073a3_c.jpg)
And talking about complex suites, [Ardour][3] is complete software for recording, editing, and mixing. Designed this time to appeal to all professionals, Ardour features in terms of sound routing and plugins go way beyond my comprehension. So if you are looking for a beast and are not afraid to tame it, Ardour is probably a good pick. Again, the interface contributes to its charm, as well as its extensive documentation. I particularly appreciated the first-launch configuration tool.
### 4. Kwave ###
![](https://farm8.staticflickr.com/7557/15402389884_633a8b04c5_c.jpg)
For all KDE lovers, [KWave][4] corresponds to your idea of design and features. There are plenty of shortcuts and interesting options, like memory management. Even if the few effects are nice, we are more dealing with a simple tool to cut/paste audio together. It becomes hard not to compare it with Audacity unfortunately. And on top of that, the interface did not appeal to me that much.
### 5. Qtractor ###
![](https://farm8.staticflickr.com/7551/16022707501_68c39f37e5_c.jpg)
If Kwave is too simplistic for you but a Qt-based program really has some appeal, then [Qtractor][5] might be your option. It aims to be "simple enough for the average home user, and yet powerful enough for the professional user." Indeed the quantity of features and options is almost overwhelming. My favorite being of course customizable shortcuts. Apart from that, Qtractor is probably one of my favorite tools to deal with MIDI files.
### 6. LMMS ###
![](https://farm8.staticflickr.com/7509/15838603239_ef0ecbc8d2_c.jpg)
Standing for Linux MultiMedia Studio, LMMS is directly targeted for music production. If you do not have prior experience and do not want to spend too much time getting some, go elsewhere. LMMS is one of those complex but powerful software that only a few will truly master. The number of features and effects is simply too long to list, but if I had to pick one, I would say that the Freeboy plugin to emulate Game Boy sound system is just magical. Past that, go see their amazing documentation.
### 7. Traverso ###
![](https://farm8.staticflickr.com/7537/15838603279_70ee925057_c.jpg)
Finally, Traverso stood out to me for its unlimited track count and its direct integration with CD burning capacities. Aside from that, it appeared to me as a middle man between a simplistic software and a professional program. The interface is very KDE-like, and the keyboard configuration is always welcome. And cherry on the cake, Traverso monitors your resources and make sure that your CPU or hard drive does not go overboard.
To conclude, it is always a pleasure to see such a large diversity of applications on Linux. It makes finding the software that best fits your needs always possible. While my personal favorite stays Audacity, I was very surprised by the design of programs like LMMS or Jokosher.
Did we miss one? What do you use for audio editing on Linux? And why? Let us know in the comments.
--------------------------------------------------------------------------------
via: http://xmodulo.com/good-audio-editing-software-linux.html
作者:[Adrien Brochard][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://xmodulo.com/author/adrien
[1]:http://audacity.sourceforge.net/
[2]:https://launchpad.net/jokosher/
[3]:http://ardour.org/
[4]:http://kwave.sourceforge.net/
[5]:http://qtractor.sourceforge.net/qtractor-index.html
[6]:
[7]:
[8]:
[9]:
[10]:
[11]:
[12]:
[13]:
[14]:
[15]:
[16]:
[17]:
[18]:
[19]:
[20]:

View File

@ -0,0 +1,307 @@
Translating by GOLinux!
Setting up a PXE Network Boot Server for Multiple Linux Distribution Installations in RHEL/CentOS 7
================================================================================
**PXE Server**  Preboot eXecution Environment  instructs a client computer to boot, run or install an operating system directly from a network interface, eliminating the need to burn a CD/DVD or use a physical medium, or, can ease the job of installing Linux distributions on your network infrastructure on multiple machines the same time.
![Setting PXE Network Boot in RHEL/CentOS 7](http://www.tecmint.com/wp-content/uploads/2014/10/Setting-PXE-Network-Boot-in-CentOS.png)
Setting PXE Network Boot in RHEL/CentOS 7
#### Requirements ####
- [CentOS 7 Minimal Installation Procedure][1]
- [RHEL 7 Minimal Installation Procedure][2]
- [Configure Static IP Address in RHEL/CentOS 7][3]
- [Remove Unwanted Services in RHEL/CentOS 7][4]
- [Install NTP Server to Set Correct System Time in RHEL/CentOS 7][5]
This article will explain how you can install and configure a **PXE Server** on **RHEL/CentOS 7** x64-bit with mirrored local installation repositories, sources provided by CentOS 7 DVD ISO image, with the help of **DNSMASQ** Server.
Which provides **DNS** and **DHCP** services, **Syslinux** package which provides bootloaders for network booting, **TFTP-Server**, which makes bootable images available to be downloaded via network using **Trivial File Transfer Protocol** (TFTP) and **VSFTPD** Server which will host the local mounted mirrored DVD image which will act as an official RHEL/CentOS 7 mirror installation repository from where the installer will extract its required packages.
### Step 1: Install and configure DNSMASQ Server ###
**1.** No need to remind you that is absolutely demanding that one of your network card interface, in case your server poses more NICs, must be configured with a static IP address from the same IP range that belongs to the network segment that will provide PXE services.
So, after you have configured your static IP Address, updated your system and performed other initial settings, use the following command to install **DNSMASQ** daemon.
# yum install dnsmasq
![Install dnsmasq Package](http://www.tecmint.com/wp-content/uploads/2014/10/Install-dnsmasq-in-CentOS.jpg)
Install dnsmasq Package
**2.** DNSMASQ main default configuration file located in **/etc** directory is self-explanatory but tends to be quite difficult to edit, due to its highly commented explanations.
First make sure you backup this file in case you need to review it later and, then, create a new blank configuration file using your favorite text editor by issuing the following commands.
# mv /etc/dnsmasq.conf /etc/dnsmasq.conf.backup
# nano /etc/dnsmasq.conf
**3.** Now, copy and paste the following configurations on **dnsmasq.conf** file and assure that you change the below explained statements to match your network settings accordingly.
interface=eno16777736,lo
#bind-interfaces
domain=centos7.lan
# DHCP range-leases
dhcp-range= eno16777736,192.168.1.3,192.168.1.253,255.255.255.0,1h
# PXE
dhcp-boot=pxelinux.0,pxeserver,192.168.1.20
# Gateway
dhcp-option=3,192.168.1.1
# DNS
dhcp-option=6,192.168.1.1, 8.8.8.8
server=8.8.4.4
# Broadcast Address
dhcp-option=28,10.0.0.255
# NTP Server
dhcp-option=42,0.0.0.0
pxe-prompt="Press F8 for menu.", 60
pxe-service=x86PC, "Install CentOS 7 from network server 192.168.1.20", pxelinux
enable-tftp
tftp-root=/var/lib/tftpboot
![Dnsmasq Configuration](http://www.tecmint.com/wp-content/uploads/2014/10/dnsmasq-configuration.jpg)
Dnsmasq Configuration
The statements that you need to change are follows:
- **interface** Interfaces that the server should listen and provide services.
- **bind-interfaces** Uncomment to bind only on this interface.
- **domain** Replace it with your domain name.
- **dhcp-range** Replace it with IP range defined by your network mask on this segment.
- **dhcp-boot** Replace the IP statement with your interface IP Address.
- **dhcp-option=3,192.168.1.1** Replace the IP Address with your network segment Gateway.
- **dhcp-option=6,192.168.1.1**  Replace the IP Address with your DNS Server IP  several DNS IPs can be defined.
- **server=8.8.4.4** Put your DNS forwarders IPs Addresses.
- **dhcp-option=28,10.0.0.255** Replace the IP Address with network broadcast address optionally.
- **dhcp-option=42,0.0.0.0** Put your network time servers optionally (0.0.0.0 Address is for self-reference).
- **pxe-prompt**  Leave it as default  means to hit F8 key for entering the menu, with a 60 seconds wait time.
- **pxe-service**  Use x86PC for 32-bit/64-bit architectures and enter a menu description prompt under string quotes. Other values types can be: PC98, IA64_EFI, Alpha, Arc_x86, Intel_Lean_Client, IA32_EFI, BC_EFI, Xscale_EFI and X86-64_EFI.
- **enable-tftp**  Enables the built-in TFTP server.
- **tftp-root** Use /var/lib/tftpboot the location for all netbooting files.
For other advanced options concerning configuration file feel free to read [dnsmasq manual][6].
### Step 2: Install SYSLINUX Bootloaders ###
**4.** After you have edited and saved **DNSMASQ** main configuration file, go ahead and install **Syslinux** PXE bootloader package by issuing the following command.
# yum install syslinux
![Install Syslinux Bootloaders](http://www.tecmint.com/wp-content/uploads/2014/10/install-syslinux-bootloaders.jpg)
Install Syslinux Bootloaders
**5.** The PXE bootloaders files reside in **/usr/share/syslinux** absolute system path, so you can check it by listing this path content. This step is optional, but you might need to be aware of this path because on the next step, we will copy of all its content to **TFTP Server** path.
# ls /usr/share/syslinux
![Syslinux Files](http://www.tecmint.com/wp-content/uploads/2014/10/syslinux-files.jpg)
Syslinux Files
### Step 3: Install TFTP-Server and Populate it with SYSLINUX Bootloaders ###
**6.** Now, let's move to the next step and install **TFTP-Server** and, then, copy all bootloader files provided by Syslinux package from the above listed location to **/var/lib/tftpboot** path by issuing the following commands.
# yum install tftp-server
# cp -r /usr/share/syslinux/* /var/lib/tftpboot
![Install TFTP Server](http://www.tecmint.com/wp-content/uploads/2014/10/install-tftp-server.jpg)
Install TFTP Server
### Step 4: Setup PXE Server Configuration File ###
**7.** Typically the **PXE Server** reads its configuration from a group of specific files (**GUID** files first, **MAC** files next, **Default** file last) hosted in a folder called **pxelinux.cfg**, which must be located in the directory specified in **tftp-root** statement from DNSMASQ main configuration file.
Create the required directory **pxelinux.cfg** and populate it with a **default** file by issuing the following commands.
# mkdir /var/lib/tftpboot/pxelinux.cfg
# touch /var/lib/tftpboot/pxelinux.cfg/default
**8.** Now its time to edit **PXE Server** configuration file with valid Linux distributions installation options. Also note that all paths used in this file must be relative to the **/var/lib/tftpboot** directory.
Below you can see an example configuration file that you can use it, but modify the installation images (kernel and initrd files), protocols (FTP, HTTP, HTTPS, NFS) and IPs to reflect your network installation source repositories and paths accordingly.
# nano /var/lib/tftpboot/pxelinux.cfg/default
Add the following whole excerpt to the file.
default menu.c32
prompt 0
timeout 300
ONTIMEOUT local
menu title ########## PXE Boot Menu ##########
label 1
menu label ^1) Install CentOS 7 x64 with Local Repo
kernel centos7/vmlinuz
append initrd=centos7/initrd.img method=ftp://192.168.1.20/pub devfs=nomount
label 2
menu label ^2) Install CentOS 7 x64 with http://mirror.centos.org Repo
kernel centos7/vmlinuz
append initrd=centos7/initrd.img method=http://mirror.centos.org/centos/7/os/x86_64/ devfs=nomount ip=dhcp
label 3
menu label ^3) Install CentOS 7 x64 with Local Repo using VNC
kernel centos7/vmlinuz
append initrd=centos7/initrd.img method=ftp://192.168.1.20/pub devfs=nomount inst.vnc inst.vncpassword=password
label 4
menu label ^4) Boot from local drive
![Configure PXE Server](http://www.tecmint.com/wp-content/uploads/2014/10/configure-pxe-server.jpg)
Configure PXE Server
As you can see CentOS 7 boot images (kernel and initrd) reside in a directory named **centos7** relative to **/var/lib/tftpboot** (on an absolute system path this would mean **/var/lib/tftpboot/centos7**) and the installer repositories can be reached by using FTP protocol on **192.168.1.20/pub** network location (in this case the repos are hosted locally because the IP address is the same as the PXE server address).
Also menu **label 3** specifies that the client installation should be done from a remote location via **VNC** (here replace VNC password with a strong password) in case you install on a headless client and the menu **label 2** specifies as installation sources a CentOS 7 official Internet mirror (this case requires an Internet connection available on client through DHCP and NAT).
**Important**: As you see in the above configuration, we've used CentOS 7 for demonstration purpose, but you can also define RHEL 7 images, and following whole instructions and configurations are based on CentOS 7 only, so be careful while choosing distribution.
### Step 5: Add CentOS 7 Boot Images to PXE Server ###
**9.** For this step CentOS kernel and initrd files are required. To get those files you need the **CentOS 7 DVD ISO** Image. So, go ahead and download CentOS DVD Image, put it in your DVD drive and mount the image to **/mnt** system path by issuing the below command.
The reason for using the DVD and not a Minimal CD Image is the fact that later this DVD content would be used to create the locally installer repositories for **FTP** sources.
# mount -o loop /dev/cdrom /mnt
# ls /mnt
![Mount CentOS DVD](http://www.tecmint.com/wp-content/uploads/2014/10/mount-centos-dvd.jpg)
Mount CentOS DVD
If your machine has no DVD drive you can also download **CentOS 7 DVD ISO** locally using **wget** or **curl** utilities from a [CentOS mirror][7] and mount it.
# wget http://mirrors.xservers.ro/centos/7.0.1406/isos/x86_64/CentOS-7.0-1406-x86_64-DVD.iso
# mount -o loop /path/to/centos-dvd.iso /mnt
**10.** After the DVD content is made available, create the **centos7** directory and copy CentOS 7 bootable kernel and initrd images from the DVD mounted location to centos7 folder structure.
# mkdir /var/lib/tftpboot/centos7
# cp /mnt/images/pxeboot/vmlinuz /var/lib/tftpboot/centos7
# cp /mnt/images/pxeboot/initrd.img /var/lib/tftpboot/centos7
![Copy CentOS Bootable Files](http://www.tecmint.com/wp-content/uploads/2014/10/copy-centos-bootable-files.jpg)
Copy CentOS Bootable Files
The reason for using this approach is that, later you can create new separate directories in **/var/lib/tftpboot** path and add other Linux distributions to PXE menu without messing up the entire directory structure.
### Step 6: Create CentOS 7 Local Mirror Installation Source ###
**11.** Although you can setup **Installation Source Mirrors** via a variety of protocols such as HTTP, HTTPS or NFS, for this guide, I have chosen **FTP** protocol because it is very reliable and easy to setup with the help of **vsftpd** server.
Further install vsftpd daemon, copy all DVD mounted content to **vsftpd** default server path (**/var/ftp/pub**) this can take a while depending on your system resources and append readable permissions to this path by issuing the following commands.
# yum install vsftpd
# cp -r /mnt/* /var/ftp/pub/
# chmod -R 755 /var/ftp/pub
![Install Vsftpd Server](http://www.tecmint.com/wp-content/uploads/2014/10/install-vsftpd-in-centos.jpg)
Install Vsftpd Server
![Copy Files to FTP Path](http://www.tecmint.com/wp-content/uploads/2014/10/copy-files-to-ftp-path.jpg)
Copy Files to FTP Path
![Set Permissions on FTP Path](http://www.tecmint.com/wp-content/uploads/2014/10/set-permission-on-ftp-path.jpg)
Set Permissions on FTP Path
### Step 7: Start and Enable Daemons System-Wide ###
**12.** Now that the PXE server configuration is finally finished, start **DNSMASQ** and **VSFTPD** servers, verify their status and enable it system-wide, to automatically start after every system reboot, by running the below commands.
# systemctl start dnsmasq
# systemctl status dnsmasq
# systemctl start vsftpd
# systemctl status vsftpd
# systemctl enable dnsmasq
# systemctl enable vsftpd
![Start Dnsmasq Service](http://www.tecmint.com/wp-content/uploads/2014/10/start-dnsmasq.jpg)
Start Dnsmasq Service
![Start Vsftpd Service](http://www.tecmint.com/wp-content/uploads/2014/10/start-vsftpd.jpg)
Start Vsftpd Service
### Step 8: Open Firewall and Test FTP Installation Source ###
**13.** To get a list of all ports that needs to be open on your Firewall in order for client machines to reach and boot from PXE server, run **netstat** command and add CentOS 7 Firewalld rules accordingly to dnsmasq and vsftpd listening ports.
# netstat -tulpn
# firewall-cmd --add-service=ftp --permanent ## Port 21
# firewall-cmd --add-service=dns --permanent ## Port 53
# firewall-cmd --add-service=dhcp --permanent ## Port 67
# firewall-cmd --add-port=69/udp --permanent ## Port for TFTP
# firewall-cmd --add-port=4011/udp --permanent ## Port for ProxyDHCP
# firewall-cmd --reload ## Apply rules
![Check Listening Ports](http://www.tecmint.com/wp-content/uploads/2014/10/check-listening-ports.jpg)
Check Listening Ports
![Open Ports in Firewall](http://www.tecmint.com/wp-content/uploads/2014/10/open-ports-on-firewall.jpg)
Open Ports in Firewall
**14.** To test FTP Installation Source network path open a browser locally ([**lynx**][8] should do it) or on a different computer and type the IP Address of your PXE server with FTP protocol followed by **/pub** network location in the URL field and the result should be as presented in the below screenshot.
ftp://192.168.1.20/pub
![Access FTP Files via Browser](http://www.tecmint.com/wp-content/uploads/2014/10/browse-ftp-files.jpg)
Access FTP Files via Browser
**15.** To debug PXE server for eventual misconfigurations or other information and diagnostics in live mode run the following command.
# tailf /var/log/messages
![Check PXE Logs for Errors](http://www.tecmint.com/wp-content/uploads/2014/10/check-pxe-errors.jpg)
Check PXE Logs for Errors
**16.** Finally, the last required step that you need to do is to unmount CentOS 7 DVD and remove the physical medium.
# umount /mnt
### Step 9: Configure Clients to Boot from Network ###
**17.** Now your clients can boot and install CentOS 7 on their machines by configuring Network Boot as **primary boot device** from their systems BIOS or by hitting a specified key during **BIOS POST** operations as specified in motherboard manual.
In order to choose network booting: after the first PXE prompt appears, press the **F8** key to enter presentation and then hit the **Enter** key to proceed forward to the PXE menu.
![PXE Network Boot](http://www.tecmint.com/wp-content/uploads/2014/10/pxe-network-boot.jpg)
PXE Network Boot
![PXE Network OS Boot](http://www.tecmint.com/wp-content/uploads/2014/10/pxe-network-os-boot.jpg)
PXE Network OS Boot
**18.** Once you have reached PXE menu, choose your CentOS 7 installation type, hit **Enter** key and continue with the installation procedure the same way as you might install it from a local media boot device.
Please note down that using variant 2 from this menu requires an active Internet connection on the target client. Also, on below screenshots you can see an example of a client remote installation via VNC.
![PXE Menu](http://www.tecmint.com/wp-content/uploads/2014/10/pxe-menu.jpg)
PXE Menu
![Remote Linux Installation via VNC](http://www.tecmint.com/wp-content/uploads/2014/10/os-installation-via-vnc.jpg)
Remote Linux Installation via VNC
![Remote Installation of CentOS](http://www.tecmint.com/wp-content/uploads/2014/10/remote-centos-installation.jpg)
Remote Installation of CentOS
Thats all for setting up a minimal **PXE Server** on **CentOS 7**. On my next article from this series, I will discuss other issues concerning this PXE server configuration such as how to setup automated installations of **CentOS 7** using **Kickstart** files and adding other Linux distributions to PXE menu **Ubuntu Server** and **Debian 7**.
--------------------------------------------------------------------------------
via: http://www.tecmint.com/install-pxe-network-boot-server-in-centos-7/
作者:[Matei Cezar][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://www.tecmint.com/author/cezarmatei/
[1]:http://www.tecmint.com/centos-7-installation/
[2]:http://www.tecmint.com/redhat-enterprise-linux-7-installation/
[3]:http://www.tecmint.com/configure-network-interface-in-rhel-centos-7-0/
[4]:http://www.tecmint.com/remove-unwanted-services-in-centos-7/
[5]:http://www.tecmint.com/install-ntp-server-in-centos/
[6]:http://www.thekelleys.org.uk/dnsmasq/docs/dnsmasq-man.html
[7]:http://isoredirect.centos.org/centos/7/isos/x86_64/
[8]:http://www.tecmint.com/command-line-web-browsers/

View File

@ -0,0 +1,75 @@
Linux FAQs with Answers--How to check SSH protocol version on Linux
================================================================================
> **Question**: I am aware that there exist SSH protocol version 1 and 2 (SSH1 and SSH2). What is the difference between SSH1 and SSH2, and how can I check which SSH protocol version is supported on a Linux server?
Secure Shell (SSH) is a network protocol that enables remote login or remote command execution between two hosts over a cryptographically secure communication channel. SSH was designed to replace insecure clear-text protocols such as telnet, rsh or rlogin. SSH provides a number of desirable features such as authentication, encryption, data integrity, authorization, and forwarding/tunneling.
### SSH1 vs. SSH2 ###
The SSH protocol specification has a number of minor version differences, but there are two major versions of the protocol: **SSH1** (SSH version 1.XX) and **SSH2** (SSH version 2.00).
In fact, SSH1 and SSH2 are two entirely different protocols with no compatibility in between. SSH2 is a significantly improved version of SSH1 in many respects. First of all, while SSH1 is a monolithic design where several different functions (e.g., authentication, transport, connection) are packed into a single protocol, SSH2 is a layered architecture designed with extensibility and flexibility in mind. In terms of security, SSH2 comes with a number of stronger security features than SSH1, such as MAC-based integrity check, flexible session re-keying, fully-negotiable cryptographic algorithms, public-key certificates, etc.
SSH2 is standardized by IETF, and as such its implementation is widely deployed and accepted in the industry. Due to SSH2's popularity and cryptographic superiority over SSH1, many products are dropping support for SSH1. As of this writing, OpenSSH still [supports][1] both SSH1 and SSH2, while on all modern Linux distributions, OpenSSH server comes with SSH1 disabled by default.
### Check Supported SSH Protocol Version ###
#### Method One ####
If you want to check what SSH protocol version(s) are supported by a local OpenSSH server, you can refer to **/etc/ssh/sshd_config** file. Open /etc/ssh/sshd_config with a text editor, and look for "Protocol" field.
If it shows the following, it means that OpenSSH server supports SSH2 only.
Protocol 2
If it displays the following instead, OpenSSH server supports both SSH1 and SSH2.
Protocol 1,2
#### Method Two ####
If you cannot access /etc/ssh/sshd_config because OpenSSH server is running on a remote server, you can test its SSH protocol support by using SSH client program called ssh. More specifically, we force ssh to use a specific SSH protocol, and see how the remote SSH server responds.
The following command will force ssh command to use SSH1:
$ ssh -1 user@remote_server
The following command will force ssh command to use SSH2:
$ ssh -2 user@remote_server
If the remote SSH server supports SSH2 only, the first command with "-1" option will fail with an error message like this:
Protocol major versions differ: 1 vs. 2
If the SSH server supports both SSH1 and SSH2, both commands should work successfully.
#### Method Three ####
Another method to check supported SSH protocol version of a remote SSH server is to run an SSH scanning tool called [scanssh][2]. This command-line tool is useful when you want to check SSH protocol versions for a bulk of IP addresses or the entire local network to upgrade SSH1-capable SSH servers.
Here is the basic syntax of scanssh for SSH version scanning.
$ sudo scanssh -s ssh -n [ports] [IP addresses or CIDR prefix]
The "-n" option can specify the SSH port number(s) to scan. You can specify multiple port numbers separated by comma. Without this option, scanssh will scan port 22 by default.
Use the following command to discover SSH servers on 192.168.1.0/24 local network, and detect their SSH protocol versions:
$ sudo scanssh -s ssh 192.168.1.0/24
![](https://farm8.staticflickr.com/7550/15460750074_95f83217a2_b.jpg)
If scanssh reports "SSH-1.XX-XXXX" for a particular IP address, it implies that the minimum SSH protocol version supported by the corresponding SSH server is SSH1. If the remote server supports SSH2 only, scanssh will show "SSH-2.0-XXXX".
--------------------------------------------------------------------------------
via: http://ask.xmodulo.com/check-ssh-protocol-version-linux.html
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[1]:http://www.openssh.com/specs.html
[2]:http://www.monkey.org/~provos/scanssh/

View File

@ -0,0 +1,60 @@
Linux FAQs with Answers--How to fix “XXX is not in the sudoers file” error
================================================================================
> **Question**: I am trying to use sudo to run some privileged command on my Linux. However, when I attempt to do so, I am getting "[my-user-id] is not in the sudoers file. This incident will be reported." error message. How can I resolve this sudo error?
sudo is a program which enables specific groups of users to run a command with the privilege of another user (typically root). sudo comes with detailed logging capability, and offers finer granular control over which user can type what commands via sudo.
### Sudo vs. Su ###
The su command also offers similar privilege escalation capability. Difference between sudo and su is their authentication process and the granularity of priviledge change. su allows you to switch your login session to another user's, where you can then run any arbitrary programs with the user's privilege as many times as you want. You will have to know the target user's password to switch to the user though. On the other hand, sudo works on a per-command basis, allowing you to run a single command with root privilege. To use sudo, you don't have to know the root password, but enter your own password at sudo password prompt.
### Add a User to Sudoers List ###
As a new user, if you attempt to run sudo command, you will encounter the following error. This means that you are not in the sudoers list which contains a group of users who are authorized to use sudo.
[my-user-id] is not in the sudoers file. This incident will be reported.
There are two ways to add you to sudoers list.
### Method One ###
The first method is to add you to the Linux group named sudo. This special Linux group is pre-configured to be able to use sudo. Thus once you are in the group, you can run sudo command.
The following command will add you to sudo Linux group. Make sure to run the command as the root.
# adduser <username> sudo
Now confirm that your group membership has been updated. Use groups command to see a list of groups you currently belong to. The list must include sudo group.
$ groups
----------
alice adm cdrom sudo dip plugdev fuse lpadmin netdev sambashare davfs2 libvirtd docker promiscuous
The group membership change (and sudo access) will be activated once you log out and log back in.
### Method Two ###
The second method to enable you to use sudo is to add yourself to /etc/sudoers configuration file directly.
To change /etc/sudoers file, you can use a special sudo editor command called visudo. Simply invoke the following command as the root.
# visudo
This will open up /etc/sudoers file for editing. Append the following line to the end of the file, and press Ctrl+X. When prompted, save the change, and exit.
<username> ALL=(ALL) ALL
This change will be effective immediately, and you will be able to use sudo right away.
![](https://farm8.staticflickr.com/7511/15866443418_e147329e1b_c.jpg)
--------------------------------------------------------------------------------
via: http://ask.xmodulo.com/fix-is-not-in-the-sudoers-file.html
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出

View File

@ -0,0 +1,74 @@
Linux FAQs with Answers--How to install 7zip on Linux
================================================================================
> **Question**: I need to extract files from an ISO image, and for that I want to use 7zip program. How can I install 7zip on [insert your Linux distro]?
7zip is an open-source archive program originally developed for Windows, which can pack or unpack a variety of archive formats including its native format 7z as well as XZ, GZIP, TAR, ZIP and BZIP2. 7zip is also popularly used to extract RAR, DEB, RPM and ISO files. Besides simple archiving, 7zip can support AES-256 encryption as well as self-extracting and multi-volume archiving. For POSIX systems (Linux, Unix, BSD), the original 7zip program has been ported as p7zip (short for "POSIX 7zip").
Here is how to install 7zip (or p7zip) on Linux.
### Install 7zip on Debian, Ubuntu or Linux Mint ###
Debian-based distributions come with three packages related to 7zip.
- **p7zip**: contains 7zr (a minimal 7zip archive tool) which can handle its native 7z format only.
- **p7zip-full**: contains 7z which can support 7z, LZMA2, XZ, ZIP, CAB, GZIP, BZIP2, ARJ, TAR, CPIO, RPM, ISO and DEB.
- **p7zip-rar**: contains a plugin for extracting RAR files.
It is recommended to install p7zip-full package (not p7zip) since this is the most complete 7zip package which supports many archive formats. In addition, if you want to extract RAR files, you also need to install p7zip-rar package as well. The reason for having a separate plugin package is because RAR is a proprietary format.
$ sudo apt-get install p7zip-full p7zip-rar
### Install 7zip on Fedora or CentOS/RHEL ###
Red Hat-based distributions offer two packages related to 7zip.
- **p7zip**: contains 7za command which can support 7z, ZIP, GZIP, CAB, ARJ, BZIP2, TAR, CPIO, RPM and DEB.
- **p7zip-plugins**: contains 7z command and additional plugins to extend 7za command (e.g., ISO extraction).
On CentOS/RHEL, you need to enable [EPEL repository][1] before running yum command below. On Fedora, there is no need to set up an additional repository.
$ sudo yum install p7zip p7zip-plugins
Note that unlike Debian based distributions, Red Hat based distributions do not offer a RAR plugin. Therefore you will not be able to extract RAR files using 7z command.
### Create or Extract an Archive with 7z ###
Once you installed 7zip, you can use 7z command to pack or unpack various types of archives. The 7z command uses other plugins to handle the archives.
![](https://farm8.staticflickr.com/7583/15874000610_878a85b06a_b.jpg)
To create an archive, use "a" option. Supported archive types for creation are 7z, XZ, GZIP, TAR, ZIP and BZIP2. If the specified archive file already exists, it will "add" the files to the existing archive, instead of overwriting it.
$ 7z a <archive-filename> <list-of-files>
To extract an archive, use "e" option. It will extract the archive in the current directory. Supported archive types for extraction are a lot more than those for creation. The list includes 7z, XZ, GZIP, TAR, ZIP, BZIP2, LZMA2, CAB, ARJ, CPIO, RPM, ISO and DEB.
$ 7z e <archive-filename>
Another way to unpack an archive is to use "x" option. Unlike "e" option, it will extract the content with full paths.
$ 7z x <archive-filename>
To see a list of files in an archive, use "l" option.
$ 7z l <archive-filename>
You can update or remove file(s) in an archive with "u" and "d" options, respectively.
$ 7z u <archive-filename> <list-of-files-to-update>
$ 7z d <archive-filename> <list-of-files-to-delete>
To test the integrity of an archive:
$ 7z t <archive-filename>
--------------------------------------------------------------------------------
via: http://ask.xmodulo.com/install-7zip-linux.html
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[1]:http://xmodulo.com/how-to-set-up-epel-repository-on-centos.html

View File

@ -0,0 +1,79 @@
Linux FAQs with Answers--How to install Kingsoft Office on Linux
================================================================================
> **Question**: I heard good things about Kingsoft Office, so I would like to try it out on my Linux. How can I install Kingsoft Office on [insert your Linux distro]?
Kingsoft Office is an office suite available for multiple platforms including Windows, Linux, iOS and Android. It comes with three programs: Writer for word processing, Presentation for presentations, and Spreadsheets for spreadsheets. It is freemium model, where the basic version is free to use. Compared to other Linux office suites such as LibreOffice or OpenOffice, the best advantage of Kingsoft Office is its **excellent compatibility with Microsoft Office**. Thus for those of you who need to use an office suite on Linux and Windows platforms interchangeably, Kingsoft Office is a good choice for Linux platform.
### Install Kingsoft Office on CentOS, Fedora or RHEL ###
Download a RPM file for the [official site][1]. The official RPM package is available as a 32-bit version only, but you can install it on both 32-bit and 64-bit systems.
Use yum command with "localinstall" option to install the RPM file.
$ sudo yum localinstall kingsoft-office-9.1.0.4244-0.1.a12p3.i686.rpm
Note that do NOT use rpm command to install it. Otherwise, you will get unmet dependency errors, which are not easy to solve manually:
error: Failed dependencies:
libICE.so.6 is needed by kingsoft-office-9.1.0.4244-0.1.a12p3.i686
libSM.so.6 is needed by kingsoft-office-9.1.0.4244-0.1.a12p3.i686
libX11.so.6 is needed by kingsoft-office-9.1.0.4244-0.1.a12p3.i686
libXext.so.6 is needed by kingsoft-office-9.1.0.4244-0.1.a12p3.i686
libXrender.so.1 is needed by kingsoft-office-9.1.0.4244-0.1.a12p3.i686
libc.so.6 is needed by kingsoft-office-9.1.0.4244-0.1.a12p3.i686
Red Hat based distributions have multilib support. If the RPM package you are trying to install is 32-bit and has 32-bit library dependencies, a better way is to use yum to install it as shown above. As long as the RPM is properly built with all dependency information, yum should be able to install it using yum repositories.
![](https://farm9.staticflickr.com/8626/16040291445_ca62275064_c.jpg)
### Install Kingsoft Office on Debian, Ubuntu or Linux Mint ###
Download a DEB package from the [official site][2]. The official DEB package is available as a 32-bit version only, but you can install it on both 32-bit and 64-bit systems.
The DEB package has a set of dependencies to meet. Therefore use [gdebi][3] instead of dpkg command to automatically resolve dependencies.
$ sudo apt-get install gdebi-core
$ sudo gdebi kingsoft-office_9.1.0.4244~a12p3_i386.deb
### Launch Kingsoft Office ###
Once Kingsoft Office is installed, you can launch Writer, Presentation, and Spreadsheets from the desktop manager easily.
On Ubuntu Unity:
![](https://farm9.staticflickr.com/8591/16039583702_632a49779f_z.jpg)
On GNOME:
![](https://farm9.staticflickr.com/8617/16039583622_4e7c1d8545_b.jpg)
Alternatively, you can also launch Kingsoft Office from the command line.
To launch Kingsoft Writer from the command line, use this command:
    $ wps
![](https://farm8.staticflickr.com/7525/16039583642_7202457899_c.jpg)
To launch Kingsoft Presentation from the command line, use this command:
$ wpp
![](https://farm8.staticflickr.com/7570/15420632223_4243cc99d9_c.jpg)
To launch Kingsoft Spreadsheets from the command line, use this command:
$ et
![](https://farm9.staticflickr.com/8682/15852842558_97edda4afd_c.jpg)
--------------------------------------------------------------------------------
via: http://ask.xmodulo.com/install-kingsoft-office-linux.html
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[1]:http://ksosoft.com/product/office-2013-linux.html
[2]:http://ksosoft.com/product/office-2013-linux.html
[3]:http://xmodulo.com/how-to-install-deb-file-with-dependencies.html

View File

@ -0,0 +1,74 @@
Linux FAQs with Answers--How to install kernel headers on Linux
================================================================================
> **Question**: I need to install kernel headers to install a device driver for my kernel. What is a proper way to install matching kernel headers on [insert your Linux distro]?
When you compile a device driver or a custom kernel module, you need to have kernel header files installed on your Linux system. Kernel headers are also needed when you build a userspace application which links directly against the kernel. When you install kernel headers in such cases, you must make sure that the kernel headers exactly match the kernel version of your system (e.g., 3.13.0-24-generic).
If your kernel is the default version that comes with the distribution, or you upgraded it using the default package manager (e.g., apt-get, aptitude or yum) from base repositories, you can install matching kernel headers using the package manager as well. On the other hand, if you downloaded the [kernel source][1] and compiled it manually, you can install matching kernel headers by using [make command][2].
Here we assume that your kernel comes from base repositories of your Linux distribution, and see how we can install matching kernel headers.
### Install Kernel Headers on Debian, Ubuntu or Linux Mint ###
Assuming that you did not manually compile the kernel, you can install matching kernel headers using apt-get command.
First, check if matching kernel headers are already available on your system using dpkg-query command.
$ dpkg-query -s linux-headers-$(uname -r)
----------
dpkg-query: package 'linux-headers-3.11.0-26-generic' is not installed and no information is available
Go ahead and install matching kernel headers as follows.
$ sudo apt-get install linux-headers-$(uname -r)
![](https://farm9.staticflickr.com/8681/16000652415_a7c399992e_z.jpg)
Verify that matching kernel headers are successfully installed.
$ dpkg-query -s linux-headers-$(uname -r)
----------
Package: linux-headers-3.11.0-26-generic
Status: install ok installed
The default location of kernel headers on Debian, Ubuntu or Linux Mint is **/usr/src**.
### Install Kernel Headers on Fedora, CentOS or RHEL ###
If you did not manually upgrade the kernel, you can install matching kernel headers using yum command.
First, check if matching kernel headers are already installed on your system. If the following command does not produce any output, it means kernel headers are not available.
$ rpm -qa | grep kernel-headers-$(uname -r)
Go ahead and install kernel headers with yum command. This command will automatically find a package of matching kernel headers, and install it.
$ sudo yum install kernel-headers
![](https://farm9.staticflickr.com/8594/15378403114_c51ff6f4ae_z.jpg)
Verify the status of the installed package.
$ rpm -qa | grep kernel-headers-$(uname -r)
----------
kernel-headers-3.10.0-123.9.3.el7.x86_64
The default location of kernel headers on Fedora, CentOS or RHEL is **/usr/include/linux**.
--------------------------------------------------------------------------------
via: http://ask.xmodulo.com/install-kernel-headers-linux.html
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[1]:https://www.kernel.org/pub/linux/kernel/
[2]:https://www.kernel.org/doc/Documentation/kbuild/headers_install.txt

View File

@ -0,0 +1,47 @@
Linux FAQs with Answers--How to install non-free packages on Debian
================================================================================
> **Question**: I need to install some proprietary device driver on Debian, which is part of a non-free package. However, I cannot find and install the package in my Debian box. How can I install non-free packages on Debian?
The Debian project is distributed as a collection of packages, [48,000][1] of them, as of Debian Wheezy. These packages are categorized into three areas: main, contrib and non-free, mainly based on licensing requirements, e.g., [Debian Free Software Guidelines][2] (DFSG).
The main area contains free software that complies with DFSG. The contrib area contains free software that complies with DFSG, but relies on non-free software for compilation or execution. Finally, the non-free area contains non-free packages that are not compliant with DFSG but redistributable. The main repository is considered a part of Debian, but neither the contrib nor the non-free repository is. The latter two are maintained and provided only as a convenience to users.
If you want to install a non-free package maintained by Debian, you need to enable contrib and non-free repositories. To do so, open /etc/apt/sources.list with a text editor, and append "contrib non-free" to each source.
The following is an example of /etc/apt/sources.list for Debian Wheezy.
deb http://ftp.us.debian.org/debian/ wheezy main contrib non-free
deb-src http://ftp.us.debian.org/debian/ wheezy main contrib non-free
deb http://security.debian.org/ wheezy/updates main contrib non-free
deb-src http://security.debian.org/ wheezy/updates main contrib non-free
# wheezy-updates, previously known as 'volatile'
deb http://ftp.us.debian.org/debian/ wheezy-updates main contrib non-free
deb-src http://ftp.us.debian.org/debian/ wheezy-updates main contrib non-free
![](https://farm8.staticflickr.com/7562/16063758036_0ef8fce075_b.jpg)
After modifying sources of packages, run the following command to download package index files for contrib and non-free repositories.
$ sudo apt-get update
If you are using aptitude, run the following instead.
$ sudo aptitude update
Now you are ready to search and install any non-free package on Debian.
![](https://farm9.staticflickr.com/8593/16089610915_b638fce55d_c.jpg)
--------------------------------------------------------------------------------
via: http://ask.xmodulo.com/install-nonfree-packages-debian.html
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[1]:https://packages.debian.org/stable/allpackages?format=txt.gz
[2]:https://www.debian.org/social_contract.html#guidelines

View File

@ -0,0 +1,82 @@
Linux FAQs with Answers--How to rename multiple files on Linux
================================================================================
> **Question**: I know I can rename a file using mv command. But what if I want to change the name of many files? It will be tedious to invoke mv command for every such file. Is there a more convenient way to rename multiple files at once?
In Linux, when you want to change a file name, mv command gets the job done. However, mv cannot rename multiple files using wildcard. There are ways to deal with multiple files by using a combination of sed, awk or find in conjunction with [xargs][1]. However, these CLIs are rather cumbersome and not user-friendly, and can be error-prone if you are not careful. You don't want to undo incorrect name change for 1,000 files.
When it comes to renaming multiple files, the rename utility is probably the easiest, the safest, and the most powerful command-line tool. The rename command is actually a Perl script, and comes pre-installed on all modern Linux distributions.
Here is the basic syntax of rename command.
rename [-v -n -f] <pcre> <files>
<pcre> is a Perl-compatible regular expression (PCRE) which represents file(s) to rename and how. This regular expression is in the form of 's/old-name/new-name/'.
The '-v' option shows the details of file name changes (e.g., XXX renamed as YYY).
The '-n' option tells rename to show how the files would be renamed without actually changing the names. This option is useful when you want to simulate filename change without touching files.
The '-f' option forces overwriting existing files.
In the following, let's see several rename command examples.
### Change File Extensions ###
Suppose you have many image files with .jpeg extension. You want to change their file names to *.jpg. The following command converts *.jpeg files to *.jpg.
$ rename 's/\.jpeg$/\.jpg/' *.jpeg
### Convert Uppercase to Lowercase and Vice-Versa ###
In case you want to change text case in filenames, you can use the following commands.
To rename all files to lower-case:
# rename 'y/A-Z/a-z/' *
To rename all files to upper-case:
# rename 'y/a-z/A-Z/' *
![](https://farm9.staticflickr.com/8655/16054304245_bcf9d23b59_b.jpg)
### Change File Name Patterns ###
Now let's consider more complex regular expressions which involve subpatterns. In PCRE, a subpattern captured within round brackets can be referenced by a number preceded by a dollar sign (e.g., $1, $2).
For example, the following command will rename 'img_NNNN.jpeg' to 'dan_NNNN.jpg'.
# rename -v 's/img_(\d{4})\.jpeg$/dan_$1\.jpg/' *.jpeg
----------
img_5417.jpeg renamed as dan_5417.jpg
img_5418.jpeg renamed as dan_5418.jpg
img_5419.jpeg renamed as dan_5419.jpg
img_5420.jpeg renamed as dan_5420.jpg
img_5421.jpeg renamed as dan_5421.jpg
The next command will rename 'img_000NNNN.jpeg' to 'dan_NNNN.jpg'.
# rename -v 's/img_\d{3}(\d{4})\.jpeg$/dan_$1\.jpg/' *jpeg
----------
img_0005417.jpeg renamed as dan_5417.jpg
img_0005418.jpeg renamed as dan_5418.jpg
img_0005419.jpeg renamed as dan_5419.jpg
img_0005420.jpeg renamed as dan_5420.jpg
img_0005421.jpeg renamed as dan_5421.jpg
In both cases above, the subpattern '\d{4}' captures four consecutive digits. The captured four digits are then referred to as $1, and used as part of new filenames.
--------------------------------------------------------------------------------
via: http://ask.xmodulo.com/rename-multiple-files-linux.html
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[1]:http://xmodulo.com/xargs-command-linux.html

View File

@ -0,0 +1,68 @@
红帽反驳“grinch”算不上Linux漏洞
================================================================================
![](http://images.techhive.com/images/article/2014/12/grinch-linux-100536132-primary.idge.png)
图片来源:[Natalia Wilson受Creative Commons许可][1]
> 安全专家表示Linux处理权限的方式仍有可能导致潜在的误操作。
但红帽对此不以为然,称 Alert Logic 于本周二译者注12月16日公布的 grinch (“鬼精灵”) Linux漏洞根本算不上是安全漏洞。
[红帽于周三发表简报][2] 回应Alert Logic 说法表示Alert Logic的这份报告错误地将正常预期动作归为安全问题。”
安全公司Alert Logic于本周二声称“鬼精灵”漏洞其严重性堪比 Heartbleed 臭虫,并称其是 [Linux 系统处理用户权限时的重大设计缺陷][3]恶意攻击者可借此获取机器的root权限。
Alert Logic 称攻击者可以使用第三方Linux 软件框架Policy Kit (Polkit)达到利用“鬼精灵”漏洞的目的。Polkit旨在帮助用户安装与运行软件包此开源程序由红帽维护。Alert Logic 声称允许用户安装软件程序的过程中往往需要超级用户权限如此一来Polkit也在不经意间或通过其它形式为恶意程序的运行洞开方便之门。
红帽对此不以为意,表示系统就是这么设计的,换句话说,“鬼精灵”不是臭虫而是一项特性。
安全监控公司Threat Stack联合创造人 Jen Andre [就此在一篇博客][4]中写道“如果你任由用户通过使用那些利用了Policykit的软件无需密码就可以在系统上安装任何软件实际上也就绕过了Linux内在授权与访问控制。”
Alert Logic 高级安全研究员 James Staten 在发给国际数据集团新闻社(IDG News Service)的电子邮件中写道,虽然这种行为是设计使然,有意为之,但“鬼精灵”仍然可能被加以利用或修改来攻陷系统。
“现在的问题是表面存在一个薄弱环节,可以被用来攻击系统,如果安装软件包象其它操作一样,比如删除软件包或添加软件源,没有密码不行,那么就不会存在被恶意利用的可能性了。”
不过 Andre 在一次采访中也表示对那些跃跃欲试的攻击者来说想利用Polkit还是有一些苛刻限制的。
攻击者需要能够物理访问机器,并且还须通过外设键鼠与机器互动。如果攻击者能够物理访问机器,可以象重启机器进入恢复模式访问数据与程序一样地轻而易举的得手。
Andre表示不是所有Linux机器都默认安装Polkit -- 事实上其主要用于拥有桌面图形界面的工作站在当今运行的Linux机器中占有很小的份额。
换句话说,“鬼精灵”并不具有象[Shellshock][5]那样广泛的攻击面, 后者存在于Bash shell中几乎所有发行版无一幸免。
其他安全专家对“鬼精灵”漏洞也不以为然。
系统网络安全协会SANS Institute互联网风暴中心Internet Storm Center咨询网站的 Johannes Ullrich 在[一篇博文][6]中写道“某种程度上与很多Linux系统过分随意的设置相比这个并算不上多大的漏洞。”
Ullrich 同时还指出“鬼精灵”漏洞也并非完全“良性”“可以很容易地加以利用获得超出Polkit设置预期的权限。”
Andre指出负责管理运行Polkit桌面Linux机器的管理员要做到心中有数了解潜在的危险检查那些程序是靠Polkit来管理的确保系统无虞。
他还表示应用开发者与Linux 发行者也应确保正确使用Polkit框架。
原始报告的另一位作者Even Tyler似乎也承认“鬼精灵”并非十分严重。
[在开源安全邮件列表的一封邮件中][7]Bourland 提到攻击者需要借助其它漏洞,连同“鬼精灵”才能发起攻击时,他写道,“鬼精灵”就象个“开启界面的熟练工,但是本身并不能翻多高的浪。”
Lucian Constantin 对本文也有贡献。)
--------------------------------------------------------------------------------
via: http://www.computerworld.com/article/2861392/security0/the-grinch-isnt-a-linux-vulnerability-red-hat-says.html
作者:[Joab Jackson][a]
译者:[yupmoon](https://github.com/yupmoon)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://www.computerworld.com/author/Joab-Jackson/
[1]:http://www.flickr.com/photos/moonrat/4571563485/
[2]:https://access.redhat.com/articles/1298913
[3]:http://www.pcworld.com/article/2860032/this-linux-grinch-could-put-a-hole-in-your-security-stocking.html
[4]:http://blog.threatstack.com/the-linux-grinch-vulnerability-separating-the-fact-from-the-fud
[5]:http://www.computerworld.com/article/2687983/shellshock-flaws-roils-linux-server-shops.html
[6]:https://isc.sans.edu/diary/Is+the+polkit+Grinch+Going+to+Steal+your+Christmas/19077
[7]:http://seclists.org/oss-sec/2014/q4/1078

View File

@ -0,0 +1,106 @@
NetHack
================================================================================
## 一直以来最好的游戏? ##
**这款游戏非常容易让你上瘾。你可能需要花费一生的时间来掌握它。许多人玩了几十年也没有通关。欢迎来到 NetHack 的世界...**
不管你信不信,在 NetHack 里你见到字母 D 的时候你会被吓着。但是当你看见一个 % 的时候,你将会欣喜若狂。(忘了说 ^,你看见它将会更激动)在你寻思我们的脑子是不是烧坏了并准备关闭浏览器标签之前,请给我们一点时间解释:这些符号分别代表龙、食物以及陷阱。欢迎来到 NetHack 的世界,在这里你的想象力需要发挥巨大的作用。
如你所见NetHack 是一款文字模式的游戏:它仅仅使用标准终端字符集来刻画玩家、敌人、物品还有环境。游戏的图形版是存在的,不过 NetHack 的骨灰级玩家们都倾向于不去使用它们,问题在于假如你使用图形界面,当你通过 SSH 登录到你的古董级的运行着 NetBSD 的 Amiga 3000 上时你还能进行游戏吗在某些方面NetHack 和 Vi 非常相似 - 几乎被移植到了现存的所有的操作系统上,并且依赖都非常少。
那么问题来了,和现代游戏相比如此简陋的画面,是什么造就了 NetHack 如此巨大的吸引力的呢?
事实上,这款地牢探险类神作有着令人难以置信的丰富细节。有太多的东西等着你去发掘:法术释放、怪物战斗以及技巧学习 - 以及随机生成的地牢。有太多的东西等着你去探索,几乎没有哪两局游戏会是完全一样的。许多人玩了几十年也没有通关,每次游戏依然能发现一些以前不知道的秘密。
接下来,我们会向你讲述 NetHack 的历史,给你地牢探险的基本指导,再告诉你一些技巧。免责:
在你继续阅读本文之前,视为你已经自动同意了当你沉溺于 NetHack 以致影响到你的现实生活时,你不会起诉我们。
![NetHack 界面](http://www.linuxvoice.com/wp-content/uploads/2014/12/nh_annotated.png)
NetHack 界面
### 也许是最古老的仍在开发的游戏里 ###
名非其实NetHack 并不是一款网络游戏。它只不过是基于一款出现较早的名为 Hack 的地牢探险类游戏开发出来的,而这款 Hack 游戏是 1980 年的游戏 Rogue 的后代。NetHack 在 1987 年发布了第一个版本,并于 2003 年发布了 3.4.3 版本,尽管在这期间一直没有加入新的功能,但各种补丁、插件,以及衍生作品还是在网络上疯狂流传。这使得它可以说是最古老的、拥有众多对游戏乐此不疲的粉丝的游戏。当你访问 [www.reddit.com/r/nethack][1] 之后,你就会了解我们的意思了 - 骨灰级的 NetHack 的玩家们仍然聚集在一起讨论新的策略、发现和技巧。偶尔你也可以发现 NetHack 的元老级玩家在历经千辛万苦终于通关之后发出的欢呼。
但怎样才能通关呢首先NetHack 被设定在既大又深的地牢中。游戏开始时你在最顶层 - 第 1 层 - 你的目标是不断往下深入直到你找到一个非常宝贵的物品,护身符 Yendor。通常来说 Yendor 在 第 20 层或者更深的地方,但它是可以变化的。随着你在地牢的不断深入,你会遇到各种各样的怪物、陷阱以及 NPC有些会试图杀掉你有些会挡在你前进的路上还有些... 总而言之,在你靠近 TA 们之前你永远不知道 TA 们会怎样。
> 要学习的有太多太多,绝大多数物品只有在和其他物品同时使用的情况下才会发挥最好的效果。
是 NetHack 如此引人入胜的原因是游戏中所加入的大量物品。武器、盔甲、附魔书、戒指、宝石 - 要学习的有太多太多绝大多数物品只有在和其他物品同时使用的情况下才会发挥最好的效果。怪物在死亡后经常会掉落一些有用的物品以及某些物品如果你不正确使用的话会产生及其不良的作用。你可以在地牢找到商店里面有许多看似平凡实则非常有用的物品不过别指望店主能给你详细的描述。你只能靠自己的经验来了解各个物品的用途。有些物品确实没有太大用处NetHack 中有很多的恶搞元素 - 比如你可以把一块奶油砸到自己的脸上。
不过在你踏入地牢之前NetHack 会询问你要选择哪种角色进行游戏。你可以为你接下来的地牢之行选择骑士、修道士、巫师或者卑微的旅者还有许多其他的角色类型。每种角色都有其独特的优势与弱点NetHack 的重度玩家喜欢选择那些相对较弱的角色来挑战游戏。你懂的,这样可以向其他玩家炫耀自己的实力。
> ## 情报不会降低游戏的乐趣 ##
> 用 NetHack 的说法来讲,“情报员”给指其他玩家提供关于怪物、物品、武器和盔甲信息的玩家。理论上来说,完全可以不借助任何外来信息而通关,但几乎没有几个玩家能做到,游戏实在是太难了。因此使用情报并不会被视为一件糟糕的事情 - 但是一开始由你自己来探索游戏和解决难题,这样才会获得更多的乐趣,只有当你遇到瓶颈的时候再去使用那些情报。
> 在这里给出一个比较有名的情报站点 [www.statslab.cam.ac.uk/~eva/nethack/spoilerlist.html][2],其中的情报被分为了不同的类别。游戏中随机发生的事,比如在喷泉旁饮水可能导致的不同结果,从这里你可以得知已确定的不同结果的发生概率。
>
> ### 你的首次地牢之行 ###
NetHack 几乎可以在所有的主流操作系统以及 Linux 发行版上运行,因此你可以通过 "apt-get install nethack" 或者 "yum install nethack" 等适合你用的发行版的命令来安装游戏。安装完毕后,在一个命令行窗口中键入 "nethack" 就可以开始游戏了。游戏开始时系统会询问是否为你随机挑选一位角色 - 但作为一个新手,你最好自己从里面挑选一位比较强的角色。所以,你应该点 "n",然后点 "v" 以选取女武神Valkyrie最后点 "d" 选择成为侏儒dwarf
接着 NetHack 上会显示出剧情,说你的神正在寻找护身符 Yendor你的目标就是找到它并将它带给神。阅读完毕后点击空格键其他任何时候当你见到屏幕上的 "-More-" 时都可以这样)。接着就让我们出发 - 开始地牢之行吧!
先前已经介绍过了,你的角色用 @ 来表示。你可以看见角色所出房间周围的墙壁房间里显示点的那些地方是你可以移动的空间。首先你得明白怎样移动角色h、j、k 以及 l。是的和 Vim 中移动光标的操作相同)这些操作分别会使角色向向左、向下、向上以及向右移动。你也可以通过 y、u、b 和 n 来使角色斜向移动。在你熟悉如何控制角色移动前你最好在房间里来回移动你的角色。
NetHack 采用了回合制,因此即使你不进行任何动作,游戏仍然在进行。这使得你可以提前计划你的行动。你可以看见一个 "d" 字符或者 "f" 字符在房间里来回移动:这是你的宠物狗/猫,(通常情况下)它们 不会伤害你而是帮助你击杀怪物。但是宠物也会被惹怒 - 它们偶尔也会抢在你接近食物或者怪物尸体之前吃掉它们。
![点击 “i” 列出你当前携带的物品清单](http://www.linuxvoice.com/wp-content/uploads/2014/12/nh_inventory.png)
点击 “i” 列出你当前携带的物品清单
### 门后有什么? ###
接下来,让我们离开房间。房间四周的墙壁某处会有缝隙,可能是 "+" 号。"+" 号表示一扇关闭的门,这时你应该靠近它然后点击 "o" 来开门。接着系统会询问你开门的方向,假如门在你的左方,就点击 "h"。(如果门被卡住了,就多试几次)然后你就可以看见门后的走廊了,它们由 "#" 号表示,沿着走廊前进直到你找到另一个房间。
地牢之行中你会见到各种各样的物品。某些物品,比如金币(由 "$" 号表示)会被自动捡起来;至于另一些物品,你只能站在上面按下逗号键手动拾起。如果同一位置有多个物品,系统会给你显示一个列表,你只要通过合适的按键选择列表中你想要的物品最后按下 "Enter" 键即可。任何时间你都可以点击 "i" 键在屏幕上列出你当前携带的物品清单。
如果看见了怪物该怎么办?在游戏早期,你可能会遇到的怪物会用符号 "d"、"x" 和 ":" 表示。想要攻击的话,只要简单地朝怪物的方向移动即可。系统会在屏幕顶部通过信息显示来告诉你你的攻击是否成功 - 以及怪物做出了何种反应。早期的怪物很容易击杀,所以你可以毫不费力地打败他们,但请留意底部状态栏里显示的角色的 HP 值。
> 早期的怪物很容易击杀,但请留意角色的 HP 值。
如果怪物死后掉落了一具尸体("%"),你可以点击逗号进行拾取,并点击 "e" 来食用。(在任何时候系统提示你选择一件物品,你都可以从物品列表中点击相应的按键,或者点击 "?" 来查询迷你菜单。)注意!有些尸体是有毒的,这些知识你将在日后的冒险中逐渐学会掌握。
如果你在走廊里行进时遇到了死胡同,你可以点击 "s" 进行搜寻直到找到一扇门。这会花费时间,但是你由此加速了游戏进程:输入 "10" 并点击 "s" 你将一下搜索 10 次。这将花费游戏中进行 10 次动作的时间,不过如果你正在饥饿状态,你将有可能会被饿死。
通常你可以在地牢顶部找到 "{"(喷泉)以及 "!"(药水)。当你找到喷泉的时候,你可以站在上面并点击 "q" 键开始 “畅饮quaff” - 饮用后会得到从积极到致命的多种效果。当你找到药水的时候,将其拾起并点击 "q" 来饮用。如果你找到一个商店,你可以拾取其中的物品并在离开前点击 "p" 键进行支付。当你负重过大时,你可以点击 "d" 键丢掉一些东西。
![现在已经有带音效的 3D 版 Nethack 了Falcons Eye](http://www.linuxvoice.com/wp-content/uploads/2014/12/falcon.jpg)
现在已经有带音效的 3D 版 Nethack 了Falcons Eye
> ## 愚蠢的死法 ##
> 在 NetHack 玩家中流行着一个缩写词 "YASD" - 又一种愚蠢的死法Yet Another Stupid Death。这个缩写词表示了玩家由于自身的的愚蠢或者粗心大意导致了角色的死亡。我们搜集了很多这类死法但我们最喜欢的是下面描述的
> 我们正在商店浏览商品,这时一条蛇突然从药剂后面跳了出来。在杀死蛇之后,系统弹出一条信息提醒我们角色饥饿值过低了,因此我们顺手食用了蛇的尸体。坏事了!这使得我们的角色失明,导致我们的角色再也不能看见商店里的其他角色及地上的商品了。我们试图离开商店,但在慌乱中却撞在了店主身上并攻击了他。这种做法激怒了店主:他立即向我们的角色使用了火球术。我们试图逃到商店外的走廊上,但却在逃亡的过程中被烧死。
> 如果你有类似的死法,一定要来我们的论坛告诉我们。不要担心 - 没有人会嘲笑你。经历这样的死法也是你在 NetHack 的世界里不断成长的一部分。
### 武装自己 ###
地牢里,尤其是在你击杀怪物后,你可能会发现武器或盔甲。在这里再说一次,点击逗号把它们拾起,接着点击 "w"(小写的)来使用武器或者点击 "W"(大写的)来穿上盔甲。你可以用 "T" 来脱掉盔甲或者 "t" 来取下武器 - 如果你陷入了困境,请确保你总是在使用最好的装备。
在靠近掉在地下的装备之前最好检查一下身上的东西。点击 ";"(分号)后,"Pick an object"(选择一样物品)选项将出现在屏幕顶部。选择该选项,使用移动键直到选中你想要检查的物品,然后点击 ":"(冒号)。接着屏幕顶部将出现这件物品的描述。
因为你的目标是不断深入地牢直到找到护身符 Yendor所以请随时留意周围的 "<" 和 ">" 符号。这两个符号分别表示向上和向下的楼梯,你可以用与之对应的按键来上楼或下楼。注意!如果你想让宠物跟随你进入下/上一层地牢,下/上楼前请确保你的宠物在你邻近的方格内。若果你想退出,点击 "S"(大写的 s来保存进度输入 #quit 退出游戏。当你再次运行 NetHack 时,系统将会自动读取你上次退出时的游戏进度。
我们就不继续剧透了,地牢深处还有更多的神秘细节、陌生的 NPC 以及不为人知的秘密等着你去发掘。那么,我们再给你点建议:当你遇到了让你困惑不已的物品时,你可以尝试去 NetHack 维基 [http://nethack.wikia.com][3] 进行搜索。你也可以在 [www.nethack.org/v343/Guidebook.html][4] 找到一本非常不错(尽管很长)的指导手册。最后,祝游戏愉快!
--------------------------------------------------------------------------------
via: http://www.linuxvoice.com/nethack/
作者:[Mike Saunders][a]
译者:[Stevearzh](https://github.com/Stevearzh)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://www.linuxvoice.com/author/mike/
[1]:http://www.reddit.com/r/nethack
[2]:http://www.statslab.cam.ac.uk/~eva/nethack/spoilerlist.html
[3]:http://nethack.wikia.com/
[4]:http://www.nethack.org/v343/Guidebook.html

View File

@ -0,0 +1,88 @@
安卓编年史
================================================================================
youtube视频地址
<iframe width="640" height="360" frameborder="0" src="http://www.youtube-nocookie.com/embed/e52TSXwj774?start=0&amp;wmode=transparent" type="text/html" style="display:block"></iframe>
### Android 2.0, Éclair——带动GPS产业 ###
41天——这是从安卓1.6到安卓2.0所经历的时间。安卓的第一个大的版本号更迭发生在2009年10月的[摩托罗拉Droid][1]身上它是第一部“第二代”安卓设备。相对于G1而言Droid进行了大幅的硬件升级拥有巨大的3.7英寸在当时而言分辨率854×480的LCD屏幕。它同样带来了更强劲的性能一个600Mhz的德州仪器TI OMAP Cortex A8处理器还是单核以及256MB的RAM内存。
![摩托罗拉Droid凝视着你的灵魂。](http://cdn.arstechnica.net/wp-content/uploads/2014/03/2181.jpg)
摩托罗拉Droid凝视着你的灵魂。
但Droid最重要的部分是围绕它的大型广告活动。Droid是美国运营商威瑞森Verizon的旗舰设备这个头衔给它从美国最大运营商那里带来了不少收入。威瑞森从卢卡斯影业那获得了单词“droid”的授权并且开始了[“Droid Does”运动][2]——通过广告将设备定位成一个敢于发声充满突破的由此也延伸到安卓身上强力iPhone替代品。媒体常常说T-Mobile G1想要成为一个“iPhone杀手”但是Droid走了出来并拥有了这个称号。
就和G1一样Droid有个侧滑实体键盘。轨迹球已经不见了但是还是强制性要求有类似十字方向键的东西所以摩托罗拉把一个五键十字方向键放在了键盘右侧。Droid正面按键从实体按键变成了电容式触摸按键它们只是被印在了玻璃触摸屏上。安卓2.0同样最终取消了必须有“呼叫”和“结束”按钮的强制要求。所以连同十字方向键移到键盘那里的变动正面的按键可以排成漂亮又整洁的一行。所有这些精简结果带来的是有史以来最好看的安卓设备。T-Mobile G1看起来像是费雪牌的玩具但摩托罗拉Droid看起来像个可以用来削人的工业工具。
![安卓2.0和1.6的锁屏和主屏幕。](http://cdn.arstechnica.net/wp-content/uploads/2014/01/intro202.png)
安卓2.0和1.6的锁屏和主屏幕。
Ron Amadeo供图
威瑞森的一些差劲的广告活动泄漏了这个软件原本平静水汪汪的远景默认壁纸变成了脏兮兮的混凝土。开机动画用了红色脉动的哈儿HAL 9000的眼球译注哈儿是英国小说家亚瑟·克拉克所著《太空漫游》小说中出现的一部拥有强人工智能的超级电脑每当你收到邮件的时候默认通知铃声还会高喊“[DRRRRROOOOIIIIDDDD][3]”。Éclair泡芙就像是安卓的忧郁少年阶段。
安卓2.0中最先呈现给用户的事情之一就是新的锁屏。滑动解锁是苹果的专利,因此谷歌就采用了来自旋转手机的灵感,使用了弧线解锁手势。把你的手指放在锁定图标上并且向右滑动可以解锁设备,从音量图标向左滑动可以让手机静音。手指自然移动是圆弧状的,所以这手势感觉比按直线滑动更加自然。
默认主屏幕布局取消了多余的模拟时钟小部件,引入了现如今安卓的一个主要部分:主屏幕顶端的一个搜索栏。短信和安卓市场同样花了大功夫在新布局上。应用抽屉页同样被经过了明显的重新设计。
![应用抽屉和“添加到主屏幕”菜单截图。](http://cdn.arstechnica.net/wp-content/uploads/2014/01/icons.png)
应用抽屉和“添加到主屏幕”菜单截图。
Ron Amadeo供图
安卓在早期的阶段以极快的步伐开发前进而安卓团队在考虑界面设计时也从来没有对未来的设备有过真正的规划。摩托罗拉Droid——拥有854×480分辨率的LCD显示屏——相对于320×480分辨率的G1时代设备在分辨率上是个巨大的提升。几乎所有的东西都需要重绘。界面设计的“从头开始”就几乎成为了安卓2.0的主要课题。
谷歌借此机会几乎重新设计了安卓的所有图标从带有等距轴线的卡通风格图标转变为风格更为正式直观的图标。唯一一套没有重绘的图标是状态栏图标和经过修改的系统其它部分相比显得格格不入。这些图标会从安卓0.9一直使用到安卓2.3。
应用阵容上同样也有一些改变。摄像机被合并到相机中IM应用被去除并增加了两个新的谷歌应用Car Home一个被设计为在驾驶时使用的带有大按钮的启动器还有企业日历除了它支持Exchange而不是谷歌日历以外它和常规的日历没什么不一样。奇怪的是谷歌还预装了两个第三方应用程序Facebook和Verizon的Visual VM应用 (现在都不能用了)。第二组图片显示的是“添加到主屏幕”菜单,它同样经过了全新的设计。
![一个地点页面,显示“导航”选项,导航免责声明,实际的导航画面,以及交通信息。](http://cdn.arstechnica.net/wp-content/uploads/2014/01/nav2.png)
一个地点页面,显示“导航”选项,导航免责声明,实际的导航画面,以及交通信息。
Ron Amadeo供图
除了新的设计以外安卓2.0最突出的亮点是谷歌地图导航。谷歌更新了地图以支持免费的逐向导航配有兴趣点搜索和文本到语音引擎这使得它可以像一个独立的GPS设备一样大声读出街道名称。把GPS导航从一个单独的产品变成免费的智能手机功能这几乎一夜之间[摧毁][4]了独立GPS市场。TomTom的股票在安卓2.0推出的一周内下跌了近40
但一开始导航非常难以找到。你必须打开搜索框键入一个地点或地址并点击搜索结果。接下来点击了“导航”按钮后谷歌会显示一个警告声明导航正处于beta测试阶段不应该被信任。点击“接受”后你可以跳上车一个粗糙的合成语音会引导你到达目的地。菜单按钮背后隐藏着一个选项可以查看整个路线上的交通状况和突发事件。导航的设计一直徘徊不前。甚至连谷歌地图主界面都在安卓4.0时更新了安卓2.0风格的导航部分还是那么放着这几乎持续到了安卓4.3才有所改观。
地图还会显示路线的概览其中包含你的路线的交通数据。起初数据只是由常规的交通数据提供商授权但后来谷歌使用运行谷歌地图的安卓和iOS手机[收集原始交通数据][5]。这是在移动设备地图游戏中谷歌迈向霸主地位的第一步。毕竟实时交通流量的监控确实仅仅取决于你有多少数据点来源。现在伴随着数以亿计的iOS和安卓的谷歌地图的用户谷歌已经成为世界上最好的交通数据提供商。
地图导航安卓终于找到了自己的杀手级应用。谷歌公司那时提供了其他人提供不了的东西。“为什么我应该买这个而不是买个iPhone”问题终于有了个答案。谷歌地图也不需要像许多GPS设备一样通过PC更新。有了云地图能够始终保持最新状态所有这些更新都是免费的。唯一的缺点是你需要一个互联网连接来使用谷歌地图。
精确的地图在[苹果地图的惨败][6]中被大大宣传,它已经成为了智能手机的最重要的功能之一,即使没有人真正在它们工作的时候赞赏它们。绘制世界真的只借助无数人的力量,今天,谷歌的“地球”部门是公司最大的部门,拥有超过[7000名员工][7]。对于这里的大多数人来说,他们的工作是驾驶着公司充满相机的街景车驶过世界上的每一条道路。经过八年的数据收集,谷歌拥有超过[五百万英里][8]的360度街景视图谷歌地图成为了公司不可撼动的支柱之一。
![Car Home主屏幕并且因为有空间加入了一个横版的导航。](http://cdn.arstechnica.net/wp-content/uploads/2014/01/carhome1.png)
Car Home主屏幕并且因为有空间加入了一个横版的导航。
Ron Amadeo供图
随着和谷歌地图导航一起到来的还有“Car Home”一个大按钮设计的主屏幕旨在帮助你在驾驶时使用手机。这不是定制的每一个按钮只是一个独立应用的快捷方式。摩托罗拉Droid和其官方的[车载dock配件][9]有特殊的磁铁二者接触将自动触发Car Home。在手机接入dock时按压Droid的实体home键会打开Car Home主屏而不是正常的主屏幕屏幕上的触摸Home键可以打开正常的主屏幕。
Car Home虽然很有用但并没有存在多久——它在安卓3.0中被去掉了再也没有回来过。GPS系统几乎全部用在汽车驾驶时但它鼓励用户使用如“搜索”这样的功能它会弹出一个键盘谷歌的律师可能并不是很喜欢这种功能。随着[苹果的CarPlay][10]和谷歌的[开放汽车联盟][11]的到来,车载电脑看到了复苏的希望。这一次,重点更多的是在安全上,政府机构(如美国国家公路交通安全管理局)也在协助着这一方面的发展。
----------
![Ron Amadeo](http://cdn.arstechnica.net/wp-content//uploads/authors/ron-amadeo-sq.jpg)
[Ron Amadeo][a] / Ron是Ars Technica的评论编缉专注于安卓系统和谷歌产品。他总是在追寻新鲜事物还喜欢拆解事物看看它们到底是怎么运作的。
[@RonAmadeo][t]
--------------------------------------------------------------------------------
via: http://arstechnica.com/gadgets/2014/06/building-android-a-40000-word-history-of-googles-mobile-os/10/
译者:[alim0x](https://github.com/alim0x) 校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[1]:http://arstechnica.com/gadgets/2009/12/review-of-the-motorola-droid/
[2]:http://www.youtube.com/watch?v=e52TSXwj774
[3]:http://www.youtube.com/watch?v=UBL47tHrvMA
[4]:http://techcrunch.com/2009/10/28/googles-new-mobile-app-cuts-gps-nav-companies-at-the-knees/
[5]:http://googleblog.blogspot.com/2009/08/bright-side-of-sitting-in-traffic.html
[6]:http://arstechnica.com/apple/2012/09/apple-ceo-tim-cook-apologizes-for-ios-6-maps-promises-improvements/
[7]:http://www.businessinsider.com/apple-has-7000-fewer-people-working-on-maps-than-google-2012-9
[8]:https://developers.google.com/events/io/sessions/383278298
[9]:http://www.amazon.com/Motorola-Generation-Vehicle-Charger-Packaging/dp/B002Y3BYQA
[10]:http://arstechnica.com/apple/2014/03/ios-in-the-car-becomes-carplay-coming-to-select-dashboards-this-year/
[11]:http://arstechnica.com/information-technology/2014/01/open-automotive-alliance-aims-to-bring-android-inside-the-car/
[a]:http://arstechnica.com/author/ronamadeo
[t]:https://twitter.com/RonAmadeo

View File

@ -1,290 +0,0 @@
25个linux性能监控工具
================================================================================
一段时间以来我们在网上向读者介绍了如何为Linux以及类Linux操作系统配置多种不同的性能监控工具。在这篇文章中我们将罗列一系列使用最频繁的性能监控工具并对介绍到的每一个工具提供了相应的简介链接大致将其划分为两类基于命令行的和提供图形化接口的。
### 基于命令行的性能监控工具 ###
#### 1. dstat - 多类型资源统计工具 ####
该命令整合了**vmstat****iostat**和**ifstat**三种命令。同时增加了新的特性和功能允许你能够看到及时的不同的资源使用情况,从而能够使你对比和整合不同的资源使用情况。通过不同颜色和代码块布局的界面帮助你能够更加清晰容易的获取信息。它同时支持将信息数据导出到**cvs**格式文件中,从而用其他应用程序打开,或者导入到数据库中。你可以用该命令来[监控cpu内存和网络状态随着时间的变化][1]。
![](http://blog.linoxide.com/wp-content/uploads/2014/10/dstat.png)
#### 2. atop - 相比top更好的ASCII码体验 ####
使用**ASCII**码的命令行工具来显示一个性能监控工具能够记录显示所有进程活动。它不但能够展示每日的系统日志也能够进行长期的进程活动分析同时也能够高亮过载的系统使用资源。它包含了CPU内存交换空间磁盘和网络层的度量指标。使用所用的功能只需在终端运行**atop**即可。当然你也可以使用[交互接口来显示][2]数据并进行排序。
# atop
![](http://blog.linoxide.com/wp-content/uploads/2014/10/atop1.jpg)
#### 3. Nmon - 类Unix系统的性能监控 ####
Nmon为**Nigel's Monitor**缩写,它最早开发用来作为**AIX**的系统监控工具。它的特征是**在线模式**,该模式在终端中实时更新监控信息,同时使用光标操作来提高屏幕事件处理效率。使用**捕捉模式**能够将数据保存为**CSV**格式,方便进一步的处理和图形化展示。
![](http://blog.linoxide.com/wp-content/uploads/2014/10/nmon_interface.png)
更多的信息参考我们的[nmon性能监控文章][3]。
#### 4. slabtop - 显示内核slab缓存信息 ####
这个应用能够显示**缓存分配器**是如何管理Linux内核缓存中不同类型的对象的。这个命令类似于top命令区别是它重点实时显示内核slab缓存信息。它能够按照不同排序条件来显示最靠前的缓存列表。它同时也能够显示一个以slab层信息填充的统计头部。举例如下
# slabtop --sort=a
# slabtop -s b
# slabtop -s c
# slabtop -s l
# slabtop -s v
# slabtop -s n
# slabtop -s o
**更多信息参阅**[内核slab缓存文章][4]。
#### 5. sar - 性能监控和瓶颈检查 ####
**sar** 命令是为了标准输出在操作系统上所选的累积活动计数器内容信息。该基于计数值和时间间隔参数的**审计系统**会按照指定的时间间隔输出指定次数的监控信息。如果时间间隔参数设置为0那么[sar命令将会显示系统从开机到当时时刻的平均统计信息][5]。有用的命令如下:
# sar -u 2 3
    # sar -u -f /var/log/sa/sa05
# sar -P ALL 1 1
# sar -r 1 3
# sar -W 1 3
#### 6. Saidar - 简单的统计监控工具 ####
Saidar是一个**简单**且**轻量**的系统信息监控工具。虽然它无法提供大多性能报表,但是它能够通过一个简短友好的方式显示最有用的系统运行状况数据。你可以很容易地看到[up-time, average load,CPU,内存,进程,磁盘和网络接口][6]统计信息。
Usage: saidar [-d delay] [-c] [-v] [-h]
-d Sets the update time in seconds
-c Enables coloured output
-v Prints version number
-h Displays this help information.
![](http://blog.linoxide.com/wp-content/uploads/2014/10/saidar-e1413370985588.png)
#### 7. top - 经典的Linux任务管理工具 ####
作为一个广为认知的**Linux**工具,**top**出现在大多数的类Unix操作系统任务管理中。它可以显示当前正在运行的进程的列表同时用户可以按照不同的查询条件对该列表进行排序。它主要显示了系统进程对**CPU**和内存的使用状况。top可以快速检查是哪个或哪几个进程挂起了你的系统。你可以在[这里][7]看到top使用的例子。 你可以在终端输入top来运行它并进入到交互模式
Quick cheat sheet for interactive mode:
GLOBAL_Commands: <Ret/Sp> ?, =, A, B, d, G, h, I, k, q, r, s, W, Z
SUMMARY_Area_Commands: l, m, t, 1
TASK_Area_Commands Appearance: b, x, y, z Content: c, f, H, o, S, u Size: #, i, n Sorting: <, >, F, O, R
COLOR_Mapping: <Ret>, a, B, b, H, M, q, S, T, w, z, 0 - 7
COMMANDS_for_Windows: -, _, =, +, A, a, G, g, w
![](http://blog.linoxide.com/wp-content/uploads/2014/10/top.png)
#### 8. Sysdig - 系统进程的高级视图 ####
**Sysdig**是一个能够让系统管理员和开发人员前所未有的洞察其系统行为的监控工具。其开发团队出于改善系统层次的监控方式,以及通过提供关于**存储,进程,网络和内存**子系统的**统一有序**以及**粒度可见**的方式来进行错误排查,通过创建系统活动记录文件使得你可以在任何时间轻松分析。
简单例子:
# sysdig proc.name=vim
# sysdig -p"%proc.name %fd.name" "evt.type=accept and proc.name!=httpd"
# sysdig evt.type=chdir and user.name=root
# sysdig -l
# sysdig -L
# sysdig -c topprocs_net
# sysdig -c fdcount_by fd.sport "evt.type=accept"
# sysdig -p"%proc.name %fd.name" "evt.type=accept and proc.name!=httpd"
# sysdig -c topprocs_file
# sysdig -c fdcount_by proc.name "fd.type=file"
# sysdig -p "%12user.name %6proc.pid %12proc.name %3fd.num %fd.typechar %fd.name" evt.type=open
# sysdig -c topprocs_cpu
# sysdig -c topprocs_cpu evt.cpu=0
# sysdig -p"%evt.arg.path" "evt.type=chdir and user.name=root"
# sysdig evt.type=open and fd.name contains /etc
![](http://blog.linoxide.com/wp-content/uploads/2014/10/sysdig.jpg)
**更多信息** 可以在 [如何利用sysdig改善系统层次的监控和错误排查][8]
#### 9. netstat - 显示开放的端口和连接 ####
它是**Linux管理员**使用来显示不同网络信息的工具,如查看什么端口开放和什么网络连接已经建立以及何种进程运行在这种连接之上。同时它也显示了**Unix套接字**的信息这些套接字在不同的程序中为打开状态。作为大多数Linux发行版本的一部分netstat的许多命令在 [netstat和它的不同输出][9]中有详细的描述。最为常用的如下:
$ netstat | head -20
$ netstat -r
$ netstat -rC
$ netstat -i
$ netstat -ie
$ netstat -s
$ netstat -g
$ netstat -tapn
### 10. tcpdump - 洞察网络包 ###
**tcpdump**可以用来查看**网络连接**包的内容。它显示了传输过程中包内容的各种信息。为了使得输出信息更为有用,它允许使用者通过不同的过滤器获取自己想要的信息。可以参照的例子如下:
# tcpdump -i eth0 not port 22
# tcpdump -c 10 -i eth0
# tcpdump -ni eth0 -c 10 not port 22
# tcpdump -w aloft.cap -s 0
# tcpdump -r aloft.cap
# tcpdump -i eth0 dst port 80
你可以在[tcpdump和捕捉包][10]文章中找到详细的描述。
#### 11. vmstat - 虚拟内存统计信息 ####
**vmstat**是虚拟内存(**virtual memory** statistics)的缩写,作为一个**内存监控**工具,它收集和显示概括关于**内存****进程****终端**和**分页**和**I/O阻塞**的信息。作为一个开源程序它可以在大部分Linux发行版本中找到包括Solaris和FreeBSD。它用来诊断大部分的内存性能问题和其他相关问题。
![](http://blog.linoxide.com/wp-content/uploads/2014/10/vmstat_delay_5.png)
**更多信息** 参考 [vmstat命令文章][11]。
#### 12. free - 内存统计信息 ####
free是另一个能够在终端中标准输出内存和交换空间使用的命令行工具。由于它的简易它经常用于快速查看内存使用或者是应用于不同的脚本和应用程序中。在这里你可以看到[这个小程序的许多应用][12]。几乎所有的系统管理员日常都会用这个工具。:-)
![](http://blog.linoxide.com/wp-content/uploads/2014/10/free_hs3.png)
#### 13. Htop - 更加友好的top ####
**Htop**基本上是一个top改善版本它能够显示更多的统计信息和更加多彩的方式同时允许你采用不同的方式进行排序它提供了一个**用户友好**的接口。
![](http://blog.linoxide.com/wp-content/uploads/2014/10/htop.png)
你可以找到 **更多的信息** 在 [关于htop和top的比较][13]文章中。
#### 14. ss - 更现代感的网络管理替代工具 ####
**ss**是**iproute2**包的一部分。iproute2趋向于替代一整套标准的**Unix网络**工具组件,它曾经用来完成[网络接口配置路由表和管理ARP表][14]任务。ss工具用来存储套接字统计信息也能够类似netstat一样显示信息同时也能显示更多TCP和状态信息。一些例子如下
# ss -tnap
# ss -tnap6
# ss -tnap
# ss -s
# ss -tn -o state established -p
#### 15. lsof - 列表显示打开的文件 ####
**lsof**命令,意为“**list open files**”, 用于在许多类Unix系统中显示所有打开状态的文件和打开它们的进程。在大部分Linux发行版和其他类Linux操作系统中系统管理员用它来检查不同的进程打开了哪些文件。你可以在这里找到更多的例子。
# lsof +p process_id
# lsof | less
    # lsof -u username
# lsof /etc/passwd
    # lsof -i TCP:ftp
    # lsof -i TCP:80
你可以找到 **更多例子** 在[lsof 文章][15]
#### 16. iftop - 类似top的网络连接工具 ####
**iftop**是一个基于网络信息的类似top的程序。它能够显示当前时刻按照**带宽使用**量或者上传或者下载量排序的**网络连接**状况。它同时提供了下载文件的预估完成时间。
![](http://blog.linoxide.com/wp-content/uploads/2014/10/iftop.png)
**更多信息**可以参考[网络流量iftop文章][16]
#### 17. iperf - 网络性能工具 ####
**iperf**是一个**网络测试**工具,能够创建**TCP**和**UDP**数据连接和测量该网络能够传输它们的**性能**。它支持调节关于时间,协议和缓冲等不同的参数。对于每一个测试,它会报告带宽,丢包和其他的一些参数。
![](http://blog.linoxide.com/wp-content/uploads/2014/10/iperf-e1413378331696.png)
如果你想用使用这个工具,可以参考这篇文章: [如何安装和使用iperf][17]
#### 18. Smem - 高级内存报表工具 ####
**Smem**是一个比较高级的**Linux**命令行工具,它提供关于系统中已经使用的和共享的实际内存,试图提供一个更为可靠地当前**内存**使用数据。
$ smem -m
$ smem -m -p | grep firefox
$ smem -u -p
$ smem -w -p
参考我们的文章:[Smem更多的例子][18]
### 图形化或基于Web的性能工具 ###
#### 19. Icinga - Nagios的社区分支版本 ####
**Icinga**是一个**开源免费**的网络监控程序作为Nagios的分支它获取了前者现存的大部分功能同时基于这些功能又增加了社区用户要求已久的功能和补丁。
![](http://blog.linoxide.com/wp-content/uploads/2014/10/Icinga-e1413377995731.png)
**更多信息**参考[安装和配置Icinga文章][19].
#### 20. Nagios - 最为流行的监控工具. ####
作为在Linux上使用最为广泛和流行的**监控方案**,它有一个守护程序用来收集不同进程和远程主机的信息,这些收集到的信息都通过功能强大**的web界面**进行呈现。
![](http://blog.linoxide.com/wp-content/uploads/2014/10/nagios-e1413305858732.png)
你可以在 **找到更多的信息** 在[如何安装nagios][20]
#### 21. Linux process explorer - Linux下的procexp ####
**Linux process explorer**是一个Linux下的图形化进程浏览工具。它能够显示不同的进程信息如进程数TCP/IP连接和每一个进程的性能指标。作为**Windows**下**procexp**在Linux的替代品是由**Sysinternals**开发的,其目标是相比**top**和**ps**用户体验更加的友好。
查看 [linux process explorer 文章][21]获取更多信息。
#### 22. Collectl - 性能监控工具 ####
你既可以通过交互的方式使用这个**性能监控**工具,也可以用它产生**报表**并通过web服务器来访问它在磁盘上的数据。它以一种**易读易管理**的文件格式,记录了**CPU磁盘内存网络网络文件系统进程slabs**等统计信息。
![](http://blog.linoxide.com/wp-content/uploads/2014/10/collectl.png)
**更多** 关于[Collectl的文章][22]。
#### 23. MRTG - 经典网络流量监控图形工具 ####
这是一个采用**rrdtool**的提供给用户图形化流量监控工具。作为**最早**的提供**图形化界面**的工具它被广泛应用在类Unix的操作系统中。查看我们关于[如何使用MRTG][23]的文章获取更多关于安装和配置的信息。
![](http://blog.linoxide.com/wp-content/uploads/2014/10/mrtg.png)
#### 24. Monit - 简单易用的监控工具 ####
**Monit**是一个用来**监控进程****系统加载****文件系统**和**目录文件**等的开源的Linux工具。你能够让它自动化维护和修复也能够在运行错误的情景下执行动作或者发邮件报告提醒系统管理员。如果你想要用这个工具你可以查看[如何使用Monit的文章][24]。
![](http://blog.linoxide.com/wp-content/uploads/2014/10/monit.png)
#### 25. Munin - 为服务器提供监控和提醒服务 ####
作为一个网络资源监控工具,**Munin**能够帮助分析**资源趋势**和**查看弱节点**以及导致产生**性能问题**的原因。开发此软件的团队希望它能够易用和用户体验友好。该软件是用Perl开发的同时采用**rrdtool**来绘制图形,使用了**web接口**进行呈现。开发人员推广此应用时声称“**插件化和易用**”目前已有500多个监控插件可结合使用。
**更多信息**可以在[关于Munin的文章][25]。
--------------------------------------------------------------------------------
via: http://linoxide.com/monitoring-2/linux-performance-monitoring-tools/
作者:[Adrian Dinu][a]
译者:[译者ID](https://github.com/andyxue)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://linoxide.com/author/adriand/
[1]:http://linoxide.com/monitoring-2/dstat-monitor-linux-performance/
[2]:http://linoxide.com/monitoring-2/guide-using-linux-atop/
[3]:http://linoxide.com/monitoring-2/install-nmon-monitor-linux-performance/
[4]:http://linoxide.com/linux-command/kernel-slab-cache-information/
[5]:http://linoxide.com/linux-command/linux-system-performance-monitoring-using-sar-command/
[6]:http://linoxide.com/monitoring-2/monitor-linux-saidar-tool/
[7]:http://linoxide.com/linux-command/linux-top-command-examples-screenshots/
[8]:http://linoxide.com/tools/sysdig-performance-linux-tool/
[9]:http://linoxide.com/linux-command/netstat-commad-with-all-variant-outputs/
[10]:http://linoxide.com/linux-how-to/network-traffic-capture-tcp-dump-command/
[11]:http://linoxide.com/linux-command/linux-vmstat-command-tool-report-virtual-memory-statistics/
[12]:http://linoxide.com/linux-command/linux-free-command/
[13]:http://linoxide.com/linux-command/linux-htop-command/
[14]:http://linoxide.com/linux-command/ss-sockets-network-connection/
[15]:http://linoxide.com/how-tos/lsof-command-list-process-id-information/
[16]:http://linoxide.com/monitoring-2/iftop-network-traffic/
[17]:http://linoxide.com/monitoring-2/install-iperf-test-network-speed-bandwidth/
[18]:http://linoxide.com/tools/memory-usage-reporting-smem/
[19]:http://linoxide.com/monitoring-2/install-configure-icinga-linux/
[20]:http://linoxide.com/how-tos/install-configure-nagios-centos-7/
[21]:http://sourceforge.net/projects/procexp/
[22]:http://linoxide.com/monitoring-2/collectl-tool-install-examples/
[23]:http://linoxide.com/tools/multi-router-traffic-grapher/
[24]:http://linoxide.com/monitoring-2/monit-linux/
[25]:http://linoxide.com/ubuntu-how-to/install-munin/

View File

@ -0,0 +1,186 @@
10个重要的Linux ps命令实战
================================================================================
作为Unix的衍生操作系统Linux拥有内建的用来查看当前进程的工具。这个工具能在命令行中使用。
### PS 命令是什么 ###
查看它的man手册可以看到ps命令能够给出当前系统中进程的快照。它能捕获系统在某一时刻的进程状态。如果你想实时更新这个状态可以使用top命令。
ps命令支持三种使用的语法格式
1. UNIX 风格,选项可以组合在一起,并且必须以连字符(-)开头
2. BSD 风格,选项可以组合在一起,但不能以连字符开头
3. GNU 风格的长选项,以两个连字符(--)开头
我们能够混用这几种风格但是可能会发生冲突。本文使用UNIX风格的ps命令。这里有在日常生活中使用较多的ps命令的例子。
### 1. 不加参数执行ps命令 ###
这是一个基本的 **ps** 使用。只要在控制台中执行这个命令并查看结果。
![不加选项执行ps命令](http://blog.linoxide.com/wp-content/uploads/2014/10/ps_no_options.png)
结果默认会显示4列信息。
- PID: 运行命令(CMD)的进程编号
- TTY: 命令运行的位置
- TIME: 说明运行这个命令所用的CPU时间
- CMD: 作为当前进程运行的命令
这些信息在显示时未排序。
### 2. 显示所有当前进程 ###
使用 **-a** 参数。**-a 代表 all**。同时加上x参数会显示没有控制终端的进程。
$ ps -ax
这个命令的结果或许会很长。为获得简练的信息可以结合less命令和管道来使用。
$ ps -ax | less
![ps all 信息](http://blog.linoxide.com/wp-content/uploads/2014/10/ps_ax.png)
### 3. 根据用户过滤进程 ###
在需要查看特定用户的进程的情况下,我们可以使用 **-u** 参数。比如我们要查看用户'pungki'的进程,可以通过下面的命令
$ ps -u pungki
![通过user过滤](http://blog.linoxide.com/wp-content/uploads/2014/10/ps__u.png)
### 4. 通过cpu和内存使用来过滤进程 ###
可以使用 **aux 参数**,来显示全面的信息:
$ ps -aux | less
![显示全面信息](http://blog.linoxide.com/wp-content/uploads/2014/10/ps_aux.png)
当结果很长时我们可以使用管道和less命令来筛选。
默认的结果集是未排好序的。可以通过 **--sort**选项来排序。
根据 **CPU 使用**来升序排序
$ ps -aux --sort -pcpu | less
![根据cpu使用排序](http://blog.linoxide.com/wp-content/uploads/2014/10/ps_aux_sort_cpu.png)
根据 **内存使用** 来升序排序
$ ps -aux --sort -pmem | less
![根据内存使用来排序](http://blog.linoxide.com/wp-content/uploads/2014/10/ps_aux_sort_mem.png)
我们也可以通过管道显示前10个结果
$ ps -aux --sort -pcpu,+pmem | head -n 10
### 5. 通过进程name和id过滤 ###
使用 **-C 参数**后面跟你要找的进程的name。比如想显示一个名为getty的进程的信息就可以使用下面的命令
$ ps -C getty
![通过进程name和id过滤](http://blog.linoxide.com/wp-content/uploads/2014/10/ps_C.png)
如果想要看到更多的细节,我们可以使用-f参数来查看格式化的信息列表
$ ps -f -C getty
![通过进程name和id过滤](http://blog.linoxide.com/wp-content/uploads/2014/10/ps_C_f.png)
### 6. 根据线程来过滤进程 ###
如果我们想知道特定进程的线程,可以使用**-L 参数**后面加上特定的PID。
$ ps -L 1213
![根据线程来过滤进程](http://blog.linoxide.com/wp-content/uploads/2014/10/ps_L.png)
### 7. 分层显示进程 ###
使用 **-axjf** 参数。
    $ ps -axjf
![分层显示进程](http://blog.linoxide.com/wp-content/uploads/2014/10/ps_axjf.png)
或者可以使用另一个命令。
$ pstree
![分层显示进程](http://blog.linoxide.com/wp-content/uploads/2014/10/pstree.png)
### 8. 显示安全信息 ###
如果想要查看现在有谁登入了你的server。可以使用ps命令加上相关参数:
$ ps -eo pid,user,args
**参数 -e** 显示所有进程信息 **-o 参数**控制输出。**Pid**,**User 和 Args**参数显示**PID,运行应用的用户**和**运行的应用**。
![显示安全信息](http://blog.linoxide.com/wp-content/uploads/2014/10/ps_security_1.png)
能够与**-e 参数** 一起使用的关键字是**args, cmd, comm, command, fname, ucmd, ucomm, lstart, bsdstart and start**。
### 9. 格式化输出root用户创建的进程 ###
系统管理员想要查看由root用户运行的进程和这个进程的其他相关信息时可以通过下面的命令:
$ ps -U root -u root u
**-U 参数**用来选择特定的用户ID(在userlist中存在的用户名或ID)。用户ID用来标识创建进程的用户。
**-u 参数**用来按有效用户ID(EUID)来筛选。
最后的**u**参数用来确定结果的输出格式,由**User, PID, %CPU, %MEM, VSZ, RSS, TTY, STAT, START, TIME and COMMAND**这几列组成。
这里有上面的命令的输出结果
![show real and effective User ID](http://blog.linoxide.com/wp-content/uploads/2014/10/ps_root_real_effective_ID.png)
### 10. 使用PS实时监控进程状态 ###
ps 命令会显示你系统当前的进程状态,但是这个结果是静态的。
当有一种情况我们需要像上面第四点中提到的那样通过CPU和内存的使用率来过滤进程并且希望结果能够每秒更新一次。为此我们可以**将ps命令和watch命令结合起来**。
$ watch -n 1 ps -aux --sort -pmem, -pcpu
![combine ps with watch](http://blog.linoxide.com/wp-content/uploads/2014/10/ps_watch_1.png)
并且可以通过**head**命令来进行限制。
$ watch -n 1 ps -aux --sort -pmem, -pcpu | head 20
![combine ps with watch](http://blog.linoxide.com/wp-content/uploads/2014/10/ps_watch_2.png)
这里的动态查看不像top或者htop命令。**但是使用ps的好处是**你能够定义显示的字段,你能够选择你想查看的字段。
举个例子,**如果你只想看名为'pungki'用户的信息**,你可以使用下面的命令:
$ watch -n 1 ps -aux -U pungki u --sort -pmem, -pcpu | head 20
![combine ps with watch](http://blog.linoxide.com/wp-content/uploads/2014/10/ps_watch_3.png)
### 结论 ###
你可能会使用ps命令来监控你的Linux系统。但是事实上你可以通过ps命令的参数来生成各种你需要的报表。
ps命令的另一个优势是ps是系统默认安装的。因此你只要用就行了。
可以通过 man ps来查看更多的参数。
--------------------------------------------------------------------------------
via: http://linoxide.com/how-tos/linux-ps-command-examples/
作者:[Pungki Arianto][a]
译者:[johnhoow](https://github.com/johnhoow)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://linoxide.com/author/pungki/

View File

@ -1,202 +0,0 @@
一些关于Java的句子
================================================================================
本文并没有什么新鲜的。我只是收集了一些不太重要的语句,
但这些语句可能对初级程序员来说很重要。也就是些无聊的旧东西。
如果以下的这些你都知道的话那么你对Java的了解已经超过了一个普通的家庭主妇。我不确定
了解所有的这些是否有意义。即使不知道其中的一些特性,你照样也可以成
为一个相当不错的Java程序员。然而本文中许多的新信息可能表明你还有很大
的发展空间。
### Java中有四种不同的访问类型(而不是三种) ###
这四种类型包括:`private`, package private (包访问权限,无修饰符,又叫
default, 译者注)。如果你在类中定义一个元素时并不加任何访问类型修饰符,
它将被默认设置为包访问权限(package private),而不是`public`或者`protected`。
![Java中有四种级别的访问类型](http://a3ab771892fd198a96736e50.javacodegeeks.netdna-cdn.com/wp-content/uploads/2014/11/four-levels-of-protection.png)
Java有四个级别的访问类型。
从另一方面来说,如果在接口中,你不指定方法的访问修饰符,那么它将是
`public`类型的。你也可以显式地指定它为`public`类型, 但这并不符合SONAR
(一个开源代码质量管理平台,译者注)的代码质量管理思想。
![访问类型是传递的](http://a3ab771892fd198a96736e50.javacodegeeks.netdna-cdn.com/wp-content/uploads/2014/11/protection-is-transitive.png)
访问类型是传递的
> 我的在Java中允许选择性的在接口的方法中写`public`的观点是一个技术错误。
同样你也可在接口的字段前写`final`,甚至是`static`。这说明这些字段可以
是非静态或非final吗不是的接口中的字段总是final和static的。
### Protected和package private是不一样的 ###
Package private或者default访问类型可以使得相同包(package)下其他类
能够访问这些字段或方法。保护类型(`protected`)的方法和字段可以被相同包
下的类使用(这和package private是一样的),同时它也可以被其他类使用,只
要那个类继承了这个包含这些`protected`方法或字段的类。
### Protected是可传递的 ###
如果有三个包a、b、c每个包都分别包含A、B、C类而且B继承AC继承B
么C可以访问A中的protected字段和方法。
package a;
public class A {
protected void a() {
}
}
package b;
import a.A;
public class B extends A {
protected void b() {
a();
}
}
package c;
import b.B;
public class C extends B {
protected void c() {
a();
}
}
### 接口不能定义protected方法 ###
很多人认为可以在接口中定义`protected`方法。如果你这么做的话,编译器很
快就会毫不留情地给你报错。顺便说下,这也就是我为什么认为允许`public`关键字在接口
中是一个技术错误,它会让人觉得还可以写其他访问类型似的。
![Private is the new public](http://a3ab771892fd198a96736e50.javacodegeeks.netdna-cdn.com/wp-content/uploads/2014/11/private-is-the-new-public.png)
private是一种新的public
如果你还想在一个接口的方法中声明protected方法,你可能还不理解封装的含义。
### 此private非彼private ###
私有变量和方法在编译单元内是可见的。如果这听起来太神秘的话,换种说法:几
乎就是在同一个Java文件中。这比“在它们被定义的类中”听起来好理解些。它们在
同一编译单元的类和接口中也是可见的。嵌套类可以看到类中封装的私有字段和
方法。然而,当前封闭类也可以看到该类下任何深度下类中的私有方法和字段。
package a;
class Private {
private class PrivateInPrivate {
private Object object;
}
Object m() {
return new PrivateInPrivate().object;
}
}
后者并不广为人知,事实上也很少有用到。
### Private是类的访问级别而不是对象 ###
如果你可以访问一个变量或方法,那么不管它属于哪个对象你都可以访问它。如
果`this.a`可以访问到,那`another.a`也可以访问到,只要它们是同一个类的
实例。同一个类的实例对象可以随意调用其他实例的变量或方法。不过这样的代
码一般都没有意义。现实生活中异常是`equals()`(由Eclipse生成 15 - 18行)
package a;
public class PrivateIsClass {
private Object object;
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
PrivateIsClass other = (PrivateIsClass) obj;
if (object == null) {
if (other.object != null)
return false;
} else if (!object.equals(other.object))
return false;
return true;
}
}
###静态(static)类可能有很多实例 ###
![Protection is not object level. It is class level.](http://a3ab771892fd198a96736e50.javacodegeeks.netdna-cdn.com/wp-content/uploads/2014/11/protection-is-class-feature.png)
访问类型不是对象级别的而是类级别的。
那些不支持有任何实例的类,通常被称为实用工具类。它们只包含静态字段和静
态方法以及唯一的不被该类的任何静态方法调用的私有构造函数。在Java 8中也
可以在接口中实现这样的“怪物”原文为beast译者注因为Java 8的
接口可以有静态方法。我不觉得我们应该使用这个特性而不是实用工具类。我也
不完全确信我们应该使用实用工具类。
静态类总是在另一个类或接口中。它们是嵌套类。他们是静态的,就像静态方法
不能访问类的实例方法和字段一样,静态内部类也不能访问嵌入类的实例方法和
字段。这是因为内部类没有嵌入类实例的引用(或者说是指针,如果你喜欢这么
叫的话)。而内部类(也即非静态嵌套类,译者注)则不同:没
有嵌入类的实例,它是无法被创建的。每个内部类的实例都具有嵌入类实例
的一个引用,因此一个内部类可以访问嵌入类的实例方法和字段。
因为这个原因,要是没有外部类的一个实例,你就不能创建一个内部类。当然,
如果是当前对象,也就是`this`的话,你就可以不需要指定它。在这种情况下你
可以使用`new`, 在这种情况下,也就是`this.new`的简式。在一个静态的环境中
,例如从一个静态方法你必须指定内部类应该创建哪个封闭类的实例。见第10
行:
package a;
class Nesting {
static class Nested {}
class Inner {}
void method(){
Inner inner = new Inner();
}
static void staticMethod(){
Inner inner = new Nesting().new Inner();
}
}
### 匿名类只能访问final变量 ###
![Variable has to be effective final](http://a3ab771892fd198a96736e50.javacodegeeks.netdna-cdn.com/wp-content/uploads/2014/11/effective-final.png)
变量必须是有效的final
当一个匿名类被定义在一个方法中,它可以访问局部变量如果该变量是`final`的
。但这说的有点模糊。它们不得不声明成final,他们还必须是有效final。这也
是Java 8中发布的一些特性。你不需要声明这些变量为`final`型,但它们仍然
必须是有效的`final`。
![Java 8 does not require final, only effective final](http://a3ab771892fd198a96736e50.javacodegeeks.netdna-cdn.com/wp-content/uploads/2014/11/java_ee_-_javabeantester_src_main_java_com_javax0_jbt_blog_java_-_eclipse_-__users_verhasp_github_javax_blog.png)
Java 8并不要求`final`只要求有效final.
为什么你需要对一些东西声明`final`,当它被检查必须是这样的。就像方法的参
数。它们也必须是`final`的。你说这不是Java所必须的吗?嗯,你是对的。这只
是一个良好的编程风格所必须的。
--------------------------------------------------------------------------------
via: http://www.javacodegeeks.com/2014/11/some-sentences-about-java.html
作者:[Peter Verhas][a]
译者:[a598799539](https://github.com/a598799539)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://www.javacodegeeks.com/author/peter-verhas/

View File

@ -0,0 +1,92 @@
Attic——重复数据删除备份程序
================================================================================
Attic是一个Python写的重复数据删除备份程序其主要目标是提供一种高效安全的数据备份方式。重复数据消除技术的使用使得Attic适用于日常备份因为它可以只存储那些修改过的数据。
### Attic特性 ###
#### 空间高效存储 ####
可变块大小重复数据消除技术用于减少检测到的冗余数据存储字节数量。每个文件被分割成若干可变长度组块,只有那些从没见过的组块会被压缩并添加到仓库中。
#### 可选数据加密 ####
所有数据可以使用256位AES加密进行保护并使用HMAC-SHA256验证数据完整性和真实性。
#### 离场备份 ####
Attic可以通过SSH将数据存储到安装有Attic的远程主机上。
#### 备份可作为文件系统挂载 ####
备份归档可作为用户空间文件系统挂载,用于便捷地验证和恢复备份。
#### 安装attic到ubuntu 14.10 ####
打开终端并运行以下命令
sudo apt-get install attic
### 使用Attic ###
#### 手把手实例教学 ####
在进行备份之前,首先要对仓库进行初始化:
$ attic init /somewhere/my-repository.attic
将~/src和~/Documents目录备份到名为Monday的归档
$ attic create /somwhere/my-repository.attic::Monday ~/src ~/Documents
第二天创建一个新的名为Tuesday的归档
$ attic create --stats /somwhere/my-repository.attic::Tuesday ~/src ~/Documents
该备份将更快些,也更小些,因为只有之前从没见过的新数据会被存储。--stats选项会让Attic输出关于新创建的归档的统计数据比如唯一数据即不和其它归档共享的数据的数量
归档名Tuesday
归档指纹387a5e3f9b0e792e91ce87134b0f4bfe17677d9248cb5337f3fbf3a8e157942a
开始时间: Tue Mar 25 12:00:10 2014
结束时间: Tue Mar 25 12:00:10 2014
持续时间: 0.08 seconds
文件数量: 358
最初大小 压缩后大小 重复数据删除后大小
本归档: 57.16 MB 46.78 MB 151.67 kB
所有归档114.02 MB 93.46 MB 44.81 MB
列出仓库中所有归档:
$ attic list /somewhere/my-repository.attic
Monday Mon Mar 24 11:59:35 2014
Tuesday Tue Mar 25 12:00:10 2014
列出Monday归档的内容
$ attic list /somewhere/my-repository.attic::Monday
drwxr-xr-x user group 0 Jan 06 15:22 home/user/Documents
-rw-r--r-- user group 7961 Nov 17 2012 home/user/Documents/Important.doc
恢复Monday归档
$ attic extract /somwhere/my-repository.attic::Monday
通过手动删除Monday归档恢复磁盘空间
$ attic delete /somwhere/my-backup.attic::Monday
详情请查阅[Attic文档][1]
--------------------------------------------------------------------------------
via: http://www.ubuntugeek.com/attic-deduplicating-backup-program.html
作者:[ruchi][a]
译者:[GOLinux](https://github.com/GOLinux)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://www.ubuntugeek.com/author/ubuntufix
[1]:https://attic-backup.org/index.html

View File

@ -0,0 +1,208 @@
在Linux上使用Python和Flask创建你的第一个应用
================================================================================
![](http://techarena51.com/wp-content/uploads/2014/12/python-logo.png)
无论你在Linux上是娱乐还是工作这对你而言都是一个使用Python来编程的好机会。回想在大学我希望他们教我的是Python而不是Java它学起来很有趣且在像yum包管理器这样的实际应用中很有用。
本篇教程中我会带你使用python和一个称为flask的微型框架来构建一个简单的应用来显示诸如[每个进程的内存使用][1]CPU百分比之类有用的信息。
### 前提 ###
Python基础、列表、类、函数、模块。
HTML/CSS (基础)
学习这篇教程你不必是一个python高级开发者但是首先我建议你阅读https://wiki.python.org/moin/BeginnersGuide/NonProgrammers。
### I在Linux上安装Python 3 ###
在大多数Linux发行版上Python是默认安装的。下面的命令可以让你看到安装的版本。
[root@linux-vps ~]# python -V
Python 2.7.5
我们会使用3.x的版本来构建我们的app。根据[Python.org][2]所说这版本上面所有的改进都不向后兼容Python 2。
**注意**: 在开始之前我强烈建议你在虚拟机中尝试这个教程因为Python是许多Linux发行版的核心组件任何意外都可能会损坏你的系统。
这步是基于红帽的版本如CentOS6和7基于Debian的版本如UbuntuMint和Raspbian可以跳过这步Python 3应该默认已经安装了。如果没有安装请用apt-get而不是yum来安装下面相应的包。
[leo@linux-vps] yum groupinstall 'Development Tools'
[leo@linux-vps] yum install -y zlib-dev openssl-devel sqlite-devel bzip2-devel
[leo@linux-vps] wget https://www.python.org/ftp/python/3.4.2/Python-3.4.2.tgz
[leo@linux-vps] tar -xvzf Python-3.4.2.tgz
[leo@linux-vps] cd Python-3.4.2
[leo@linux-vps] ./configure
[leo@linux-vps] make
# make altinstall is recommended as make install can overwrite the current python binary,
[leo@linux-vps] make altinstall
成功安装后你应该可以用下面的命令进入Python3.4的shell了。
[leo@linux-vps]# python3.4
Python 3.4.2 (default, Dec 12 2014, 08:01:15)
[GCC 4.8.2 20140120 (Red Hat 4.8.2-16)] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> exit ()
### 使用pip来安装包 ###
Python有它自己的包管理器与yum和apt-get相似。你将需要它来下载、安装和卸载包。
[leo@linux-vps] pip3.4 install "packagename"
[leo@linux-vps] pip3.4 list
[leo@linux-vps] pip3.4 uninstall "packagename"
### Python虚拟环境 ###
在Python中虚拟环境是一个存放项目依赖的目录。将依赖不同的项目隔离在各自的虚拟环境中是一个好主意。这还可以让你不用sudo命令就能安装包。
[leo@linux-vps] mkdir python3.4-flask
[leo@linux-vps] cd python3.4-flask
[leo@linux-vps python3.4-flask] pyvenv-3.4 venv
要创建虚拟环境你需要使用“pyvenv-3.4”命令。这会在venv文件夹的内部创建一个名为lib的目录这里会安装项目所依赖的包。这里同样会创建一个bin文件夹容纳该环境下的pip和python可执行文件。
### 为我们的Linux系统信息项目激活虚拟环境 ###
[leo@linux-vps python3.4-flask] source venv/bin/activate
[leo@linux-vps python3.4-flask] which pip3.4
~/python3.4-flask/venv/bin/pip3.4
[leo@linux-vps python3.4-flask] which python3.4
~/python3.4-flask/venv/bin/python3.4
### 使用pip安装flask ###
让我们继续安装第一个模块flask框架它可以处理路由和渲染我们app的模板。
[leo@linux-vps python3.4-flask]pip3.4 install flask
### 在flask中创建第一个应用 ###
第一步创建你app的目录
[leo@linux-vps python3.4-flask] mkdir app
[leo@linux-vps python3.4-flask]mkdir app/static
[leo@linux-vps python3.4-flask]mkdir app/templates
在python3.4-flask文件夹中创建一个名为app的文件夹它包含了两个子文件夹“static”和“templates”。我们的Python脚本会在app文件夹像css/js这类文件会在static文件夹template文件夹会包含我们的html模板。
第二步在app文件夹内部创建一个初始化文件。
    [leo@linux-vps python3.4-flask] vim app/__init__.py
from flask import Flask
app = Flask(__name__)
from app import index
这个文件创建一个Flask的新的实例并加载我们存储在index.py文件中的python程序这个文件我们之后会创建。
[leo@linux-vps python3.4-flask]vim app/index.py
from app import app
@app.route('/')
def index():
import subprocess
cmd = subprocess.Popen(['ps_mem'],stdout=subprocess.PIPE,stderr=subprocess.PIPE)
out,error = cmd.communicate()
memory = out.splitlines()
return
flask中的路由由路由装饰器处理。这用于给函数绑定URL。
@app.route('/')
@app.route('/index')
要在python中运行shell命令你可以使用Subprocess模块中的Popen类。
subprocess.Popen(['ps_mem'],stdout=subprocess.PIPE,stderr=subprocess.PIPE)
这个类会使用一个列表作为参数,列表的第一项默认是可执行的程序,下一项会是参数,这里是个另外一个例子。
    subprocess.Popen(['ls', '-l'],stdout=subprocess.PIPE,stderr=subprocess.PIPE)
stdout和stderr会相应地存储命令的输出和错误。你可以使用Popen的communicate方法来访问输出了。
out,error = cmd.communicate()
要更好地用html模板显示输出我会使用splitlines()方法,
memory = out.splitlines()
关于subprocess模块更多的信息会在教程的最后给出。
第三步创建一个html模板来显示我们命令的输出。
要做到这个我们使用flask中的Jinja2模板引擎来为我们渲染。
最后你的index.py文件应该看起来像这样
from flask import render_template
from app import app
def index():
import subprocess
cmd = subprocess.Popen(['ps_mem'],stdout=subprocess.PIPE,stderr=subprocess.PIPE)
out,error = cmd.communicate()
memory = out.splitlines()
return render_template('index.html', memory=memory)
现在在你的模板目录下创建一个index.html模板flask会自动搜索这个目录下的模板。
[leo@linux-vps python3.4-flask]vim app/templates/index.html
Memory usage per process
{% for line in memory %}
{{ line.decode('utf-8') }}
{% endfor %}
Jinja2模板引擎允许你使用“{{ … }}”分隔符来打印结果,{% … %}来做循环和赋值。我使用“decode()”方法来格式化。
第四步运行app
[leo@linux-vps python3.4-flask]vim run.py
from app import app
app.debug = True
app.run(host='174.140.165.231', port=80)
上面的代码会在debug模式下运行app。如果你不写IP地址和端口默认则是localhost:5000。
[leo@linux-vps python3.4-flask] chmod +x run.py
[leo@linux-vps python3.4-flask] python3.4 run.py
![](http://techarena51.com/wp-content/uploads/2014/12/install-python3-flask.png)
我已经加了更多的代码来显示CPU、I/O和平均负载。
![](http://techarena51.com/wp-content/uploads/2014/12/install-python3-flask-on-linux.png)
你可以在[这里][3]浏览代码。
这是一个对flask的简短教程我建议你阅读下面的教程和文档来更深入地了解。
http://flask.pocoo.org/docs/0.10/quickstart/#
https://docs.python.org/3.4/library/subprocess.html#popen-constructor
http://blog.miguelgrinberg.com/post/the-flask-mega-tutorial-part-i-hello-world
--------------------------------------------------------------------------------
via: http://techarena51.com/index.php/how-to-install-python-3-and-flask-on-linux/
作者:[Leo G][a]
译者:[geekpi](https://github.com/geekpi)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://techarena51.com/
[1]:http://techarena51.com/index.php/linux-memory-usage/
[2]:https://wiki.python.org/moin/Python2orPython3
[3]:https://github.com/Leo-g/python-flask-cmd

View File

@ -0,0 +1,163 @@
如何在Linux有效地屏蔽不需要的IP
================================================================================
你可能需要在Linux的不同环境下屏蔽IP地址。比如作为一个终端用户你可能想要免受间谍软件或者IP追踪的困扰或者当你在运行P2P软件时你可能想要过滤反P2P活动的网络连接。如果你是一名系统管理员你可能想要禁止垃圾IP地址访问你们的生产邮件服务器。或者你因某些原因想要禁止某些国家访问web服务。然而在许多情况下你的IP地址屏蔽列表可能会很快地增长到几万个IP。该如何处理这种情况呢
### Netfilter/IPtables 的问题 ###
在Linux中可以很简单地用netfilter/iptables框架禁止IP地址
$ sudo iptables -A INPUT -s 1.1.1.1 -p TCP -j DROP
如果你想要完全屏蔽一个IP地址你可以用下面的命令很简单地做到
$ sudo iptables -A INPUT -s 1.1.2.0/24 -p TCP -j DROP
然而当你有1000个独立IP地址且不带CIDR无类别域间路由前缀你该怎么做你要有1000条iptable规则这显然无法扩展。
$ sudo iptables -A INPUT -s 1.1.1.1 -p TCP -j DROP
$ sudo iptables -A INPUT -s 2.2.2.2 -p TCP -j DROP
$ sudo iptables -A INPUT -s 3.3.3.3 -p TCP -j DROP
. . . .
### 什么是IP集? ###
这时候就是[IP集][1]登场了。IP集是一个内核特性它允许多个独立IP地址、MAC地址或者甚至是端口号编码并有效地存储在位图/哈希内核数据结构中。一旦IP集创建之后你可以创建一条iptable规则来匹配这个集合。
你应该马上看见IP集合的好处了它可以让你用一条iptable规则匹配多个IP地址你可以用多个IP地址和端口号的方式来构造IP集并且可以动态地更新规则而不影响性能。
### 在Linux中安装IPset工具 ###
为了创建和管理IP集你需要使用称为ipset的用户空间工具。
要在Debian、Ubuntu或者Linux Mint上安装
$ sudo apt-get install ipset
Fedora或者CentOS/RHEL 7上安装
$ sudo yum install ipset
### 使用IPset命令禁止IP ###
让我通过简单的示例告诉你该如何使用ipset命令。
首先让我们创建一条新的IP集名为banthis名字任意
$ sudo ipset create banthis hash:net
第二个参数(hash:net)是必须的代表的是集合的类型。IP集有[多个类型][2]。hash:net类型的IP集使用哈希来存储多个CIDR块。如果你想要在一个集合中存储独立的IP地址你可以使用hash:ip类型。
一旦创建了一个IP集之后你可以用下面的命令来检查
$ sudo ipset list
![](https://farm8.staticflickr.com/7483/15380353464_825dbc45c2_z.jpg)
这显示了一个可用的IP集合列表并有包含了集合成员的详细信息。默认上每个IP集合可以包含65536个元素这里是CIDR块。你可以通过追加"maxelem N"选项来增加限制。
$ sudo ipset create banthis hash:net maxelem 1000000
现在让我们来增加IP块到这个集合中
$ sudo ipset add banthis 1.1.1.1/32
$ sudo ipset add banthis 1.1.2.0/24
$ sudo ipset add banthis 1.1.3.0/24
$ sudo ipset add banthis 1.1.4.10/24
你会看到集合成员已经改变了。
$ sudo ipset list
![](https://farm8.staticflickr.com/7518/15380353474_4d6b9dbf63_z.jpg)
现在是时候去创建一个使用IP集的iptable规则了。这里的关键是使用"-m set --match-set <name>"选项。
现在让我们创建一条让之前那些IP块不能通过80端口访问web服务的iptable规则。可以通过下面的命令
$ sudo iptables -I INPUT -m set --match-set banthis src -p tcp --destination-port 80 -j DROP
如果你想你可以保存特定的IP集到一个文件中以后可以从文件中还原
$ sudo ipset save banthis -f banthis.txt
$ sudo ipset destroy banthis
$ sudo ipset restore -f banthis.txt
上面的命令中我使用了destory选项来删除一个已有的IP集来见证我可以还原它。
### 自动IP地址禁用 ###
现在你应该看到IP集合的强大了。维护IP黑名单是一件繁琐和费时的工作。实际上有很多免费或者收费的服务可以帮你完成这件事。另外让我们看看如何自动将IP黑名单加到IP集中。
首先让我们从[iblocklist.com][3]抓取免费的黑名单这个网站u有不同的免费和收费的名单。免费的版本是P2P格式。
接下来我要使用一个名为iblocklist2ipset的开源Python工具来将P2P格式的黑名单转化成IP集。
首先你需要安装了pip参考[这个指导][4]来安装pip
使用的下面命令安装iblocklist2ipset。
$ sudo pip install iblocklist2ipset
在一些发行版如Fedora你可能需要运行
$ sudo python-pip install iblocklist2ipset
现在到[iblocklist.com][5]抓取任何一个P2P列表的URL比如"level1"列表)。
![](https://farm8.staticflickr.com/7523/15976824856_80632f35e1_z.jpg)
粘帖URL到下面的命令中。
$ iblocklist2ipset generate \
--ipset banthis "http://list.iblocklist.com/?list=ydxerpxkpcfqjaybcssw&fileformat=p2p&archiveformat=gz" \
> banthis.txt
上面的命令运行之后你会得到一个名为banthis.txt的文件。如果查看它的内容你会看到像这些
create banthis hash:net family inet hashsize 131072 maxelem 237302
add banthis 1.2.4.0/24
add banthis 1.2.8.0/24
add banthis 1.9.75.8/32
add banthis 1.9.96.105/32
add banthis 1.9.102.251/32
add banthis 1.9.189.65/32
add banthis 1.16.0.0/14
你可以用下面的ipset命令来加载这个文件
$ sudo ipset restore -f banthis.txt
现在可以查看自动创建的IP集
$ sudo ipset list banthis
在写这篇文章时候“level1”类表包含了237,000个屏蔽的IP列表。你可以看到很多IP地址已经加入到IP集中了。
最后创建一条iptable命令来屏蔽它们
### 总结 ###
这篇文章中我描述了你该如何用强大的ipset来·屏蔽不想要的IP地址。同时结合了第三方工具iblocklist2ipset这样你就可以流畅地维护你的IP屏蔽列表了。对于那些对于ipset的速度提升好奇的人来说下图显示了iptables在使用和不使用ipset的基准测试结果。
![](https://farm8.staticflickr.com/7575/15815220998_e1935c94c0_z.jpg)
告诉我你多么喜欢这个。:-)
--------------------------------------------------------------------------------
via: http://xmodulo.com/block-unwanted-ip-addresses-linux.html
作者:[Dan Nanni][a]
译者:[geekpi](https://github.com/geekpi)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://xmodulo.com/author/nanni
[1]:http://ipset.netfilter.org/
[2]:http://ipset.netfilter.org/features.html
[3]:https://www.iblocklist.com/lists.php
[4]:http://ask.xmodulo.com/install-pip-linux.html
[5]:https://www.iblocklist.com/lists.php
[6]:http://daemonkeeper.net/781/mass-blocking-ip-addresses-with-ipset/

View File

@ -0,0 +1,120 @@
如何在Linux终端下安排约会和待办事项
================================================================================
如果你是一个享受linux终端的系统管理员但同样需要一种方法来安排每天最重要的任务、约会和会议你会发现[calcurse][1]是一个很有用的工具。calcurse包含了日历、一个待办事项管理、一个调度程序和一个可配置的通知系统这些都集成进了一个软件中使用的是基于ncurses的界面。同时它不会限制你在终端中你可以将你的日历和笔记导出成可以打印的格式。
本篇文章我们会探索如何在Linux上安装calcurse并且教你如何利用它的特性。
### 在Linux上安装Calcurse ###
calcurse在大多数Linux发行版的标准仓库都有。万一在你的发行版上没有比如CentOS/RHEL一旦你安装了gcc和ncurses开发文件后就可以很简单地从源码安装。
Debian、Ubuntu或者Linux Mint
# aptitude install calcurse
Fedora
# yum install calcurse
CentOS/RHEL
# yum install gcc ncurses-devel
# wget http://calcurse.org/files/calcurse-3.2.1.tar.gz
# tar xvfvz calcurse-3.2.1.tar.gz
# cd calcurse-3.2.1
# ./configure
# make
# make install
### 启动 Calcurse ###
安装完成后你就可以用下面的命令启动calcurse了
$ calcurse
你将会看到下面的空白界面。如果配色不吸引你,你可以以后改变它们。
![](https://farm8.staticflickr.com/7567/15410270544_0af50a4eb6_c.jpg)
我们现在可以按下回车-q- 再次按下回车- y来退出主界面。这个序列会告诉底部的菜单退出、保存当前的笔记并确认退出。
当我们第一次运行calcurse时它会在我们的家目录下创建如下的目录结构
![](https://farm8.staticflickr.com/7482/15845194188_2ba15035e7_o.png)
这里是每一项的简要描述:
- **apts** 文件包含了用户所有的约会和事项;**todo** 文件包含了所有的待办列表。
- **conf**文件,如你所想的那样,包含当前用户的独立设置。
- **keys**文件包含了用户定义的按键绑定比如q或者Q退出x或者X导出内容等等
- 在**notes**子目录你会看到包含了笔记描述的文件,你可以附到任何一个安排事项中。
### 改变配色 ###
要改变配色,按照下面的步骤:
![](https://farm9.staticflickr.com/8595/16006755476_5289384f81_z.jpg)
使用最后一幅图的按键绑定来选择前景色和背景色配置,来更好地适应你的需求:
![](https://farm8.staticflickr.com/7499/15845274420_70bb95c221_b.jpg)
### 添加约会和待办任务 ###
在前面的选项卡中浏览命令菜单时我们看到按下o可以从一个菜单到下一个菜单。我们可以把第二个菜单作为**安排编辑菜单**。
![](https://farm9.staticflickr.com/8634/16031851732_b947951f76_c.jpg)
那么我们用Ctrl + A 和 Ctrl + T组合键为今天添加一个新的约会和一个新的待办任务。如果我们希望为约会指定一个具体的日期而不是今天我们可以在添加约会和待办事项前使用Ctrl + L (+1 天)、Ctrl + H (-1 天)、Ctrl + J (+1 周)和Ctrl + K (-1 周)组合键。
![](https://farm8.staticflickr.com/7498/15410270594_dc282928ac_z.jpg)
添加待办任务的步骤是相似的只是用Ctrl + T之前已经解释了
![](https://farm8.staticflickr.com/7520/15845386020_9799fe7378_o.png)
你还会被要求输入一个优先级,这样待办任务就会显示在主页上了:
![](https://farm8.staticflickr.com/7498/15413012243_e081b4e0b3_o.png)
你现在可以验证待办任务和约会已经相应地添加到了.calcurse文件夹下的todo和apts文件中了
![](https://farm8.staticflickr.com/7569/16030583401_0a07d007aa_z.jpg)
注意你可以使用你最喜欢的编辑器或者菜单底部的calcurse屏幕来编辑这些文件。你可以按下TAB来在不同的面板间切换并选择你想要编辑的项目
![](https://farm9.staticflickr.com/8663/16032536475_2fd68e16bf_z.jpg)
### 为事项设置通知 ###
你可以在通知菜单下配置通知。按照和改变配色时相同的步骤操作,但是选择**Notify**而不是**Colour**
![](https://farm8.staticflickr.com/7569/15412900863_eaf2767e19_z.jpg)
假设你想要设置email通知。按下数字5来编辑**notify-bar_command**的值:
![](https://farm8.staticflickr.com/7531/16030583451_6d116b5f63_z.jpg)
按照上面的设置之后如果这个任务被标为重要那么root@localhost会在300秒或者5分钟后收到邮件通知这会发生在下一个安排的任务之前。如果你想要即使calcurse不在运行也启用这个功能那么将notify-daemon_enable设成yes。在本例中dev2是本机的主机名。
![](https://farm8.staticflickr.com/7552/16031851862_afbf1937d0_z.jpg)
请注意出于演示目的,我已经在这篇教程中改变了原始约会的开始和/或者结束时间。
### 总结 ###
本篇教程中我们展示了如何设置一个多样化的调度器和提醒器来帮助你组织每日的活动和提前安排重要的事项。你或许还要calcurse的[PDF 手册][2],还请随意在下面的评论中提出你的疑问。欢迎你的评论,我也很高兴看到这些。
--------------------------------------------------------------------------------
via: http://xmodulo.com/schedule-appointments-todo-tasks-linux-terminal.html
作者:[Gabriel Cánepa][a]
译者:[geekpi](https://github.com/geekpi)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://xmodulo.com/author/gabriel
[1]:http://calcurse.org/
[2]:http://calcurse.org/files/manual.pdf

View File

@ -0,0 +1,97 @@
一个用%显示Linux命令进度和预计完成时间的伟大工具
================================================================================
Coreutils Viewer**cv**是一个简单的程序它可以用于显示任何核心组件命令的进度。它使用文件描述符信息来确定一个命令的进度比如cp命令。**cv**之美在于它能够和其它Linux命令一起使用比如你所知道的watch以及I/O重定向命令。这样你就可以在脚本中使用或者你能想到的所有方式别让你的想象力束缚住你。
### 安装 ###
你可以从cv的[github仓库那儿][1]下载所需的源文件。把zip文件下载下来后将它解压缩然后进入到解压后的文件夹。
该程序依赖于**ncurses library**。如果你已经在你的Linux系统中安装了ncurses那么cv的安装过程对你而言就是那么得轻松写意。
通过以下两个简单步骤来进行编译和安装吧。
$ make
$ sudo make install
### 运行cv ###
要运行cv只需要像其它程序一样在命令行输入此命令即可。如果你没有执行make install而选择从当前目录中去运行那么你可以运行以下命令
$ ./cv
否则,就运行以下命令吧。
$ cv
如果没有核心组件命令在运行那么cv程序会退出并告诉你No coreutils is running。
![cv no command](http://blog.linoxide.com/wp-content/uploads/2014/11/cv-no-command.png)
要有效使用该程序,请在你系统上运行某个核心组件程序。在本例中,我们将使用**cp**命令。
当拷贝一个大文件时,你就可以看到进度了,以百分比显示。
![cv default](http://blog.linoxide.com/wp-content/uploads/2014/11/cv-default.png)
### 添加选项到cv ###
你也可以添加几个选项到cv命令就像其它命令一样。一个有用的选项是让你了解到拷贝或移动大文件时的预计剩余时间。
添加**-w**选项,它会帮你做以上这些事。
$ cv -w
![cv estimated throughput](http://blog.linoxide.com/wp-content/uploads/2014/11/cv-estimated-throughput.png)
试着添加更多的命令选项吧。像下面这样添加其它选项:
$ cv -wq
### cv和watch命令 ###
watch是一个用于周期性运行程序并显示输出结果的程序。有时候你可能想要看看命令运行期间的状况而不想存储数据到日志文件中。在这种情况下watch就会派上用场了它可以和cv一起使用。
$ watch cv -qw
该命令将会显示所有运行着的核心组件命令的实例。它也会显示进度和预计完成时间。
![cv and watch](http://blog.linoxide.com/wp-content/uploads/2014/11/cv-and-watch-e1416519384265.png)
### 在日志文件中查看输出结果 ###
正如所承诺的那样你可以使用cv来重定向它的输出结果到一个日志文件。这功能在命令运行太快而看不到任何有意义的内容时特别有用。
要在日志文件中查看进度,你仅仅需要重定向输出结果,就像下面这样。
$ cv -w >> log.txt
要查看该命令的输出结果请用你喜爱的文本编辑器打开日志文件也可以用cat命令就像下面这样
$ cat log.txt
### 获得帮助 ###
如果你在任何地方受到阻碍你总是可以通过查阅手册页或使用help选项来获取帮助信息。
要获取帮助信息,可以使用带**-h**选项的cv命令。
$ cv -h
如果需要更多详细信息,那么手册页是个很不错的地方。
$ man cv
但是要获取上述手册页你必须执行make install来安装cv。
现在你的Linux工具箱中又多了个伟大的工具。
--------------------------------------------------------------------------------
via: http://linoxide.com/linux-command/tool-show-command-progress/
作者:[Allan Mbugua][a]
译者:[GOLinux](https://github.com/GOLinux)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://linoxide.com/author/allan/
[1]:http://github.com/Xfennec/cv

View File

@ -0,0 +1,73 @@
CentOS 7.x中正确设置时间与时钟服务器同步
================================================================================
**Chrony**是一个开源而自由的应用它能帮助你保持系统时钟与时钟服务器同步因此让你的时间保持精确。它由两个程序组成分别是chronyd和chronyc。chronyd是一个后台运行的守护进程用于调整内核中运行的系统时钟和时钟服务器同步。它确定计算机获取或丢失时间的比率并对此进行补偿。chronyc提供了一个用户界面用于监控性能并进行多样化的配置。它可以在chronyd实例控制的计算机上干这些事也可以在一台不同的远程计算机上干这些事。
在像CentOS 7之类基于RHEL的操作系统上已经默认安装有Chrony。
### Chrony配置 ###
当Chrony启动时它会读取/etc/chrony.conf配置文件中的设置。CentOS 7操作系统上最重要的设置有
**server** - 该参数可以多次用于添加时钟服务器,必须以"server "格式使用。一般而言,你想添加多少服务器,就可以添加多少服务器。
示例:
server 0.centos.pool.ntp.org
server 3.europe.pool.ntp.org
**stratumweight** - stratumweight指令设置当chronyd从可用源中选择同步源时每个层应该添加多少距离到同步距离。默认情况下CentOS中设置为0让chronyd在选择源时忽略层。
**driftfile** - chronyd程序的主要行为之一就是根据实际时间计算出计算机获取或丢失时间的比率将它记录到一个文件中是最合理的它会在重启后为系统时钟作出补偿甚至它可能有机会从时钟服务器获得好的估值。
**rtcsync** - rtcsync指令将启用一个内核模式在该模式中系统时间每11分钟会拷贝到实时时钟RTC
**allow / deny** - 这里你可以指定一台主机、子网或者网络以允许或拒绝NTP连接到扮演时钟服务器的机器。
示例:
allow 192.168.4.5
deny 192.168/16
**cmdallow / cmddeny** - 跟上面相类似只是你可以指定哪个IP地址或哪台主机可以通过chronyd使用控制命令
**bindcmdaddress** - 该指令允许你限制chronyd监听哪个网络接口的命令包由chronyc执行。该指令通过cmddeny机制提供了一个除上述限制以外可用的额外的访问控制等级。
示例:
bindcmdaddress 127.0.0.1
bindcmdaddress ::1
**makestep** - 通常chronyd将根据需求通过减慢或加速时钟使得系统逐步纠正所有时间偏差。在某些特定情况下系统时钟可能会漂移过快导致该回转过程消耗很长的时间来纠正系统时钟。该指令强制chronyd在调整期大于某个阈值时调停系统时钟但只有在因为chronyd启动时间超过指定限制可使用负值来禁用限制没有更多时钟更新时才生效。
### 使用chronyc ###
你也可以通过运行chronyc命令来修改设置命令如下
**accheck** - 检查NTP访问是否对特定主机可用
**activity** - 该命令会显示有多少NTP源在线/离线
![](http://blog.linoxide.com/wp-content/uploads/2014/10/chrony-activity.jpg)
**add server** - 手动添加一台新的NTP服务器。
**clients** - 报告已经访问过本服务器的客户端
**delete** - 手动移除NTP服务器或对等服务器
**settime** - 手动设置守护进程时间
**tracking** - 显示系统时间信息
你可以通过使用帮助命令查看完整的命令列表:
![](http://blog.linoxide.com/wp-content/uploads/2014/10/commands.jpg)
--------------------------------------------------------------------------------
via: http://linoxide.com/linux-command/chrony-time-sync/
作者:[Adrian Dinu][a]
译者:[GOLinux](https://github.com/GOLinux)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://linoxide.com/author/adriand/

Some files were not shown because too many files have changed in this diff Show More