Merge pull request #2 from LCTT/master

Merge from base
This commit is contained in:
ZhouJ-sh 2014-12-31 10:39:52 +08:00
commit 146f514cb1
123 changed files with 7004 additions and 4122 deletions

View File

@ -30,6 +30,26 @@ LCTT的组成
请阅读[WIKI](https://github.com/LCTT/TranslateProject/wiki)。
历史
-------------------------------
* 2013/09/10 倡议并得到了大家的积极响应,成立翻译组。
* 2013/09/11 采用github进行翻译协作并开始进行选题翻译。
* 2013/09/16 公开发布了翻译组成立消息后,又有新的成员申请加入了。并从此建立见习成员制度。
* 2013/09/24 鉴于大家使用Github的水平不一容易导致主仓库的一些错误因此换成了常规的fork+PR的模式来进行翻译流程。
* 2013/10/11 根据对LCTT的贡献划分了Core Translators组最先的加入成员是vito-L和tinyeyeser。
* 2013/10/12 取消对LINUX.CN注册用户的依赖在QQ群内、文章内都采用github的注册ID。
* 2013/10/18 正式启动man翻译计划。
* 2013/11/10 举行第一次北京线下聚会。
* 2014/01/02 增加了Core Translators 成员: geekpi。
* 2014/05/04 更换了新的QQ群198889102
* 2014/05/16 增加了Core Translators 成员: will.qian、vizv。
* 2014/06/18 由于GOLinux令人惊叹的翻译速度和不错的翻译质量升级为Core Translators成员。
* 2014/09/09 LCTT 一周年,做一年[总结](http://linux.cn/article-3784-1.html)。并将曾任 CORE 的成员分组为 Senior以表彰他们的贡献。
* 2014/10/08 提升bazz2为Core Translators成员。
* 2014/11/04 提升zpl1025为Core Translators成员。
* 2014/12/25 提升runningwater为Core Translators成员。
活跃成员
-------------------------------
@ -119,21 +139,3 @@ LCTT的组成
谢谢大家的支持!
历史
-------------------------------
* 2013/09/10 倡议并得到了大家的积极响应,成立翻译组。
* 2013/09/11 采用github进行翻译协作并开始进行选题翻译。
* 2013/09/16 公开发布了翻译组成立消息后,又有新的成员申请加入了。并从此建立见习成员制度。
* 2013/09/24 鉴于大家使用Github的水平不一容易导致主仓库的一些错误因此换成了常规的fork+PR的模式来进行翻译流程。
* 2013/10/11 根据对LCTT的贡献划分了Core Translators组最先的加入成员是vito-L和tinyeyeser。
* 2013/10/12 取消对LINUX.CN注册用户的依赖在QQ群内、文章内都采用github的注册ID。
* 2013/10/18 正式启动man翻译计划。
* 2013/11/10 举行第一次北京线下聚会。
* 2014/01/02 增加了Core Translators 成员: geekpi。
* 2014/05/04 更换了新的QQ群198889102
* 2014/05/16 增加了Core Translators 成员: will.qian、vizv。
* 2014/06/18 由于GOLinux令人惊叹的翻译速度和不错的翻译质量升级为Core Translators成员。
* 2014/09/09 LCTT 一周年,做一年[总结](http://linux.cn/article-3784-1.html)。并将曾任 CORE 的成员分组为 Senior以表彰他们的贡献。
* 2014/10/08 提升bazz2为Core Translators成员。
* 2014/11/04 提升zpl1025为Core Translators成员。

View File

@ -73,21 +73,21 @@ Nagios安装过程中可以设置邮件服务器安装后也可以进行自
root@mrtg:/etc/nagios3/conf.d/# vim linux-server.cfg
-
define host{
name linux-server ; 名称,需修改
name linux-server ; 名称,需修改
notifications_enabled 1
event_handler_enabled 1
flap_detection_enabled 1
failure_prediction_enabled 1
process_perf_data 1
process_perf_data 1
retain_status_information 1
retain_nonstatus_information 1
check_command example-host-check ; 检查所用脚本,需修改
check_interval 3 ; 连续检查的间隔,需修改
max_check_attempts 3 ; 产生邮件告警前的自检次数,需修改
notification_interval 0
check_command example-host-check ; 检查所用脚本,需修改
check_interval 3 ; 连续检查的间隔,需修改
max_check_attempts 3 ; 产生邮件告警前的自检次数,需修改
notification_interval 0
notification_period 24x7
notification_options d,u,r
contact_groups admins ; 邮件将要发送至的组,需修改
contact_groups admins ; 邮件将要发送至的组,需修改
register 0
}
@ -100,22 +100,22 @@ Nagios安装过程中可以设置邮件服务器安装后也可以进行自
root@mrtg:/etc/nagios3/conf.d/# vim cisco-device.cfg
-
define host{
name cisco-device ;名称,需修改
name cisco-device ;名称,需修改
notifications_enabled 1
event_handler_enabled 1
flap_detection_enabled 1
failure_prediction_enabled 1
process_perf_data 1
process_perf_data 1
retain_status_information 1
retain_nonstatus_information 1
check_command example-host-check ; 检查时使用的脚本,需修改
check_interval 3 ; 连续检查间隔,需修改
max_check_attempts 3 ; 产生邮件告警前的自检次数,需修改
notification_interval 0
notification_period 24x7
check_command example-host-check ; 检查时使用的脚本,需修改
check_interval 3 ; 连续检查间隔,需修改
max_check_attempts 3 ; 产生邮件告警前的自检次数,需修改
notification_interval 0
notification_period 24x7
notification_options d,u,r
contact_groups admins ; 邮件将要发至的组,需修改
register 0
contact_groups admins ; 邮件将要发至的组,需修改
register 0
}
### 添加主机 ###
@ -148,13 +148,13 @@ Nagios安装过程中可以设置邮件服务器安装后也可以进行自
root@mrtg:/etc/nagios3/conf.d/# vim hostgroups_nagios2.cfg
-
define hostgroup {
hostgroup_name linux-server ; 主机组名
hostgroup_name linux-server ; 主机组名
alias Linux Servers
members our-server ; 组员列表
}
define hostgroup {
hostgroup_name cisco-device ; 主机组名
hostgroup_name cisco-device ; 主机组名
alias Cisco Devices
members our-server ; comma separated list of members
}
@ -176,18 +176,18 @@ Nagios安装过程中可以设置邮件服务器安装后也可以进行自
-
define service {
hostgroup_name linux-server
service_description Linux Servers
check_command example-host-check
use generic-service
notification_interval 0 ; 初始化设置为0
service_description Linux Servers
check_command example-host-check
use generic-service
notification_interval 0 ; 初始化设置为0
}
define service {
hostgroup_name cisco-device
service_description Cisco Devices
check_command example-host-check
use generic-service
notification_interval 0 ; 初始化设置为0
check_command example-host-check
use generic-service
notification_interval 0 ; 初始化设置为0
}
### 联系人定义 ###
@ -205,12 +205,12 @@ Nagios安装过程中可以设置邮件服务器安装后也可以进行自
host_notification_options d,r
service_notification_commands notify-service-by-email
host_notification_commands notify-host-by-email
email root@localhost, sentinel@example.tst
email root@localhost, sentinel@example.tst
}
最后试运行初始化检测是否有配置错误。如果没有错误Nagios开始安全运行。
root@mrtg:~#nagios v /etc/nagios3/nagios.cfg
root@mrtg:~# nagios -v /etc/nagios3/nagios.cfg
root@mrtg:~# service nagios3 restart
## CentOS/RHEL上的Nagios配置 ##
@ -229,33 +229,33 @@ Redhat系统中Nagios的配置文件地址如下所示。
[root@mrtg objects]# vim templates.cfg
-
define host{
name linux-server
use generic-host
name linux-server
use generic-host
check_period 24x7
check_interval 3
retry_interval 1
check_interval 3
retry_interval 1
max_check_attempts 3
check_command example-host-check
notification_period 24x7
check_command example-host-check
notification_period 24x7
notification_interval 0
notification_options d,u,r
contact_groups admins
register 0
notification_options d,u,r
contact_groups admins
register 0
}
define host{
name cisco-router
use generic-host
define host{
name cisco-router
use generic-host
check_period 24x7
check_interval 3
retry_interval 1
check_interval 3
retry_interval 1
max_check_attempts 3
check_command example-host-check
notification_period 24x7
check_command example-host-check
notification_period 24x7
notification_interval 0
notification_options d,u,r
contact_groups admins
register 0
notification_options d,u,r
contact_groups admins
register 0
}
### 添加主机和主机组 ###
@ -267,7 +267,7 @@ Redhat系统中Nagios的配置文件地址如下所示。
-
#Adding Linux server
define host{
use linux-server
use linux-server
host_name our-server
alias our-server
address 172.17.1.23
@ -275,7 +275,7 @@ Redhat系统中Nagios的配置文件地址如下所示。
#Adding Cisco Router
define host{
use cisco-router
use cisco-router
host_name our-router
alias our-router
address 172.17.1.1
@ -310,10 +310,10 @@ Redhat系统中Nagios的配置文件地址如下所示。
告警要发送的邮件地址添加至Nagios中。
[root@objects objects]# vim contacts.cfg
-
-
define contact{
contact_name nagiosadmin
use generic-contact
use generic-contact
alias Nagios Admin
email nagios@localhost, sentinel@example.tst
}
@ -326,7 +326,7 @@ Redhat系统中Nagios的配置文件地址如下所示。
### 配置后访问Nagios ###
现在一切就绪可以开始Nagios之旅了。Ubuntu/Debian用户可以通过打开http://IP地址/nagios3网页访问NagiosCentOS/RHEL用户可以打开http://IP地址/nagios如http://172.17.1.23/nagios3来访问Nagios。“nagiosadmin”用户则需要认证来访问页面。
现在一切就绪可以开始Nagios之旅了。Ubuntu/Debian用户可以通过打开 http://IP地址/nagios3 网页访问NagiosCentOS/RHEL用户可以打开 http://IP地址/nagios ,如 http://172.17.1.23/nagios3 来访问Nagios。“nagiosadmin”用户则需要认证来访问页面。
[![](http://farm4.staticflickr.com/3834/11198394806_4f4a753778_z.jpg)][9]

View File

@ -1,18 +1,21 @@
为什么你的公司需要参与更多开源软件的编写
为什么公司需要参与更多开源软件的编写
================================================================================
>闭关锁国是产生不了创新的。
> 闭门造车是产生不了创新的。
![](http://a5.files.readwrite.com/image/upload/c_fill,h_900,q_70,w_1600/MTE5NDg0MDYxMTkxMzQxNTgz.jpg)
**华尔街日报 [称][1]有消息表明Zulily正在开发** 更多的内部软件,但实际上根本不是。多年前[Eric Raymond写道][2]全世界95%的软件写来用的而不是售卖。原因很多但是其中有一个比较突出正如Zulily的CIO Luke Friang所说几乎没有一个[非定制]软件解决方案能跟上我们的步伐。
[据华尔街日报称][1]有消息表明Zulily正在开发更多的内部软件,但实际上根本不是。多年前[Eric Raymond写道][2]全世界95%的软件写来用的而不是售卖。原因很多但是其中有一个比较突出正如Zulily的CIO Luke Friang所说几乎没有一个[非定制]软件解决方案能跟上我们的步伐。
20年前是这样现在也是这样。
但是有一点是不同的,这也正是华尔街日报完全忽略的地方。而这也正是历史上开发的内部软件始终保持着专有的原因了,因为她是一个公司的 核心竞争力。然而今天,越来越多的公司意识到另一面:开源内部软件将会比保持专有获益更多。
但是有一点是不同的,这也正是华尔街日报完全忽略的地方。而这也正是历史上开发的内部软件始终保持着专有的原因了,因为它是一个公司的核心竞争力。然而今天,越来越多的公司意识到另一面:开源内部软件将会比保持专有获益更多。
这也就是为什么你的公司需要为开源项目做出更多的贡献。记住是更多。
### 不寻常的那些年
我们刚刚经历了一个很不一样的20年那时很多软件的开发都是为了内部的使用大多数人的精力都放在由SAP和微软这样的厂商建立的应用广泛的企业级解决方案。
不管怎么说,这都是一个理论。
@ -27,32 +30,37 @@
然而,开源的道路上,一些公司也发现,有些销售商不能很好地描述他们所想要的,即便是很好理解的产品类别,如像内容管理系统,他们需要 知道的是产品亮点,而不希望是一个模子刻出来的。
所以顾客没了,他们中有一部分变成了供应商。
所以顾客没了,他们中有一部分转变成了供应商。
这也是常有的事,[O'Grady指出了][4]这一点。2010年O'Grady发现了一个有趣的现象“软件提供商正面对着一个强有力的市场竞争者他们 的顾客。”
### 自己动手,丰衣足食
这也是常有的事,[O'Grady指出了][4]这一点。2010年O'Grady发现了一个有趣的现象“软件提供商正面对着一个强有力的市场竞争者他们的顾客。”
回想一下今天的高科技大多数都是开源的几乎所有的项目一开始都是某些公司的内部项目或者仅仅是有些开发者的爱好LinuxGitHadoopCassandraMongoDBAndroid等等。没有一个项目起初是为了售卖而产生的。
相反,这些项目通常是由一些公司维护,他们使用开源的资源来构建软件并[完善软件][5]这主要是一些Web公司。不像以前银行医院和一些组织开发的软件只供内部使用他们开源源码。
虽然,[有些公司避免定制软件][6],因为他们不想自己维护它,开源(稍微)减轻了这些发展中公司来维护一个项目的压力。从而为项目发起人均摊项目的开发成本Yahoo开始于Hadoop但是现在最大的贡献者是Cloudera和Hortonworks。Facebook开始于Cassandra但是现在主要是靠DataStax在维护。等等。
虽然,[有些公司避免定制软件][6],因为他们不想自己维护它,开源(稍微)减轻了这些发展中公司来维护一个项目的压力。从而为项目发起人均摊项目的开发成本Yahoo建立了 Hadoop但是现在最大的贡献者是Cloudera和Hortonworks。Facebook 建立了 Cassandra但是现在主要是靠DataStax在维护。等等。
### 现在就走出来吧!
今天,真正的软件创新并不是闭门造车能造出来的,即便是可以,它也不会在那儿,开源项目颠覆了几十年的软件开发传统。
这不仅仅是一个人的一点点力量。
最好的开源项目都[发展得很快][7],但是这并不意味着别人在乎你的开源代码。[开放你的源码有显著的优缺点][8],其中一个很重要的优点是 很多伟大的开发者都希望为开源做出贡献:如果你也想找一个伟大的开发者跟你一起,你需要给他们一个开放的源代码来让他们工作。([Netflix][9]说)
最好的开源项目都[发展得很快][7],但是这并不意味着别人在乎你的开源代码。[开放你的源码有显著的优缺点][8],其中一个很重要的优点是很多伟大的开发者都希望为开源做出贡献:如果你也想找一个伟大的开发者跟你一起,你需要给他们一个开放的源代码来让他们工作。([Netflix][9]说)
但是,我们没有理由站在一边看,现在正是时候参与开源社区了,而不是一些不清楚的社区。是的,开源最大的参与者正是你们和你们的公司。 赶紧开始吧。
但是,我们没有理由站在一边看,现在正是时候参与开源社区了,而不是把“社区”妖魔化。是的,开源最大的参与者正是你们和你们的公司。 赶紧开始吧。
主要图片来自于Shutterstock. (注Shutterstock是美国的一家摄影图片网站。)
--------------------------------------------------------------------------------
via: http://readwrite.com/2014/08/16/open-source-software-business-zulily-erp-wall-street-journal
作者:[Matt Asay][a]
译者:[barney-ro](https://github.com/barney-ro)
校对:[校对者ID](https://github.com/校对者ID)
校对:[wxy](https://github.com/wxy)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出

View File

@ -8,23 +8,23 @@ Linux能够提供消费者想要的东西吗
Linux需要深深凝视自己的水晶球仔细体会那场浏览器大战留下的尘埃然后留意一下这点建议
如果你不能提供他们想要的,他们就会离开。
> 如果你不能提供他们想要的,他们就会离开。
而这种事与愿违的另一个例子是Windows 8。消费者不喜欢那套界面。而微软却坚持使用因为这是把所有东西搬到Surface平板上所必须的。相同的情况也可能发生在Canonical和Ubuntu Unity身上 -- 尽管它们的目标并不是单一独特地针对平板电脑来设计(所以,整套界面在桌面系统上仍然很实用而且直观)。
一直以来Linux开发者和设计者们看上去都按照他们自己的想法来做事情。他们过分在意“吃你自家的狗粮”这句话了。以至于他们忘记了一件非常重要的事情
没有新用户,他们的“根基”也仅仅只属于他们自己。
> 没有新用户,他们的“根基”也仅仅只属于他们自己。
换句话说,唱诗班不仅仅是被传道,他们也同时在宣传。让我给你看三个案例来完全掌握这一点。
- 多年以来有在Linux系统中替代活动目录Active Directory的需求。我很想把这个名称换成LDAP但是你真的用过LDAP吗那就是个噩梦。开发者们也努力了想让LDAP能易用一点但是没一个做到了。而让我很震惊的是这样一个从多用户环境下发展起来的平台居然没有一个能和AD正面较量的功能。这需要一组开发人员从头开始建立一个AD的开源替代。这对那些寻求从微软产品迁移的中型企业来说是非常大的福利。但是在这个产品做好之前他们还不能开始迁移。
- 多年以来,一直有在Linux系统中替代活动目录Active Directory的需求。我很想把这个名称换成LDAP但是你真的用过LDAP吗那就是个噩梦。开发者们也努力了想让LDAP能易用一点但是没一个做到了。而让我很震惊的是这样一个从多用户环境下发展起来的平台居然没有一个能和AD正面较量的功能。这需要一组开发人员从头开始建立一个AD的开源替代。这对那些寻求从微软产品迁移的中型企业来说是非常大的福利。但是在这个产品做好之前他们还不能开始迁移。
- 另一个从微软激发的需求是Exchange/Outlook。是我也知道许多人都开始用云。但是事实上中等和大型规模生意仍然依赖于Exchange/Outlook组合直到能有更好的产品出现。而这将非常有希望发生在开源社区。整个拼图的一小块已经摆好了虽然还需要一些工作- 群件客户端Evolution。如果有人能够从Zimbra拉出一个分支然后重新设计成可以配合Evolution甚至Thunderbird来提供服务实现Exchange的简单替代那这个游戏就不是这么玩了而消费者获得的利益将是巨大的。
- 便宜,便宜,还是便宜。这是大多数人都得咽下去的苦药片 - 但是消费者和生意就是希望便宜。看看去年一年Chromebook的销量吧。现在搜索一下Linux笔记本看能不能找到700美元以下的。而只用三分之一的价格就可以买到一个让你够用的Chromebook一个使用了Linux内核的平台。但是因为Linux仍然是一个细分市场很难降低成本。像红帽那种公司也许可以改变现状。他们也已经推出了服务器硬件。为什么不推出一些和Chromebook有类似定位但是却运行完整Linux环境的低价中档笔记本呢请看“[Cloudbook是Linux的未来吗][1]”)其中的关键是这种设备要低成本并且符合普通消费者的要求。不要站在游戏玩家/开发者的角度去思考了,记住普通消费者真正的需求 - 一个网页浏览器不会有更多了。这是Chromebook为什么可以这么轻松地成功。Google精确地知道消费者想要什么然后推出相应的产品。而面对Linux一些公司仍然认为他们吸引买家的唯一途径是高端昂贵的硬件。而有一点讽刺的是口水战中最经常听到的却是Linux只能在更慢更旧的硬件上运行。
最后Linux需要看一看乔布斯传Book Of Jobs搞清楚如何说服消费者们他们真正要的就是Linux。在生意上和在家里 -- 每个人都可以享受到Linux带来的好处。说真的开源社区怎么可能做不到这点呢Linux本身就已经带有很多漂亮的时髦术语标签稳定性,可靠性,安全性,云,免费 -- 再加上Linux实际已经进入到绝大多数人手中了只是他们自己还不清楚罢了。现在是时候让他们知道这一点了。如果你是用Android或者Chromebooks那么你就在用某种形式上的Linux。
最后Linux需要看一看乔布斯传Book Of Jobs搞清楚如何说服消费者们他们真正要的就是Linux。在公司里和在家里 -- 每个人都可以享受到Linux带来的好处。说真的开源社区怎么可能做不到这点呢Linux本身就已经带有很多漂亮的时髦术语标签稳定性、可靠性、安全性、云、免费 -- 再加上Linux实际已经进入到绝大多数人手中了只是他们自己还不清楚罢了。现在是时候让他们知道这一点了。如果你是用Android或者Chromebooks那么你就在用某种形式上的Linux。
搞清楚消费者需求一直以来都是Linux社区的绊脚石。而且我知道 -- 太多的Linux开发都基于某个开发者有个特殊的想法。这意味着这些开发都针对的“微型市场”。是时候无论如何让Linux开发社区能够进行全球性思考了。“一般用户有什么需求我们怎么满足他们”让我提几个最基本的点。
搞清楚消费者需求一直以来都是Linux社区的绊脚石。而且我知道 -- 太多的Linux开发都基于某个开发者有个特殊的想法。这意味着这些开发都针对的“微型市场”。是时候无论如何让Linux开发社区能够进行全球性思考了。“一般用户有什么需求我们怎么满足他们”让我提几个最基本的点。
一般用户想要:
@ -43,7 +43,7 @@ via: http://www.techrepublic.com/article/will-linux-ever-be-able-to-give-consume
作者:[Jack Wallen][a]
译者:[zpl1025](https://github.com/zpl1025)
校对:[校对者ID](https://github.com/校对者ID)
校对:[wxy](https://github.com/wxy)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出

View File

@ -1,16 +1,16 @@
为什么一些古老的编程语言不会消亡?
================================================================================
> 我们中意于我们所知道的。
> 我们钟爱我们已知的。
![](http://a4.files.readwrite.com/image/upload/c_fill,h_900,q_70,w_1600/MTIzMDQ5NjY0MTUxMjU4NjM2.jpg)
当今许多知名的编程语言已经都非常古老了。PHP 语言20年、Python 语言23年、HTML 语言21年、Ruby 语言和 JavaScript 语言已经19年C 语言更是高达42年之久。
这是没人能预料得到的,即使是计算机科学家 [Brian Kernighan][1] 也一样。他是写著第一本关于 C 语言的作者之一,到今天这本书还在印刷着。C 语言本身的发明者 [Dennis Ritchie][2] 是 Kernighan 的合著者,他于 2011 年已辞世。)
这是没人能预料得到的,即使是计算机科学家 [Brian Kernighan][1] 也一样。他是写著第一本关于 C 语言的作者之一,到今天这本书还在印刷着。C 语言本身的发明者 [Dennis Ritchie][2] 是 Kernighan 的合著者,他于 2011 年已辞世。)
“我依稀记得早期跟编辑们的谈话告诉他们我们已经卖出了5000册左右的量”最近采访 Kernighan 时他告诉我说。“我们设法做的更好。我没有想到的是在2014年的教科书里学生仍然在使用第一个版本的书。”
关于 C 语言的持久性特别显著的就是 Google 开发出了新的语言 Go,解决同一问题比用 C 语言更有效率
关于 C 语言的持久性特别显著的就是 Google 开发出了新的语言 Go,解决同一问题比用 C 语言更有效率。不过,我仍然很难想象 Go 能彻底杀死 C无论它有多么好
“大多数语言并不会消失或者至少很大一部分用户承认它们不会消失”他说。“C 语言仍然在一定的领域独领风骚,所以它很接地气。”
@ -20,13 +20,13 @@
分别来自普林斯顿大学和加州大学伯克利分校的研究者 Ari Rabkin 和 Leo Meyerovich 花费了两年时间来研究解决上面的问题。他们的研究报告,[《编程语言使用情况实例分析》][3],记录了对超过 200,000 个 Sourceforge 项目和超过 13,000 个程序员投票结果的分析。
他们主要的发现呢?大多数时候程序员选择的编程语言都是他们所熟悉的。
他们主要的发现是什么呢?大多数时候程序员选择的编程语言都是他们所熟悉的。
存在着我们使用的语言是因为我们经常使用他们,” Rabkin 告诉我。“例如:天文学家就经常使用 IDL [交互式数据语言]来开发他们的计算机程序,并不是因为它具有什么特殊的星级功能或其它特点,而是因为用它形成习惯了。他们已经用些语言构建出很优秀的程序了,并且想保持原状。”
这些我们使用的语言还继续存在是因为我们经常使用他们,” Rabkin 告诉我。“例如:天文学家就经常使用 IDL [交互式数据语言]来开发他们的计算机程序,并不是因为它具有什么特殊的亮点功能或其它特点,而是因为用它形成习惯了。他们已经用些语言构建出很优秀的程序了,并且想保持原状。”
换句话说,它部分要归功于创建其的语言的的知名度仍保留较大劲头。当然这并不意味着流行的语言不会变化。Rabkin 指出我们今天在使用的 C 语言就跟 Kernighan 第一次创建时的一点都不同,那时的 C 编译器跟现代的也不是完全兼容。
换句话说,它部分要归功于这些语言所创立的知名度仍保持较高。当然这并不意味着流行的语言不会变化。Rabkin 指出我们今天在使用的 C 语言就跟 Kernighan 第一次创建时的一点都不同,那时的 C 编译器跟现代的也不是完全兼容。
“有一个古老的关于工程师的笑话。工程师被问到哪一种编程语言人们会使用30年他说我不知道但它总会被叫做 Fortran” Rabkin 说到。“长期存活的语言跟他们在70年代和80年代刚设计出来的时候不一样了。人们通常都是在上面增加功能而不会删除功能因为要保持向后兼容但有些功能会被修正。”
“有一个古老的关于工程师的笑话。工程师被问到哪一种编程语言人们会使用30年他说我不知道但它总会被叫做 Fortran” Rabkin 说到。“长期存活的语言跟他们在70年代和80年代刚设计出来的时候不一样了。人们通常都是在上面增加功能,而不会删除功能,因为要保持向后兼容,但有些功能会被修正。”
向后兼容意思就是当语言升级后,程序员不仅可以使用升级语言的新特性,也不用回去重写已经实现的老代码块。老的“遗留代码”的语法规则已经不用了,但舍弃是要花成本的。只要它们存在,我们就有理由相信相关的语言也会存在。
@ -34,17 +34,17 @@
遗留代码指的是用过时的源代码编写的程序或部分程序。想想看,一个企业或工程项目的关键程序功能部分是用没人维护的编程语言写出来的。因为它们仍起着作用,用现代的源代码重写非常困难或着代价太高,所以它们不得不保留下来,即使其它部分的代码都变动了,程序员也必须不断折腾以保证它们能正常工作。
任何编程语言,存在了超过几十年时间都具有某种形式的遗留代码问题, PHP 也不例外。PHP 是一个很有趣的例子,因为它的遗留代码跟现在的代码明显不同,支持者或评论家都承认这是一个巨大的进步。
任何编程语言,存在了超过几十年时间都具有某种形式的遗留代码问题, PHP 也不例外。PHP 是一个很有趣的例子,因为它的遗留代码跟现在的代码明显不同,支持者或评论家都承认这是一个巨大的进步。
Andi Gutmans 是 已经成为 PHP4 的标准编译器的 Zend Engine 的发明者之一。Gutmans 说他和搭档本来是想改进完善 PHP3 的,他们的工作如此成功,以至于 PHP 的原发明者 Rasmus Lerdorf 也加入他们的项目。结果就成为了 PHP4 和他的后续者 PHP5 的编译器。
Andi Gutmans 是已经成为 PHP4 的标准编译器的 Zend Engine 的发明者之一。Gutmans 说他和搭档本来是想改进完善 PHP3 的,他们的工作如此成功,以至于 PHP 的原发明者 Rasmus Lerdorf 也加入他们的项目。结果就成为了 PHP4 和他的后续者 PHP5 的编译器。
因此,当今的 PHP 与它的祖先即最开始的 PHP 是完全不同的。然而,在 Gutmans 看来,在用古老的 PHP 语言版本写的遗留代码的地方一直存在着偏见以至于上升到整个语言的高度。比如 PHP 充满着安全漏洞或没有“集群”功能来支持大规模的计算任务等概念。
因此,当今的 PHP 与它的祖先——即最开始的 PHP 是完全不同的。然而,在 Gutmans 看来,在用古老的 PHP 语言版本写的遗留代码的地方一直存在着偏见以至于上升到整个语言的高度。比如 PHP 充满着安全漏洞或没有“集群”功能来支持大规模的计算任务等概念。
“批评 PHP 的人们通常批评的是在 1998 年时候的 PHP 版本,”他说。“这些人都没有与时俱进。当今的 PHP 已经有了很成熟的生态系统了。”
如今Gutmans 说他作为一个管理者最重要的事情就是鼓励人们升级到最新版本。“PHP有个很大的社区足以支持您的遗留代码的问题”他说。“但总的来说我们的社区大部分都在 PHP5.3 及以上的。”
问题是,任何语言用户都不会全部升级到最新版本。这就是为什么 Python 用户仍在使用 2000 年发布的 Python 2而不是使用 2008 年发布的 Python 3 的原因。甚至是已经六年了喜欢 Google 的大多数用户仍没有升级。这种情况是多种原因造成的,但它使得很多开发者在承担风险。
问题是,任何语言用户都不会全部升级到最新版本。这就是为什么 Python 用户仍在使用 2000 年发布的 Python 2而不是使用 2008 年发布的 Python 3 的原因。甚至在六年后,大多数像 Google 这样的用户仍没有升级。这种情况是多种原因造成的,但它使得很多开发者在承担风险。
“任何东西都不会消亡的”Rabkin 说。“任何语言的遗留代码都会一直存在。重写的代价是非常高昂的,如果它们不出问题就不要去改动。”
@ -54,15 +54,15 @@ Andi Gutmans 是 已经成为 PHP4 的标准编译器的 Zend Engine 的发明
> 有一件事使我们被深深震撼到了。这事最重要的就是我们给人们按年龄分组然后询问他们知道多少编程语言。我们主观的认为随着年龄的增长知道的会越来越多但实际上却不是25岁年龄组和45岁年龄组知道的语言数目是一样的。几个反复询问的问题这里持续不变的。您知道一种语言的几率并不与您的年龄挂钩。
换句话说,不仅仅年长的开发者坚持传统,年轻的程序员会认并采用古老的编程语言作为他们的第一们语言。这可能是因为这些语言具有很有趣的开发库及功能特点,也可能是因为在社区里开发者都是一个组的都喜爱这种开发语言。
换句话说,不仅仅年长的开发者坚持传统,年轻的程序员也会认可并采用古老的编程语言作为他们的第一门语言。这可能是因为这些语言具有很有趣的开发库及功能特点,也可能是因为在社区里开发者都是喜爱这种开发语言的一伙人。
“在全球程序员关注的语言的数量是有定数的,” Rabkin 说。“如果一们语言表现出足够独特的价值,人们将会学习和使用它。如果是和您交流代码和知识的的某个人分享一门编程语言,您将会学习它。因此,例如,只要那些开发库是 Python 库和社区特长是 Python 语言的经验,那么 Python 将会大行其道。”
“在全球程序员关注的语言的数量是有定数的,” Rabkin 说。“如果一门语言表现出足够独特的价值,人们将会学习和使用它。如果是和您交流代码和知识的某个人分享一门编程语言,您将会学习它。因此,例如,只要那些 Python 库存在、 社区也对 Python 语言很有经验的话,那么 Python 仍将会大行其道。”
研究人员发现关于语言实现的功能,社区是一个巨大的因素。虽然像 Python 和 Ruby 这样的高级语言并没有太大的差别,但,例如程序员就更容易觉得一种比另一种优越。
研究人员发现关于语言实现的功能,社区是一个巨大的因素。虽然像 Python 和 Ruby 这样的高级语言并没有太大的差别,但,程序员总是容易觉得一种比另一种优越。
“Rails 不一定要用 Ruby 语言编写,但它用了,这就是社因素在起作用,” Rabkin 说。“例如,复活 Objective-C 语言这件事就是苹果的工程师团队说‘让我们使用它吧,’ 他们就没得选择了。”
“Rails 不一定要用 Ruby 语言编写,但它用了,这就是社会因素在起作用,” Rabkin 说。“例如,复活 Objective-C 语言这件事就是苹果的工程师团队说‘让我们使用它吧,’ 他们就没得选择了。”
通观社会的影响及老旧代码这些问题我们发现最古老的和最新的计算机语言都有巨大的惰性。Go 语言怎么样能超越 C 语言呢?如果有合适的人或公司说它超越它就超越。
通观社会的影响及老旧代码这些问题我们发现最古老的和最新的计算机语言都有巨大的惰性。Go 语言怎么样能超越 C 语言呢?如果有合适的人或公司说它超越它就超越。
“它归结为谁传播的更好谁就好,” Rabkin 说。
@ -74,7 +74,7 @@ via: http://readwrite.com/2014/09/02/programming-language-coding-lifetime
作者:[Lauren Orsini][a]
译者:[runningwater](https://github.com/runningwater)
校对:[校对者ID](https://github.com/校对者ID)
校对:[wxy](https://github.com/wxy)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出

View File

@ -1,27 +1,26 @@
让下载更方便
================================================================================
下载管理器是一个电脑程序专门处理下载文件优化带宽占用以及让下载更有条理等任务。有些网页浏览器例如Firefox也集成了一个下载管理器作为功能但是它们的方式还是没有专门的下载管理器或者浏览器插件那么专业没有最佳地使用带宽也没有好用的文件管理功能。
下载管理器是一个电脑程序专门处理下载文件优化带宽占用以及让下载更有条理等任务。有些网页浏览器例如Firefox也集成了一个下载管理器作为功能但是它们的使用方式还是没有专门的下载管理器(或者浏览器插件)那么专业,没有最佳地使用带宽,也没有好用的文件管理功能。
对于那些经常下载的人使用一个好的下载管理器会更有帮助。它能够最大化下载速度加速下载断点续传以及制定下载计划让下载更安全也更有价值。下载管理器已经没有之前流行了但是最好的下载管理器还是很实用包括和浏览器的紧密结合支持类似YouTube的主流网站以及更多。
有好几个能在Linux下工作都非常优秀的开源下载管理器以至于让人无从选择。我整理了一个摘要是我喜欢的下载管理器以及Firefox里的一个非常好用的下载插件。这里列出的每一个程序都是开源许可发布的。
----------
![](http://www.linuxlinks.com/portal/content2/png/uGet.png)
###uGet
![](http://www.linuxlinks.com/portal/content/reviews/Utilities/Screenshot-uGet.png)
uGet是一个轻量级容易使用功能完备的开源下载管理器。uGet允许用户从不同的源并行下载来加快速度添加文件到下载序列暂停或继续下载提供高级分类管理和浏览器集成监控剪贴板批量下载支持26种语言以及其他许多功能。
uGet是一个成熟的软件保持开发超过11年。在这个时间里,它发展成一个非常多功能的下载管理器,拥有一套很高价值的功能集,还保持了易用性。
uGet是一个成熟的软件持续开发超过了11年。在这段时间里,它发展成一个非常多功能的下载管理器,拥有一套很高价值的功能集,还保持了易用性。
uGet是用C语言开发的使用了cURL作为底层支持以及应用库libcurl。uGet有非常好的平台兼容性。它一开始是Linux系统下的项目但是被移植到在Mac OS XFreeBSDAndroid和Windows平台运行。
#### 功能点: ####
- 容易使用
- 下载队列可以让下载任务按任意多或少或你希望的数量同时进行。
- 下载队列可以让下载任务按任意数量或你希望的数量同时进行。
- 断点续传
- 默认分类
- 完美实现的剪贴板监控功能
@ -43,19 +42,19 @@ uGet是用C语言开发的使用了cURL作为底层支持以及应用库li
- 支持GnuTLS
- 支持26种语言包括阿拉伯语白俄罗斯语简体中文繁体中文捷克语丹麦语英语默认法语格鲁吉亚语德语匈牙利语印尼语意大利语波兰语葡萄牙语巴西俄语西班牙语土耳其语乌克兰语以及越南语。
---
- 网站:[ugetdm.com][1]
- 开发人员C.H. Huang and contributors
- 许可GNU LGPL 2.1
- 版本1.10.5
----------
![](http://www.linuxlinks.com/portal/content2/png/DownThemAll%21.png)
###DownThemAll!
![](http://www.linuxlinks.com/portal/content/reviews/Utilities/Screenshot-DownThemAll%21.png)
DownThemAll!是一个小巧的,可靠的以及易用的,开源下载管理器加速器是Firefox的一个组件。它可以让用户下载一个页面上所有链接和图片以及更多功能。它可以让用户完全控制下载任务随时分配下载速度以及同时下载的任务数量。通过使用Metalinks或者手动添加镜像的方式可以同时从不同的服务器下载同一个文件。
DownThemAll!是一个小巧可靠的、易用的开源下载管理器加速器是Firefox的一个组件。它可以让用户下载一个页面上所有链接和图片,还有更多功能。它可以让用户完全控制下载任务随时分配下载速度以及同时下载的任务数量。通过使用Metalinks或者手动添加镜像的方式可以同时从不同的服务器下载同一个文件。
DownThemAll会根据你要下载的文件大小切割成不同的部分然后并行下载。
@ -69,6 +68,7 @@ DownThemAll会根据你要下载的文件大小切割成不同的部分
- 高级重命名选项
- 暂停和继续下载任务
---
- 网站:[addons.mozilla.org/en-US/firefox/addon/downthemall][2]
- 开发人员Federico Parodi, Stefano Verna, Nils Maier
@ -77,13 +77,13 @@ DownThemAll会根据你要下载的文件大小切割成不同的部分
----------
![](http://www.linuxlinks.com/portal/content2/png/JDownloader.png)
###JDownloader
![](http://www.linuxlinks.com/portal/content/reviews/Utilities/Screenshot-JDownloader.png)
JDownloader是一个免费开源的下载管理工具拥有一个大型社区的开发者支持让下载更简单和快捷。用户可以开始停止或暂停下载设置带宽限制自动解压缩包以及更多功能。它提供了一个容易扩展的框架。
JDownloader简化了从一键下载网站下载文件。它还支持从不同并行资源下载,手势识别,自动文件解压缩以及更多功能。另外还支持许多“加密链接”网站所以你只需要复制粘贴“加密的”链接然后JDownloader会处理剩下的事情。JDownloader还能导入CCFRSDF和DLC文件。
JDownloader简化了从一键下载网站下载文件。它还支持从不同并行资源下载、手势识别、自动文件解压缩以及更多功能。另外还支持许多“加密链接”网站所以你只需要复制粘贴“加密的”链接然后JDownloader会处理剩下的事情。JDownloader还能导入CCFRSDF和DLC文件。
#### 功能点: ####
@ -98,6 +98,7 @@ JDownloader简化了从一键下载网站下载文件。它还支持从不同并
- 网页更新
- 集成包管理器支持额外模块例如WebinterfaceShutdown
---
- 网站:[jdownloader.org][3]
- 开发人员AppWork UG
@ -106,11 +107,11 @@ JDownloader简化了从一键下载网站下载文件。它还支持从不同并
----------
![](http://www.linuxlinks.com/portal/content2/png/FreeRapidDownloader.png)
###FreeRapid Downloader
![](http://www.linuxlinks.com/portal/content/reviews/Utilities/Screenshot-FreeRapidDownloader.png)
FreeRapid Downloader是一个易用的开源下载程序支持从RapidshareYoutubeFacebookPicasa和其他文件分享网站下载。他的下载引擎基于一些插件所以可以从特殊站点下载。
FreeRapid Downloader是一个易用的开源下载程序支持从RapidshareYoutubeFacebookPicasa和其他文件分享网站下载。他的下载引擎基于一些插件所以可以从那些特别的站点下载。
对于需要针对特定文件分享网站的下载管理器用户来说FreeRapid Downloader是理想的选择。
@ -133,6 +134,7 @@ FreeRapid Downloader使用Java语言编写。需要至少Sun Java 7.0版本才
- 支持多国语言:英语,保加利亚语,捷克语,芬兰语,葡萄牙语,斯洛伐克语,匈牙利语,简体中文,以及其他
- 支持超过700个站点
---
- 网站:[wordrider.net/freerapid/][4]
- 开发人员Vity and contributors
@ -141,7 +143,7 @@ FreeRapid Downloader使用Java语言编写。需要至少Sun Java 7.0版本才
----------
![](http://www.linuxlinks.com/portal/content2/png/FlashGot.png)
###FlashGot
![](http://www.linuxlinks.com/portal/content/reviews/Utilities/Screenshot-FlashGot.png)
@ -151,7 +153,7 @@ FlashGot把所支持的所有下载管理器统一成Firefox中的一个下载
#### 功能点: ####
- Linux下支持Aria, Axel Download Accelerator, cURL, Downloader 4 X, FatRat, GNOME Gwget, FatRat, JDownloader, KDE KGet, pyLoad, SteadyFlow, uGet, wxDFast, 和wxDownload Fast
- Linux下支持Aria, Axel Download Accelerator, cURL, Downloader 4 X, FatRat, GNOME Gwget, FatRat, JDownloader, KDE KGet, pyLoad, SteadyFlow, uGet, wxDFast 和 wxDownload Fast
- 支持图库功能,可以帮助把原来分散在不同页面的系列资源,整合到一个所有媒体库页面中,然后可以轻松迅速地“下载所有”
- FlashGot Link会使用默认下载管理器下载当前鼠标选中的链接
- FlashGot Selection
@ -160,12 +162,13 @@ FlashGot把所支持的所有下载管理器统一成Firefox中的一个下载
- FlashGot Media
- 抓取页面里所有链接
- 抓取所有标签栏的所有链接
- 链接过滤(例如只下载指定类型文件)
- 链接过滤(例如只下载指定类型文件)
- 在网页上抓取点击所产生的所有链接
- 支持从大多数链接保护和文件托管服务器直接和批量下载
- 隐私选项
- 支持国际化
---
- 网站:[flashgot.net][5]
- 开发人员Giorgio Maone
@ -178,7 +181,7 @@ via: http://www.linuxlinks.com/article/20140913062041384/DownloadManagers.html
作者Frazer Kline
译者:[zpl1025](https://github.com/zpl1025)
校对:[校对者ID](https://github.com/校对者ID)
校对:[wxy](https://github.com/wxy)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出

View File

@ -0,0 +1,79 @@
如何在Ubuntu桌面上使用Steam Music音乐播放器
================================================================================
![](http://www.omgubuntu.co.uk/wp-content/uploads/2014/09/steam-music.jpg)
**‘音乐让人们走到一起’ 麦当娜曾这样唱道。但是Steam的新音乐播放器特性能否很好的混搭小资与叛逆**
如果你曾与世隔绝充耳不闻你就会错过与Steam Music的相识。它的特性并不是全新的。从今年的早些时候开始它就已经以这样或那样的形式进行了测试。
但Steam客户端最近一次在Windows、Mac和Linux上的定期更新中所有的客户端都能使用它了。你会问为什么一个游戏客户端会添加一个音乐播放器呢当然是为了让你能一边玩游戏一边听你最喜欢的音乐了。
别担心在游戏的音乐声中再加上你自己的音乐听起来并不会像你想象的那么糟哈哈。Steam会帮你减少或消除游戏的背景音乐但在混音器中保持效果音的高音量以便于你能和平时一样听到那些叮嘭和各种爆炸声。
### 使用Steam Music音乐播放器 ###
![Music in Big Picture Mode](http://www.omgubuntu.co.uk/wp-content/uploads/2014/10/steam-music-bpm.jpg)
*大图模式*
任何使用最新版客户端的人都能使用Steam Music音乐播放器。它是个相当简单的附加程序它让你能从你的电脑中添加、浏览并播放音乐。
播放器可以以两种方式进入桌面和超棒的Steam大图模式。在两种方式下控制播放都超级简单。
作为一个Rhythmbox的对手或是Spotify的继承者把**为玩游戏时放音乐而设计**作为特点一点也不吸引人。事实上他没有任何可购买音乐的商店也没有整合RdioGrooveshark这类在线服务或是桌面服务。没错你的多媒体键在Linux的播放器上完全不能用。
Valve说他们“*……计划增加更多的功能以便用户能以新的方式体验Steam Music。我们才刚刚开始。*”
#### Steam Music的重要特性####
- 只能播放MP3文件
- 与游戏中的音乐相融
- 在游戏中可以控制音乐
- 播放器可以在桌面上或在大图模式下运行
- 基于播放列表的播放方式
**它没有整合到Ubuntu的声音菜单里而且目前也不支持键盘上的多媒体键。**
### 在Ubuntu上使用Steam Music播放器 ###
显然添加音乐是你播放音乐前的第一件事。在Ubuntu上默认设置下Steam会自动添加两个文件夹Home下的标准Music目录和它自带的Steam Music文件夹。任何可下载的音轨都保存在其中。
注意:目前**Steam Music只能播放MP3文件**。如果你的大部分音乐都是其他文件格式(比如.aac、.m4a等等这些文件不会被添加也不能被播放。
若想添加其他的文件夹或重新扫描:
- 到**View > Settings > Music**。
- 点击‘**Add**‘将其他位置的文件夹添加到已列出两个文件夹的列表下。
- 点击‘**Start Scanning**
![](http://www.omgubuntu.co.uk/wp-content/uploads/2014/09/Tardis.jpg)
你还可以在这个对话框中调整其他设置包括scan at start。如果你经常添加新音乐而且很容易忘记手动启动扫描请标记此项。你还可以选择当路径变化时是否显示提示设置默认的音量还能调整当你打开一个应用软件或语音聊天时的播放状态的改变。
一旦你的音乐源成功的被添加并扫描后,你就可以通过主客户端的**Library > Music**区域浏览你的音乐了。
![](http://www.omgubuntu.co.uk/wp-content/uploads/2014/09/browser.jpg)
Steam Music会默认的将音乐按照专辑进行分组。若想按照乐队名进行浏览你需要点击Albums然后从下拉菜单中选择Artists
![](http://www.omgubuntu.co.uk/wp-content/uploads/2014/10/steam-selection.jpg)
Steam Music是一个以队列方式工作的系统。你可以通过双击浏览器里的音乐或右键单击并选择Add to Queue来把音乐添加到播放队列里。
![](http://www.omgubuntu.co.uk/wp-content/uploads/2014/10/steam-music-queue.jpg)
若想**启动桌面播放器**请点击右上角的音符图标或通过**View > Music Player**菜单。
![](http://www.omgubuntu.co.uk/wp-content/uploads/2014/10/steam-music.jpg)
--------------------------------------------------------------------------------
via: http://www.omgubuntu.co.uk/2014/10/use-steam-music-player-linux
作者:[Joey-Elijah Sneddon][a]
译者:[H-mudcup](https://github.com/H-mudcup)
校对:[wxy](https://github.com/wxy)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:https://plus.google.com/117485690627814051450/?rel=author

View File

@ -1,24 +1,26 @@
使用条块化I/O管理多个逻辑卷管理磁盘
使用条块化I/O管理多个LVM磁盘第五部分
================================================================================
在本文中我们将了解逻辑卷是如何通过条块化I/O来写入数据到磁盘的。逻辑卷管理的酷炫特性之一就是它能通过条块化I/O跨多个磁盘写入数据。
![Manage LVM Disks Using Striping I/O](http://www.tecmint.com/wp-content/uploads/2014/09/LVM-Striping.jpeg)
使用条块化I/O管理LVM磁盘
### LVM条块化是什么 ###
**LVM条块化**是LVM功能之一该技术会跨多个磁盘写入数据而不是对单一物理卷持续写入。
![Manage LVM Disks Using Striping I/O](http://www.tecmint.com/wp-content/uploads/2014/09/LVM-Striping.jpeg)
*使用条块化I/O管理LVM磁盘*
#### 条块化特性 ####
- 它会改善磁盘性能。
- 挽救对单一磁盘的重复硬写入。
- 避免对单一磁盘的不断的大量写入。
- 使用对多个磁盘的条块化写入,可以减少磁盘填满的几率。
在逻辑卷管理中,如果我们需要创建一个逻辑卷,扩展的卷会完全映射到卷组和物理卷。在此种情形中,如果其中一个**PV**物理卷被填满我们需要从其它物理卷中添加更多扩展。这样添加更多扩展到PV中后我们可以指定逻辑卷使用特定的物理卷写入I/O。
假设我们**四个磁盘**驱动器,分别指向了四个物理卷,如果各个物理卷总计可以达到**100 I/O**,我们卷组就可以获得**400 I/O**。
假设我们有**四个磁盘**驱动器,分别指向了四个物理卷,如果各个物理卷总计可以达到**100 I/O**,我们卷组就可以获得**400 I/O**。
如果我们不使用**条块化方法**文件系统将横跨基础物理卷写入。例如写入一些数据到物理卷达到100 I/O这些数据只会写入到第一个PV**sdb1**。如果我们在写入时使用条块化选项创建逻辑卷它会分割100 I/O分别写入到四个驱动器中这就是说每个驱动器中都会接收到25 I/O。
@ -41,27 +43,31 @@
# fdisk -l | grep sd
![List Hard Drives](http://www.tecmint.com/wp-content/uploads/2014/09/List-Hard-Drives.png)
列出硬盘驱动器
现在我们必须为这4个硬盘驱动器**sdb****sdc****sdd**和**sde**创建分区,我们将用‘**fdisk**’命令来完成该工作。要创建分区,请遵从本文**第一部分**中**步骤#4**的说明,并在创建分区时确保你已将类型修改为**LVM8e**。
*列出硬盘驱动器*
现在我们必须为这4个硬盘驱动器**sdb****sdc****sdd**和**sde**创建分区,我们将用‘**fdisk**’命令来完成该工作。要创建分区,请遵从本文**[第一部分][1]**中**步骤#4**的说明,并在创建分区时确保你已将类型修改为**LVM8e**。
# pvcreate /dev/sd[b-e]1 -v
![Create Physical Volumes in LVM](http://www.tecmint.com/wp-content/uploads/2014/09/Create-Physical-Volumes-in-LVM.png)
在LVM中创建物理卷
*在LVM中创建物理卷*
PV创建完成后你可以使用**pvs**’命令将它们列出来。
# pvs
![Verify Physical Volumes](http://www.tecmint.com/wp-content/uploads/2014/09/Verify-Physical-Volumes.png)
验证物理卷
*验证物理卷*
现在我们需要使用这4个物理卷来定义卷组。这里我定义了一个物理扩展大小PE为**16MB**,名为**vg_strip**的卷组。
# vgcreate -s 16M vg_strip /dev/sd[b-e]1 -v
上面命令中选项的说明。
上面命令中选项的说明:
- **[b-e]1** 定义硬盘驱动器名称如sdb1sdc1sdd1sde1。
- **-s** 定义物理扩展大小。
- **-v** 详情。
@ -71,14 +77,16 @@ PV创建完成后你可以使用**pvs**’命令将它们列出来。
# vgs vg_strip
![Verify Volume Group](http://www.tecmint.com/wp-content/uploads/2014/09/Verify-Volume-Group.png)
验证卷组
*验证卷组*
要获取VG更详细的信息可以在**vgdisplay**命令中使用‘-v选项它将给出**vg_strip**卷组中所使用的全部物理卷的详细情况。
# vgdisplay vg_strip -v
![Volume Group Information](http://www.tecmint.com/wp-content/uploads/2014/09/Volume-Group-Information.png)
卷组信息
*卷组信息*
回到我们的话题,现在在创建逻辑卷时,我们需要定义条块化值,就是数据需要如何使用条块化方法来写入到我们的逻辑卷中。
@ -91,46 +99,54 @@ PV创建完成后你可以使用**pvs**’命令将它们列出来。
- **-i** –条块化
![Create Logical Volumes](http://www.tecmint.com/wp-content/uploads/2014/09/Create-Logical-Volumes.png)
创建逻辑卷
*创建逻辑卷*
在上面的图片中,我们可以看到条块尺寸的默认大小为**64 KB**,如果我们需要自定义条块值,我们可以使用**-I**大写I。要确认逻辑卷已经是否已经创建请使用以下命令。
# lvdisplay vg_strip/lv_tecmint_strp1
![Confirm Logical Volumes](http://www.tecmint.com/wp-content/uploads/2014/09/Confirm-Logical-Volumes.png)
确认逻辑卷
*确认逻辑卷*
现在接下来的问题是我们怎样才能知道条块被写入到了4个驱动器。这里我们可以使用**lvdisplay**’和**-m**(显示逻辑卷映射)命令来验证。
# lvdisplay vg_strip/lv_tecmint_strp1 -m
![Check Logical Volumes](http://www.tecmint.com/wp-content/uploads/2014/09/Check-Logical-Volumes.png)
检查逻辑卷
*检查逻辑卷*
要创建自定义的条块尺寸,我们需要用我们自定义的条块大小**256KB**来创建一个**1GB**大小的逻辑卷。现在我打算将条块分布到3个PV上。这里我们可以定义我们想要哪些pv条块化。
# lvcreate -L 1G -i3 -I 256 -n lv_tecmint_strp2 vg_strip /dev/sdb1 /dev/sdc1 /dev/sdd1
![Define Stripe Size](http://www.tecmint.com/wp-content/uploads/2014/09/Define-Stripe-Size.png)
定义条块大小
*定义条块大小*
接下来,检查条块大小和条块化的卷。
# lvdisplay vg_strip/lv_tecmint_strp2 -m
![Check Stripe Size](http://www.tecmint.com/wp-content/uploads/2014/09/Check-Stripe-Size.png)
检查条块大小
*检查条块大小*
是时候使用设备映射了,我们使用‘**dmsetup**’命令来完成这项工作。它是一个低级别的逻辑卷管理工具,它用于管理使用了设备映射驱动的逻辑设备。
# dmsetup deps /dev/vg_strip/lv_tecmint_strp[1-2]
![Device Mapper](http://www.tecmint.com/wp-content/uploads/2014/09/Device-Mapper.png)
设备映射
*设备映射*
这里我们可以看到strp1依赖于4个驱动器strp2依赖于3个设备。
希望你已经明白,我们怎样能让逻辑卷条块化来写入数据。对于此项设置,必须掌握逻辑卷管理基础知识。在我的下一篇文章中,我将给大家展示怎样在逻辑卷管理中迁移数据。到那时,请静候更新。同时,别忘了对本文提出有价值的建议。
希望你已经明白,我们怎样能让逻辑卷条块化来写入数据。对于此项设置,必须掌握逻辑卷管理基础知识。
在我的下一篇文章中,我将给大家展示怎样在逻辑卷管理中迁移数据。到那时,请静候更新。同时,别忘了对本文提出有价值的建议。
--------------------------------------------------------------------------------
@ -138,8 +154,9 @@ via: http://www.tecmint.com/manage-multiple-lvm-disks-using-striping-io/
作者:[Babin Lonston][a]
译者:[GOLinux](https://github.com/GOLinux)
校对:[校对者ID](https://github.com/校对者ID)
校对:[wxy](https://github.com/wxy)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://www.tecmint.com/author/babinlonston/
[1]:http://linux.cn/article-3965-1.html

View File

@ -1,10 +1,11 @@
迁移LVM分区到新的逻辑卷(驱动器)——第六部分
迁移LVM分区到新的逻辑卷/驱动器(第六部分)
================================================================================
这是我们正在开展的逻辑卷管理系列的第六部分。在本文中,我们将为大家展示怎样在线将现存的逻辑卷迁移到其它新的驱动器。在开始之前我想要先来介绍一下LVM迁移及其特性。
这是我们正在进行的LVM系列的第六部分。在本文中我们将为大家展示怎样在线将现存的逻辑卷迁移到其它新的驱动器。在开始之前我想要先来介绍一下LVM迁移及其特性。
![LVM Storage Migration](http://www.tecmint.com/wp-content/uploads/2014/10/LVM-Migrations.png)
LVM存储迁移
*LVM存储迁移*
### 什么是LVM迁移 ###
@ -17,7 +18,7 @@ LVM存储迁移
- 我们可以使用任何类型的磁盘如SATA、SSD、SAS、SAN storage iSCSI或者FC。
- 在线迁移磁盘,而且数据不会丢失。
在LVM迁移中我们将交换各个卷、文件系统以及位于现存存储中的数据。例如,如果我们有一个单一逻辑卷,它已经映射到了物理卷,而该物理卷是一个物理硬盘驱动器。
在LVM迁移中我们将交换各个卷、文件系统以及位于已有的存储中的数据。例如,如果我们有一个单一逻辑卷,它已经映射到了物理卷,而该物理卷是一个物理硬盘驱动器。
现在如果我们需要升级服务器存储为SSD硬盘驱动器我们首先需要考虑什么重新格式化磁盘我们不必重新格式化服务器LVM可以选择将这些旧的SATA驱动器上的数据迁移到新的SSD驱动器上。在线迁移将会支持任何类型的磁盘不管是本地驱动器还是SAN或者光纤通道都可以。
@ -35,7 +36,8 @@ LVM存储迁移
# lvs
![Check Logical Volume Disk](http://www.tecmint.com/wp-content/uploads/2014/10/Check-Logical-Volume-Disk.png)
检查逻辑卷磁盘
*检查逻辑卷磁盘*
### 步骤2 检查新添加的驱动器 ###
@ -44,7 +46,8 @@ LVM存储迁移
# fdisk -l | grep dev
![Check New Added Drive](http://www.tecmint.com/wp-content/uploads/2014/10/Check-New-Added-Drive.png)
检查新添加的驱动器
*检查新添加的驱动器*
**注意**:你看到上面屏幕中的内容了吗?新的驱动器已经被成功添加了,其名称为“**/dev/sda**”。
@ -57,7 +60,8 @@ LVM存储迁移
# cat tecmint.txt
![Check Logical Volume Data](http://www.tecmint.com/wp-content/uploads/2014/10/Check-Logical-Volume-Data.png)
检查逻辑卷数据
*检查逻辑卷数据*
**注意**:出于演示的目的,我们已经在**/mnt/lvm**挂载点下创建了两个文件,我们将在线将这些数据迁移到新的驱动器中。
@ -67,7 +71,8 @@ LVM存储迁移
# vgs -o+devices | grep tecmint_vg
![Confirm Logical Volume Names](http://www.tecmint.com/wp-content/uploads/2014/10/Confirm-Logical-Volume-Names.png)
确认逻辑卷名称
*确认逻辑卷名称*
**注意**:看到上面屏幕中的内容了吗?“**vdb**”容纳了卷组**tecmint_vg**。
@ -79,7 +84,8 @@ LVM存储迁移
# pvs
![Create Physical Volume](http://www.tecmint.com/wp-content/uploads/2014/10/Create-Physical-Volume.png)
创建物理卷
*创建物理卷*
**6.**接下来使用vgextend命令来添加新创建的物理卷到现存卷组tecmint_vg。
@ -87,14 +93,16 @@ LVM存储迁移
# vgs
![Add Physical Volume](http://www.tecmint.com/wp-content/uploads/2014/10/Add-Physical-Volume.png)
添加物理卷
*添加物理卷*
**7.**要获得卷组的完整信息列表请使用vgdisplay命令。
# vgdisplay tecmint_vg -v
![List Volume Group Info](http://www.tecmint.com/wp-content/uploads/2014/10/List-Volume-Group-Info.png)
列出卷组信息
*列出卷组信息*
**注意**在上面屏幕中我们可以看到在输出结果的结束处我们的PV已经添加到了卷组中。
@ -108,7 +116,8 @@ LVM存储迁移
# ls -l /dev | grep vd
![List Device Information](http://www.tecmint.com/wp-content/uploads/2014/10/List-Device-Information.png)
列出设备信息
*列出设备信息*
**注意**:在上面的命令中,我们可以看到主设备号是**252**,次设备号是**17**,它连接到了**vdb1**。希望你理解了上面命令的输出。
@ -122,7 +131,8 @@ LVM存储迁移
- **1** = 添加单个镜像
![Mirroring Method Migration](http://www.tecmint.com/wp-content/uploads/2014/10/Mirroring-Method-Migration.png)
镜像法迁移
*镜像法迁移*
**注意**:上面的迁移过程根据卷的大小会花费一段时间。
@ -131,14 +141,16 @@ LVM存储迁移
# lvs -o+devices
![Verify Converted Mirror](http://www.tecmint.com/wp-content/uploads/2014/10/Verify-Converted-Mirror.png)
验证转换的镜像
*验证转换的镜像*
**11.**当你确认转换的镜像没有任何问题后,你可以移除旧的虚拟磁盘**vdb1**。**-m**选项将移除镜像,先前我们使用**1**来添加镜像。
# lvconvert -m 0 /dev/tecmint_vg/tecmint_lv /dev/vdb1
![Remove Virtual Disk](http://www.tecmint.com/wp-content/uploads/2014/10/Remove-Virtual-Disk.png)
移除虚拟磁盘
*移除虚拟磁盘*
**12.**在旧虚拟磁盘移除后,你可以使用以下命令来再次检查逻辑卷设备。
@ -147,7 +159,8 @@ LVM存储迁移
# ls -l /dev | grep sd
![Check New Mirrored Device](http://www.tecmint.com/wp-content/uploads/2014/10/Check-New-Mirrored-Device.png)
检查新镜像的设备
*检查新镜像的设备*
在上面的图片中,你看到了吗?我们的逻辑卷现在依赖于**8,1**,名称为**sda1**。这说明我们的迁移过程已经完成了。
@ -157,7 +170,8 @@ LVM存储迁移
# cat tecmin.txt
![Check Mirrored Data](http://www.tecmint.com/wp-content/uploads/2014/10/Check-Mirrored-Data.png)
检查镜像的数据
*检查镜像的数据*
# vgreduce /dev/tecmint_vg /dev/vdb1
@ -170,7 +184,8 @@ LVM存储迁移
# lvs
![Delete Virtual Disk](http://www.tecmint.com/wp-content/uploads/2014/10/Delete-Virtual-Disk.png)
删除虚拟磁盘
*删除虚拟磁盘*
### 步骤6 LVM pvmove镜像法 ###
@ -190,7 +205,7 @@ via: http://www.tecmint.com/lvm-storage-migration/#comment-331336
作者:[Babin Lonston][a]
译者:[GOLinux](https://github.com/GOLinux)
校对:[校对者ID](https://github.com/校对者ID)
校对:[wxy](https://github.com/wxy)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出

View File

@ -1,4 +1,4 @@
在Linux上使用smartmontools查看硬盘的健康状态
使用 smartmontools 查看硬盘的健康状态
================================================================================
要说Linux用户最不愿意看到的事情莫过于在毫无警告的情况下发现硬盘崩溃了。诸如[RAID][2]的[备份][1]和存储技术可以在任何时候帮用户恢复数据,但为预防硬件突然崩溃造成数据丢失所花费的代价却是相当可观的,特别是在用户从来没有提前考虑过在这些情况下的应对措施时。
@ -28,7 +28,7 @@
![](https://farm4.staticflickr.com/3953/15352881249_96c09f7ccc_o.png)
其中sdx代表分配给机器上对应硬盘上的设备名。
其中sdX代表分配给机器上对应硬盘上的设备名。
如果想要显示出某个指定硬盘的信息比如设备模式、S/N、固件版本、大小、ATA版本/修订号、SMART功能的可用性和状态,在运行smartctl命令时添加"--info"选项,并按如下所示指定硬盘的设备名。
@ -67,8 +67,8 @@
- **THRESH**在报告硬盘FAILED状态前WORST可以允许的最小值。
- **TYPE**属性的类型Pre-fail或Old_age。Pre-fail类型的属性可被看成一个关键属性表示参与磁盘的整体SMART健康评估PASSED/FAILED。如果任何Pre-fail类型的属性故障那么可视为磁盘将要发生故障。另一方面Old_age类型的属性可被看成一个非关键的属性如正常的磁盘磨损表示不会使磁盘本身发生故障。
- **UPDATED**表示属性的更新频率。Offline代表磁盘上执行离线测试的时间。
- **WHEN_FAILED**如果VALUE小于等于THRESH会被设置成“FAILING_NOW”如果WORST小于等于THRESH会被设置成“In_the_past”如果都不是会被设置成“-”。在“FAILING_NOW”情况下需要备份重要文件ASAP特别是属性是Pre-fail类型时。“In_the_past”代表属性已经故障了但在运行测试的时候没问题。“-”代表这个属性从没故障过。
- **RAW_VALUE**制造商定义的原始值从VALUE派生。
- **WHEN\_FAILED**如果VALUE小于等于THRESH会被设置成“FAILING\_NOW”如果WORST小于等于THRESH会被设置成“In\_the\_past”如果都不是会被设置成“-”。在“FAILING\_NOW”情况下需要尽快备份重要文件特别是属性是Pre-fail类型时。“In\_the\_past”代表属性已经故障了但在运行测试的时候没问题。“-”代表这个属性从没故障过。
- **RAW\_VALUE**制造商定义的原始值从VALUE派生。
这时候你可能会想“是的smartctl看起来是个不错的工具但我更想知道如何避免手动运行的麻烦。”如果能够以指定的间隔运行同时又能通知我测试结果那不是更好吗
@ -134,7 +134,7 @@ via: http://xmodulo.com/check-hard-disk-health-linux-smartmontools.html
作者:[Gabriel Cánepa][a]
译者:[KayGuoWhu](https://github.com/KayGuoWhu)
校对:[校对者ID](https://github.com/校对者ID)
校对:[wxy](https://github.com/wxy)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出

View File

@ -76,7 +76,7 @@ via: http://www.ubuntugeek.com/configuring-layer-two-peer-to-peer-vpn-using-n2n.
作者:[ruchi][a]
译者:[GOLinux](https://github.com/GOLinux)
校对:[校对者ID](https://github.com/校对者ID)
校对:[wxy](https://github.com/wxy)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出

View File

@ -1,8 +1,8 @@
Linux 系统中使用 logwatch 监控日志文件
================================================================================
Linux 操作系统和许多应用程序会创建特殊的文件来记录它们的运行事件,这些文件通常被称作“日志”。当要了解操作系统或第三方应用程序的行为或进行故障排队的话,这些系统日志或特写的应用程序日志文件是必不可少的的工具。但是,日志文件并没有您们所谓的“清晰”或“容易”这种程度的可读性。手工分析原始的日志文件简直是浪费时间,并且单调乏味。出于这个原因,对于系统管理员来说,发现任何一款能把原始的日志文件转换成更人性化的记录摘要的工具,将会受益无穷。
Linux 操作系统和许多应用程序会创建特殊的文件来记录它们的运行事件,这些文件通常被称作“日志”。当要了解操作系统或第三方应用程序的行为或进行故障排查时,这些系统日志或特定的应用程序日志文件是必不可少的的工具。但是,日志文件并没有您们所谓的“清晰”或“容易”这种程度的可读性。手工分析原始的日志文件简直是浪费时间,并且单调乏味。出于这个原因,对于系统管理员来说,发现任何一款能把原始的日志文件转换成更人性化的记录摘要的工具,将会受益无穷。
[logwatch][1] 是一款用 Perl 语言编写的开源日志解析分析器。它能对原始的日志文件进行解析并转换成结构化格式的文档也能根据您的使用情况和需求来定制报告。logwatch 的主要目的是生成更易于使用的日志摘要并不是用来对日志进行实时的处理和监控的。正因为如此logwatch 通常被设定好时间和频率的自动定时任务来调度运行或者是有需要日志处理的时候从命令行里手动运行。一旦日志报告生成logwatch 通过电子邮件把这报告发送给您,您可以把它保存成文件或者在屏幕上直接显示
[logwatch][1] 是一款用 Perl 语言编写的开源日志解析分析器。它能对原始的日志文件进行解析并转换成结构化格式的文档也能根据您的使用情况和需求来定制报告。logwatch 的主要目的是生成更易于使用的日志摘要并不是用来对日志进行实时的处理和监控的。正因为如此logwatch 通常被设定好时间和频率的自动定时任务来调度运行或者是有需要日志处理的时候从命令行里手动运行。一旦日志报告生成logwatch 可以通过电子邮件把这报告发送给您,您可以把它保存成文件或者直接显示在屏幕上。
Logwatch 报告的详细程度和报告覆盖范围是完全可定制化的。Logwatch 的日志处理引擎也是可扩展的,从某种意义上来说,如果您想在一个新的应用程序中使用 logwatch 功能的话,只需要为这个应用程序的日志文件编写一个日志处理脚本(使用 Perl 语言),然后挂接到 logwatch 上就行。
@ -20,13 +20,13 @@ logwatch 有一点不好的就是,在它生成的报告中没有详细的时
### 配置 Logwatch ###
安装时主要的配置文件logwatch.conf被放到 **/etc/logwatch/conf** 目录中。此文件定义的设置选项会覆盖掉定义在 /usr/share/logwatch/default.conf/logwatch.conf 文件中的系统级设置。
安装时主要的配置文件logwatch.conf被放到 **/etc/logwatch/conf** 目录中。此文件(默认是空的)定义的设置选项会覆盖掉定义在 /usr/share/logwatch/default.conf/logwatch.conf 文件中的系统级设置。
在命令行中,启动 logwatch, 如果不带参数的话,将会使用 /etc/logwatch/conf/logwatch.conf 文件中定义的自定义选项。但,只要一指定参数,它们就会覆盖 /etc/logwatch/conf/logwatch.conf 文件中的任意默认/自定义设置。
在命令行中,启动 logwatch, 如果不带参数的话,将会使用 /etc/logwatch/conf/logwatch.conf 文件中定义的选项。但,只要一指定参数,它们就会覆盖 /etc/logwatch/conf/logwatch.conf 文件中的任意默认/自定义设置。
这篇文章里,我们会编辑 /etc/logwatch/conf/logwatch.conf 文件来对一些默认的设置项做些个性化设置。
Detail = <Low, Med, High, or a number>
Detail = <Low, Med, High, 或数字>
“Detail” 配置指令控制着 logwatch 报告的详细程度。它可以是个正整数也可以是分别代表着10、5和0数字的 High、Med、Low 几个选项。
@ -53,7 +53,7 @@ logwatch 有一点不好的就是,在它生成的报告中没有详细的时
Service = <service-name-2>
. . .
“Service” 选项指定想要监控的一个或多个服务。在 /usr/share/logwatch/scripts/services 目录下列出的服务都能被监控,它们已经涵盖了重要的系统服务(例如,pam,secure,iptables,syslogd 等),也涵盖了一些像 sudo、sshd、http、fail2ban、samba等主流的应用服务。如果您想添加新的服务到列表中得编写一个相应的日志处理 Perl 脚本,并把它放在这个目录中。
“Service” 选项指定想要监控的一个或多个服务。在 /usr/share/logwatch/scripts/services 目录下列出的服务都能被监控,它们已经涵盖了重要的系统服务(例如pam,secure,iptables,syslogd 等),也涵盖了一些像 sudo、sshd、http、fail2ban、samba等主流的应用服务。如果您想添加新的服务到列表中得编写一个相应的日志处理 Perl 脚本,并把它放在这个目录中。
如果这个选项要用来选择特定的服务话,您需要把 /usr/share/logwatch/default.conf/logwatch.conf 文件中的 "Service = All " 这一行注释掉。
@ -123,7 +123,7 @@ via: http://xmodulo.com/monitor-log-file-linux-logwatch.html
作者:[Gabriel Cánepa][a]
译者:[runningwater](https://github.com/runningwater)
校对:[校对者ID](https://github.com/校对者ID)
校对:[wxy](https://github.com/wxy)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出

View File

@ -0,0 +1,295 @@
你值得拥有 —— 25 个 Linux 性能监控工具
================================================================================
一段时间以来我们在网上向读者介绍了如何为Linux以及类Linux操作系统配置多种不同的性能监控工具。在这篇文章中我们将罗列一系列使用最频繁的性能监控工具并对介绍到的每一个工具提供了相应的简介链接大致将其划分为两类基于命令行的和提供图形化接口的。
### 基于命令行的性能监控工具 ###
#### 1. dstat - 多类型资源统计工具 ####
该命令整合了**vmstat****iostat**和**ifstat**三种命令。同时增加了新的特性和功能可以让你能及时看到各种的资源使用情况,从而能够使你对比和整合不同的资源使用情况。通过不同颜色和区块布局的界面帮助你能够更加清晰容易的获取信息。它也支持将信息数据导出到**cvs**格式文件中,从而用其他应用程序打开,或者导入到数据库中。你可以用该命令来[监控cpu内存和网络状态随着时间的变化][1]。
![](http://blog.linoxide.com/wp-content/uploads/2014/10/dstat.png)
#### 2. atop - 相比top更好的ASCII码体验 ####
这个使用**ASCII**码显示方式的命令行工具是一个显示所有进程活动的性能监控工具。它可以展示每日的系统日志以进行长期的进程活动分析并高亮显示过载的系统使用资源。它包含了CPU内存交换空间磁盘和网络层的度量指标。所有这些功能只需在终端运行**atop**即可。
# atop
当然你也可以使用[交互界面来显示][2]数据并进行排序。
![](http://blog.linoxide.com/wp-content/uploads/2014/10/atop1.jpg)
#### 3. Nmon - 类Unix系统的性能监控 ####
Nmon是**Nigel's Monitor**缩写,它最早开发用来作为**AIX**的系统监控工具。如果使用**在线模式**,可以使用光标键在屏幕上操作实时显示在终端上的监控信息。使用**捕捉模式**能够将数据保存为**CSV**格式,方便进一步的处理和图形化展示。
![](http://blog.linoxide.com/wp-content/uploads/2014/10/nmon_interface.png)
更多的信息参考我们的[nmon性能监控文章][3]。
#### 4. slabtop - 显示内核slab缓存信息 ####
这个应用能够显示**缓存分配器**是如何管理Linux内核中缓存的不同类型的对象。这个命令类似于top命令区别是它的重点是实时显示内核slab缓存信息。它能够显示按照不同排序条件来排序显示缓存列表。它同时也能够显示一个slab层信息的统计信息的题头。举例如下
# slabtop --sort=a
# slabtop -s b
# slabtop -s c
# slabtop -s l
# slabtop -s v
# slabtop -s n
# slabtop -s o
**更多信息参阅**[内核slab缓存文章][4]。
#### 5. sar - 性能监控和瓶颈检查 ####
**sar** 命令可以将操作系统上所选的累积活动计数器内容信息输出到标准输出上。其基于计数值和时间间隔参数的**审计系统**会按照指定的时间间隔输出指定次数的监控信息。如果时间间隔参数为设置为0那么[sar命令将会显示系统从开机到当时时刻的平均统计信息][5]。有用的命令如下:
# sar -u 2 3
# sar -u -f /var/log/sa/sa05
# sar -P ALL 1 1
# sar -r 1 3
# sar -W 1 3
#### 6. Saidar - 简单的统计监控工具 ####
Saidar是一个**简单**且**轻量**的系统信息监控工具。虽然它无法提供大多性能报表,但是它能够通过一个简单明了的方式显示最有用的系统运行状况数据。你可以很容易地看到[运行时间、平均负载、CPU、内存、进程、磁盘和网络接口][6]统计信息。
Usage: saidar [-d delay] [-c] [-v] [-h]
-d 设置更新时间(秒)
-c 彩色显示
-v 显示版本号
-h 显示本帮助
![](http://blog.linoxide.com/wp-content/uploads/2014/10/saidar-e1413370985588.png)
#### 7. top - 经典的Linux任务管理工具 ####
作为一个广为人知的**Linux**工具,**top**是大多数的类Unix操作系统任务管理器。它可以显示当前正在运行的进程的列表用户可以按照不同的条件对该列表进行排序。它主要显示了系统进程对**CPU**和内存的使用状况。top可以快速检查是哪个或哪几个进程挂起了你的系统。你可以在[这里][7]看到top使用的例子。 你可以在终端输入top来运行它并进入到交互模式
交互模式的一些快捷操作:
全局命令: <回车/空格> ?, =, A, B, d, G, h, I, k, q, r, s, W, Z
统计区的命令: l, m, t, 1
任务区的命令:
外观: b, x, y, z 内容: c, f, H, o, S, u 大小: #, i, n 排序: <, >, F, O, R
色彩方案: <Ret>, a, B, b, H, M, q, S, T, w, z, 0 - 7
窗口命令: -, _, =, +, A, a, G, g, w
![](http://blog.linoxide.com/wp-content/uploads/2014/10/top.png)
#### 8. Sysdig - 系统进程的高级视图 ####
**Sysdig**是一个能够让系统管理员和开发人员以前所未有方式洞察其系统行为的监控工具。其开发团队希望改善系统级的监控方式,通过提供关于**存储,进程,网络和内存**子系统的**统一有序**以及**粒度可见**的方式来进行错误排查,并可以创建系统活动记录文件以便你可以在任何时间轻松分析。
简单例子:
# sysdig proc.name=vim
# sysdig -p"%proc.name %fd.name" "evt.type=accept and proc.name!=httpd"
# sysdig evt.type=chdir and user.name=root
# sysdig -l
# sysdig -L
# sysdig -c topprocs_net
# sysdig -c fdcount_by fd.sport "evt.type=accept"
# sysdig -p"%proc.name %fd.name" "evt.type=accept and proc.name!=httpd"
# sysdig -c topprocs_file
# sysdig -c fdcount_by proc.name "fd.type=file"
# sysdig -p "%12user.name %6proc.pid %12proc.name %3fd.num %fd.typechar %fd.name" evt.type=open
# sysdig -c topprocs_cpu
# sysdig -c topprocs_cpu evt.cpu=0
# sysdig -p"%evt.arg.path" "evt.type=chdir and user.name=root"
# sysdig evt.type=open and fd.name contains /etc
![](http://blog.linoxide.com/wp-content/uploads/2014/10/sysdig.jpg)
**更多信息** 可以在 [如何利用sysdig改善系统层次的监控和错误排查][8]
#### 9. netstat - 显示开放的端口和连接 ####
它是**Linux管理员**使用来显示各种网络信息的工具,如查看什么端口开放和什么网络连接已经建立以及何种进程运行在该连接之上。同时它也显示了不同程序间打开的**Unix套接字**的信息。作为大多数Linux发行版本的一部分netstat的许多命令在 [netstat和它的不同输出][9]中有详细的描述。最为常用的如下:
$ netstat | head -20
$ netstat -r
$ netstat -rC
$ netstat -i
$ netstat -ie
$ netstat -s
$ netstat -g
$ netstat -tapn
### 10. tcpdump - 洞察网络封包 ###
**tcpdump**可以用来查看**网络连接**的**封包**内容。它显示了传输过程中封包内容的各种信息。为了使得输出信息更为有用,它允许使用者通过不同的过滤器获取自己想要的信息。可以参照的例子如下:
# tcpdump -i eth0 not port 22
# tcpdump -c 10 -i eth0
# tcpdump -ni eth0 -c 10 not port 22
# tcpdump -w aloft.cap -s 0
# tcpdump -r aloft.cap
# tcpdump -i eth0 dst port 80
你可以在文章“[tcpdump和捕捉包][10]”中找到详细描述。
#### 11. vmstat - 虚拟内存统计信息 ####
**vmstat**是虚拟内存(**virtual memory** statistics)的缩写,作为一个**内存监控**工具,它收集和显示关于**内存****进程****终端**和**分页**和**I/O阻塞**的概括信息。作为一个开源程序它可以在大部分Linux发行版本中找到包括Solaris和FreeBSD。它用来诊断大部分的内存性能问题和其他相关问题。
![](http://blog.linoxide.com/wp-content/uploads/2014/10/vmstat_delay_5.png)
**更多信息**参考 [vmstat命令文章][11]。
#### 12. free - 内存统计信息 ####
free是另一个能够在终端中显示内存和交换空间使用的命令行工具。由于它的简易它经常用于快速查看内存使用或者是应用于不同的脚本和应用程序中。在这里你可以看到[这个小程序的许多应用][12]。几乎所有的系统管理员日常都会用这个工具。:-)
![](http://blog.linoxide.com/wp-content/uploads/2014/10/free_hs3.png)
#### 13. Htop - 更加友好的top ####
**Htop**基本上是一个top改善版本它能够以更加多彩的方式显示更多的统计信息同时允许你采用不同的方式进行排序它提供了一个**用户友好**的接口。
![](http://blog.linoxide.com/wp-content/uploads/2014/10/htop.png)
你可以在文章“[关于htop和top的比较][13]”中找到**更多的信息** 。
#### 14. ss - 网络管理的现代替代品 ####
**ss**是**iproute2**包的一部分。iproute2是用来替代一整套标准的**Unix网络**工具组件,它曾经用来完成[网络接口配置路由表和管理ARP表][14]任务。ss工具用来记录套接字统计信息它可以显示类似netstat一样的信息同时也能显示更多TCP和状态信息。一些例子如下
# ss -tnap
# ss -tnap6
# ss -tnap
# ss -s
# ss -tn -o state established -p
#### 15. lsof - 列表显示打开的文件 ####
**lsof**命令,意为“**list open files**”, 用于在许多类Unix系统中显示所有打开的文件及打开它们的进程。在大部分Linux发行版和其他类Linux操作系统中系统管理员用它来检查不同的进程打开了哪些文件。
# lsof +p process_id
# lsof | less
# lsof -u username
# lsof /etc/passwd
# lsof -i TCP:ftp
# lsof -i TCP:80
你可以在[lsof 文章][15]中找到**更多例子**。
#### 16. iftop - 类似top的网络连接工具 ####
**iftop**是另一个基于网络信息的类似top的程序。它能够显示当前时刻按照**带宽使用**量或者上传或者下载量排序的**网络连接**状况。它同时提供了下载文件的预估完成时间。
![](http://blog.linoxide.com/wp-content/uploads/2014/10/iftop.png)
**更多信息**可以参考[网络流量iftop文章][16]
#### 17. iperf - 网络性能工具 ####
**iperf**是一个**网络测试**工具,能够创建**TCP**和**UDP**数据连接并在网络上测量它们的**传输性能**。它支持调节关于时间,协议和缓冲等不同的参数。对于每一个测试,它会报告带宽,丢包和其他的一些参数。
![](http://blog.linoxide.com/wp-content/uploads/2014/10/iperf-e1413378331696.png)
如果你想用使用这个工具,可以参考这篇文章: [如何安装和使用iperf][17]
#### 18. Smem - 高级内存报表工具 ####
**Smem**是最先进的**Linux**命令行工具之一,它提供关于系统中已经使用的和共享的实际内存大小,试图提供一个更为可靠的当前**内存**使用数据。
$ smem -m
$ smem -m -p | grep firefox
$ smem -u -p
$ smem -w -p
参考我们的文章:[Smem更多的例子][18]
### 图形化或基于Web的性能工具 ###
#### 19. Icinga - Nagios的社区分支版本 ####
**Icinga**是一个**开源免费**的网络监控程序作为Nagios的分支它继承了前者现有的大部分功能同时基于这些功能又增加了社区用户要求已久的功能和补丁。
![](http://blog.linoxide.com/wp-content/uploads/2014/10/Icinga-e1413377995731.png)
**更多信息**请参考[安装和配置lcinga文章][19].
#### 20. Nagios - 最为流行的监控工具. ####
作为在Linux上使用最为广泛和最为流行的**监控方案**,它有一个守护程序用来收集不同进程和远程主机的信息,这些收集到的信息都通过功能强大**的web界面**进行呈现。
![](http://blog.linoxide.com/wp-content/uploads/2014/10/nagios-e1413305858732.png)
你可以在文章“[如何安装nagios][20]”里面**找到更多的信息**
#### 21. Linux process explorer - Linux下的procexp ####
**Linux process explorer**是一个Linux下的图形化进程浏览工具。它能够显示不同的进程信息如进程数TCP/IP连接和每一个进程的性能指标。作为**Windows**下**procexp**在Linux的替代品是由**Sysinternals**开发的,其目标是比**top**和**ps**提供更好的用户体验。
![](http://a.fsdn.com/con/app/proj/procexp/screenshots/tcpipview.png)
查看 [linux process explorer 文章][21]获取更多信息。
#### 22. Collectl - 性能监控工具 ####
你可以既可以通过交互的方式使用这个**性能监控**工具,也可以用它把**报表**写到磁盘上并通过web服务器来访问。它以一种**易读易管理**的格式,显示了**CPU磁盘内存网络网络文件系统进程slabs**等统计信息。
![](http://blog.linoxide.com/wp-content/uploads/2014/10/collectl.png)
**更多** 关于[Collectl的文章][22]。
#### 23. MRTG - 经典网络流量监控图形工具 ####
这是一个采用**rrdtool**的生成图形的流量监控工具。作为**最早**的提供**图形化界面**的流量监控工具它被广泛应用在类Unix的操作系统中。查看我们关于[如何使用MRTG][23]的文章获取更多关于安装和配置的信息。
![](http://blog.linoxide.com/wp-content/uploads/2014/10/mrtg.png)
#### 24. Monit - 简单易用的监控工具 ####
**Monit**是一个用来**监控进程****系统加载****文件系统**和**目录文件**等的开源的Linux工具。你能够让它自动化维护和修复也能够在运行错误的情景下执行特定动作或者发邮件报告提醒系统管理员。如果你想要用这个工具你可以查看[如何使用Monit的文章][24]。
![](http://blog.linoxide.com/wp-content/uploads/2014/10/monit.png)
#### 25. Munin - 为服务器提供监控和提醒服务 ####
作为一个网络资源监控工具,**Munin**能够帮助分析**资源趋势**和**查看薄弱环节**以及导致产生**性能问题**的原因。开发此软件的团队希望它能够易用和用户体验友好。该软件是用Perl开发的并采用**rrdtool**来绘制图形,使用了**web界面**进行呈现。开发人员推广此应用时声称当前已有500多个监控插件可以“**即插即用**”。
![](http://blog.linoxide.com/wp-content/uploads/2014/10/Ubuntu-2014-10-13-10-37-34-e1413185930801.png)
**更多信息**可以在[关于Munin的文章][25]中找到。
--------------------------------------------------------------------------------
via: http://linoxide.com/monitoring-2/linux-performance-monitoring-tools/
作者:[Adrian Dinu][a]
译者:[andyxue](https://github.com/andyxue)
校对:[wxy](https://github.com/wxy)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://linoxide.com/author/adriand/
[1]:http://linux.cn/article-3215-1.html
[2]:http://linoxide.com/monitoring-2/guide-using-linux-atop/
[3]:http://linoxide.com/monitoring-2/install-nmon-monitor-linux-performance/
[4]:http://linux.cn/article-3702-1.html
[5]:http://linoxide.com/linux-command/linux-system-performance-monitoring-using-sar-command/
[6]:http://linoxide.com/monitoring-2/monitor-linux-saidar-tool/
[7]:http://linux.cn/article-2352-1.html
[8]:http://linux.cn/article-4341-1.html
[9]:http://linux.cn/article-2434-1.html
[10]:http://linoxide.com/linux-how-to/network-traffic-capture-tcp-dump-command/
[11]:http://linux.cn/article-2472-1.html
[12]:http://linux.cn/article-2443-1.html
[13]:http://linux.cn/article-3141-1.html
[14]:http://linux.cn/article-4372-1.html
[15]:http://linux.cn/article-4099-1.html
[16]:http://linux.cn/article-1843-1.html
[17]:http://linoxide.com/monitoring-2/install-iperf-test-network-speed-bandwidth/
[18]:http://linoxide.com/tools/memory-usage-reporting-smem/
[19]:http://linoxide.com/monitoring-2/install-configure-icinga-linux/
[20]:http://linux.cn/article-2436-1.html
[21]:http://sourceforge.net/projects/procexp/
[22]:http://linux.cn/article-3154-1.html
[23]:http://linoxide.com/tools/multi-router-traffic-grapher/
[24]:http://linoxide.com/monitoring-2/monit-linux/
[25]:http://linoxide.com/ubuntu-how-to/install-munin/

View File

@ -27,7 +27,7 @@
### eCryptFS基础 ###
eCrypFS是一个基于FUSE的用户空间加密文件系统在Linux内核2.6.19及更高版本中可用作为encryptfs模块。eCryptFS加密的伪文件系统挂载到当前文件系统顶部。它可以很好地工作在EXT文件系统家族和其它文件系统如JFS、XFS、ReiserFS、Btrfs甚至是NFS/CIFS共享文件系统上。Ubuntu使用eCryptFS作为加密其家目录的默认方法ChromeOS也是。在eCryptFS底层默认使用的是AES算法但是它也支持其它算法如blowfish、des3、cast5、cast6。如果你是通过手工创建eCryptFS设置你可以选择其中一种算法。
eCrypFS是一个基于FUSE的用户空间加密文件系统在Linux内核2.6.19及更高版本中可用作为encryptfs模块。eCryptFS加密的伪文件系统挂载到当前文件系统顶部。它可以很好地工作在EXT文件系统家族和其它文件系统如JFS、XFS、ReiserFS、Btrfs甚至是NFS/CIFS共享文件系统上。Ubuntu使用eCryptFS作为加密其家目录的默认方法ChromeOS也是。在eCryptFS底层默认使用的是AES算法但是它也支持其它算法如blowfish、des3、cast5、cast6。如果你是通过手工创建eCryptFS设置你可以选择其中一种算法。
就像我所的Ubuntu让我们在安装过程中选择是否加密/home目录。好吧这是使用eCryptFS的最简单的一种方法。
@ -63,13 +63,13 @@ Arch Linux
![](https://farm6.staticflickr.com/5608/15453440890_3b4be6d5a7_z.jpg)
它会要求你输入登录密码和挂载密码。登录密码和你常规登录的密码一样,而挂载密码用于派生一个文件加密主密钥。留空来生成一个,这样会更安全。登出然后重新登录。
它会要求你输入登录密码和挂载密码。登录密码和你常规登录的密码一样,而挂载密码用于派生一个文件加密主密钥。这里留空可以生成一个(复杂的),这样会更安全。登出然后重新登录。
你会注意到eCryptFS默认在你的家目录中创建了两个目录Private和.Private。~/.Private目录包含有加密的数据而你可以在~/Private目录中访问到相应的解密后的数据。在你登录时~/.Private目录会自动解密并映射到~/Private目录因此你可以访问它。当你登出时~/Private目录会自动卸载而~/Private目录中的内容会加密回到~/.Private目录。
eCryptFS怎么会知道你拥有~/.Private目录并自动将其解密到~/Private目录而不需要我们输入密码呢这就是eCryptFS的PAM模块捣的鬼它为我们提供了这项便利服务。
如果你不想~/Private目录在登录时自动挂载只需要在运行ecryptfs-setup-private工具时添加“--noautomount”选项。同样如果你不想要~/Private目录在登出后自动卸载也可以自动“--noautoumount”选项。但是那样后你需要自己手工挂载或卸载~/Private目录
如果你不想~/Private目录在登录时自动挂载只需要在运行ecryptfs-setup-private工具时添加“--noautomount”选项。同样如果你不想要~/Private目录在登出后自动卸载也可以自动“--noautoumount”选项。但是那样后你需要自己手工挂载或卸载~/Private目录
$ ecryptfs-mount-private ~/.Private ~/Private
$ ecryptfs-umount-private ~/Private
@ -94,7 +94,7 @@ via: http://xmodulo.com/encrypt-files-directories-ecryptfs-linux.html
作者:[Christopher Valerio][a]
译者:[GOLinux](https://github.com/GOLinux)
校对:[校对者ID](https://github.com/校对者ID)
校对:[wxy](https://github.com/wxy)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出

View File

@ -51,7 +51,7 @@ Shell 脚本 - 使用 if 语句进行条件检测
echo "Number is smaller"
fi
### If..elif..else..fi 语句 (Short for else if) ###
### If..elif..else..fi 语句 (简写的 else if) ###
Bourne Shell 的 if 语句语法中else 语句里的代码块会在 if 条件为假时执行。我们还可以将 if 语句嵌套到一起,来实现多重条件的检测。我们可以使用 elif 语句else if 的缩写)来构建多重条件的检测。
@ -94,7 +94,7 @@ Bourne Shell 的 if 语句语法中else 语句里的代码块会在 if 条件
If 和 else 语句可以在一个 bash 脚本里相互嵌套。关键词 “fi” 表示里层 if 语句的结束,所有 if 语句必须使用 关键词 “fi” 来结束。
基本 if 语句的 **嵌套语法**
基本 if 语句的**嵌套语法**
if [ 判断条件1 ]
then
@ -139,7 +139,7 @@ via: http://www.linuxtechi.com/shell-scripting-checking-conditions-with-if/
作者:[Pradeep Kumar][a]
译者:[ThomazL](https://github.com/ThomazL)
校对:[校对者ID](https://github.com/校对者ID)
校对:[wxy](https://github.com/wxy)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出

View File

@ -1,10 +1,10 @@
Linux中使用rsync——文件和目录排除列表
================================================================================
**rsync**是一个十分有用而且十分流行的linux工具。它用于备份和恢复文件也用于对比和同步文件。我们已经在前面的文章讲述了[Linux中rsync命令的使用实例][1]而今天我们将增加一些更为有用的rsync使用技巧。
**rsync**是一个十分有用而且十分流行的linux工具。它用于备份和恢复文件也用于对比和同步文件。我们已经在前面的文章讲述了[如何在Linux下使用rsync][1]而今天我们将增加一些更为有用的rsync使用技巧。
### 排除文件和目录列表 ###
有时候,当我们做大量同步的时候,我们可能想要从同步的文件和目录中排除一个文件和目录的列表。一般来说,像不能被同步的设备文件和某些系统文件,或者像临时文件或者缓存文件这类占据不必要磁盘空间的文件,这类文件时我们需要排除的。
有时候,当我们做大量同步的时候,我们可能想要从同步的文件和目录中排除一个文件和目录的列表。一般来说,像设备文件和某些系统文件,或者像临时文件或者缓存文件这类占据不必要磁盘空间的文件是不合适同步的,这类文件是我们需要排除的。
首先让我们创建一个名为“excluded”的文件当然你想取什么名都可以然后将我们想要排除的文件夹或文件写入该文件一行一个。在我们的例子中如果你想要对根分区进行完整的备份你应该排除一些在启动时创建的设备目录和放置临时文件的目录列表看起来像下面这样
@ -19,7 +19,8 @@ Linux中使用rsync——文件和目录排除列表
### 从命令行排除文件 ###
你也可以从命令行直接排除文件该方法在你要排除的文件数量较少并且在你想要将它写成脚本或加到crontab中又不想脚本或cron依赖于另外一个文件运行时十分有用。
For example if you wish to sync /var to a backup directory but you don't wish to include cache and tmp folder that usualy don't hold important content between restarts you can use the following command:
例如,如果你想要同步/var到一个备份目录但是你不想要包含cache和tmp这些通常不会有重要内容的文件夹你可以使用以下命令
$ sudo rsync -aAXhv --exclude={"/var/cache","/var/tmp"} /var /home/adrian/var
@ -34,9 +35,9 @@ via: http://linoxide.com/linux-command/exclude-files-rsync-examples/
作者:[Adrian Dinu][a]
译者:[GOLinux](https://github.com/GOLinux)
校对:[校对者ID](https://github.com/校对者ID)
校对:[wxy](https://github.com/wxy)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://linoxide.com/author/adriand/
[1]:http://linoxide.com/how-tos/rsync-copy/
[1]:http://linux.cn/article-4503-1.html

View File

@ -1,25 +1,25 @@
Pitivi 发布 0.94 版本,使用 GTK HeaderBar修复无数 Bugs
Pitivi 0.94 切换到 GTK HeaderBar修复无数 Bugs
=====================================
** 我是 [Pitivi 视频编辑器][1] 的狂热爱好者。Pitivi 可能不是至少现在不是Linux 上可用的最拉风的,功能完善的非线性视频编辑器,但是它绝对是最可靠的一个。 **
** 我是 [Pitivi 视频编辑器][1] 的狂热爱好者。Pitivi 可能不是至少现在不是Linux 上可用的、最拉风的、功能完善的、非线性视频编辑器,但是它绝对是最可靠的一个。 **
![](http://www.omgubuntu.co.uk/wp-content/uploads/2014/11/pitivi-tile.jpg)
自然而然地,我一直在期待这个开源视频编辑器在 [这周末][2] 发布的新的 beta 测试版。
自然而然地,我一直在期待这个开源视频编辑器[这次][2]发布的新的 beta 测试版。
Pitivi 0.94 是基于新的 “GStreamer Editing Service”GES的第四个发行版本。
开发组成员 Jean-François Fortin Tam,称号 “Nekohayo” 将本次升级描述为 “** ...主要作为一个维护版本发布,但是除了对 Bug 的修复之外,还是增加了几个有意思的改进和功能。 **
开发组成员 Jean-François Fortin Tam“Nekohayo”将本次升级描述为 “**...主要作为一个维护版本发布,但是除了对 Bug 的修复之外,还是增加了几个有意思的改进和功能。**”
## 有什么新改进? ##
### 有什么新改进? ###
有不少有意思的改进!作为 Pitivi 0.94 版本中最明显的变化Pitivi 添加了如同 GNOME 客户端一般的 GTK HeaderBar 装饰。HeaderBar 整合了桌面窗口栏,标题栏以及工具栏,节省了大块浪费的垂直以及水平的占用空间。
“*当你用过一次后,你就在也回不来了,*” Fortin Tam 介绍说。欣赏一下下面这张截图,你肯定会同意的。
“*当你用过一次后,你就再也不会走了*” Fortin Tam 介绍说。欣赏一下下面这张截图,你肯定会同意的。
![Pitivi now uses GTK HeaderBar and menu button (image: Nekohayo)](http://www.omgubuntu.co.uk/wp-content/uploads/2014/11/pitivi-0.94-headerbar.jpeg)
Pitivi 现在使用了 GTK HeaderBar 以及菜单键。image: Nekohayo
*Pitivi 现在使用了 GTK HeaderBar 以及菜单键。image: Nekohayo*
那么应用菜单又怎么样呢?别担心,应用菜单遵循了 GNOME 交互界面的标准,看一下自己机器上的应用菜单确认一下吧。
@ -49,13 +49,11 @@ Pitivi 现在使用了 GTK HeaderBar 以及菜单键。image: Nekohayo
上面这些信息听起来都很不错吧?下一次更新会更好!这不只是一个通常的来自开发者的夸张,如同 Jean François 解释的一般:
> “下一次更新0.95)会运行在难以置信的强大的后端上。感谢 Mathieu [Duponchelle] 和 Thibault [Saunier] 在用 NLE新的为了 GES 的非线性引擎)替代 GNonLin 并修复问题等工作中做出的努力。”
> “下一次更新0.95)会运行在令人难以置信的强大的后端上。感谢 Mathieu [Duponchelle] 和 Thibault [Saunier] 在用 NLE新的为了 GES 的非线性引擎)替代 GNonLin 并修复问题等工作中做出的努力。”
Ubuntu 14.10 带有老的(更容易崩溃)的软件中心,进入 Pitivi 官网¹下载 [安装包][5] 来体验最新杰作。
Ubuntu 14.10 带有老的(更容易崩溃)的软件中心,进入 Pitivi 官网下载 [安装包][5] 来体验最新杰作。
** Pitivi 基金会酬了将近 €20,000使我们能够向着约定的 1.0 版本迈出一大步。如果你也想早点看到 1.0 版本的到来的话,省下你在星巴克买的格郎德香草奶油咖啡,捐赠我们! **
*¹目前 0.94 安装包还没发布,你可以下载 nightly tar*
**Pitivi 基金会筹了将近 €20,000使我们能够向着约定的 1.0 版本迈出一大步。如果你也想早点看到 1.0 版本的到来的话,省下你在星巴克买的格郎德香草奶油咖啡,捐赠我们!**
--------------------------------------------------------------------------------
@ -64,7 +62,7 @@ via: http://www.omgubuntu.co.uk/2014/11/pitivi-0-94-header-bar-more-features
作者:[Joey-Elijah Sneddon][a]
译者:[ThomazL](https://github.com/ThomazL)
校对:[校对者ID](https://github.com/校对者ID)
校对:[wxy](https://github.com/wxy)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出

View File

@ -1,45 +1,36 @@
如何从Ubuntu的声音菜单中移除音乐播放器
================================================================================
![](http://www.omgubuntu.co.uk/wp-content/uploads/2014/11/music-players.jpg)
**自从2010年首次出现Ubuntu 的声音菜单已经被证明是Unity 桌面上的最流行的独有特性之一。**
**自从2010年的介绍一来Ubuntu声音菜单已经被证明是最流行和个性的统一桌面之一.**
把音乐播放器与音量控制程序集成到一个标准的界面里是一种看起来很聪明的做法,这样就不用到处找声音相关的各种程序。人们不禁要问,为什么其它操作系统没有效仿这种做法!
随着音乐播放器与音量程序合成小体积的应用程序-即集成,其中一个希望找到与声音相关的蠢事-通过标准接口的灵感。人们不禁要问,为什么其它操作系统没有效仿这种做法!
#### 冗长的 ####
尽管它看起来很方便但是这个小应用当前存在一个问题相当多的东西集在一起看起来想一个MP3,是否真正的把想要的东西都放在里面了。虽然有用,但是一个无所不再的应用程序清单已经安装了,这让一些不经常适用的人看着很累赘和反感。
我将要打赌上面的截图看起来一定很熟悉,你们中的很多人一定阅读过吧!不要害怕,**dconf-editor **就在这里。
#### 臃肿 ####
尽管它看起来很方便,但是这个小应用当前存在一个问题:很多播放器都堆在一起,像一个组合音响一样。也许你用得着,但是你安装的所有的媒体播放器都挤在这里,这会让人看着很累赘和反感。
我将要打赌,当你读到这里时,一定发现上面的截图看起来很熟悉!不要担心,**dconf-editor**可以解决它。
### 从Ubuntu 声音菜单中移除播放器 ###
#### 第一部分: 基础知识 ####
最快速和最简单地从声音菜单中移除播放器的方法就是卸载相关的应用程序。但这是极端的方式,我的意思是指你也许想要保留应用程序,但是不需要它集成。
最快速和最简单地从声音菜单中移除播放器的方法就是卸载相关的应用程序。但这是极端的方式,我的意思是指你也许想要保留应用程序,但是不需要它集成到菜单里面
只删除播放器但是保留我们需要的应用程序我们用到一个看起来令人惊讶的工具叫“dconf-editor”.
只删除播放器但是保留我们需要的应用程序我们用到一个看起来令人惊讶的工具叫“dconf-editor”
你可能已经安装了如果没有安装的话那么你从Ubuntu软件中心找出。
- [在Ubuntu中点击安装Dconf-Editor][1]
一旦安装完毕找到Unity Dash并打开。打开的时候不要惊慌你不会再回到2002年了它确实是这样子的。
一旦安装完毕找到Unity Dash并打开。打开的时候不要惊慌你没有到2002年它确实是这种古老的样子。
使用右侧菜单栏,你需要从导航到 com > canonical > indicator > sound.下面的面板将会出现。
![](http://www.omgubuntu.co.uk/wp-content/uploads/2014/11/dconf-editor.jpg)
双击靠近interested-media-players的比括号并删除你希望从声音菜单里移除掉的播放器但需要保留在方括号中且不要删除任何你想保留逗号或者撇号。
双击“interested-media-players”旁的闭括号并删除你希望从声音菜单里移除掉的播放器但需要保留方括号中且不要删除任何需要保留的逗号或者单引号。
举个例子,我移除掉这些
@ -55,9 +46,9 @@
#### 第二部分:黑名单 ####
等等还不能关闭dconf-editor。尽管上面的步骤看起来把事情处理得干净利落但是一些播放器在打开时会立即重新加载到声音菜单。为了避免重复这个过程将它们添加到**媒体播放器黑名单**中。
等等还不能关闭dconf-editor。尽管上面的步骤看起来把事情处理得干净利落但是一些播放器在打开时会立即重新加载到声音菜单。为了避免重复这个过程将它们添加到**blacklisted-media-player**中。
记得每个在括号里的播放器都用逗号分隔多个条目。他们也必须在方括号内,所以在退出之前请务必仔细检查。
记得每个在括号里的播放器都用逗号分隔多个条目。他们也必须在方括号内,所以在退出之前请务必仔细检查。
最终结果如下:
@ -69,7 +60,7 @@ via: http://www.omgubuntu.co.uk/2014/11/remove-players-ubuntu-sound-menu
作者:[Joey-Elijah Sneddon][a]
译者:[disylee](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
校对:[wxy](https://github.com/wxy)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出

View File

@ -1,4 +1,4 @@
运行级别与服务管理命令systemd简介
systemd的运行级别与服务管理命令简介
================================================================================
![](http://www.linux.com/images/stories/41373/Linux_kernel_unified_hierarchy_cgroups_and_systemd.svg.png)
@ -6,20 +6,21 @@
在开始介绍systemd命令前让我们先简单的回顾一下历史。在Linux世界里有一个很奇怪的现象一方面Linux和自由软件FOSS在不断的向前推进另一方面人们对这些变化却不断的抱怨。这就是为什么我要在此稍稍提及那些反对systemd所引起的争论的原因因为我依然记得历史上有不少类似的争论
- 软件包Pacakge是邪恶的因为真的Linux用户会从源码构建他所想要的的一切并严格的管理系统中安装的软件。
- 解析依赖关系的包管理器是邪恶的,真的Linux用户会手动解决这些该死的依赖关系。
- 软件包Pacakge是邪恶的因为真的Linux用户会从源码构建他所想要的的一切并严格的管理系统中安装的软件。
- 解析依赖关系的包管理器是邪恶的,真的Linux用户会手动解决这些该死的依赖关系。
- apt-get总能把事情干好所以只有Yum是邪恶的。
- Red Hat简直就是Linux中的微软。
- 好样的Ubuntu
- 滚蛋吧Ubuntu
诸如此类...就像我之前常常说的一样变化总是让人沮丧。这些该死的变化搅乱了我的工作流程这可不是一件小事情任何业务流程的中断都会直接影响到生产力。但是我们现在还处于计算机发展的婴儿期在未来的很长的一段时间内将会持续有快速的变化和发展。想必大家应该都认识一些因循守旧的人在他们的心里商品一旦买回家以后就是恒久不变的就像是买了一把扳手、一套家具或是一个粉红色的火烈鸟草坪装饰品。就是这些人仍然在坚持使用Windows Vista甚至还有人在使用运行Windows95的老破烂机器和CRT显示器。他们不能理解为什么要去换一台新机器。老的还能用啊不是么
诸如此类...就像我之前常常说的一样变化总是让人沮丧。这些该死的变化搅乱了我的工作流程这可不是一件小事情任何业务流程的中断都会直接影响到生产力。但是我们现在还处于计算机发展的婴儿期在未来的很长的一段时间内将会持续有快速的变化和发展。想必大家应该都认识一些因循守旧的人在他们的心里商品一旦买回家以后就是恒久不变的就像是买了一把扳手、一套家具或是一个粉红色的火烈鸟草坪装饰品。就是这些人仍然在坚持使用Windows Vista甚至还有人在使用运行Windows 95的老破烂机器和CRT显示器。他们不能理解为什么要去换一台新机器。老的还能用啊不是么
这让我回忆起了我在维护老电脑上的一项伟大的成就那台破电脑真的早就该淘汰掉。从前我有个朋友有一台286的老机器安装了一个极其老的MS-DOS版本。她使用这台电脑来处理一些简单的任务比如说约会、日记、记账等我还用BASIC给她写了一个简单的记账软件。她不用关注任何安全更新是这样么因为它压根都没有联网。所以我会时不时给她维修一下电脑更换电阻、电容、电源或者是CMOS电池什么的。它竟然还一直能用。它那袖珍的琥珀CRT显示器变得越来越暗在使用了20多年后终于退出了历史舞台。现在我的这位朋友换了一台运行Linux的老Thinkpad来干同样的活。
前面的话题有点偏题了下面抓紧时间开始介绍systemd。
###运行级别 vs. 状态###
SysVInit使用静态的运行级别来构建不同的启动状态大部分发布版本中提供了以下5个运行级别
- 单用户模式Single-user mode
@ -28,7 +29,7 @@ SysVInit使用静态的运行级别来构建不同的启动状态大部分发
- 系统关机System shutdown
- 系统重启System reboot
对于我来说,使用多个运行级别并没有太大的好处,但它们却一直在系统中存在着。 不同于运行级别systemd可以创建不同的状态状态提供了灵活的机制来设置启动时的配置项。这些状态是由多个unit文件组成的状态又叫做启动目标target。启动目标有一个漂亮的描述性命名而不是像运行级别那样使用数字。unit文件可以控制服务、设备、套接字和挂载点。参考/usr/lib/systemd/system/graphical.target这是CentOS 7默认的启动目标
对于我来说,使用多个运行级别并没有太大的好处,但它们却一直在系统中存在着。 不同于运行级别systemd可以创建不同的状态状态提供了灵活的机制来设置启动时的配置项。这些状态是由多个unit文件组成的状态又叫做启动目标target。启动目标有一个清晰的描述性命名而不是像运行级别那样使用数字。unit文件可以控制服务、设备、套接字和挂载点。参考/usr/lib/systemd/system/graphical.target这是CentOS 7默认的启动目标
[Unit]
Description=Graphical Interface
@ -71,15 +72,16 @@ SysVInit使用静态的运行级别来构建不同的启动状态大部分发
DIR_SUFFIX="${APACHE_CONFDIR##/etc/apache2-}"
else
DIR_SUFFIX=
整个文件一共有410行。
你可以检查unit件的依赖关系我常常被这些复杂的依赖关系给吓到
你可以检查unit件的依赖关系,我常常被这些复杂的依赖关系给吓到:
$ systemctl list-dependencies httpd.service
### cgroups ###
cgroups或者叫控制组在Linux内核里已经出现好几年了但直到systemd的出现才被真正使用起来。[The kernel documentation][1]中是这样描述cgroups的“控制组提供层次化的机制来管理任务组使用它可以聚合和拆分任务组并管理任务组后续产生的子任务。”换句话说它提供了多种有效的方式来控制、限制和分配资源。systemd使用了cgroups你可以便捷查看它使用下面的命令可以展示你系统中的整个cgroup树
cgroups或者叫控制组在Linux内核里已经出现好几年了但直到systemd的出现才被真正使用起来。[The kernel documentation][1]中是这样描述cgroups的“控制组提供层次化的机制来管理任务组使用它可以聚合和拆分任务组并管理任务组后续产生的子任务。”换句话说它提供了多种有效的方式来控制、限制和分配资源。systemd使用了cgroups你可以便捷查看它使用下面的命令可以展示你系统中的整个cgroup树
$ systemd-cgls
@ -115,7 +117,7 @@ via: http://www.linux.com/learn/tutorials/794615-systemd-runlevels-and-service-m
作者:[Carla Schroder][a]
译者:[coloka](https://github.com/coloka)
校对:[校对者ID](https://github.com/校对者ID)
校对:[wxy](https://github.com/wxy)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出

View File

@ -1,4 +1,4 @@
Linux问答时间--如何在CentOS上安装phpMyAdmin
Linux有问必答:如何在CentOS上安装phpMyAdmin
================================================================================
> **问题**:我正在CentOS上运行一个MySQL/MariaDB服务并且我想要通过网络接口来用phpMyAdmin来管理数据库。在CentOS上安装phpMyAdmin的最佳方法是什么
@ -108,7 +108,7 @@ phpMyAdmin是一款以PHP为基础基于Web的MySQL/MariaDB数据库管理工
### 测试phpMyAdmin ###
测试phpMyAdmin是否设置成功访问这个页面http://<web-server-ip-addresss>/phpmyadmin
测试phpMyAdmin是否设置成功访问这个页面http://\<web-server-ip-addresss>/phpmyadmin
![](https://farm6.staticflickr.com/5606/15550758749_0f7ab66b5b_z.jpg)
@ -153,14 +153,14 @@ phpMyAdmin是一款以PHP为基础基于Web的MySQL/MariaDB数据库管理工
via: http://ask.xmodulo.com/install-phpmyadmin-centos.html
译者:[ZTinoZ](https://github.com/ZTinoZ)
校对:[校对者ID](https://github.com/校对者ID)
校对:[wxy](https://github.com/wxy)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[1]:http://xmodulo.com/set-web-based-database-management-system-adminer.html
[2]:http://xmodulo.com/install-lamp-stack-centos.html
[3]:http://xmodulo.com/install-lemp-stack-centos.html
[4]:http://xmodulo.com/how-to-set-up-epel-repository-on-centos.html
[2]:http://linux.cn/article-1567-1.html
[3]:http://linux.cn/article-4314-1.html
[4]:http://linux.cn/article-2324-1.html
[5]:
[6]:
[7]:

View File

@ -1,6 +1,6 @@
Linux的十条SCP传输命令
十个 SCP 传输命令例子
================================================================================
Linux系统管理员应该很熟悉**CLI**环境因为在Linux服务器中是不安装**GUI**的。**SSH**可能是Linux系统管理员通过远程方式安全管理服务器的最流行协议。在**SSH**命令中内置了一种叫**SCP**的命令,用来在服务器之间安全传输文件。
Linux系统管理员应该很熟悉**CLI**环境,因为通常在Linux服务器中是不安装**GUI**的。**SSH**可能是Linux系统管理员通过远程方式安全管理服务器的最流行协议。在**SSH**命令中内置了一种叫**SCP**的命令,用来在服务器之间安全传输文件。
![](http://www.tecmint.com/wp-content/uploads/2013/10/SCP-Commands.png)
@ -10,7 +10,7 @@ Linux系统管理员应该很熟悉**CLI**环境因为在Linux服务器中是
scp source_file_name username@destination_host:destination_folder
**SCP**命令有很多参数供你使用,这里指的是每次都会用到的参数。
**SCP**命令有很多可以使用的参数,这里指的是每次都会用到的参数。
### 用-v参数来提供SCP进程的详细信息 ###
@ -53,7 +53,7 @@ Linux系统管理员应该很熟悉**CLI**环境因为在Linux服务器中是
### 用-C参数来让文件传输更快 ###
有一个参数能让传输文件更快,就是“**-C**”参数,它的作用是不停压缩所传输的文件。它特别之处在于压缩是在网络中进行,当文件传到目标服务器时,它会变回压缩之前的原始大小。
有一个参数能让传输文件更快,就是“**-C**”参数,它的作用是不停压缩所传输的文件。它特别之处在于压缩是在网络传输中进行,当文件传到目标服务器时,它会变回压缩之前的原始大小。
来看看这些命令,我们使用一个**93 Mb**的单一文件来做例子。
@ -121,18 +121,18 @@ Linux系统管理员应该很熟悉**CLI**环境因为在Linux服务器中是
看到了吧,压缩了文件之后,传输过程在**162.5**秒内就完成了,速度是不用“**-C**”参数的10倍。如果你要通过网络拷贝很多份文件那么“**-C**”参数能帮你节省掉很多时间。
有一点我们需要注意,这个压缩的方法不是适用于所有文件。当源文件已经被压缩过了,那就没办法再压缩了。诸如那些像**.zip****.rar****pictures**和**.iso**的文件,用“**-C**”参数就无效
有一点我们需要注意,这个压缩的方法不是适用于所有文件。当源文件已经被压缩过了,那就没办法再压缩很多了。诸如那些像**.zip****.rar****pictures**和**.iso**的文件,用“**-C**”参数就没什么意义
### 选择其它加密算法来加密文件 ###
**SCP**默认是用“**AES-128**”加密算法来加密文件的。如果你想要改用其它加密算法来加密文件,你可以用“**-c**”参数。我们来瞧瞧。
**SCP**默认是用“**AES-128**”加密算法来加密传输的。如果你想要改用其它加密算法来加密传输,你可以用“**-c**”参数。我们来瞧瞧。
pungki@mint ~/Documents $ scp -c 3des Label.pdf mrarianto@202.x.x.x:.
mrarianto@202.x.x.x's password:
Label.pdf 100% 3672KB 282.5KB/s 00:13
上述命令是告诉**SCP**用**3des algorithm**来加密文件。要注意这个参数是“**-c**”而不是“**-C**“。
上述命令是告诉**SCP**用**3des algorithm**来加密文件。要注意这个参数是“**-c**”(小写)而不是“**-C**“(大写)
### 限制带宽使用 ###
@ -143,24 +143,24 @@ Linux系统管理员应该很熟悉**CLI**环境因为在Linux服务器中是
mrarianto@202.x.x.x's password:
Label.pdf 100% 3672KB 50.3KB/s 01:13
在“**-l**”参数后面的这个**400**值意思是我们给**SCP**进程限制了带宽为**50 KB/秒**。有一点要记住,带宽是以**千比特/秒** (**kbps**)表示的,**8 比特**等于**1 字节**。
在“**-l**”参数后面的这个**400**值意思是我们给**SCP**进程限制了带宽为**50 KB/秒**。有一点要记住,带宽是以**千比特/秒** (**kbps**)表示的,**8 比特**等于**1 字节**。
因为**SCP**是用**千字节/秒** (**KB/s**)计算的,所以如果你想要限制**SCP**的最大带宽只有**50 KB/s**,你就需要设置成**50 x 8 = 400**。
### 指定端口 ###
通常**SCP**是把**22**作为默认端口。但是为了安全起见,你可以改成其它端口。比如说,我们想用**2249**端口,命令如下所示。
通常**SCP**是把**22**作为默认端口。但是为了安全起见,可以把 SSH 监听端口改成其它端口。比如说,我们想用**2249**端口,这种情况下就要指定端口。命令如下所示。
pungki@mint ~/Documents $ scp -P 2249 Label.pdf mrarianto@202.x.x.x:.
mrarianto@202.x.x.x's password:
Label.pdf 100% 3672KB 262.3KB/s 00:14
确认一下写的是大写字母“**P**”而不是“**p**“,因为“**p**”已经被用来保留源文件的修改时间和模式。
确认一下写的是大写字母“**P**”而不是“**p**“,因为“**p**”已经被用来保留源文件的修改时间和模式LCTT 译注:和 ssh 命令不同了)
### 递归拷贝文件和文件夹 ###
有时我们需要拷贝文件夹及其内部的所有**文件** / **文件夹**,我们如果能用一条命令解决问题那就更好了。**SCP**用“**-r**”参数就能做到。
有时我们需要拷贝文件夹及其内部的所有**文件**/**文件夹**,我们如果能用一条命令解决问题那就更好了。**SCP**用“**-r**”参数就能做到。
pungki@mint ~/Documents $ scp -r documents mrarianto@202.x.x.x:.
@ -172,7 +172,7 @@ Linux系统管理员应该很熟悉**CLI**环境因为在Linux服务器中是
### 禁用进度条和警告/诊断信息 ###
如果你不想从SCP中看到进度条和警告/诊断信息,你可以用“**-q**”参数来禁用它们,举例如下。
如果你不想从SCP中看到进度条和警告/诊断信息,你可以用“**-q**”参数来静默它们,举例如下。
pungki@mint ~/Documents $ scp -q Label.pdf mrarianto@202.x.x.x:.
@ -207,7 +207,7 @@ Linux系统管理员应该很熟悉**CLI**环境因为在Linux服务器中是
### 选择不同的ssh_config文件 ###
对于经常在公司网络和公共网络之间切换的移动用户来说一直改变SCP的设置显然是很痛苦的。如果我们能放一个不同的**ssh_config**文件来匹配我们的需求那就很好了。
对于经常在公司网络和公共网络之间切换的移动用户来说一直改变SCP的设置显然是很痛苦的。如果我们能放一个保存不同配置的**ssh_config**文件来匹配我们的需求那就很好了。
#### 以下是一个简单的场景 ####
@ -231,7 +231,7 @@ via: http://www.tecmint.com/scp-commands-examples/
作者:[Pungki Arianto][a]
译者:[ZTinoZ](https://github.com/ZTinoZ)
校对:[校对者ID](https://github.com/校对者ID)
校对:[wxy](https://github.com/wxy)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出

View File

@ -1,10 +1,10 @@
5最佳开源的浏览器安全应用
5最佳开源的浏览器安全应用
================================================================================
浏览器是现在各种在线服务的入口。电脑安全问题迄今仍未得到解决,技术进步为恶意软件提供了新的途径,感染我们的设备入侵商业网络。例如,智能手机与平板为恶意软件--及其同伙“[恶意广告][1]”--带来一片全新天地,它们在其中腾挪作乱。
浏览器是现在各种在线服务的入口。电脑安全问题迄今仍未得到解决,技术进步为恶意软件提供了新的途径,感染我们的设备入侵商业网络。例如,智能手机与平板为恶意软件--及其同伙“[恶意广告][1]”--带来一片全新天地,它们在其中腾挪作乱。
恶意广告在合法广告与合法网络中注入恶意软件。当然你可能会认为“合法”广告与网络与非法广告与网络之间仅有一线之隔。但是请不要偏题哦。隐私与安全天生就是一对兄弟,保护隐私也就是保护你的安全。
Firefox, Chrome, 以及 Opera当仁不让属最棒的浏览器性能最佳、兼容性最好、以及安全性最优。以下五个开源安全应用安装于浏览器后会助你抵御种种威胁。
Firefox, Chrome, 以及 Opera 当仁不让属最棒的浏览器:性能最佳、兼容性最好、以及安全性最优。以下五个开源安全应用安装于浏览器后会助你抵御种种威胁。
### 保护隐私: 开源浏览器安全应用 ###
@ -12,11 +12,11 @@ Firefox, Chrome, 以及 Opera当仁不让属最棒的浏览器性能最佳、
广告网络为恶意软件提供了肥沃的土壤。一个广告网络可以覆盖数千站点因此攻陷一个广告网络就相当于攻陷数千台机器。AdBlock及其衍生品—[AdBlock Plus][2], [AdBlock Pro][3], 与 [AdBlock Edge][4]--都是屏蔽广告的优秀工具,可以让那些充斥烦人广告的网站重新还你一片清静。
当然,凡事都有两面性:上述做法损害了依靠广告收入的站点的利益。这些工具一键式白名单功能,对于那些你希望支持的网站,你可以通过白名单功能关闭这些网站的广告屏蔽。(真的,我亲爱的站长们,如果你不希望网站访问者屏蔽你的广告,那么就适可而止,不要让人反感。)
当然,凡事都有两面性:上述做法损害了依靠广告收入的站点的利益。这些工具一键式白名单功能,对于那些你希望支持的网站,你可以通过白名单功能关闭这些网站的广告屏蔽。(真的,我亲爱的站长们,如果你不希望网站访问者屏蔽你的广告,那么就适可而止,不要让人反感。当然,作为粉丝,也请您支持您喜爱的站点,将它们放到白名单吧。
![](http://www.smallbusinesscomputing.com/imagesvr_ce/5731/fig-1-easylist_1.jpg)
图1:在Ad Blocker中添加其它过滤规则。
*图1:在Ad Blocker中添加其它过滤规则。*
Ad Blocker们不仅能屏蔽广告它们还能屏蔽网站跟踪爬虫与恶意域名。要打开额外过滤规则点击ad blocker图标 > 点击**首选项**,转至**过滤规则订阅**标签。点击按纽**添加订阅过滤规则**,然后加入**Easy Privacy + EasyList**规则。加入恶意域名过滤也是个不错的选择它会屏蔽那些供恶意软件与间谍软件寄生的域名。Adblock可在Firefox, Chrome, Opera, Safari, IE, 以及Android平台下工作。
@ -24,7 +24,7 @@ Ad Blocker们不仅能屏蔽广告它们还能屏蔽网站跟踪爬虫与恶
浏览器扩展HTTPS Everywhere可确保在网站HTTPS可用的时候总是以HTTPS方式连接到站点。HTTPS意味着你的连接是以SSL安全套接层方式加密的SSL协议通常用于加密网站与电子邮件连接。HTTPS Everywhere可在Firefox, Chrome, 及Opera下使用。
安装了HTTPS Everywhere之后它会询问你是否希望启用SSL检测程序。点击是因为SSL检测程序会提供额外保护防止中间人攻击与虚假SSL证书攻击。HTTPS Everywhere可在Firefox, Chrome, Opera, Safari, IE, 以及Android平台下工作。
安装了HTTPS Everywhere之后它会询问你是否希望启用SSL检测程序。点击因为SSL检测程序会提供额外保护防止中间人攻击与虚假SSL证书攻击。HTTPS Everywhere可在Firefox, Chrome, Opera, Safari, IE, 以及Android平台下工作。
#### 3. [Social Fixer][6] ####
@ -37,7 +37,9 @@ Social Fixer本身不是安全工具但它具有两个重要的安全特性
![](http://www.smallbusinesscomputing.com/imagesvr_ce/2858/fig-2-socialfixer_1.jpg)
图2: 使用Social Fixer匿名化Facebook网面。
*图2: 使用Social Fixer匿名化Facebook网面。*
LCTT 译注:好吧,这个应用和我等无关~~
#### 4. [Privacy Badger][7] ####
@ -47,7 +49,7 @@ AdBlock也能拦截这些乌七八糟的东西不过Privacy Badger在此方
![](http://www.smallbusinesscomputing.com/imagesvr_ce/9256/fig-3-privacybadger_1.jpg)
图3: Privacy Badger拦截跟踪站点。
*图3: Privacy Badger拦截跟踪站点。*
Privacy Badger装好后就能使用了。点击图标看看它对你浏览的网页都拦截了哪些东西。你可以试试访问Huffingtonpost.com这是一家不在每一个页面塞满第三方组件誓不罢休的网站图3
@ -63,15 +65,15 @@ Disconnect还有安全搜索功能可以阻止搜索引擎爱挖数据的癖
想象一下,网页上所有东西都腾空而出,奔你而去。当然这一切都是抽象的且在幕后悄然发生,不象有人正在猛击窗户试图进入你家那么明显罢了。但是,威胁倒是实实在在的,而且数不胜数,所以你必须采取预防措施,来保护自己。
Carla Schroder著有The Book of Audacity, Linux Cookbook, Linux Networking Cookbook等书并撰写了上百篇Linux指南文章。她曾担任Linux Planet与Linux Today网站总编。
本文作者 Carla Schroder 著有The Book of Audacity, Linux Cookbook, Linux Networking Cookbook等书并撰写了上百篇Linux指南文章。她曾担任Linux Planet与Linux Today网站总编。
--------------------------------------------------------------------------------
via: http://www.smallbusinesscomputing.com/biztools/5-best-open-source-web-browser-security-apps.html
作者:[Carla Schroder][a]
译者:[译者ID](https://github.com/yupmoon)
校对:[校对者ID](https://github.com/校对者ID)
译者:[yupmoon](https://github.com/yupmoon)
校对:[wxy](https://github.com/wxy)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出

View File

@ -1,14 +1,16 @@
图形化显示Linux内存使用情况
使用 smem 可视化显示Linux内存使用情况
================================================================================
物理内存不足对Linux桌面系统和服务器系统的性能影响都很大。当你的电脑变慢时,要做的第一件事就是释放内存。尤其是在多用户环境以及执行关键任务的服务器环境下,内存消耗会变得更加关键,因为多个用户和应用线程会同时争更多的内存空间。
物理内存不足对Linux桌面系统和服务器系统的性能影响都很大。当你的计算机变慢时,要做的第一件事就是释放内存。尤其是在多用户环境以及执行关键任务的服务器环境下,内存消耗会变得更加关键,因为多个用户和应用线程会同时争夺更多的内存空间。
如果要监测系统内各种资源的使用情况比如说CPU或内存图形化显示是一种高效的方法通过图形界面可以快速分析各用户和进程的资源消耗情况。本教程将给大家介绍**在linux下图形化分析内存使用情况**的方法,使用到命令行工具是[smem][1].
### 物理内存使用情况: RSS vs. PSS vs. USS ###
### 物理内存使用情况: RSS 、 PSS 和 USS ###
由于Linux使用到了虚拟内存virtual memory因此要准确的计算一个进程实际使用的物理内存就不是那么简单。 只知道进程的虚拟内存大小也并没有太大的用处,因为还是无法获取到实际分配的物理内存大小。
**RSSResident set size**使用top命令可以查询到是最常用的内存指标表示进程占用的物理内存大小。但是将各进程的RSS值相加通常会超出整个系统的内存消耗这是因为RSS中包含了各进程间共享的内存。**PSSProportional set size**会更准确一些,它将共享内存的大小进行平均后,再分摊到各进程上去。**USS(Unique set size )**是PSS的自己它只计算了进程独自占用的内存大小不包含任何共享的部分。
- **RSSResident set size**使用top命令可以查询到是最常用的内存指标表示进程占用的物理内存大小。但是将各进程的RSS值相加通常会超出整个系统的内存消耗这是因为RSS中包含了各进程间共享的内存。
- **PSSProportional set size**会更准确一些,它将共享内存的大小进行平均后,再分摊到各进程上去。
- **USS(Unique set size )**是PSS中自己的部分它只计算了进程独自占用的内存大小不包含任何共享的部分。
### 安装Smem ###
@ -20,7 +22,7 @@ smem是一个能够生成多种内存耗用报告的命令行工具它从/pro
#### 在Fedora 或 CentOS/RHEL上安装Smem ####
在CentOS/RHEL上你首先得[使能][2]EPEL仓
在CentOS/RHEL上你首先得[启用][2]EPEL仓库
$ sudo yum install smem python-matplotlib
@ -44,17 +46,17 @@ smem是一个能够生成多种内存耗用报告的命令行工具它从/pro
![](https://farm9.staticflickr.com/8543/15798375491_510698d98f_z.jpg)
smem提供了以下选项来对输出结果进行筛选支持按映射方式mapping,进程和用户三个维度的筛选:
smem提供了以下选项来对输出结果进行筛选支持按映射方式mapping进程和用户三个维度的筛选:
- -M <mapping-filtering-regular-expression>
- -P <process-filtering-regular-expression>
- -U <user-filtering-regular-expression>
- -M <正则表达式>
- -P <正则表达式>
- -U <正则表达式>
想了解smem更多的使用方式可以查询用户手册man page
### 使用smem图形化显示内存使用情况 ###
图形化的报告使用起来会更加方便快捷。smem支持支持两种格式的图形显示方式:直方图和饼图。
图形化的报告使用起来会更加方便快捷。smem支持两种格式的图形显示方式直方图和饼图。
下面是一些图形化显示的实例。
@ -78,7 +80,7 @@ via: http://xmodulo.com/visualize-memory-usage-linux.html
作者:[Dan Nanni][a]
译者:[coloka](https://github.com/coloka)
校对:[校对者ID](https://github.com/校对者ID)
校对:[wxy](https://github.com/wxy)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出

View File

@ -1,4 +1,4 @@
Postfix提示和故障排除命令
Postfix 技巧和故障排除命令
================================================================================
这里是一些我每天用的命令当然其他的email管理员也会使用因此我写下来以防我忘记。
@ -16,7 +16,7 @@ Postfix提示和故障排除命令
# postqueue -f
立即交付所有某domain.com域名的所有邮件
立即投递某domain.com域名的所有邮件
# postqueue -s domain.com
@ -39,7 +39,7 @@ Postfix提示和故障排除命令
你也可以查看下面的连接这个连接有很多例子和不错的可用的解释文档可以用来配置postfix.
[Postfix Configuration - ][1]
[Postfix Configuration][1]
--------------------------------------------------------------------------------
@ -47,7 +47,7 @@ via: http://techarena51.com/index.php/postfix-configuration-and-explanation-of-p
作者:[Leo G][a]
译者:[Vic020](http://www.vicyu.net)
校对:[校对者ID](https://github.com/校对者ID)
校对:[wxy](https://github.com/wxy)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出

View File

@ -0,0 +1,163 @@
一些关于Java的句子
================================================================================
本文并没有什么新鲜的。我只是收集了一些不太重要的语句,但这些语句可能对初级程序员来说很重要。也就是些无聊的旧东西。
如果以下的这些你都知道的话那么你对Java的了解已经超过了一个普通的家庭主妇。我不确定了解所有这些内容是否有意义。即使不知道其中的一些特性你照样也可以成为一个相当不错的Java程序员。然而本文中许多对你而言的新信息可能表明你还有很大的提升空间。
### Java中有四种不同的访问类型(而不是三种) ###
这四种类型包括:`private`, package private (包访问权限无修饰符又叫default, 译者注)。如果你在类中定义一个元素时并不加任何访问类型修饰符,它将被默认设置为包访问权限(package private),而不是`public`或者`protected`。
![Java中有四种级别的访问类型](http://a3ab771892fd198a96736e50.javacodegeeks.netdna-cdn.com/wp-content/uploads/2014/11/four-levels-of-protection.png)
*Java有四个级别的访问类型。*
从另一方面来说,如果在接口中,你不指定方法的访问修饰符,那么它将是`public`类型的。你也可以显式地指定它为`public`类型, 但这并不符合SONAR一个开源代码质量管理平台译者注的代码质量管理思想。
![访问类型是传递的](http://a3ab771892fd198a96736e50.javacodegeeks.netdna-cdn.com/wp-content/uploads/2014/11/protection-is-transitive.png)
*访问类型是传递的*
> 我的“在Java中允许选择性的在接口的方法中写`public`”的观点是一个技术错误。
同样你也可在接口的字段前写`final`,甚至是`static`。这说明这些字段可以是非静态或非final吗不是的接口中的字段总是final和static的。
### Protected和package private是不一样的 ###
Package private或者default访问类型可以使得相同包(package)下其他类能够访问这些字段或方法。保护类型(`protected`)的方法和字段可以被相同包下的类使用(这和package private是一样的),同时它也可以被其他类使用,只要那个类继承了这个包含这些`protected`方法或字段的类。
### Protected是可传递的 ###
如果有三个包a、b、c每个包都分别包含A、B、C类而且B继承AC继承B那么C可以访问A中的protected字段和方法。
package a;
public class A {
protected void a() {
}
}
package b;
import a.A;
public class B extends A {
protected void b() {
a();
}
}
package c;
import b.B;
public class C extends B {
protected void c() {
a();
}
}
### 接口不能定义protected方法 ###
很多人认为可以在接口中定义`protected`方法。如果你这么做的话,编译器很快就会毫不留情地给你报错。顺便说下,这也就是我为什么认为允许`public`关键字在接口中是一个技术错误,它会让人觉得还可以写其他访问类型似的。
![Private is the new public](http://a3ab771892fd198a96736e50.javacodegeeks.netdna-cdn.com/wp-content/uploads/2014/11/private-is-the-new-public.png)
*private是一种新的public*
如果你还想在接口中把方法声明为protected,你可能还不理解封装的含义。
### 此private非彼private ###
私有变量和方法在编译单元内是可见的。如果这听起来太神秘的话换种说法几乎就是在同一个Java文件中。这比“在它们被定义的类中”听起来好理解些。它们在同一编译单元的类和接口中也是可见的。嵌套类可以看到类中封装的私有字段和方法。然而当前封闭类也可以看到该类下任何深度下类中的私有方法和字段。
package a;
class Private {
private class PrivateInPrivate {
private Object object;
}
Object m() {
return new PrivateInPrivate().object;
}
}
后者并不广为人知,事实上也很少有用到。
### Private是类的访问级别而不是对象 ###
如果你可以访问一个变量或方法,那么不管它属于哪个对象你都可以访问它。如果`this.a`可以访问到,那`another.a`也可以访问到,只要它们是同一个类的实例。同一个类的实例对象可以随意调用其他实例的变量或方法。不过这样的代码一般都没有意义。现实生活中的一个例外是`equals()`(由Eclipse生成 15 - 18行
package a;
public class PrivateIsClass {
private Object object;
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
PrivateIsClass other = (PrivateIsClass) obj;
if (object == null) {
if (other.object != null)
return false;
} else if (!object.equals(other.object))
return false;
return true;
}
}
###静态(static)类可能有很多实例 ###
![Protection is not object level. It is class level.](http://a3ab771892fd198a96736e50.javacodegeeks.netdna-cdn.com/wp-content/uploads/2014/11/protection-is-class-feature.png)
*访问类型不是对象级别的而是类级别的。*
那些不支持有任何实例的类通常被称为实用工具类。它们只包含静态字段和静态方法以及唯一的不被该类的任何静态方法调用的私有构造函数。在Java 8中也可以有这样的一个野兽(这个词翻译不通,译者注)在接口中实现因为Java 8的接口可以有静态方法。我不觉得我们应该使用这个特性而不是实用工具类。我也不完全确信我们应该使用实用工具类。
静态类总是在另一个类或接口中。它们是嵌套类。他们是静态的,就像静态方法不能访问类的实例方法和字段一样,静态内部类也不能访问嵌入类的实例方法和字段。这是因为内部类没有嵌入类实例的引用(或者说是指针,如果你喜欢这么叫的话)。内部类(内部类,也即非静态嵌套类, 译者注),而非静态嵌套类, 没有嵌入类的一个实例,它是无法被创建的。每个内部类的实例都具有嵌入类实例的一个引用,因此一个内部类可以访问嵌入类的实例方法和字段。
因为这个原因,要是没有外部类的一个实例,你就不能创建一个内部类。当然,如果是当前对象,也就是`this`的话,你就可以不需要指定它。在这种情况下你可以使用`new`, 在这种情况下,也就是`this.new`的简式。在一个静态的环境中,例如从一个静态方法你必须指定内部类应该创建哪个封闭类的实例。见第10行:
package a;
class Nesting {
static class Nested {}
class Inner {}
void method(){
Inner inner = new Inner();
}
static void staticMethod(){
Inner inner = new Nesting().new Inner();
}
}
### 匿名类只能访问final变量 ###
![Variable has to be effective final](http://a3ab771892fd198a96736e50.javacodegeeks.netdna-cdn.com/wp-content/uploads/2014/11/effective-final.png)
*变量必须是有效的final*
当一个匿名类被定义在一个方法中,它可以访问局部变量如果该变量是`final`的。但这说的有点模糊。它们不得不声明成final,他们还必须是有效final。这也是Java 8中发布的一些特性。你不需要声明这些变量为`final`型,但它们仍然必须是有效的`final`。
![Java 8 does not require final, only effective final](http://a3ab771892fd198a96736e50.javacodegeeks.netdna-cdn.com/wp-content/uploads/2014/11/java_ee_-_javabeantester_src_main_java_com_javax0_jbt_blog_java_-_eclipse_-__users_verhasp_github_javax_blog.png)
*Java 8并不要求`final`只要求有效final。*
既然编译器会检查它必须是有效final的为什么还要显式地把它声明为`final`呢?方法的参数也是一样,它们同样必须是有效`final`的。你说这不是Java所强制要求的吗?嗯,你是对的。这只是良好的编程风格所要求的。
--------------------------------------------------------------------------------
via: http://www.javacodegeeks.com/2014/11/some-sentences-about-java.html
作者:[Peter Verhas][a]
译者:[a598799539](https://github.com/a598799539)
校对:[wxy](https://github.com/wxy)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://www.javacodegeeks.com/author/peter-verhas/

View File

@ -1,35 +1,23 @@
如何不使用DBCA在Oracle 11中删除数据库
================================================================================
本文简短的教程将会向你展示如何不使用DBCA数据库配置助手在Oracle 11中删除数据
本文简短的教程将会向你展示如何不使用DBCA数据库配置助手在Oracle 11中删除数据库。
#### 1- 设置数据库的SID如果没有定义的话 ####
命令:
export ORACLE_SID=database
#### 2- 以操作系统认证连接数据库 ####
命令:
[oracle@Oracle11 ~]$ sqlplus / as sysdba
提示:
----------
SQL*Plus: Release 11.2.0.1.0 Production on Mon Dec 1 17:38:02 2014
----------
Copyright (c) 1982, 2009, Oracle. All rights reserved.
----------
Connected to an idle instance.
#### 3- 启动数据库实例 ####
命令:
SQL> startup
提示:
ORACLE instance started.
Total System Global Area 3340451840 bytes
Fixed Size 2217952 bytes
@ -41,22 +29,18 @@
#### 4- 关闭数据库 ####
命令:
SQL> shutdown immediate;
提示:
Database closed.
Database dismounted.
ORACLE instance shut down.
#### 5- 启动独占模式 ####
命令:
SQL> startup mount exclusive restrict
提示:
ORACLE instance started.
----------
Total System Global Area 3340451840 bytes
Fixed Size 2217952 bytes
Variable Size 1828718624 bytes
@ -66,19 +50,14 @@
#### 6- 删除数据库 ####
命令:
SQL> drop database;
提示:
----------
Database dropped.
----------
Disconnected from Oracle Database 11g Enterprise Edition Release 11.2.0.1.0 - 64bit Production
With the Partitioning, OLAP, Data Mining and Real Application Testing options
SQL>
完成!
@ -88,7 +67,7 @@ via: http://www.unixmen.com/drop-database-oracle-11-without-using-dcba/
作者:[M.el Khamlichi][a]
译者:[VicYu/Vic020](http://vicyu.net/)
校对:[校对者ID](https://github.com/校对者ID)
校对:[wxy](https://github.com/wxy)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出

View File

@ -0,0 +1,548 @@
Linux Journal杂志2014读者选择奖
================================================================================
又到了Linux Journal杂志刊发2014读者选择奖的时候了鉴于去年的形式好评如潮因此我们仍沿续旧年格式让你的意见再次得到回响。虽然有些地方我们会稍加评论不过基本上还是以报道结果为主。以下敬请欣赏本年度读者选择奖名单
我们希望读者选择奖一年好似一年。如果你对新分类有任何建议,或者有任何评价与反馈,都可以通过以下方式联系我们:[http://www.linuxjournal.com/contact][1]
如欲了解完整获奖名单请查阅本杂志2014年12月刊。
### 最佳Linux发行版 ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f1.jpg)
虽然本年度基于Debian/Ubuntu的发行版获得最多票数但“最佳Linux发行版”分类有点类似于“最好吃的比萨”--就算得票垫底但它仍算是比萨呀选择Linux不会错的而投票之五花八门恰恰呈现出开源世界里的选择多样性。
- Ubuntu 16.5%
- Debian 16.4%
- Linux Mint 11%
- Arch Linux 8.5%
- Fedora 8.3%
- CentOS 6%
- openSUSE 5.3%
- Kubuntu 4.1%
- Gentoo 2.9%
- Slackware 2.7%
- Xubuntu 2.5%
- 其它 2.3%
- Red Hat Enterprise Linux 1.6%
- NixOS 1.4%
- elementary OS 1.3%
- Lubuntu 1.2%
- CrunchBang 1%
- Mageia .7%
- LXLE .4%
- Tails .4%
- Android-x86 .3%
- Bodhi Linux .3%
- Chakra .3%
- Kali Linux .3%
- PCLinuxOS .3%
- SolydK .3%
- Mandriva .1%
- Oracle Linux .1%
### 最佳Linux移动系统 ###
安卓在移动领域是如此的举足轻重,所以我们决定让安卓的各种版本独立参与投票。因此,尽管以下系统本质上属于安卓,但我们仍沿用其名而不改称安卓,因为这样更加一目了然。
- Stock Android 37.1%
- Sailfish OS 27.6%
- CyanogenMod 20.2%
- 其它 3%
- Ubuntu Phone 3%
- Amazon Fire OS 1.5%
- Ubuntu for Android 1.4%
- Replicant .8%
- Tizen .8%
### 最佳Linux智能手机厂商 ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f3.jpg)
- Samsung 29%
- Jolla 26.7%
- Nexus 16.5%
- 其它 7.1%*
- HTC 7%
- LG 5.3%
- Sony 3.7%
- Nokia 1.8%
- Huawei 1.4%
- GeeksPhone 1%
- Amazon .6%
*在"其它"当中,摩托罗拉获得最多提名,其次是一加。
### 最佳Linux平板 ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f4.jpg)
- Google Nexus 7 35.3%
- Google Nexus 10 14.8%
- Samsung Galaxy Tab 14%
- Samsung Galaxy Note 9.8%
- ASUS Transformer Pad 8.4%
- 其它 6.4%
- Kindle Fire HD 4.7%
- ASUS MeMO Pad 2%
- Dell Venue 1.6%
- Acer Iconia One 1.4%
- Samsung Galaxy Note Edge .9%
- Ekoore Python S3 .7%
### 最佳基于Linux的其它配件不含智能手机或平板###
我们是一群树莓派粉如假包换不过说真的这怎么能怪我们呢树莓派又出了新款B+,让原本就美妙绝伦的树莓派愈发的标致可人。并非我有未卜先知之功,但我对明年的冠军早就心中有数了。
- Raspberry Pi 71.4%
- BeagleBone Black 8.1%
- 其它 4.3%*
- Lego Mindstorms Ev3 3.7%
- Moto 360 3.4%
- Cubieboard 1.7%
- Parrot A.R Drone 1.7%
- Samsung Gear S 1.4%
- Yamaha Motif XF8 1.1%
- Nvidia Jetson-K1 Development System .8%
- Cloudsto EVO Ubuntu Linux Mini PC .5%
- VoCore Open Hardware Computer .5%
- LG G Watch .4%
- RaZberry .4%
- VolksPC .4%
- IFC6410 Pico-ITX Board .2%
- JetBox 5300 .1%
*在“其它”当中提名最多是Odroid与CuBox。
### 最佳笔记本厂商 ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/lenovo.jpg)
本分类原本用于评价哪个厂商对Linux最花心思不过谢天谢地如今大多数笔记本运行起Linux来还是相当不错的。因此无需我们将重点放在“嗯这台能运行Linux”这种问题上面而可以切切实实地看到精华之作。把眼光放长远些。
- Lenovo 32%
- ASUS 19.3%
- Dell 18.5%
- System76 10.6%
- 其它 7.9%*
- Acer 4.5%
- ThinkPenguin 1.9%
- LinuxCertified 1.8%
- ZaReason 1.6%
- EmperorLinux 1.5%
- CyberPower .3%
- Eurocom .1%
*在“其它”当中提名最多的依次是运行Linux的苹果、惠普、东芝以及三星。
### 最佳内容管理系统 ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f6.jpg)
- WordPress 34.7%
- Drupal 25.3%
- Joomla! 11.1%
- MediaWiki 10.5%
- 其它 10%*
- Alfresco 4.3%
- WebGUI 1.3%
- ikiwiki 1.1%
- eZ publish .7%
- Wolf CMS .4%
- Elgg .3%
- Blosxom .2%
*在“其它”当中提名最多的依次是DokuWiki, Plone, Django 以及 Typo3。
### 最佳对Linux友好的虚拟主机公司 ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/question.jpg)
提到虚拟主机这年头要找到不对Linux友好的公司那是相当之难。事实上要找到一家提供Windows的主机服务商才是一种挑战。这一类别的冠军“其它”就显而易见的说明了这一问题或许设一个“最差虚拟主机”分类更加有用
- 其它 22.8%*
- Amazon 22.5%
- Rackspace 13.1%
- Linode 10.4%
- GoDaddy.com 6.5%
- OVH 5.6%
- DreamHost 5.4%
- 1&1 4.8%
- LAMP Host 2.9%
- Hurricane Electric 2.6%
- Liquid Web .6%
- RimuHosting .6%
- Host Media .5%
- Savvis .5%
- Blacknight Solutions .4%
- Netfirms .4%
- Prgmr .4%
*在“其它”当中提名最多的依次是Digital Ocean (压倒性优势), Hetzner, BlueHost 以及 WebFaction。
### 最佳浏览器 ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f7.jpg)
Firefox显著优势拨得今年的头筹。即使以Chrome加Chromium计算Firefox仍位居榜首。我们曾经担心Firefox死忠会悄然流失不过还好Firefox依然宝马未老仍是一款快速、可行以及兼容度极佳的浏览器。
- Firefox 53.8%
- Chrome 26.9%
- Chromium 8.1%
- Iceweasel 4%
- Opera 3%
- 其它 2%
- SeaMonkey .8%
- rekonq .5%
- dwb .4%
- QupZill .4%
- Dillo .2%
### 最佳电邮客户端###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f8.jpg)
如果我没有亲身了解到身边有多少铁杆极客粉的话我也许会指责Kyle Rankin投票有作弊嫌疑。他的最爱--Mutt电邮客户端并未登顶但是对于一个没有图形界面的程序来说获得第三名也算是个比较骄人的成绩了。
- Mozilla Thunderbird 44.4%
- Gmail 24.7%
- Mutt 6.8%
- Evolution 5.5%
- KMail 5.3%
- 其它 3.2%
- Claws Mail 2.2%
- Zimbra 2%
- Alpine 1.8%
- Geary 1.7%
- SeaMonkey 1%
- Opera Mail .9%
- Sylpheed .4%
### 最佳音频编辑工具###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f9.jpg)
- Audacity 69.1%
- FFmpeg 10.8%
- VLC 9.7%
- Ardour 4.9%
- 其它 1.9%
- SoX 1.3%
- Mixxx 1.1%
- LMMS .7%
- Format Junkie .5%
### 最佳音频播放器 ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f10.jpg)
VLC登上视频播放器分类的榜首见下文应该是毫无悬念的但让人大跌眼镜的是它居然在音频播放器分类中也有不俗的成绩。或许它可以考虑成为一站式媒体播放器。不管怎样我们都乐见其取得好成绩。
- VLC 25.2%
- Amarok 15.3%
- Rhythmbox 10.4%
- Clementine 8.6%
- MPlayer 6.1%
- Spotify 5.9%
- Audacious 5.5%
- Banshee 4.6%
- 其它 4%*
- XBMC 3.1%
- foobar2000 3%
- Xmms 2.4%
- DeaDBeeF 1.2%
- MOC .9%
- cmus .8%
- Ncmpcpp .8%
- Guayadeque .6%
- Mixxx .4%
- MPC-HC .4%
- Subsonic .4%
- Nightingale .3%
- Decibel Audio Player .2%
*在"其它"当中Quod Libet获得最多提名。
### 最佳视频播放器 ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f10_0.jpg)
- VLC 64.7%
- MPlayer 14.5%
- XBMC 6.4%
- Totem 2.7%
- 其它 2.7%*
- Plex 2%
- Kaffeine 1.9%
- mpv 1.6%
- MythTV 1.6%
- Amarok 1.4%
- Xmms .3%
- Daum Potplayer .2%
- Clementine .1%
*在“其它”当中提名最多是SMPlayer。
### 最佳视频编辑器 ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f10_1.jpg)
再次证明了我们的读者群深具极客色彩。我们未指定“非线性编辑器”因此就转码技术而言VLC在视频编辑类别中勉强获胜。干得好VLC干得好
- VLC 17.5%
- Kdenlive 16.4%
- Blender 15.1%
- Avidemux 13.2%
- OpenShot 13.2%
- Cinelerra 7.5%
- PiTiVi 4.9%
- LightWorks 4.8%
- 其它 4.7%
- LiVES 1.4%
- Shotcut .6%
- Jahshaka .4%
- Flowblade .4%
### 最佳云存储 ###
[](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f11.jpg)
- Dropbox 30.5%
- ownCloud 23.6%
- Google Drive 16%
- rsync 8.3%
- 其它 7.5%*
- Amazon S3 6.6%
- SpiderOak 4.4%
- Box 1.8%
- Copy 1%
- AjaXplorer .3%
Dropbox在这一领域曾经独步天下几无对手虽然这次仍为头魁但优势已经不那么明显了。Dropbox的方便与稳定无可否认但是将你的宝贵数据托管在ownCloud上可管可控也让ownCloud登上第二名的宝座。
*在“其它”当中,提名最多是 Younited 与 MEGA。当然很多人可能会说“非万不得已时不会选择云存储/我的文件都是存在本地”。
### 最佳Linux游戏 ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/CIVILIZATION-V-FRONT-OF-BOX.jpg)
我很少玩游戏所以每年我都特期待这一类别排名希望可以从中找到最受欢迎的游戏以供闲暇之需。看到NetHack排名这么靠前我倒觉得挺开心的尤其是在联想到竞争对手后更是心满意足。徘徊在让我们这些老派的龙与地下城玩家痴迷的随机通道确实有点意思。
- Civilization 5 26.5%
- 其它 23.5%*
- Team Fortress 2 8.7%
- NetHack 8.4%
- X-Plane 10 7.1%
- Dota 6.1%
- Bastion 5.4%
- Scorched 3D 3.7%
- Destiny 3.6%
- Ultima IV 1.9%
- FreeCol 1.8%
- Kpat 1.4%
- FreeOrion 1.1%
- Ryzom .9%
*在“其它”当中提名最多的依次是Minecraft, 0 A.D., Frozen Bubble, Battle for Wesnoth, Portal 以及 Counter Strike。
### 最佳虚拟方案 ###
我认为与Vagrant的关系大大带动了Oracle旗下VirtualBox的普及。当然Vagrant也与其它虚拟平台合作但自从其与VirtualBox无缝结合后我认为对VirtualBox是极大提升。虚拟化实现系统是如此的高效与可靠从裸机开始重构系统的方案几近历史。
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/Virtualbox_logo_0.jpg)
- Oracle VM VirtualBox 33.4%
- VMware 22.3%
- KVM 21.1%
- XEN 5.7%
- QEMU 5.3%
- OpenStack 4.9%
- 其它 4.2%*
- OpenVZ 1.7%
- Linux-VServer 1.3%
- Symantec Workspace Virtualization .1%
*在“其它”当中提名最多的依次是Docker, ProxMox 与 LXC。
### 最佳监控应用 ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/Nagios-Core-4.0.8.png)
- Nagios 27.1%
- Wireshark 20.7%
- htop 12.3%
- Zabbix 10.5%
- 其它 8.6%*
- Zenoss 6.2%
- Munin 3.4%
- PC Monitor 2.8%
- New Relic 1.9%
- Opsview 1.2%
- SaltStack 1%
- NTM (Network Traffic Monitor) .7%
- xosview .7%
- Manage Engine .5%
- FlowViewer .3%
- Circonus .2%
- SysPeek .2%
*在“其它”当中提名最多是Icinga 与 OpenNMS。
### 最佳开发运维配置管理工具###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/Git-Logo-2Color.jpg)
Git能拿到本类别第一名倒是蛮有趣的虽然针对配置文件使用标准版本控制工具当然无可厚非但我总觉得它应该配合Chef或Puppet一起使用。至少开发运维DevOps让我们这些执拗的老派系统管理员象对待代码一样处理配置文件。版本控制真令人难以置信这一点似乎绝大多数读者均无异议。
- Git 39.4%
- Puppet 17.2%
- Ansible 8.9%
- cron jobs 8.8%
- Subversion 7.6%
- Chef 5%
- SaltStack 5.4%
- 其它 4.6%*
- CFEngine 3%
*在“其它”当中,提名最多是 NixOps。
### 最佳编程语言 ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f13.jpg)
- Python 30.2%
- C++ 17.8%
- C 16.7%
- Perl 7.1%
- Java 6.9%
- 其它 4.6%
- Ruby 4.3%
- Go 2.4%
- JavaScript 2.4%
- QML 2.2%
- Fortran 1.4%
- Haskell 1.4%
- Lisp 1.2%
- Erlang .6%
- Rust .6%
- D .4%
- Hack .1%
*在“其它”当中提名最多的依次是Scala, PHP 以及 Clojure。
### 最佳脚本语言 ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f13_0.jpg)
Python强悍无比无论在脚本及编程分类都有大量拥趸。对于象我这样懂Bash以及一点PHP皮毛的人来说很明显在我一头扎进开发过程中我需要重点突破。敢说空格无用我--空格来也!
- Python 37.1%
- Bash/Shell scripts 27%
- Perl 11.8%
- PHP 8.4%
- JavaScript 6.7%
- Ruby 4.9%
- 其它 2.1%
- Lua 2%
### 最佳Linux/开源新产品/新项目 ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f14.jpg)
Docker无疑是大赢家当之无愧--游戏规则改变者嘛。 Jolla/Sailfish也小受欢迎真是令人欣慰。我们爱安卓不过多个选择不正是我们作为开源鼓手所提倡的一个重要方面吗。
- Docker 28%
- Jolla and Sailfish OS 19%
- LibreOffice 7%
- ownCloud 5%
- Steam 5%
- Zenoss Control Center 5%
- Raspberry Pi 4%
- Git 4%
- Apache Cordova/OpenOffice/Spark/Tika 3%
- Ansible 2%
- Elementary OS 2%
- OpenStack 2%
- Zabbix 2%
- CoreOS 2%
- Firefox OS 2%
- KDE Connect 1%
- NixOS and NixOps 1%
- Open Media Vault 1%
###你用Linux做过的最酷的事情 ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/tux_cruise.png)
这是读者选择奖里我最钟爱的新分类。想象一下你参加某次Linux会议期间询问人们他们用Linux做过的最酷的事情。这里所做的与之大同小异这里我们仅列出部分我们比较喜欢的如欲了解完整列表请访问[http://www.linuxjournal.com/rc2014/coolest][2]。
注:最常见的答案是:“使用它”;“挽救数据/照片/导致Windows 机器罢工的任何东西”;“说服朋友/家人/业务转向使用Linux”“学习”“讲授”“获得工作”“家庭自动化”“构建家庭媒体服务器”。下表是我们选出的并非最常见的答案而是一些比较具体与有个性的答案。
- 在上世纪90年代中期建立procmail垃圾邮件预过滤规则。
- 450-节点计算集群。
- 7.1 通道前置放大器集成Mopidy音乐播放器
- Linux机器人 参加Eurobot年度比赛
- 无意间打印到错误的大陆。
- 视频同步时增加音频通道。
- 使用自已编写的代码分析NASA卫星数据。
- 远程逗着猫玩。
- 通过声音以及移动应用自动控制家里整个灯光设置。
- 窗台植物自动浇水系统。
- 浴室收音机。
- 配制啤酒。
- 创建了一个运行在国际空间站的应用。
- 为某大型收费高速公路系统建立一套实时收费系统。
- 自己装配智能手机。
- 使用树莓派建立基于网络的家庭报警系统。
- 树莓派集群破解加密的办公文档。
- 控制我的Parrot无人机。
- 控制186台风力涡轮机的通信。
- 在Linux下使用Stellarium控制我的米德望远镜。
- 用一台十几年的老笔记本转换卡带式家庭视频的格式。
- 在靠近北极地区创建网状网络。
- 使用无线数据发射器创建海洋环境下的传感器浮标。
- 发现新行星。
- 修复位于美国丹佛的jabber服务器 而我当时却身在约丹安曼一家酒店大堂。
- 得到一张Linus亲笔签名的Red Hat 5.0 CD。
- 入侵我的咖啡机,在咖啡做好后给我一条消息。
- 给我女儿介绍乐高机器人EV3。
- 监控酒窖温度与湿度,过热或过温时开门。
- 用树莓派代替温泉浴缸上的控制器。
- 使用脚本连接四天每隔15秒开关一次同事的CD托盘。
- 使用LFS系统为一家全国性石油公司迁移ACH自动转帐系统。
- 身在其它城市冲我家里的马桶。
- 远程控制鸡舍门。
- 使用树莓派为16个站点部署基于网络的洒水器控制器并控制水池与庭院灯光
- 链接SSH通道通过三级跳连接家与工作因网络设置方面的限制
- 建立一套系统,监控可再生能源的安装部分:两套固定的太阳能电池阵,一套两轴太阳跟踪太阳能电池阵,以及一台风力涡轮机。生产以及天气数据实时显示在网络站点。
- 还是在“猫”时代,我用电脑每天早上叫醒我女朋友去上班。
- 使用一个Wii摇控器通过蓝牙将我的笔记本作为红外摄像机侦测我女儿的旋转木马的运动以及控制视频游戏。
--------------------------------------------------------------------------------
via: http://www.linuxjournal.com/rc2014
作者:[Shawn Powers][a]
译者:[yupmoon](https://github.com/yupmoon)
校对:[wxy](https://github.com/wxy)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://www.linuxjournal.com/users/shawn-powers
[1]:http://www.linuxjournal.com/contact
[2]:http://www.linuxjournal.com/rc2014/coolest

View File

@ -1,27 +1,26 @@
、Linux 3.18 内核发布了,下面的是更新的内容
Linux 3.18 新内核带来了什么新东西?
================================================================================
![](http://www.omgubuntu.co.uk/wp-content/uploads/2011/07/Tux-psd3894.jpg)
新的一月意味着新的稳定版Linux内核的发布今天Linus Torvalds[宣布Linux 3.18 很快就会发布了][1]。
新的一月意味着新的稳定版Linux内核的发布前一段时间,Linus Torvalds[宣布Linux 3.18 很快就会发布了][1]。
Torvalds在Linux内核邮件列表中解释到由于在3.17中还存在几个令一小部分用户烦心的问题,‘**绝不可以在一些人积极解决老问题时其他人无所事事。**
Torvalds在Linux内核邮件列表中解释到由于在3.17中还存在几个令一小部分用户烦心的问题,但是**绝不可以在一些人积极解决老问题时其他人无所事事。**
### Linux 3.18中有什么新的? ###
Linux 3.18内核主要致力于硬件支持、电源效率、bug修复和可靠性。
如往常一样,这些内容跨很大,容易让人迷惑 。比如:加密层多重缓冲操作 - 到气冲感知, 就像对雷蛇游戏手柄的支持。
如往常一样,这些内容跨度很大,容易让人迷惑 。比如:从加密层多重缓冲操作,到气压感知,再到对雷蛇游戏手柄的支持。
下面我们收集了这个版本的重要的改变。这远远不是所有的,只是选取了一些更相关的内容。
- Nouveau (免费 Nvidia GPU 驱动) 现在支持基础 DisplayPort 音频
- Nouveau (开源的 Nvidia GPU 驱动) 现在支持基础 DisplayPort 音频
- 对雷蛇游戏手柄的支持用在Xbox 360上
- Xilinx USB2 外设
- 对Microchip AR1021 i2c、PenMount 6000 touch的触摸屏支持
- 音频编码: Cirrus Logic CS35L32、 Everest ES8328and Freescale ES8328
- 音频支持: 通用飞思卡尔声卡, A模拟SSM4567音频放大器
- 不同的文件系统提升, 包括 Btrfs 和 F2FS
- 对Microchip AR1021 i2c、PenMount 6000 touch的触摸屏支持
- 音频编码: Cirrus Logic CS35L32、 Everest ES8328 Freescale ES8328
- 音频支持: 通用飞思卡尔声卡, Analog Devices SSM4567音频放大器
- 几个文件系统提升, 包括 Btrfs 和 F2FS
- 现在支持了DCTCP拥塞控制算法
- JIT 编译64位 eBPF程序
- “Tinification” 帮助开发人员编译更精简更小的内核
@ -34,7 +33,7 @@ Linux 3.18内核主要致力于硬件支持、电源效率、bug修复和可靠
- [下载Linux内核源码包][2]
有一个由Canonical维护的最新Linux内核归档。尽管你可能在其他地方看到过但是请注意这不是针对终端用户的。没有任何保证与支持你自己承担风险。
这里有一个由Canonical维护的最新Linux内核归档。尽管你可能在其他地方看到过但是请注意这不是针对终端用户的。没有任何保证与支持你自己承担风险。
- [访问Ubuntu内核主线归档][3]
@ -44,7 +43,7 @@ via: http://www.omgubuntu.co.uk/2014/12/linux-kernel-3-18-released-whats-new
作者:[Joey-Elijah Sneddon][a]
译者:[geekpi](https://github.com/geekpi)
校对:[校对者ID](https://github.com/校对者ID)
校对:[wxy](https://github.com/wxy)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出

View File

@ -0,0 +1,47 @@
2014年会是 "Linux桌面年"吗?
================================================================================
> Linux桌面现在终于发出最强音
![](http://i1-news.softpedia-static.com/images/news2/Was-2014-The-Year-of-Linux-Desktop-467036-2.jpg)
**看来Linux在2014年有很多改变许多用户都表示今年Linux的确有进步但是仅凭这个就能断定2014年就是"Linux桌面年"吗?**
"Linux桌面年"这句话在过去几年就被传诵得像句颂歌一样可以说是在试图用一种比较有意义的方式来标记它的发展进程。此类事情目前还没有发生过在我们的见证下也从无先例所以这就不难理解为什么Linux用户会用这个角度去看待这句话。
大多数软件和硬件领域不太会有这种快速的进步都以较慢的速度发展但是对于那些在工业领域有更好眼光的人来说事情就会变得疯狂。即使有可能针对某一时刻或某一事件还是比较困难的但是Linux在几年的过程中还是以指数方式迅速发展成长。
![](http://i1-news.softpedia-static.com/images/news2/Was-2014-The-Year-of-Linux-Desktop-467036-3.jpg)
### Linux桌面年这句话不可轻言 ###
没有一个比较权威的人和机构能判定Linux桌面年已经到来或者已经过去所以我们只能尝试根据迄今为止我们所看到的和用户所反映的去推断。有一些人比较保守改变对他们影响不大还有一些人则比较激进永远不知满足。这真的要取决于你的见解了。
点燃这一切的火花似乎就是Linux上的Steam平台尽管在这变成现实之前我们已经看到了一些Linux游戏已经开始有重要的动作了。在任何情况下Valve都可能是我们今天所看到的一系列复苏事件的催化剂。
![](http://i1-news.softpedia-static.com/images/news2/Was-2014-The-Year-of-Linux-Desktop-467036-6.jpg)
在过去的十年里Linux桌面以一种缓慢的速度在发展并没有什么真正的改变。创新肯定是有的但是市场份额几乎还是保持不变。无论桌面变得多么酷或Linux相比之前的任何一版多出了多少特点很大程度上还是在原地踏步包括那些开发商业软件的公司他们的参与度一直很小基本上就忽略掉了Linux。
![](http://i1-news.softpedia-static.com/images/news2/Was-2014-The-Year-of-Linux-Desktop-467036-4.jpg)
现在相比过去的十年里更多的公司表现出了对Linux平台的浓厚兴趣。或许这是一种自然地演变Valve并没有做什么但是Linux最终还是达到了一个能被普通用户接受并理解的水平并不只是因为令人着迷的开源技术。
驱动程序能力强了游戏工作室就会定期移植游戏在Linux中我们前所未见的应用和中间件就会开始出现。Linux内核发展达到了难以置信的速度大多数发行版的安装过程通常都不怎么难所有这一切都只是冰山一角。
![](http://i1-news.softpedia-static.com/images/news2/Was-2014-The-Year-of-Linux-Desktop-467036-5.jpg)
所以当有人问你2014年是不是Linux桌面年时你可以说“是的因为Linux桌面完全统治了2014年。
--------------------------------------------------------------------------------
via: http://news.softpedia.com/news/Was-2014-The-Year-of-Linux-Desktop-467036.shtml
作者:[Silviu Stahie][a]
译者:[ZTinoZ](https://github.com/ZTinoZ)
校对:[wxy](https://github.com/wxy)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://news.softpedia.com/editors/browse/silviu-stahie

View File

@ -0,0 +1,92 @@
Attic——删除重复数据的备份程序
================================================================================
Attic是一个Python写的删除重复数据的备份程序其主要目标是提供一种高效安全的数据备份方式。重复数据消除技术的使用使得Attic适用于日常备份因为它可以只存储那些修改过的数据。
### Attic特性 ###
#### 空间高效存储 ####
可变块大小重复数据消除技术用于减少检测到的冗余数据存储字节数量。每个文件被分割成若干可变长度组块,只有那些从没见过的组块会被压缩并添加到仓库中。
#### 可选数据加密 ####
所有数据可以使用256位AES加密进行保护并使用HMAC-SHA256验证数据完整性和真实性。
#### 异地备份 ####
Attic可以通过SSH将数据存储到安装有Attic的远程主机上。
#### 备份可作为文件系统挂载 ####
备份归档可作为用户空间文件系统挂载,用于便捷地验证和恢复备份。
#### 安装attic到ubuntu 14.10 ####
打开终端并运行以下命令
sudo apt-get install attic
### 使用Attic ###
#### 手把手实例教学 ####
在进行备份之前,首先要对仓库进行初始化:
$ attic init /somewhere/my-repository.attic
将~/src和~/Documents目录备份到名为Monday的归档
    $ attic create /somewhere/my-repository.attic::Monday ~/src ~/Documents
第二天创建一个新的名为Tuesday的归档
    $ attic create --stats /somewhere/my-repository.attic::Tuesday ~/src ~/Documents
该备份将更快些,也更小些,因为只有之前从没见过的新数据会被存储。--stats选项会让Attic输出关于新创建的归档的统计数据比如唯一数据即不和其它归档共享的数据的数量
归档名Tuesday
归档指纹387a5e3f9b0e792e91ce87134b0f4bfe17677d9248cb5337f3fbf3a8e157942a
开始时间: Tue Mar 25 12:00:10 2014
结束时间: Tue Mar 25 12:00:10 2014
持续时间: 0.08 seconds
文件数量: 358
最初大小 压缩后大小 重复数据删除后大小
本归档: 57.16 MB 46.78 MB 151.67 kB
所有归档114.02 MB 93.46 MB 44.81 MB
列出仓库中所有归档:
$ attic list /somewhere/my-repository.attic
Monday Mon Mar 24 11:59:35 2014
Tuesday Tue Mar 25 12:00:10 2014
列出Monday归档的内容
$ attic list /somewhere/my-repository.attic::Monday
drwxr-xr-x user group 0 Jan 06 15:22 home/user/Documents
-rw-r--r-- user group 7961 Nov 17 2012 home/user/Documents/Important.doc
恢复Monday归档
    $ attic extract /somewhere/my-repository.attic::Monday
通过手动删除Monday归档恢复磁盘空间
    $ attic delete /somewhere/my-backup.attic::Monday
详情请查阅[Attic文档][1]。
--------------------------------------------------------------------------------
via: http://www.ubuntugeek.com/attic-deduplicating-backup-program.html
作者:[ruchi][a]
译者:[GOLinux](https://github.com/GOLinux)
校对:[wxy](https://github.com/wxy)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://www.ubuntugeek.com/author/ubuntufix
[1]:https://attic-backup.org/index.html

View File

@ -0,0 +1,68 @@
红帽反驳“grinch鬼精灵”算不上Linux漏洞
================================================================================
![](http://images.techhive.com/images/article/2014/12/grinch-linux-100536132-primary.idge.png)
图片来源:[Natalia Wilson受Creative Commons许可][1]
> 安全专家表示Linux处理权限的方式仍有可能导致潜在的误操作。
但红帽对此不以为然,称 Alert Logic 于本周二译者注12月16日公布的 grinch (“鬼精灵”) Linux漏洞根本算不上是安全漏洞。
[红帽于周三发表简报][2] 回应 Alert Logic 的说法表示“Alert Logic 的这份报告错误地将正常预期动作归为安全问题。”
安全公司Alert Logic于本周二声称“鬼精灵”漏洞其严重性堪比 Heartbleed 臭虫,并称其是 [Linux 系统处理用户权限时的重大设计缺陷][3]恶意攻击者可借此获取机器的root权限。
Alert Logic 称攻击者可以使用第三方Linux 软件框架Policy Kit (Polkit)达到利用“鬼精灵”漏洞的目的。Polkit旨在帮助用户安装与运行软件包此开源程序由红帽维护。Alert Logic 声称允许用户安装软件程序的过程中往往需要超级用户权限如此一来Polkit也在不经意间或通过其它形式为恶意程序的运行洞开方便之门。
红帽对此不以为意,表示系统就是这么设计的,换句话说,**“鬼精灵”不是臭虫而是一项特性。**
安全监控公司Threat Stack联合创始人 Jen Andre [就此在一篇博客][4]中写道“如果你任由用户通过使用那些利用了Policykit的软件无需密码就可以在系统上安装任何软件实际上也就绕过了Linux内在授权与访问控制。”
Alert Logic 高级安全研究员 James Staten 在发给国际数据集团新闻社(IDG News Service)的电子邮件中写道,虽然这种行为是设计使然,有意为之,但“鬼精灵”仍然可能被加以利用或修改来攻陷系统。
“现在的问题是表面存在一个薄弱环节,可以被用来攻击系统,如果安装软件包象其它操作一样,比如删除软件包或添加软件源,没有密码不行,那么就不会存在被恶意利用的可能性了。”
不过 Andre 在一次采访中也表示对那些跃跃欲试的攻击者来说想利用Polkit还是有一些苛刻限制的。
攻击者需要能够物理访问机器,并且还须通过外设键鼠与机器互动。如果攻击者能够物理访问机器,可以象重启机器进入恢复模式访问数据与程序一样地轻而易举的得手。
Andre表示不是所有Linux机器都默认安装Polkit -- 事实上其主要用于拥有桌面图形界面的工作站在当今运行的Linux机器中占有很小的份额。
换句话说,“鬼精灵”并不具有象[Shellshock][5]那样广泛的攻击面, 后者存在于Bash shell中几乎所有发行版无一幸免。
其他安全专家对“鬼精灵”漏洞也不以为然。
系统网络安全协会SANS Institute互联网风暴中心Internet Storm Center咨询网站的 Johanners Ullrich 在[一篇博文][6]中写道“某种程度上与很多Linux系统过分随意的设置相比这个并算不上多大的漏洞。”
Ullrich 同时还指出“鬼精灵”漏洞也并非完全“良性”“可以很容易地加以利用获得超出Polkit设置预期的权限。”
Andre指出负责管理运行Polkit桌面Linux机器的管理员要做到心中有数了解潜在的危险检查那些程序是靠Polkit来管理的确保系统无虞。
他还表示应用开发者与Linux 发行者也应确保正确使用Polkit框架。
就连原始报告的另一位作者 Tyler Bourland 似乎也承认“鬼精灵”并非十分严重。
[在开源安全邮件列表的一封邮件中][7]Bourland 提到攻击者需要借助其它漏洞,连同“鬼精灵”才能发起攻击时,他写道,“鬼精灵”就象个“开启界面的熟练工,但是本身并不能翻多高的浪。”
Lucian Constantin 对本文也有贡献。)
--------------------------------------------------------------------------------
via:http://www.computerworld.com/article/2861392/security0/the-grinch-isnt-a-linux-vulnerability-red-hat-says.html
作者:[Joab Jackson][a]
译者:[yupmoon](https://github.com/yupmoon)
校对:[wxy](https://github.com/wxy)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://www.computerworld.com/author/Joab-Jackson/
[1]:http://www.flickr.com/photos/moonrat/4571563485/
[2]:https://access.redhat.com/articles/1298913
[3]:http://www.pcworld.com/article/2860032/this-linux-grinch-could-put-a-hole-in-your-security-stocking.html
[4]:http://blog.threatstack.com/the-linux-grinch-vulnerability-separating-the-fact-from-the-fud
[5]:http://www.computerworld.com/article/2687983/shellshock-flaws-roils-linux-server-shops.html
[6]:https://isc.sans.edu/diary/Is+the+polkit+Grinch+Going+to+Steal+your+Christmas/19077
[7]:http://seclists.org/oss-sec/2014/q4/1078

View File

@ -0,0 +1,112 @@
如何在Linux下使用rsync
================================================================================
对于各种组织和公司数据对他们是最重要的即使对于电子商务数据也是同样重要的。Rsync是一款通过网络备份重要数据的工具/软件。它同样是一个在类Unix和Window系统上通过网络在系统间同步文件夹和文件的网络协议。Rsync可以复制或者显示目录并复制文件。Rsync默认监听TCP 873端口通过远程shell如rsh和ssh复制文件。Rsync必须在远程和本地系统上都安装。
rsync的主要好处是
**速度**:最初会在本地和远程之间拷贝所有内容。下次,只会传输发生改变的块或者字节。
**安全**传输可以通过ssh协议加密数据。
**低带宽**rsync可以在两端压缩和解压数据块。
语法:
    # rsync [options] source path destination path
### 示例: 1 - 启用压缩 ###
[root@localhost /]# rsync -zvr /home/aloft/ /backuphomedir
building file list ... done
.bash_logout
.bash_profile
.bashrc
sent 472 bytes received 86 bytes 1116.00 bytes/sec
total size is 324 speedup is 0.58
上面的rsync命令使用了-z来启用压缩-v来显示详细过程verbose-r来进行递归处理。上面的命令在本地的/home/aloft/和/backuphomedir之间同步。
### 示例: 2 - 保留文件和文件夹的属性 ###
[root@localhost /]# rsync -azvr /home/aloft/ /backuphomedir
building file list ... done
./
.bash_logout
.bash_profile
.bashrc
sent 514 bytes received 92 bytes 1212.00 bytes/sec
total size is 324 speedup is 0.53
上面我们使用了-a选项它保留了所有人和所属组、时间戳、软链接、权限并以递归模式运行。
### 示例: 3 - 同步本地到远程主机 ###
    [root@localhost /]# rsync -avz /home/aloft/ azmath@192.168.1.4:/share/rsysnctest/
Password:
building file list ... done
./
.bash_logout
.bash_profile
.bashrc
sent 514 bytes received 92 bytes 1212.00 bytes/sec
total size is 324 speedup is 0.53
上面的命令允许你在本地和远程机器之间同步。你可以看到在同步文件到另一个系统时提示你输入密码。在做远程同步时你需要指定远程系统的用户名和IP或者主机名。
### 示例: 4 - 远程同步到本地 ###
    [root@localhost /]# rsync -avz azmath@192.168.1.4:/share/rsysnctest/ /home/aloft/
Password:
building file list ... done
./
.bash_logout
.bash_profile
.bashrc
sent 514 bytes received 92 bytes 1212.00 bytes/sec
total size is 324 speedup is 0.53
上面的命令同步远程文件到本地。
### 示例: 5 - 找出文件间的不同 ###
[root@localhost backuphomedir]# rsync -avzi /backuphomedir /home/aloft/
building file list ... done
cd+++++++ backuphomedir/
>f+++++++ backuphomedir/.bash_logout
>f+++++++ backuphomedir/.bash_profile
>f+++++++ backuphomedir/.bashrc
>f+++++++ backuphomedir/abc
>f+++++++ backuphomedir/xyz
sent 650 bytes received 136 bytes 1572.00 bytes/sec
total size is 324 speedup is 0.41
上面的命令帮助你找出源地址和目标地址之间文件或者目录的不同。
### 示例: 6 - 备份 ###
rsync命令可以用来备份linux。
你可以在cron中使用rsync安排备份。
0 0 * * * /usr/local/sbin/bkpscript &> /dev/null
----------
vi /usr/local/sbin/bkpscript
rsync -avz -e ssh -p2093 /home/test/ root@192.168.1.150:/oracle/data/
--------------------------------------------------------------------------------
via: http://linoxide.com/how-tos/rsync-copy/
作者:[Bobbin Zachariah][a]
译者:[geekpi](https://github.com/geekpi)
校对:[wxy](https://github.com/wxy)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://linoxide.com/author/bobbin/

View File

@ -0,0 +1,47 @@
Linux有问必答如何在Debian下安装闭源软件包
================================================================================
> **提问**: 我需要在Debian下安装特定的闭源设备驱动。然而, 我无法在Debian中找到并安装软件包。如何在Debian下安装闭源软件包?
Debian是一个拥有[48,000][1]软件包的发行版. 这些软件包被分为三类: main, contrib 和 non-free, 主要是根据许可证要求, 参照[Debian开源软件指南][2] (DFSG)。
main软件仓库包括符合DFSG的开源软件。contrib也包括符合DFSG的开源软件但是依赖闭源软件来编译或者执行。non-free包括不符合DFSG的、可再分发的闭源软件。main仓库被认为是Debian项目的一部分但是contrib和non-free不是。后两者只是为了用户的方便而维护和提供。
如果你想一直能够在Debian上安装闭源软件包你需要添加contrib和non-free软件仓库。为此用文本编辑器打开 /etc/apt/sources.list在每个源的末尾添加 "contrib non-free"。
下面是适用于 Debian Wheezy的 /etc/apt/sources.list 例子。
deb http://ftp.us.debian.org/debian/ wheezy main contrib non-free
deb-src http://ftp.us.debian.org/debian/ wheezy main contrib non-free
deb http://security.debian.org/ wheezy/updates main contrib non-free
deb-src http://security.debian.org/ wheezy/updates main contrib non-free
# wheezy-updates, 之前叫做 'volatile'
deb http://ftp.us.debian.org/debian/ wheezy-updates main contrib non-free
deb-src http://ftp.us.debian.org/debian/ wheezy-updates main contrib non-free
![](https://farm8.staticflickr.com/7562/16063758036_0ef8fce075_b.jpg)
修改完源后, 运行下面命令去下载contrib和non-free软件仓库的文件索引。
$ sudo apt-get update
如果你用 aptitude, 运行下面命令。
$ sudo aptitude update
现在你可以在Debian上搜索和安装任何闭源软件包了。
![](https://farm9.staticflickr.com/8593/16089610915_b638fce55d_c.jpg)
--------------------------------------------------------------------------------
via: http://ask.xmodulo.com/install-nonfree-packages-debian.html
译者:[mtunique](https://github.com/mtunique)
校对:[wxy](https://github.com/wxy)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[1]:https://packages.debian.org/stable/allpackages?format=txt.gz
[2]:https://www.debian.org/social_contract.html#guidelines

View File

@ -1,68 +1,68 @@
The history of Android
安卓编年史5
================================================================================
![闹钟主屏幕,设置一个闹钟,计算器,以及计算器高级功能。](http://cdn.arstechnica.net/wp-content/uploads/2013/12/calclockonpresszx.png)
闹钟主屏幕,设置一个闹钟,计算器,以及计算器高级功能。
Ron Amadeo供图
安卓0.9第一次给我们展现了闹钟和计算器应用程序。闹钟应用的特征是有个扁平的模拟时钟,下方是一排设置的闹钟的滚动列表。不同于其它种类的开关,闹钟使用一个复选框来设置。闹钟可以设置为每周特定几天重复,以及它还有一整个列表的可选的,独特的闹钟铃声。
*闹钟主屏幕,设置一个闹钟,计算器,以及计算器高级功能* [Ron Amadeo供图]
安卓0.9第一次给我们展现了闹钟和计算器应用程序。闹钟应用的特征是有个扁平的模拟时钟,下方是一排设置的闹钟的滚动列表。不同于其它种类的开关,闹钟使用一个复选框来设置。闹钟可以设置为每周特定几天重复,以及它还有一整个列表的可选的、独特的闹钟铃声。
计算器是一个全黑色的应用带有有光泽的圆形按钮。通过菜单可以打开带有高级功能的附加面板。再次强调一致性不是谷歌的强项所在。按键中的Pi键按下的高亮是红色的——在安卓0.9的其它地方,按键按下的高亮通常是橙色的。实际上,计算器中用到的所有东西是仅用于计算器的百分百定制设计。
![打开菜单的谷歌地图和新路线界面。](http://cdn.arstechnica.net/wp-content/uploads/2013/12/mps092.png)
打开菜单的谷歌地图和新路线界面。
Ron Amadeo供图
*打开菜单的谷歌地图和新路线界面* [Ron Amadeo供图]
谷歌地图在安卓0.9中真正能够运行——客户端能够连接到谷歌地图服务器并下载地图块。给予我们地图图像——要记住谷歌地图是个基于云的应用。连最老旧的版本也会下载更为现代的地图块所以忽略实际的地图块的样子吧。地图的菜单获得了和浏览器菜单相同的全灰设计待遇缩放控件也和浏览器的相同。最重要的“我的位置”按钮最终来到了安卓0.9这意味着该版本的地图支持GPS定位。
路线界面得到了改进。奇怪的聊天气泡附加不对齐的按钮已经被去除换为更具交互性的书签图标切换地点按钮移动到了左边“go”按钮的现在被标记为“获取路线(Route)”。
![谷歌地图图层选择,搜索历史,新加入的街景视图。](http://cdn.arstechnica.net/wp-content/uploads/2013/12/maps3.png)
谷歌地图图层选择,搜索历史,新加入的街景视图。
Ron Amadeo供图
*谷歌地图图层选择,搜索历史,新加入的街景视图* [Ron Amadeo供图]
“图层(Layers)”被重命名为“地图模式(Map Mode)”并且变成一个单选列表。一次只能选择一个地图类型——举个例子,你在卫星地图视图下不能查看交通状况。埋藏在菜单中的还有被匆忙放到一起的搜索记录界面。搜索历史看起来只是个概念验证,带着巨大的,模糊的搜索图标填充的搜索项被放置于半透明的背景之上。
街景曾经是个单独的应用尽管它从没提供给公众但在0.9中它被作为一个地图模式内置于谷歌地图之中。你可以拖拽小Pegman街景小人到地图上它会显示一个弹出气泡来展示街景的快照。点击快照会启动那个位置的街景。这时街景除了可滚动的360度影像之外不会显示任何东西——在显示界面上根本就没有用户界面UI
![我们第一次见到谷歌地图搜索界面。这些截图展示了搜索栏,搜索结果列表,显示在地图上的搜索结果,以及一个商业页面。 ](http://cdn.arstechnica.net/wp-content/uploads/2013/12/manystarbucks.png)
我们第一次见到谷歌地图搜索界面。这些截图展示了搜索栏,搜索结果列表,显示在地图上的搜索结果,以及一个商业页面。
Ron Amadeo供图
*我们第一次见到谷歌地图搜索界面。这些截图展示了搜索栏,搜索结果列表,显示在地图上的搜索结果,以及一个商业页面* [Ron Amadeo供图]
安卓0.9同样第一次给我们展示了信息应用,称为“信息”(Messaging)。就像一些早期的安卓设计,信息并不确定它应该是一个暗色系应用还是亮色系应用。第一眼可以看到的屏幕是信息列表,一个极力避免空白的质朴黑色界面,看起来像是建立在设置界面的设计之上。但点击“新信息”或已存在的会话后,你会被带到一个白色以及蓝色的文本信息的滚动列表这里。这两个相连的界面真是没法再更不一样一点了。
![信息应用的会话窗口,附件窗口,会话列表,以及设置。](http://cdn.arstechnica.net/wp-content/uploads/2014/03/sms09.png)
信息应用的会话窗口,附件窗口,会话列表,以及设置。
Ron Amadeo供图
*信息应用的会话窗口,附件窗口,会话列表,以及设置* [Ron Amadeo供图]
信息支持一定范围的附件你可以附上图片声音或者一个幻灯片到你的信息之中。图片和声音可以实时录制或是从手机存储中拉取。另一个奇怪的UI选择是对于附件菜单中的每一项安卓基本都已经有现成的图标可用但信息却全部使用了另外定制的设计。
信息是最先带有自己设置界面的应用之一。用户可以请求已读以及送达报告以及设置下载偏好。
![幻灯片制作器。右边图片显示了菜单选项。](http://cdn.arstechnica.net/wp-content/uploads/2013/12/slideshow.png)
幻灯片制作器。右边图片显示了菜单选项。
Ron Amadeo供图
附件选项中的“幻灯片”选项实际上是以一个全功能的幻灯片制作器的形式到来的。你可以添加图片,选择幻灯顺序,添加音乐,修改每张幻灯片的显示时间,以及添加文字。这已经复杂到足够给它一个自己的应用图标了,但令人惊奇的是它被隐藏在信息应用的菜单之中。在纵向模式下这是为数不多的完全无用的安卓应用之一——唯一的看图片方式以及控制是在横向显示之中。奇怪的是,纵向模式它仍然能够旋转,但显示输出变得一团糟。
*幻灯片制作器。右边图片显示了菜单选项* [Ron Amadeo供图]
附件选项中的“幻灯片”选项实际上是以一个全功能的幻灯片制作器的形式到来的。你可以添加图片,选择幻灯顺序,添加音乐,修改每张幻灯片的显示时间,以及添加文字。这已经复杂到足够给它一个自己的应用图标了,但令人惊奇的是它被隐藏在信息应用的菜单之中。在纵向模式下这是为数不多的完全无法使用的安卓应用之一——唯一的看图片方式以及控制是在横向显示之中。奇怪的是,纵向模式它仍然能够旋转,但显示输出变得一团糟。
![音乐播放器的主导航页面,歌曲列表,专辑列表,以及“正在播放”界面。](http://cdn.arstechnica.net/wp-content/uploads/2013/12/music09.png)
音乐播放器的主导航页面,歌曲列表,专辑列表,以及“正在播放”界面。
Ron Amadeo供图
*音乐播放器的主导航页面,歌曲列表,专辑列表,以及“正在播放”界面* [Ron Amadeo供图]
安卓0.9第一次将音乐应用带进了安卓。首屏基本上只是几个将你带到各个功能视图的巨大的,矮胖的导航按钮。在应用底部是一个“正在播放”栏,仅仅包含了音轨名,艺术家,以及一个播放/暂停按钮。歌曲列表仅仅有个最简的无修饰界面,仅仅显示了歌曲名,艺术家,专辑以及时长。艺术家专辑是这个应用中唯一有希望看到色彩的地方。它在专辑视图里显示为一个小快照,在正在播放界面显示为巨大的,四分之一屏的图片。
正如安卓在这个时期的系统绝大多数部分,音乐应用的界面可能没什么好多看几眼的,但功能已经基本齐全。正在播放界面有一个让你拖动歌曲的播放列表按钮,随机播放,重复播放,搜索,以及选择背景声音按钮。
正如安卓在这个时期的系统绝大多数部分,音乐应用的界面可能没什么值得看的,但功能已经基本齐全。正在播放界面有一个让你拖动歌曲的播放列表按钮,随机播放,重复播放,搜索,以及选择背景声音按钮。
![“相册”的所有相册视图,单个相册视图,以及单张图片视图。](http://cdn.arstechnica.net/wp-content/uploads/2013/12/pictures09.png)
“相册”的所有相册视图,单个相册视图,以及单张图片视图。
Ron Amadeo供图
*“相册”的所有相册视图,单个相册视图,以及单张图片视图* [Ron Amadeo供图]
相册被简单地称为“图片”。初始视图显示你的所有相册。两个默认的相册是“相机”和巨大的合集相册叫做“全部图片”。每个相册的快照由2x2的图片组成每张图片有个白色的粗边框。
单个相册视图的样子大概是你所希望的:一个可滚动的图片方阵。你不能在单个图片大小的范围内向左右滑动来移动图片,而是应该轻点图片来移动图片。相册同样没有双指捏合缩放,你只能使用按钮来缩放图片。
![图片编缉!这些截图显示了一个打开的菜单,“更多”菜单,截取,以及设置。](http://cdn.arstechnica.net/wp-content/uploads/2013/12/pics209.png)
图片编缉!这些截图显示了一个打开的菜单,“更多”菜单,截取,以及设置。
Ron Amadeo供图
*图片编缉!这些截图显示了一个打开的菜单,“更多”菜单,截取,以及设置* [Ron Amadeo供图]
“图片”看起来十分简单,直到你点击菜单按钮并突然看到无数的选项。图片可以截取,旋转,删除,或设置壁纸或联系人图标。就像浏览器一样,所有的这一切通过一个笨拙的二级菜单系统完成。但是,我们为何又将看起来完全不同的菜单联系到一起?
@ -81,7 +81,7 @@ Ron Amadeo供图
via: http://arstechnica.com/gadgets/2014/06/building-android-a-40000-word-history-of-googles-mobile-os/5/
译者:[alim0x](https://github.com/alim0x) 校对:[校对者ID](https://github.com/校对者ID)
译者:[alim0x](https://github.com/alim0x) 校对:[wxy](https://github.com/wxy)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出

View File

@ -1,38 +0,0 @@
Translating By instdio
"Fork Debian" Project Aims to Put Pressure on Debian Community and Systemd Adoption
================================================================================
> There is still a great deal of resistance in the Debian community towards the upcoming adoption of systemd
**The Debian project decided to adopt systemd a while ago and ditch the upstart counterpart. The decision was very controversial and it's still contested by some users. Now, a new proposition has been made, to fork Debian into something that doesn't have systemd.**
![](http://i1-news.softpedia-static.com/images/news2/Fork-Debian-Project-Started-to-Put-Pressure-on-Debian-Community-and-Systemd-Adoption-462598-2.jpg)
systemd is the replacement for the init system and it's the daemon that starts right after the Linux kernel. It's responsible for initiating all the other components in a system and it's also responsible for shutting them down in the correct order, so you might imagine why people think this is an important piece of software.
The discussions in the Debian community have been very heated, but systemd prevailed and it looked like the end of it. Linux distros based on it have already started to make the changes. For example, Ubuntu is already preparing to adopt systemd, although it's still pretty far off.
### Forking Debian, not really a solution ###
Developers have already forked systemd, but the projects resulted don't have a lot of support from the community. As you can imagine, systemd also has a big following and people are not giving up so easily. Now, someone has made a website called debianfork.org to advocate for a Debian without systemd, in an effort to put pressure on the developers.
"We are Veteran Unix Admins and we are concerned about what is happening to Debian GNU/Linux to the point of considering a fork of the project. Some of us are upstream developers, some professional sysadmins: we are all concerned peers interacting with Debian and derivatives on a daily basis. We don't want to be forced to use systemd in substitution to the traditional UNIX sysvinit init, because systemd betrays the UNIX philosophy."
"We contemplate adopting more recent alternatives to sysvinit, but not those undermining the basic design principles of 'do one thing and do it well' with a complex collection of dozens of tightly coupled binaries and opaque logs," reads the [website][1], among a lot of other things.
Basically, the new website is not actually about a Debian fork, but more like a form of pressure for the [upcoming vote][2] that will be taken for the "Re-Proposal - preserve freedom of choice of init systems." This is a general resolution made by Ian Jackson and he hopes to get enough support in order to turn back the decision made by the Technical Committee regarding systemd.
It's clear that the debate is still not over in the Debian community, but it remains to be seen if the decisions already made can be overturned.
--------------------------------------------------------------------------------
via: http://news.softpedia.com/news/Fork-Debian-Project-Started-to-Put-Pressure-on-Debian-Community-and-Systemd-Adoption-462598.shtml
作者:[Silviu Stahie][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://news.softpedia.com/editors/browse/silviu-stahie
[1]:http://debianfork.org/
[2]:https://lists.debian.org/debian-vote/2014/10/msg00001.html

View File

@ -1,30 +0,0 @@
Red Hat acquires FeedHenry to get mobile app chops
================================================================================
Red Hat wants a piece of the enterprise mobile app market, so it has acquired Irish company FeedHenry for approximately $82 million.
The growing popularity of mobile devices has put pressure on enterprise IT departments to make existing apps available from smartphones and tablets -- a trend that Red Hat is getting in on with the FeedHenry acquisition.
The mobile app segment is one of the fastest growing in the enterprise software market, and organizations are looking for better tools to build mobile applications that extend and enhance traditional enterprise applications, according to Red Hat.
"Mobile computing for the enterprise is different than Angry Birds. Enterprise mobile applications need a backend platform that enables the mobile user to access data, build backend logic, and access corporate APIs, all in a scalable, secure manner," Craig Muzilla, senior vice president for Red Hat's Application Platform Business, said in a [blog post][1].
FeedHenry provides a cloud-based platform that lets users develop and deploy applications for mobile devices that meet those demands. Developers can create native apps for Android, iOS, Windows Phone and BlackBerry as well as HTML5 apps, or a mixture of native and Web apps.
A key building block is Node.js, an increasingly popular platform based on Chrome's JavaScript runtime for building fast and scalable applications.
From Red Hat's point of view, FeedHenry is a natural fit with the company's strengths in enterprise middleware and PaaS (platform-as-a-service). It adds better mobile capabilities to the JBoss Middleware portfolio and OpenShift PaaS offerings, Red Hat said.
Red Hat plans to continue to sell and support FeedHenry's products, and will continue to honor client contracts. For the most part, it will be business as usual, according to Red Hat. The transaction is expected to close in the third quarter of its fiscal 2015.
--------------------------------------------------------------------------------
via: http://www.computerworld.com/article/2685286/red-hat-acquires-feedhenry-to-get-mobile-app-chops.html
作者:[Mikael Ricknäs][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://www.computerworld.com/author/Mikael-Rickn%C3%A4s/
[1]:http://www.redhat.com/en/about/blog/its-time-go-mobile

View File

@ -1,58 +0,0 @@
Suse enterprise Linux can take your system back in time
================================================================================
> Suse Linux Enterprise Server 12 features a new system snapshot and rollback capability
The newest enterprise edition of the Suse Linux distribution allows administrators to go back in time, for instance, to immediately before they made that fatal system-crippling mistake.
Suse Linux Enterprise Server 12 (SLES 12) features a system snapshot and rollback capability that allows the user to boot the system to an earlier configuration, should the latest one unexpectedly fail.
Such a capability can be handy for undoing a system configuration change that did not turn out as expected. For instance, an administrator might have the SLES computer in a perfectly fine running state, but then install a botched software update, or make a change that destroys the kernel. Typically, Unix systems have been unforgiving about such mistakes, forcing the administrator to reinstall the system software from scratch, should they not know how to undo the unfortunate change.
"This stuff happens, for whatever reason," said Matthias Eckermann, Suse senior product manager. "So the admin has an emergency exit, so to speak."
Users of Microsoft Windows and Apple Macintosh systems have long enjoyed rollback functionality within their respective OSes, but this capability had been missing in Unix-based systems such as Linux, at least as a native function of the OS.
For this functionality, the Suse team used the [Btrfs][1] file system (B-tree file system, often pronounced as "Butter FS"), an open-source file system developed by Oracle engineer Chris Mason ([now at Facebook][2]). Mason created Btrfs to address emerging enterprise requirements such as the ability to make snapshots and to scale across multiple storage nodes.
Although Btrfs is supported in the mainline Linux kernel, SLES is the first major Linux distribution to use Btrfs as the default file system. "Over the last five years, we specifically focused on making Btrfs enterprise-ready," Eckermann said.
The rollback capability also relies on the open-source tool [Snapper][3], first developed by Suse, to manage the snapshots.
The Suse team integrated Snapper with SLES so that users now have the ability, when the OS is first being loaded, to boot into an earlier snapshot of the system. "Whoever installs SLES 12 gets this capability by default," Eckermann said.
SLES also integrated Btrfs with the [Samba Windows file server][4], which makes Linux files accessible to Windows machines. For Windows users, SLES can now make multiple snapshots of a file appear as different versions of a file, which are all accessible.
Initially, Enterprise Suse supports rollbacks for only system changes, though users can also deploy it to handle changes in a user's home directory, in which data is typically kept. "We already have it running, but it is not supported," Eckermann said. Users can continue to use ext3, ext4 or some other traditional Linux file system as their default.
SLES 12, released Monday, comes with a number of other features as well. Like other distributions, SLES has [caught the fever for Docker containers][5] and now comes with a built-in framework to run this virtualization technology. For the first time, the package also provides geo-clustering, which allows the user to build replicate clusters across different geographic regions.
An organization could use geo-clustering, for instance, to set up multiple copies of a single cluster in data centers around the world, so if one or more regions go offline, the others can continue operations unabated, Eckermann said.
Suse [is among the world's most widely used distributions][6] of Linux, along with Ubuntu/Debian, and Red Hat Enterprise Linux. A free version is available under OpenSuse and Suse Linux offers a commercial edition packaged for enterprise usage.
Suse Linux's parent company, Attachmate, is in the process of merging with Micro Focus. Eckermann expects no major changes in the operations of Suse Linux resulting from the new ownership.
SLES 12 is [offered at an annual subscription][7] of US$349 per server. A free 60-day trial is also available.
![](http://images.techhive.com/images/article/2014/10/sle_12_installed_system_08_snapper_gui-2-100527225-large.idge.png)
Through the combined powers of the Btrfs file system and the Snapper utility, SUSE Enterprise Linux can now take snapshots of the system, and roll back to an earlier configuration if necessary.
--------------------------------------------------------------------------------
via: http://www.computerworld.com/article/2838950/suse-enterprise-linux-can-take-your-system-back-in-time.html
作者:[Joab Jackson][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://www.computerworld.com/author/Joab-Jackson/
[1]:https://btrfs.wiki.kernel.org/index.php/Main_Page
[2]:http://www.phoronix.com/scan.php?page=news_item&px=MTUzNTE
[3]:http://snapper.io/
[4]:http://www.samba.org/
[5]:http://www.pcworld.com/article/2838452/canonical-celebrates-cloud-freedoms-with-new-ubuntu.html
[6]:http://distrowatch.com/table.php?distribution=suse
[7]:https://www.suse.com/products/server/how-to-buy/

View File

@ -1,41 +0,0 @@
Mozilla to Launch Brand New Developer Web Browser Next Week
================================================================================
**When you woke up this morning you probably didnt expect to come online and see the words Mozilla, New, and Web Browser writ large across the web. **
But that my bed-headed compadre is precisely what youre looking at.
youtube 地址,发布的时候不行做个链接吧
<iframe width="750" height="422" frameborder="0" allowfullscreen="" src="https://www.youtube.com/embed/Ehv5u-5rE8c?feature=oembed"></iframe>
### Mozilla Pushing Boundaries ###
Mozilla has always been at the forefront of pushing open source, open standards and open access. They steer one of the most popular desktop browsers in the world. Their open-source Linux mobile OS [is sold on 12 smartphones from 13 operators in 24 countries][1]. Theyre [even taking on the Google Chromecast][2]!
Their desire to democratise the web shows no sign of abating. In a teaser posted on the Mozilla Blog this morning the company has announced a new effort to push boundaries further — this time for developers rather than users.
Teased as something “unique but familiar”, the company intend to release a brand new browser based on Firefox but designed by developers, for developers. Mozilla say it integrates “powerful new tools like [WebIDE][3] and the [Firefox Tools Adapter][4]”.
> “When building for the Web, developers tend to use a myriad of different tools which often dont work well together. This means you end up switching between different tools, platforms and browsers which can slow you down and make you less productive.”
### #Fx10 ###
The “Firefox Developer Browser” is being touted for an initial release date of November 10. Its not yet known what platforms it will target but since this is a) Mozilla and b) aimed at developers itd be a huge shock if Linux builds werent readily available on day dot.
Mozilla say those interested should sign up for their [Hacks newsletter][5] to receive notification when the browser is released.
--------------------------------------------------------------------------------
via: http://www.omgubuntu.co.uk/2014/11/mozilla-launch-brand-new-developer-focused-web-browser
作者:[ Joey-Elijah Sneddon][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:https://plus.google.com/117485690627814051450/?rel=author
[1]:https://twitter.com/firefox/status/522175938952716289
[2]:http://www.omgchrome.com/mozillas-chromecast-rival-leaks-online/
[3]:https://hacks.mozilla.org/2014/06/webide-lands-in-nightly/
[4]:https://hacks.mozilla.org/2014/09/firefox-tools-adapter/
[5]:https://hacks.mozilla.org/newsletter/

View File

@ -1,36 +0,0 @@
Ubuntu Touch RTM Gets Major Update Video Tour
================================================================================
**new Ubuntu Touch RTM version has been released and the developers have made a number of important fixes, not to mention all the improvements that have been made to the backend.**
The Ubuntu Touch RTM stable images don't arrive all that often. Only six have been launched so far and each new version is sensibly better than the previous one. The current release is no exception, although it seems to have a longer and more complex changelog than the previous one.
Long gone are the days when a Mir update would break Ubuntu, but now all sorts of smaller problems are cropping up. In fact, all landings have been suspended before this new update was released, in an effort to track down and correct all the major bugs. Some problems still remain, but none of them should be an inconvenience.
### This is just the RTM branch, not the final version ###
Ubuntu Touch is still a work in progress, but, from the looks of it, the developers are homing in the final version. It shouldn't take too long now and we might get it in a month or so. That would be a fair assessment, if Meizu's plans to launch an Ubuntu phone in December holds.
"Good news! As per earlier announcement we promoted a new image to the ubuntu-rtm/14.09 channel! Please enjoy image #6 (previously known as #140 for krillin, #118 for mako and #112 for x86). Because of those we plan on promoting another image as soon as possible if those issues get fixed. But no worries - no freezes required this time. The landing gates will remain opened until the next serious promotion! Once again big thanks to everyone involved!" [said][1] Canonical's Łukasz Zemczak.
There are still a few minor issues, they will be corrected very soon. For example, the user metrics that could be found on the lock screen are no longer changing with a double tap, the media hug might strain the CPU in certain situations, and the Unity 8 environment might crash from time to time.
On the upside, Ubuntu Touch should be much more stable now, the video playback now works properly in landscape mode, the Unity 8 desktop has been updated, and a lot of critical changes have been made.
Users can test Ubuntu Touch RTM on Nexus 4 and Nexus 7 devices, and the official website has comprehensive [wiki][2] that details the installation.
youtu.be链接地址[http://youtu.be/_DtNvz_WVu8][3]
--------------------------------------------------------------------------------
via: http://news.softpedia.com/news/Ubuntu-Touch-RTM-Gets-Major-Update-Video-Tour-464075.shtml
作者:[Silviu Stahie][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://news.softpedia.com/editors/browse/silviu-stahie
[1]:https://lists.launchpad.net/ubuntu-phone/msg10368.html
[2]:https://wiki.ubuntu.com/Touch/DualBootInstallation
[3]:http://youtu.be/_DtNvz_WVu8

View File

@ -1,39 +0,0 @@
Canonical Launches LXD Open Source Virtualization Container
================================================================================
> Canonical is launching a new container-based virtualization hypervisor for its open source Ubuntu Linux operating system, called LXD. How will it get along with Docker?
As open source container-based virtualization explodes in popularity, perhaps it was only a matter of time before [Canonical][1] announced its own, homegrown virtualization container system to contend with [Docker][2]. That's what the company has now done with the launch of [LXD][3] for [Ubuntu Linux][4].
Canonical announced the hypervisor— which the company is pronouncing "lex-dee," the better, I suppose, to avoid confusion with the Schedule 1 drug of similar nomenclature—Nov. 4. The pitch for the tool, which is basically a server for the [LXC][5] virtualized container system built into the Linux kernel, goes like this:
> Imagine you could launch a new machine in under a second, and that you could launch hundreds of them on a single server. Hundreds! Now, imagine that you have hardware-guaranteed security to ensure that those machines cant pry or spy on one another. Imagine you can connect them separately and securely to networks. And imagine that you can run that on a single node or a million, live migrate machines between those nodes, and talk to all of it through a clean, extensible REST API. That's what LXD sets out to deliver.
LXD will also feature tight integration with OpenStack—in fact, it's part of Canonical's [OpenStack][6] Juno for Ubuntu—as well as hardware-level security protections, according to the company, which said it is working with chip manufacturers (it hasn't indicated which ones) on the latter technology.
This is all pretty cool. If Canonical fully implements these features, LXD could go a long way toward making LXC a truly enterprise-ready containerized virtualization platform.
But to do that, Canonical needs to siphon off some of the momentum Docker is currently enjoying and reorient part of the open source container-based virtualization world toward LXD. So far, Canonical appears eager to position LXD as a technology that can complement and enhance Docker, not compete directly with it. That makes sense to a degree, since LXD and Docker are somewhat different sorts of beasts, at least for now. But Canonical has stated its ambition "to bring much of the awesome security and isolation of LXD to docker [sic] as well," an idea that may not sit well with the Docker community, especially if LXD remains closely intertwined with Ubuntu rather than being distribution-agnostic.
It doesn't help that what Canonical is doing with LXD is very similar to what it has already done with technologies including [Unity][7], the desktop interface it designed for Ubuntu. Like LXD, Unity was a way for Canonical to replace a major part of the Ubuntu software stack—specifically, the [GNOME][8] desktop environment—with a homegrown alternative, providing the company more control over Ubuntu, yet also making Ubuntu less readily compatible with many open source apps that were not designed for Ubuntu and Unity. The move engendered more than a little ill-will among the Ubuntu user base, although most of that sentiment has long since dissipated.
It's hard to imagine Canonical marginalizing Docker in the same way it has GNOME, and even harder to imagine many people feeling emotional about this in the way they did when Unity replaced GNOME. But time will tell.
--------------------------------------------------------------------------------
via: http://thevarguy.com/ubuntu/110514/canonical-launches-lxd-open-source-virtualization-container
作者:[Christopher Tozzi][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://thevarguy.com/author/christopher-tozzi
[1]:http://canonical.com/
[2]:http://docker.com/
[3]:http://www.ubuntu.com/cloud/tools/lxd
[4]:http://ubuntu.com/
[5]:https://linuxcontainers.org/
[6]:http://openstack.org/
[7]:http://unity.ubuntu.com/
[8]:http://www.gnome.org/

View File

@ -1,43 +0,0 @@
Massive 20% Improvement to Land in Intel's Mesa Driver Thanks to Valve's Efforts
================================================================================
> A group of devs from LunarG found a bottleneck in the driver
**Intel users should see a major improvement with their hardware after a group of developers from LunarG found out that there was a bottleneck in the DRM driver.**
![](http://i1-news.softpedia-static.com/images/news2/Massive-20-Improvement-to-Land-in-Intel-s-Mesa-Driver-Thanks-to-Valve-s-Efforts-464233-2.jpg)
The drivers on the Linux platform are not stellar, and most of the time, pieces of hardware work better on other operating systems, like Windows, for example. It might be strange that the same game, on the same hardware, works better on one platform than it does on the other, but things have been like this forever and no one expects any kind of big breakthroughs.
To be fair, the drivers from AMD, NVIDIA, and Intel have been improving in the last couple of years, especially after Steam for Linux was released. Actually, LunarG works with Valve to improve the state of the Intel drivers and to find ways to boost the performance on Linux. They had a big breakthrough and a kernel update should arrive very soon.
### Users with Intel-powered machines should be very happy ###
Valve tasked LunarG with improving the Intel drivers, which are lagging a little bit behind the competition, at least in terms of graphics. Some of the latest Intel processors are pretty powerful and you would expect them to be able to perform much better, at least as well as on Windows, but there was a problem.
The guys from LunarG worked on a piece of software called GlassyMesa, which drastically improved Intel's shader compiler stack. They also made a number of improvements in the past few months, but none of these changes was reflected in the driver's performance. This led them to believe that there had to be a bottleneck somewhere along the line.
"We started to suspect there was a bigger bottleneck masking the improvements, and sure enough we were able to generate a test program that showed a huge performance issue with how the hardware samplers were working as compared to the OpenGL driver running under windows. Something was slowing down the samplers on Linux, and we were determined to find out what," wrote the devs on their blog.
They did all sorts of testing, but they don't have access to the way the hardware is set up. Therefore, they sent the test program to Intel and the engineers found the problem and corrected it. As you can imagine, the people at Intel didn't say anything about what they actually corrected.
### 20% increase in performance is no small matter ###
In any case, LunarG also published some of the improvements they saw, and one of them is a 20% increase in game framerate.
- Left4Dead2 with frames that have hordes of zombies we've seen an increase of 17-25%
- Counter-Strike GO: 16-20%
- Lightsmark increased on a GT2 by 60% (HD4600) 4770
A kernel patch is required to make all these improvements available to users. It's not clear whether it will be available in Linux kernel 3.18 or 3.19, but it's coming. It also means that the kernel patch will be backported to the SteamOS kernel as well.
--------------------------------------------------------------------------------
via: http://news.softpedia.com/news/Massive-20-Improvement-to-Land-in-Intel-s-Mesa-Driver-Thanks-to-Valve-s-Efforts-464233.shtml
作者:[Silviu Stahie][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://news.softpedia.com/editors/browse/silviu-stahie

View File

@ -1,48 +0,0 @@
Prizes Ahoy! Ubuntu Scope Showdown Kicks Off
================================================================================
![](http://www.omgubuntu.co.uk/wp-content/uploads/2014/10/scope.jpg)
**SDK at the ready: Ubuntu has launched a new development competition for its mobile platform, with some swish prizes up for grabs for the winners.**
The [Ubuntu Scope Showdown][1] is the third such initiative to be held by the project and the second pitched squarely at mobile.
But this time around amateur and l33t developers alike are being tasked with a new brief: creating custom home screen experiences — [Scopes][2] — for Ubuntu on phones.
### Er, What Is a Scope? ###
We often refer to Scopes as mini search engines, little portals that help you find content from a specific web site, service or topic — think eBay, Cat gifs, or Restaurants Nearby — from the home screen, no need to open an app.
![](http://www.omgubuntu.co.uk/wp-content/uploads/2014/07/scopes-customization.jpg)
Thanks to the rich variety of result layouts content can be surfaced and previewed in interesting, intuitive ways. And when youre happy with what youve found you can (typically) click through to open it up in an app or a new tab in the Ubuntu web browser.
Scopes can be added, removed, re-ordered and favourited for easy access. Some Scopes can search multiple sources, others just the one.
Its this multifariousness that makes the lack of a traditional home screen as we know it from Android, iOS and other mobile platforms (pages and pages of scopes) less of a negative. Theres no desktop; no custom wallpaper you can cover with icons, folders, shortcuts and widgets, but there is, quite literally, a world of information at your fingertips.
### The Competition ###
The Ubuntu Scope Showdown runs for five weeks (October 30 December 3), giving participants just about enough time to take a project from concept to completion using the Ubuntu SDK and submit it to the Ubuntu Store.
The overall winner (decided by a judging panel of which, disclaimer ahoy, I am part), will bag a brand new Dell XPS 13 Laptop (Developer Edition) preloaded with Ubuntu.
Runners up nab a Logitech UE Boom Bluetooth speaker, a Nexus 7 (2013) running Ubuntu, or one of two bundles of Ubuntu merchandise.
Interested in taking part? Youll find more details on the entry requirements plus links to all the documentation you can eat on the [developer.ubuntu.com mini-site][3].
Will you be taking part?
--------------------------------------------------------------------------------
via: http://www.omgubuntu.co.uk/2014/11/ubuntu-scope-showdown-competition-launched
作者:[Joey-Elijah Sneddon][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:https://plus.google.com/117485690627814051450/?rel=author
[1]:http://developer.ubuntu.com/2014/10/scope-development-competition/
[2]:http://developer.ubuntu.com/scopes/
[3]:http://developer.ubuntu.com/

View File

@ -1,36 +0,0 @@
Ubuntu's Click Packages Might End the Linux Packaging Nightmare
================================================================================
> It's time to have one type of package for all distros
**The new Click packages that are already used on the Ubuntu Touch platform by Canonical are also coming to the desktop and they might be able to change the Linux packaging paradigm.**
![](http://i1-news.softpedia-static.com/images/news2/Ubuntu-s-Click-Packages-Might-End-the-Linux-Packaging-Nightmare-464271-3.jpg)
Ubuntu is the most used Linux operating system, so it's very likely that, if something really catches on with users of this distribution, it will probably shake things up in the Linux ecosystem as well. For now, the app packaging for Linux operating systems is a mess. It has improved over the years, but it still poses many problems.
There isn't any kind of unification and different distros use different packages. Debian-based distros use .deb and Fedora-based ones use .rpm, but you can also find packages with .sh or .run. The main problem is that they depend very much on the libraries that are already installed or available in the repos. Even if you have a .deb file for your Ubuntu system, it's not a guarantee that it will work. It might very well depend on a library that's not available for that particular version.
### One package to rule them all ###
For now, only the Ubuntu SDK can make Click packages, but they present some advantages over regular ones. For example, they are much safer than regular packages, mostly because there are no maintainer scripts that can run as root. In conjunction with the Ubuntu Software Center and Apparmor, the Click packages are pretty safe.
One of the best features of Click packages is that they have no external dependencies, which means that you can basically run them on any system, regardless of the available libraries installed or in the repositories. Martin Albisetti from Canonical explains this feature in more detail on his [blog][1].
"Clicks, by design, can't express any external dependencies other than a base system (called a 'framework'). That means that if your app depends on a fancy library that isn't shipped by default, you just bundle it into the Click package and you're set. You get to update it whenever it suits you as a developer, and have predictability over how it will run on a user's computer (or device!). That opens up the possibility of shipping newer versions of a library, or just sticking with one that works for you."
Another cool feature is that Click packages for different versions of the same app can be run on the same system. There are numerous applications out there that need to be alone on the system, otherwise they create problems for users, but the confinement provided by Click packages solves this issue.
These are just a few of the features that are already implemented. It will take a while until they reach the desktop, however. They will land along with Unity 8, but they are coming nonetheless. We can only hope that other distros will adopt this kind of format and not do their own similar thing, which would preserve the current packaging problems.
--------------------------------------------------------------------------------
via: http://news.softpedia.com/news/Ubuntu-s-Click-Packages-Might-End-the-Linux-Packaging-Nightmare-464271.shtml
作者:[Silviu Stahie][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://news.softpedia.com/editors/browse/silviu-stahie
[1]:http://beuno.com.ar/archives/334

View File

@ -1,54 +0,0 @@
Open-Source Vs Groupon: GNOME Battle To Protect Their Trademark
================================================================================
![](http://www.omgubuntu.co.uk/wp-content/uploads/2014/09/gnome-blank.jpg)
**GNOME is a name synonymous with open-source software, but if the billion-dollar company Groupon has its way it could soon mean something different.**
[Groupon][1], famed for its deal-of-the-day website, recently unveiled a “tablet-based platform“ called GNOME, and has filed requisite trademark filings — 10 so far — seeking ownership of the name.
Naturally, this has the GNOME Foundation concerned. GNOME is a [registered trademark][2] of the foundation, and has been since 2006. This mark was issued under a number of sections, including operating system which the Chicago-based Groupon is also claiming against.
Could it just be that theyve never heard of GNOME before? Highly unlikely.
![Groupons POS system. Ahem.](http://www.omgubuntu.co.uk/wp-content/uploads/2014/11/Gnome-Stand.jpg)
Groupons POS system. Ahem.
Even the most Saul Goodman-y of lawyers would first check existing trademarks and investigate the company(s) owning or contesting. Even assuming that lapse in professionalism, most would have at least given the name a quick Google. Damningly, the company has previously [claimed to be fuelled by open-source][3].
Groupon clearly knows of GNOME, knows what it does, what it stands for and how long its been around yet considers itself better placed to “own” the name for its brand of hokey in-store point-of-sale terminals.
*Hrm.*
### Campaign to Protect GNOME ###
Ask not what GNOME can do for you, but what you can do for GNOME. This morning the GNOME Foundation [launched a campaign][4] to raise (an estimated) US$80,000 to battle the first round of marks Groupon has applied to register.
“**We must not let a billion-dollar-company take the well-established name of one of the biggest Free Software communities,**” says Tobias Mueller, a GNOME Foundation director.
**“If you want to help GNOME defend its trademark and promote Free Software, visit the campaigns page, share the link, and let Groupon know that they behaved terribly”.**
Lucas Nussbaum, **Debian Project Leader**, sums the whole situation up succinctly:
“**This legal defense is not just about protecting GNOMEs trademark; it is about asserting to the corporate world that FLOSS trademarks can and will be guarded. Not just by the project in question, but by the community as a whole. As a result, all FLOSS trademarks will be strengthened at once.**”
More details can be found on the GNOME Groupon Campaign page.
- [GNOME vs Groupon Campaign Page][5]
--------------------------------------------------------------------------------
via: http://www.omgubuntu.co.uk/2014/11/gnome-groupon-trademark-battle
作者:[Joey-Elijah Sneddon][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:https://plus.google.com/117485690627814051450/?rel=author
[1]:http://groupon.com/
[2]:http://tsdr.uspto.gov/#caseNumber=76368848&caseType=SERIAL_NO&searchType=statusSearch
[3]:https://engineering.groupon.com/2014/open-source/sharing-is-caring-open-source-at-groupon/
[4]:https://mail.gnome.org/archives/foundation-list/2014-November/msg00020.html
[5]:http://www.gnome.org/groupon/

View File

@ -1,56 +0,0 @@
Chakra Linux 2014.11 Brings a Custom and Cool KDE 4.14.2 Desktop Gallery
================================================================================
> A new version of Chakra Linux has been released
**Chakra Linux, a distribution specially built to take advantage of KDE Software Compilation and the Plasma desktop, has been upgraded to version 2014.11 and is now ready for download.**
The developers of this distribution usually choose names of famous scientists. The current iteration of the Chakra Linux, which is actually the second version in the branch, has been dubbed Euler, after Swiss mathematician and physicist Leonhard Euler, who refined calculus and graph theory. Because it follows the KDE releases, it means that we will probably get another version in a few months.
Surprisingly, if you already have Chakra Linux installed, it won't be enough just to keep your system up to date. Upgrading the OS with the provided ISO is quite easy, but if you're doing it manually, then you'll have to follow a rather intricate tutorial on how to do it properly. It's not unusual for developers to make such big changes that result in the usual updating process not working, but sometimes it's necessary.
### The latest Chakra Linux is using KDE 4.14.2 ###
The Chakra Linux developers are following the latest KDE branch very closely, but not the latest version. Case in point, KDE 4.14.3 was released yesterday, but Chakra features KDE 4.14.2. On the other hand, the developers go to great lengths to customize the KDE desktop so that it's unique to this particular distribution.
"The Chakra team is happy to announce the second release of the Chakra Euler series, which follows the KDE Applications and Platform 4.14 releases. The main reason for providing this new ISO, in addition to providing a new KDE release, is that Chakra has now implemented the /usr merge changes. If you already have Chakra installed on your system manual intervention is needed, so please follow the [instructions][1] on how to properly update. For new installations using this ISO, this is of course not needed."
"The extra repository, which is disabled by default, provides the must-have GTK-based applications and their dependencies. Kapudan, our desktop greeter which runs after the first boot, will allow you to enable it. Please have in mind that our installer, Tribe, does not currently officially support UEFI, RAID, LVM and GPT, although you might find some workarounds in our forums," [reads][2] the official website.
The developers also say that the Linux kernel has been updated to version 3.16.4, the systemd component has been updated to version 216, and all of the video drivers, free or proprietary, have been updated as well.
A complete list of new features and updates can be found in the official announcement.
Download Chakra Linux 2014.11:
- [Chakra GNU/Linux 2014.11 (ISO) 64-bitFile size: 1.7 GB][3]
- [MD5][4]
![](http://i1-news.softpedia-static.com/images/news2/Chakra-Linux-2014-11-Brings-a-Custom-and-Cool-KDE-4-14-2-Desktop-Gallery-464889-1.jpg)
![](http://i1-news.softpedia-static.com/images/news2/Chakra-Linux-2014-11-Brings-a-Custom-and-Cool-KDE-4-14-2-Desktop-Gallery-464889-2.jpg)
![](http://i1-news.softpedia-static.com/images/news2/Chakra-Linux-2014-11-Brings-a-Custom-and-Cool-KDE-4-14-2-Desktop-Gallery-464889-3.jpg)
![](http://i1-news.softpedia-static.com/images/news2/Chakra-Linux-2014-11-Brings-a-Custom-and-Cool-KDE-4-14-2-Desktop-Gallery-464889-4.jpg)
![](http://i1-news.softpedia-static.com/images/news2/Chakra-Linux-2014-11-Brings-a-Custom-and-Cool-KDE-4-14-2-Desktop-Gallery-464889-5.jpg)
![](http://i1-news.softpedia-static.com/images/news2/Chakra-Linux-2014-11-Brings-a-Custom-and-Cool-KDE-4-14-2-Desktop-Gallery-464889-6.jpg)
![](http://i1-news.softpedia-static.com/images/news2/Chakra-Linux-2014-11-Brings-a-Custom-and-Cool-KDE-4-14-2-Desktop-Gallery-464889-7.jpg)
--------------------------------------------------------------------------------
via: http://news.softpedia.com/news/Chakra-Linux-2014-11-Brings-a-Custom-and-Cool-KDE-4-14-2-Desktop-Gallery-464889.shtml
作者:[Silviu Stahie][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://news.softpedia.com/editors/browse/silviu-stahie
[1]:http://chakraos.org/news/index.php?/archives/134-Warning-Manual-intervention-needed-on-your-next-major-update.html
[2]:http://chakraos.org/news/index.php?/archives/135-Chakra-2014.11-Euler-released.html
[3]:http://sourceforge.net/projects/chakra/files/2014.11/chakra-2014.11-euler-x86_64.iso
[4]:http://chakra-project.org/get/checksums.txt

View File

@ -1,50 +0,0 @@
GNOME 3.14.2 Officially Released, Finally Drops SSLv3
================================================================================
> Users will find the new version in the repositories
![](http://i1-news.softpedia-static.com/images/news2/GNOME-3-14-2-Officially-Released-Finally-Drops-SSLv3-464903-2.jpg)
**The GNOME development team has released the second update for the GNOME 3.14.x branch and it brings a large number of fixes and improvements for a lot of the packages from the stack.**
GNOME 3.14 was initially released a few weeks ago and the developers are still ironing out a few issues. The new version has been received very well by the community and it's been adopted already by numerous Linux distributions. It's very likely that GNOME 3.14.2 will be integrated in most of the big repositories, as soon as possible.
The GNOME project managed to stay on track and the new release has arrived on time. Not all of the packages in the stack have been updated, but there are more than enough to get users interested. It's a good idea to upgrade your desktop environment as soon as possible in order to get all of these enhancements.
### GNOME 3.14.2 gets a ton of improvements ###
Just like the previous iteration, the 3.14.2 release does have a few things that really stand out. For example, the NetworkManager dependency of GNOME Shell has been removed, the queued up notifications are now summarized, the handling of multi-day events has been improved, the GtkMenu use has been refined, various fixes for Mutter have been added, and the SSLv3 use has been disabled.
"Here comes our second update to GNOME 3.14, it has many fixes, various improvements, documentation and translation updates, we hope you'll enjoy it. Individual modules may get new stable 3.14 releases but our focus is now on the development branches, we released a first snapshot as 3.15.1 two weeks ago and will get another one by the end of the month.," [says][1] GNOME developer Frederic Peters.
GNOME 3.14.2 comes with updates for these core apps: Adwaita Icon Theme, Eye of GNOME, Epiphany, evolution-data-server, Glib, GNOME Calculator, GNOME Contacts, GNOME Desktop, GNOME Shell, GNOME Terminal, Mutter, Nautilus, Tracker, and more.
The apps that receive upgrades in the 3.14.2 branch include Aisleriot, Bijiben, Brasero, Cheese, Evolution, File Roller, Gedit, Four in a Row, GNOME Boxes, GNOME Maps, GNOME Music, Hitori, Orca, Rygel, Vinagre, and more.
We [detailed the GNOME 3.14.x release][2] when it was made available and you can find more details in the original report.
Download the GNOME 3.14.2 stack
- [GNOME 3.14.2 Stable Sources][3]
- [GNOME 3.14.2 Stable Modules][4]
- [GNOME 3.15.1 Unstable Sources][5]
- [GNOME 3.15.1 Unstable Modules][6]
But keep in mind that these are the source packages. If you want an easy upgrade or install, be sure to check the repositories.
--------------------------------------------------------------------------------
via: http://news.softpedia.com/news/GNOME-3-14-2-Officially-Released-Finally-Drops-SSLv3-464903.shtml
作者:[Silviu Stahie][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://news.softpedia.com/editors/browse/silviu-stahie
[1]:http://permalink.gmane.org/gmane.comp.gnome.devel.announce/397
[2]:http://news.softpedia.com/news/GNOME-3-14-Officially-Released-Screenshot-Tour-and-Video-459865.shtml
[3]:https://download.gnome.org/core/3.14/3.14.2/sources/
[4]:https://download.gnome.org/teams/releng/3.14.2/
[5]:https://download.gnome.org/core/3.15/3.15.1/sources/
[6]:https://download.gnome.org/teams/releng/3.15.1/

View File

@ -1,100 +0,0 @@
Budgie Desktop v8 Released With Improved Menu, Panel
================================================================================
![](http://www.omgubuntu.co.uk/wp-content/uploads/2014/11/budgie-desktop.jpg)
**A new version of [Evolve OS][1]s simple [Budgie Desktop Environment][2] has been released, and the improvements under its wing are impressive.**
Made up of 78 commits, the lightweight desktop lands with a host of new options and applets to play with. Its plumage has also benefitted from a bit of TLC, with key parts of the shell feeling fresher and looking more refined.
But will the changes ruffle the feathers of the Budgie flock or leave them squawking in awe? Lets take a closer look.
### Budgie v8 ###
#### Menu Changes ####
The **Budgie Menu** now uses a narrower compact layout by default. This style lists the applications in categories (as previously) but sorted by usage rather than name.
Software that you open most often sits nearer the top of each category header. Its an efficacious decision that should help save time for those who hunt n scroll for apps rather than use the handy search filter.
![The Menu uses compact mode by default](http://www.omgubuntu.co.uk/wp-content/uploads/2014/11/budgie-desktop-8.jpg)
The Menu uses compact mode by default
The old two-pane setup that featured in earlier builds remains available; you can toggle it back on in Budgies preferences (**right click on the menu applet > Preferences**).
The power option menu that previously resided in the main menu has been moved over to the System Tray applet (i.e., volume). Additionally, you can now access System Settings entries from the menu itself — no more scratching of heads!
#### Panel Changes ####
![Quicklist support in Budgie 0.8](http://www.omgubuntu.co.uk/wp-content/uploads/2014/11/quicklist-support-in-budgie.jpg)
Quicklist support in Budgie 0.8
The Budgie Panel and task list applet both benefit from a raft of improvements, including new auto-hide options, dynamic theming support and a new GNOME 2-style menu bar option.
- Auto-hide (optional)
- Quicklist support
- Dark theme support
- Application pinning
- App attention hint
- GNOME Panel theming
- Old-school Menu Bar applet (optional)
![](http://www.omgubuntu.co.uk/wp-content/uploads/2014/11/classic-menu.jpg)
#### Elsewhere ####
Other changes include support for GNOME 3.10 and up; improved animations when changing wallpapers; and the run dialog has been hugely improved in design, sporting an almost Alfred/GNOME-Do-esque design. Mmmhm!
![Run, Run, Run](http://www.omgubuntu.co.uk/wp-content/uploads/2014/11/budgie-run-dialog.jpg)
Run, Run, Run
### Install Budgie Desktop on Ubuntu ###
Budgie 0.8 is, as with previous releases, available to install in Ubuntu 14.04 LTS and Ubuntu 14.10 by way of an official PPA. The desktop can be installed alongside Unity, GNOME Shell and Cinnamon without much (if any) issue.
To install, open a new Terminal window and enter the following commands. Enter your password where prompted.
sudo add-apt-repository ppa:evolve-os/ppa
sudo apt-get update && sudo apt-get install budgie-desktop
After the install has completed you will need to log out of Unity (or whichever desktop youre currently using). At the Unity Greeter click the Ubuntu logo emblem, select the Budgie session from the session list, and then log in as normal. The Budgie desktop will load.
![](http://www.omgubuntu.co.uk/wp-content/uploads/2014/11/select-budgie.jpg)
#### Notes for Ubuntu Users ####
![Expect Odd Theming Issues in Ubuntu](http://www.omgubuntu.co.uk/wp-content/uploads/2014/11/not-all-perfect.jpg)
Expect Odd Theming Issues in Ubuntu
While Budgie is now easy for Ubuntu users to install it is not designed for it specifically (the Evolve OS distribution is the best way to experience it).
Naturally, you might not fancy upheaving to another OS. Thats fine, but if you plan on keeping Budgie caged in Ubuntu youll need to note the following caveats (lest you end up bird-brained).
First up, **Budgie is under active development**. Several key features remain missing, including native network management support. An applet can be added to the panel that supports Ubuntus Indicator Applets, but its a little rough around the edges.
You should also expect some theming issues when using the shell with Ambiance/Radiance. The Adwaita theme (and other GNOME themes) work best. You should also disable Ubuntus Overlay Scrollbars.
Finally, logout (volume > power button) **does not work under Ubuntu**. To log out you should use the run dialog (Alt+F2) and the following command:
gnome-session-quit
If all of that sounds like fun rather than faff, theres plenty to enjoy in Budgie and not just its minimal system footprint! Let us know your own thoughts on it, what youd like to see it add next, etc. in the comments below.
--------------------------------------------------------------------------------
via: http://www.omgubuntu.co.uk/2014/11/budgie-desktop-0-8-released-big-changes
作者:[Joey-Elijah Sneddon][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:https://plus.google.com/117485690627814051450/?rel=author
[1]:http://evolve-os.com/
[2]:http://www.omgubuntu.co.uk/2014/07/install-budgie-evolve-os-desktop-ubuntu-14-04

View File

@ -1,51 +0,0 @@
Ubuntu 15.04 Gets Tentative Release Date of April 23, 2015
================================================================================
![](http://i.imgur.com/FfX14E9.jpg)
**Doing anything special on April 23 next year? You might well be — it's the tentative release date being given for Ubuntu 15.04 Vivid Vervet.**
The date, along with those of various other development milestones, is listed as part of a [draft release schedule][1] on the Ubuntu Wiki page for the V update. As of writing all dates are subject to approval from the Ubuntu release team and are therefore **not final**.
Ubuntu's previous spring release, 14.04 LTS, went live on April 17, 2014.
### Veracity Potential is Void ###
![Dates not yet ready to be inked in](http://www.omgubuntu.co.uk/wp-content/uploads/2014/05/california-calendar.jpg)
Dates not yet ready to be inked in
Draft means just that, but having covered some 10 Ubuntu releases over five years I do know that the proposed dates don't tend to differ too wildly from those that go final (famous last words, I'm sure!).
Even so, take the proposals with a pinch of optimism for now. I'll be keeping both this page and the fancy-schmancy graphic updated as, if or when anything changes.
### Key Ubuntu 15.04 Release Dates ###
As with all releases post-13.04, Ubuntu proper only makes fleeting appearances in select milestone releases, specifically the final beta and the release candidate stages.
Ubuntu's family of flavours, which may include Ubuntu MATE this cycle, take full advantage of the testing opportunities at hand.
- **Alpha 1** December 18th (for flavours)
- **Alpha 2** January 22nd (for flavours)
- *Feature Freeze* — February 19th
- **Beta 1** February 26th (for flavours)
- *UI Freeze* — March 12th
- **Final Beta** March 26th
- *Kernel Freeze* — April 9th
- **Release Candidate** April 16th
The final release of the Vivid Vervet in all its vivacious glory is pencilled in for release on:
- **Ubuntu 15.04 Final** April 23rd
--------------------------------------------------------------------------------
via: http://www.omgubuntu.co.uk/2014/11/ubuntu-15-04-release-schedule-date-vivid-vervet
作者:[Joey-Elijah Sneddon][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:https://plus.google.com/117485690627814051450/?rel=author
[1]:https://wiki.ubuntu.com/VividVervet/ReleaseSchedule

View File

@ -1,42 +0,0 @@
Systemd fallout: Two Debian technical panel members resign
================================================================================
![](http://www.itwire.com/media/k2/items/cache/985881530be9dfdb268b3ae49be9a710_XL.jpg)
**Two well-known and experienced Debian developers, both members of the project's technical committee, have announced they will be leaving the committee.**
The resignations of [Colin Watson][1] and [Russ Allberry][2] from the panel come soon after senior developer Joey Hess [resigned][3] from the project altogether.
There has been much acrimony recently over the adoption of the systemd init system as the default for Jessie, the next release of Debian, which is expected to come out in the next few months.
The Debian Technical Committee [decided][4] back in February, via the casting vote of panel chief Bdale Garbee, to adopt systemd as the default. This decision came after months of discussion.
Recently, there has been [another push][5] for reconsideration led by another technical committee member, Ian Jackson, and [a general resolution][6] was put up for vote. It is open for voting until midnight on November 18, UTC (10am on Wednesday AEST). There are a few options proposed by others, including one from the Debian Project leader Lucas Nussbaum, besides the main resolution.
In the initial vote back in February, Allberry supported systemd as the default, while Watson, an employee of Canonical, the company that creates the Ubuntu GNU/Linux distribution, expressed a preference for upstart. Jackson also backed upstart.
In [a post][7] explaining his decision, Watson, one of the first batch to join Canonical, attributed it to a general move on his part to start spending his Debian time on things he found enjoyable. Late last month, [he asked][8] to be moved from the Ubuntu Foundations team to the Launchpad engineering team. Watson has given the Debian Technical Committee time to appoint someone in his place before he moves on.
In contrast, Allberry's [resignation post][9] said he wanted to leave immediately, though he later added that he would stay on for a while if needed.
His frustration was clear: "If any part of this doesn't make sense, or if any of it feels like an attack or a reaction to any single person or event, I'm happy to clarify. I would appreciate it if people would ask for clarification rather than making assumptions, as assumptions about other people's motives are one of the things that I find the most demoralising about the Debian project right now."
--------------------------------------------------------------------------------
via: http://www.itwire.com/business-it-news/open-source/66153-systemd-fallout-two-debian-technical-panel-members-resign
作者:[Sam Varghese][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://www.itwire.com/business-it-news/open-source/itemlist/user/902-samvarghese
[1]:https://lists.debian.org/debian-ctte/2014/11/msg00052.html
[2]:https://lists.debian.org/debian-ctte/2014/11/msg00071.html
[3]:http://www.itwire.com/business-it-news/open-source/66014-systemd-fallout-joey-hess-quits-debian-project
[4]:http://www.itwire.com/business-it-news/open-source/63121-garbees-casting-vote-means-systemd-is-debian-init
[5]:http://www.itwire.com/business-it-news/open-source/65781-pushback-against-systemd-in-debian-gathers-steam
[6]:https://www.debian.org/vote/2014/vote_003
[7]:https://lists.debian.org/debian-ctte/2014/11/msg00052.html
[8]:http://www.chiark.greenend.org.uk/ucgi/~cjwatson/blosxom/ubuntu/2014-10-26-moving-on-but-not-too-far.html
[9]:https://lists.debian.org/debian-ctte/2014/11/msg00071.html

View File

@ -1,64 +0,0 @@
After an 18 Month Gap, Opera for Linux Returns With New Stable Release
================================================================================
**The first stable release of Opera for Linux in more than 18 months is now available for download.**
![Hello again, Opera!](http://www.omgubuntu.co.uk/wp-content/uploads/2014/06/iopera.jpg)
Hello again, Opera!
Opera for Computers for Linux 26 (no really, that's its name) features a complete top-to-bottom overhaul, new features, and better performance thanks to its Aura and Blink underpinnings (yes, it's no longer based on the proprietary Presto web engine).
#### Features ####
The browser [wiggled its toes in tux-friendly waters back in June with the launch of a developer preview][1], but if you last tried Opera when it looked like this, you'll want to grab some smelling salts: things have changed.
youtube 视频,发布时可换成链接地址
<iframe width="750" height="422" src="https://www.youtube.com/embed/-kS10C2BUOs?feature=oembed" frameborder="0" allowfullscreen></iframe>
Alongside an impressive new look and blazing fast, standards-compliant rendering engine come many new and improved features.
- **Discover** — Shows articles from around the web in a range of categories
- **Speed Dial** — Supports interactive widgets, folders, and themes
- **Tab Peek** — Preview the content of an open tab without switching back to it
- **Opera Turbo** — Data-saving mode ideal for patchy connections
- **Rich bookmarking** — including new sharing functionality
- **Add-ons** — compatible with Chrome extensions, too
- **Support for HiDPI displays on Linux**
### Download Opera for Linux 26 ###
Opera say those running Opera 12.6 on a 64-bit version of Ubuntu still supported by Canonical will automatically receive this new update through the Ubuntu Software Center.
But in all honesty I'm not sure anyone is in that boat! So, helpfully, a Debian installer can be downloaded from the Opera website. This will also add the Opera repository to your Software Sources to enable you to receive future updates in a timely fashion.
- [Download Opera for Computers for Linux 26][2]
Feel free to kit your new browser out with our nifty Opera Add-On, too:
- [Install OMG! Ubuntu! Opera Extension][3]
#### Important Notice about Linux Support ####
**Opera for Linux is 64-bit only**. The company say this decision was made based on what most Linux desktop users have installed. While annoying it is part of a larger overall trend away from 32-bit software, with Opera for Mac also being 64-bit exclusive, too.
In another case of “spending limited resources wisely”, this release is only being officially supported on Ubuntu (and Ubuntu-based derivatives, including Linux Mint).
Users on other distributions, from Arch to openSUSE, can still install Opera for Linux but will need to [use a (fairly simple) workaround][4] or hunt down an unofficial repository.
**If you give it a spin let us know what you make of it in the comments below.**
--------------------------------------------------------------------------------
via: http://www.omgubuntu.co.uk/2014/12/new-opera-for-linux-goes-stable-download-now
作者:[Joey-Elijah Sneddon][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:https://plus.google.com/117485690627814051450/?rel=author
[1]:http://www.omgubuntu.co.uk/2014/06/opera-linux-chromium-download-released
[2]:http://opera.com/computer/linux
[3]:https://addons.opera.com/en/extensions/details/omg-ubuntu-for-opera/?display=en
[4]:https://gist.github.com/ruario/99522c94838d0680633c#file-manual-install-of-opera-md

View File

@ -1,61 +0,0 @@
Firefox 34 Arrives with Plugin-Free Video Calling and Powerful WebIDE
================================================================================
![](http://www.omgubuntu.co.uk/wp-content/uploads/2014/09/firefox-banner.jpg)
**It's been a busy few months for Mozilla, what with agreeing to a deal to switch its default search provider to Yahoo!, launching a custom version of its browser packed full of developer goodies, and launching Firefox OS handsets in new territories.**
Today, Mozilla has released Firefox 34 for Windows, Mac and Linux desktops, the first stable release since last month's security and bug fix update.
### Headline Feature ###
Despite the rapid release cycle Mozilla once again manages to deliver some great new features.
Making its first appearance in a stable release is **Firefox Hello**, Mozilla's WebRTC feature.
Though not enabled for all (you can manually turn it on via about:config), the feature brings plugin-free video and voice calls to the browser. No Skype, no add-ons, no hassle. You simply click the Firefox Hello icon, send your share link to the recipient to initiate a connection (assuming they're also using a WebRTC-enabled browser, like Google Chrome or Opera).
![The Hello Firefox Popup](http://www.omgubuntu.co.uk/wp-content/uploads/2014/12/hello-firefox.jpg)
The Hello Firefox Popup
Signing in with a Firefox account will give you more features, including a contacts book with one-click calling (no need to share links).
#### Other Changes ####
Version 34 also makes it easier to **switch themes** (formerly known as personas), with live previews and a switcher menu now available on the **Customising canvas**:
![Ad-hoc theme switching](http://www.omgubuntu.co.uk/wp-content/uploads/2014/12/firefox-theme-switcher.jpg)
Ad-hoc theme switching
The first major search engine change arrives in this release, with Yandex shipping as default for Belarusian, Kazakh, and Russian locales. Yahoo! will be enabled for US users in the near future. But remember: [this does not affect the version of Firefox provided in Ubuntu][1].
US users get secure **HTTPS** Wikipedia searching from the search box:
![Secure Wikipedia Searches for English US Users](http://www.omgubuntu.co.uk/wp-content/uploads/2014/12/firefox-https-search-for-wikipedia.jpg)
Secure Wikipedia Searches for English US Users
In addition to improved HTML5 support (largely around WebCrypto features) a [**new WebIDE tool**][2] ships in this release, and is packed full of great tools for developers.
From Android connectivity and an in-app editor to support for deploying and testing apps in a Firefox OS simulator. If you haven't tried Firefox OS in a while, v2.2 (unstable) has plenty to play with including edge swiping, new home screen arranging features, and some new APIs.
![](http://www.omgubuntu.co.uk/wp-content/uploads/2014/12/firefox-webide.jpg)
### Download Firefox 34 ###
Canonical will roll out Firefox 34 to users of Ubuntu 12.04, 14.04 and 14.10 in the next 24 hours or so, so keep an eye out. If you're super impatient the release can also be downloaded from Mozilla servers directly.
--------------------------------------------------------------------------------
via: http://www.omgubuntu.co.uk/2014/12/firefox-34-changes-include-hello-html5-webide
作者:[Joey-Elijah Sneddon ][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:https://plus.google.com/117485690627814051450/?rel=author
[1]:http://www.omgubuntu.co.uk/2014/11/firefox-set-yahoo-default-search-engine-ubuntu-not-affected
[2]:https://developer.mozilla.org/en-US/docs/Tools/WebIDE

View File

@ -1,78 +0,0 @@
From Mint to Trisquel: The Top Linux Distro Releases in November 2014
================================================================================
**November wasn't heavy on new Linux distribution releases, but still had more than enough to keep distro-hoppers bouncing from download server to ISO mirror and back again.**
From the free software ethic of **Trisquel** to the nostalgic glow of **Ubuntu MATE**, let's take a look at the major Linux distribution releases made in November 2014.
![](http://www.omgubuntu.co.uk/wp-content/uploads/2014/11/linux-mint-17.jpg)
### Linux Mint 17.1 ###
Linux Mint 17.1 Rebecca is the big hitter on this list, going stable just in time to make it.
Based on Ubuntu 14.04 and using Linux kernel 3.13, the update also comes loaded with the **latest [Cinnamon 2.4][1] desktop environment, a customisable version of the Nemo file manager**, and improvements to the Update Manager to make package upgrades safer, saner and swifter.
Other changes see the **Background**, **Login** and **Theme** settings panes redesigned, and **Privacy and Notification sections** added. The default **system font has been switched to Noto Sans**, while fans of customisation will enjoy new colors added to the Mint-X theme package.
Linux Mint 17.1 delivers a set of solid, well thought out changes and performance improvements, important for an LTS release supported until 2019.
More information and those all important downloads can be found on the official project website.
- [Visit the Linux Mint Website][2]
### Ubuntu Mate 14.04 LTS ###
![](http://www.omgubuntu.co.uk/wp-content/uploads/2014/11/ubuntu-mate-lts.jpg)
It may have [arrived after the release of Ubuntu MATE 14.10][3] (**timey-wimey**), but as the first Long Term Support release of the flavor Ubuntu MATE 14.04 was welcomed with warm arms, especially by those who love to bask in the green-hued glow of GNOME 2 nostalgia.
Packed with security updates, MATE 1.8.1, and new software included out of the box, Ubuntu MATE 14.04 LTS is a notable update with plenty to tempt those on the newer (but older) 14.10 release.
For full hardware requirements, support information and download links, head on over to the official project website.
- [Download Ubuntu MATE 14.04 LTS][4]
### Trisquel 7.0 ###
![](http://www.omgubuntu.co.uk/wp-content/uploads/2014/11/trisquel-7-300x224.jpg)
The [latest stable release of Trisquel][5], an Ubuntu-based distribution endorsed by the Free Software Foundation (FSF), arrived in the middle of November — and was met by **a lot** of interest.
The free (as in freedom) distribution is built on Ubuntu 14.04 LTS but ships without any of the proprietary bits and pieces. It's a “pure” Linux experience that may require some workarounds, but serves to flag up the areas where more attention is needed in FOSS hardware support and app alternatives.
The Libre Linux 3.13 Kernel, GNOME 3.12 Flashback desktop and the Firefox-based Abrowser 33 are among the changes to be found in Trisquel 7.
- [Download Trisquel 7][6]
### Other Notable Releases ###
![](http://www.omgubuntu.co.uk/wp-content/uploads/2014/11/opensuse-desktop-kde.jpg)
Outside of the Ubuntu-based bubble November 2014 saw releases of other popular Linux distributions, including beta milestones of Mageia 5 and Fedora 21, and a new stable release of Scientific Linux 6.6.
Joining them is openSUSE 13.2 (stable) — the first release to follow a change in the way openSUSE development takes place, the first to adopt the new openSUSE design guidelines and the first to ship with a streamlined (if still unwieldy) installer.
The release has been getting great reviews from the geek press, who gave particular praise for the GNOME 3.14 implementation.
Coming from Ubuntu, where “everything just works”, the cultural and technical gulf can be daunting at first. But if you have some free time, like the color green and relish a challenge, the official openSUSE 13.2 [release announcement][7] should be your starting point.
**Have you tried any of these releases above? Let us know what you made of them in the space down below .**
--------------------------------------------------------------------------------
via: http://www.omgubuntu.co.uk/2014/11/linux-distro-releases-round-november-2014
作者:[Joey-Elijah Sneddon][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:https://plus.google.com/117485690627814051450/?rel=author
[1]:http://www.omgubuntu.co.uk/2014/11/install-cinnamon-2-4-ubuntu-14-04-lts
[2]:http://www.linuxmint.com/download.php
[3]:http://www.omgubuntu.co.uk/2014/11/ubuntu-mate-14-04-download-released
[4]:https://ubuntu-mate.org/longterm/
[5]:http://www.omgubuntu.co.uk/2014/11/download-trisquel-7-0-kernel-3-13
[6]:https://trisquel.info/en/download
[7]:https://news.opensuse.org/2014/11/04/opensuse-13-2-green-light-to-freedom/

View File

@ -1,540 +0,0 @@
translating by yupmoon
Readers' Choice Awards 2014--Linux Journal
================================================================================
It's time for another Readers' Choice issue of Linux Journal! The format last year was well received, so we've followed suit making your voices heard loud again. I couldn't help but add some commentary in a few places, but for the most part, we just reported results. Please enjoy this year's Readers' Choice Awards!
We'd like to make Readers' Choice Awards even better next year. Please send ideas for new categories and any comments or feedback via [http://www.linuxjournal.com/contact][1].
Please see the December 2014 issue of Linux Journal for the complete list of winners.
### Best Linux Distribution ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f1.jpg)
Although this year the Debian/Ubuntu-based distros took the lion's share of the votes, the "Best Linux Distribution" category is a bit like "Best Kind of Pizza"—even the bottom of the list is still pizza. It's hard to go wrong with Linux, and the wide variety of votes only proves how many different choices exist in our wonderful Open Source world.
- Ubuntu 16.5%
- Debian 16.4%
- Linux Mint 11%
- Arch Linux 8.5%
- Fedora 8.3%
- CentOS 6%
- openSUSE 5.3%
- Kubuntu 4.1%
- Gentoo 2.9%
- Slackware 2.7%
- Xubuntu 2.5%
- Other 2.3%
- Red Hat Enterprise Linux 1.6%
- NixOS 1.4%
- elementary OS 1.3%
- Lubuntu 1.2%
- CrunchBang 1%
- Mageia .7%
- LXLE .4%
- Tails .4%
- Android-x86 .3%
- Bodhi Linux .3%
- Chakra .3%
- Kali Linux .3%
- PCLinuxOS .3%
- SolydK .3%
- Mandriva .1%
- Oracle Linux .1%
### Best Mobile Linux OS ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f2.jpg)
Android is such a dominant force in the mobile world, we decided to allow Android variants to be counted separately. So although the underlying system on some of these are indeed Android, it seems far more informative this way.
- Stock Android 37.1%
- Sailfish OS 27.6%
- CyanogenMod 20.2%
- Other 3%
- Ubuntu Phone 3%
- Amazon Fire OS 1.5%
- Ubuntu for Android 1.4%
- Replicant .8%
- Tizen .8%
### Best Linux Smartphone Manufacturer ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f3.jpg)
- Samsung 29%
- Jolla 26.7%
- Nexus 16.5%
- Other 7.1%*
- HTC 7%
- LG 5.3%
- Sony 3.7%
- Nokia 1.8%
- Huawei 1.4%
- GeeksPhone 1%
- Amazon .6%
*Under "Other", Motorola got many write-ins, followed by OnePlus.
### Best Linux Tablet ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f4.jpg)
- Google Nexus 7 35.3%
- Google Nexus 10 14.8%
- Samsung Galaxy Tab 14%
- Samsung Galaxy Note 9.8%
- ASUS Transformer Pad 8.4%
- Other 6.4%
- Kindle Fire HD 4.7%
- ASUS MeMO Pad 2%
- Dell Venue 1.6%
- Acer Iconia One 1.4%
- Samsung Galaxy Note Edge .9%
- Ekoore Python S3 .7%
### Best Other Linux-Based Gadget (not including smartphones or tablets) ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f5.jpg)
We are a Raspberry Pi-loving bunch, that's for sure! But really, who can blame us? With the new B+ model, the already awesome RPi is getting sleeker and more useful. I'm no fortune teller, but I suspect I know next year's winner already.
- Raspberry Pi 71.4%
- BeagleBone Black 8.1%
- Other 4.3%*
- Lego Mindstorms Ev3 3.7%
- Moto 360 3.4%
- Cubieboard 1.7%
- Parrot A.R Drone 1.7%
- Samsung Gear S 1.4%
- Yamaha Motif XF8 1.1%
- Nvidia Jetson-K1 Development System .8%
- Cloudsto EVO Ubuntu Linux Mini PC .5%
- VoCore Open Hardware Computer .5%
- LG G Watch .4%
- RaZberry .4%
- VolksPC .4%
- IFC6410 Pico-ITX Board .2%
- JetBox 5300 .1%
*Under "Other", the most popular write-ins were Odroid and CuBox.
### Best Laptop Vendor ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/lenovo.jpg)
This category used to be a rating of which vendors worked the best with Linux, but thankfully, now most laptops work fairly well. So, we truly get to see the cream rise to the top and focus on things other than "it works with Linux". It's awesome living in the future.
- Lenovo 32%
- ASUS 19.3%
- Dell 18.5%
- System76 10.6%
- Other 7.9%*
- Acer 4.5%
- ThinkPenguin 1.9%
- LinuxCertified 1.8%
- ZaReason 1.6%
- EmperorLinux 1.5%
- CyberPower .3%
- Eurocom .1%
*Under "Other", the most popular write-ins were (in this order) Apple running Linux, HP, Toshiba and Samsung.
### Best Content Management System ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f6.jpg)
- WordPress 34.7%
- Drupal 25.3%
- Joomla! 11.1%
- MediaWiki 10.5%
- Other 10%*
- Alfresco 4.3%
- WebGUI 1.3%
- ikiwiki 1.1%
- eZ publish .7%
- Wolf CMS .4%
- Elgg .3%
- Blosxom .2%
*Under "Other", the most popular write-ins were (in this order) DokuWiki, Plone, Django and Typo3.
### Best Linux-Friendly Web Hosting Company ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/question.jpg)
When it comes to Web hosting, it's hard to find a company that isn't Linux-friendly these days. In fact, finding a hosting provider running Windows is more of a challenge. As is obvious by our winner ("Other"), the options are amazing. Perhaps a "Worst Web Hosting" category would be more useful!
- Other 22.8%*
- Amazon 22.5%
- Rackspace 13.1%
- Linode 10.4%
- GoDaddy.com 6.5%
- OVH 5.6%
- DreamHost 5.4%
- 1&1 4.8%
- LAMP Host 2.9%
- Hurricane Electric 2.6%
- Liquid Web .6%
- RimuHosting .6%
- Host Media .5%
- Savvis .5%
- Blacknight Solutions .4%
- Netfirms .4%
- Prgmr .4%
*Under "Other", the most write-ins went to (in this order) Digital Ocean (by a landslide), followed by Hetzner, BlueHost and WebFaction.
### Best Web Browser ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f7.jpg)
Firefox takes the gold this year by a significant margin. Even if you combine Chrome and Chromium, Firefox still takes the top spot. There was a time when we worried that the faithful Firefox would fade away, but thankfully, it's remained strong and continues to be a fast, viable, compatible browser.
- Firefox 53.8%
- Chrome 26.9%
- Chromium 8.1%
- Iceweasel 4%
- Opera 3%
- Other 2%
- SeaMonkey .8%
- rekonq .5%
- dwb .4%
- QupZill .4%
- Dillo .2%
### Best E-mail Client ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f8.jpg)
If I didn't know firsthand how many hard-core geeks live among us, I might accuse Kyle Rankin of voting fraud. His beloved Mutt e-mail client doesn't take top spot, but for a program without any graphical interface, third place is impressive!
- Mozilla Thunderbird 44.4%
- Gmail 24.7%
- Mutt 6.8%
- Evolution 5.5%
- KMail 5.3%
- Other 3.2%
- Claws Mail 2.2%
- Zimbra 2%
- Alpine 1.8%
- Geary 1.7%
- SeaMonkey 1%
- Opera Mail .9%
- Sylpheed .4%
### Best Audio Editing Tool ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f9.jpg)
- Audacity 69.1%
- FFmpeg 10.8%
- VLC 9.7%
- Ardour 4.9%
- Other 1.9%
- SoX 1.3%
- Mixxx 1.1%
- LMMS .7%
- Format Junkie .5%
### Best Audio Player ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f10.jpg)
We figured VLC would take top spot in the video player category (see below), but it was a bit of a surprise to see how many folks prefer it as an audio player as well. Perhaps it's become the one-stop shop for media playback. Either way, we're thrilled to see VLC on the top.
- VLC 25.2%
- Amarok 15.3%
- Rhythmbox 10.4%
- Clementine 8.6%
- MPlayer 6.1%
- Spotify 5.9%
- Audacious 5.5%
- Banshee 4.6%
- Other 4%*
- XBMC 3.1%
- foobar2000 3%
- Xmms 2.4%
- DeaDBeeF 1.2%
- MOC .9%
- cmus .8%
- Ncmpcpp .8%
- Guayadeque .6%
- Mixxx .4%
- MPC-HC .4%
- Subsonic .4%
- Nightingale .3%
- Decibel Audio Player .2%
*Under "Other", Quod Libet had the most write-ins.
### Best Video Player ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f10_0.jpg)
- VLC 64.7%
- MPlayer 14.5%
- XBMC 6.4%
- Totem 2.7%
- Other 2.7%*
- Plex 2%
- Kaffeine 1.9%
- mpv 1.6%
- MythTV 1.6%
- Amarok 1.4%
- Xmms .3%
- Daum Potplayer .2%
- Clementine .1%
*Under "Other", most write-ins were for SMPlayer.
### Best Video Editor ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f10_1.jpg)
This is another testament to the geek factor when it comes to our readers. We didn't specify "non-linear editor", so by a transcoding technicality, VLC eked out a win in the video editing category. Well played, VLC, well played.
- VLC 17.5%
- Kdenlive 16.4%
- Blender 15.1%
- Avidemux 13.2%
- OpenShot 13.2%
- Cinelerra 7.5%
- PiTiVi 4.9%
- LightWorks 4.8%
- Other 4.7%
- LiVES 1.4%
- Shotcut .6%
- Jahshaka .4%
- Flowblade .4%
### Best Cloud-Based File Storage ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f11.jpg)
In a category that used to have few options, Dropbox still takes top spot, but the margin is closing. It's hard to argue against Dropbox's convenience and stability, but hosting your own data on ownCloud gives it quite a boost into the second-place spot.
- Dropbox 30.5%
- ownCloud 23.6%
- Google Drive 16%
- rsync 8.3%
- Other 7.5%*
- Amazon S3 6.6%
- SpiderOak 4.4%
- Box 1.8%
- Copy 1%
- AjaXplorer .3%
*Under "Other", the most write-ins went to Younited and MEGA. Many also said things like "no cloud is the best choice/my files stay on my storage/local only".
### Best Linux Game ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/CIVILIZATION-V-FRONT-OF-BOX.jpg)
I rarely play games, so every year I look forward to this category to find the most popular options for those few times I do. I'm personally tickled to see NetHack so high on the list, especially considering the opposition. There's just something about wandering around random tunnels that appeals to the old-school DnD player in all of us.
- Civilization 5 26.5%
- Other 23.5%*
- Team Fortress 2 8.7%
- NetHack 8.4%
- X-Plane 10 7.1%
- Dota 6.1%
- Bastion 5.4%
- Scorched 3D 3.7%
- Destiny 3.6%
- Ultima IV 1.9%
- FreeCol 1.8%
- Kpat 1.4%
- FreeOrion 1.1%
- Ryzom .9%
*Under "Other", the most write-ins were (in this order) Minecraft, 0 A.D., Frozen Bubble, Battle for Wesnoth, Portal and Counter Strike.
### Best Virtualization Solution ###
I think the relationship with Vagrant has helped Oracle's VirtualBox significantly in popularity. Yes, Vagrant works with other virtualization platforms, but since it so seamlessly integrates with VirtualBox, I think it gets quite a boost. Virtualization is such an efficient and reliable way to implement systems, bare-metal solutions are almost a thing of the past!
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/Virtualbox_logo_0.jpg)
- Oracle VM VirtualBox 33.4%
- VMware 22.3%
- KVM 21.1%
- XEN 5.7%
- QEMU 5.3%
- OpenStack 4.9%
- Other 4.2%*
- OpenVZ 1.7%
- Linux-VServer 1.3%
- Symantec Workspace Virtualization .1%
*Under "Other", the most write-ins went to Docker, ProxMox and LXC, in that order.
### Best Monitoring Application ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/Nagios-Core-4.0.8.png)
- Nagios 27.1%
- Wireshark 20.7%
- htop 12.3%
- Zabbix 10.5%
- Other 8.6%*
- Zenoss 6.2%
- Munin 3.4%
- PC Monitor 2.8%
- New Relic 1.9%
- Opsview 1.2%
- SaltStack 1%
- NTM (Network Traffic Monitor) .7%
- xosview .7%
- Manage Engine .5%
- FlowViewer .3%
- Circonus .2%
- SysPeek .2%
*Under "Other", most write-ins went to Icinga and OpenNMS.
### Best DevOps Configuration Management Tool ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/Git-Logo-2Color.jpg)
It was interesting to see Git take top spot in this category, because although it certainly would work to use standard version control on configuration files, I always assumed it would be used alongside tools like Chef or Puppet. If nothing else, the DevOps movement has taught crusty old system administrators like myself to treat configuration files like code. Version control is incredible, and it seems as though most readers agree.
- Git 39.4%
- Puppet 17.2%
- Ansible 8.9%
- cron jobs 8.8%
- Subversion 7.6%
- Chef 5%
- SaltStack 5.4%
- Other 4.6%*
- CFEngine 3%
*Under "Other", most write-ins went to NixOps.
### Best Programming Language ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f13.jpg)
- Python 30.2%
- C++ 17.8%
- C 16.7%
- Perl 7.1%
- Java 6.9%
- Other 4.6%
- Ruby 4.3%
- Go 2.4%
- JavaScript 2.4%
- QML 2.2%
- Fortran 1.4%
- Haskell 1.4%
- Lisp 1.2%
- Erlang .6%
- Rust .6%
- D .4%
- Hack .1%
*Under "Other", most write-ins went to Scala, PHP and Clojure (in that order).
### Best Scripting Language ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f13_0.jpg)
Python is incredibly powerful, and it appears to be a favorite in both the scripting and programming categories. As someone who knows Bash and a little PHP, I think it's clear what I need to focus on as I delve into the world of development. Meaningful whitespace, here I come!
- Python 37.1%
- Bash/Shell scripts 27%
- Perl 11.8%
- PHP 8.4%
- JavaScript 6.7%
- Ruby 4.9%
- Other 2.1%
- Lua 2%
### Best New Linux/Open-Source Product/Project ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/11781f14.jpg)
Docker is clearly our winner here, and rightly so—what a game-changing technology. It's nice to see Jolla/Sailfish get some love as well. We love Android, but having a choice is a vital part of who we are as Open Source advocates.
- Docker 28%
- Jolla and Sailfish OS 19%
- LibreOffice 7%
- ownCloud 5%
- Steam 5%
- Zenoss Control Center 5%
- Raspberry Pi 4%
- Git 4%
- Apache Cordova/OpenOffice/Spark/Tika 3%
- Ansible 2%
- Elementary OS 2%
- OpenStack 2%
- Zabbix 2%
- CoreOS 2%
- Firefox OS 2%
- KDE Connect 1%
- NixOS and NixOps 1%
- Open Media Vault 1%
### Coolest Thing You've Ever Done with Linux ###
![](http://www.linuxjournal.com/files/linuxjournal.com/ufiles/imagecache/slideshow-400/tux_cruise.png)
This is my favorite new category for the Readers' Choice Awards. Imagine attending a Linux conference and asking everyone the coolest thing they've done with Linux. That's basically what happened here! We've listed a handful of our favorites, but for the entire list, check out: [http://www.linuxjournal.com/rc2014/coolest][2].
Note: the most common answers were "use it"; "rescue data/photos/whatever off broken Windows machines"; "convert friends/family/businesses to Linux"; "learn"; "teach"; "get a job"; "home automation"; and "build a home media server". The following list is of our favorite more-specific and unique answers, not the most common ones.
- Building my procmail pre-spam spam filter back in the mid-late 1990s.
- 450-node compute cluster.
- 7.1 channel preamp with integrated mopidy music player.
- A robot running Linux (for the Eurobot annual competition).
- Accidentally printing on the wrong continent.
- Adding an audio channel to a video while also syncing it.
- Analyzed NASA satellite data with self-written code.
- Annoyed the cat remotely.
- Automated my entire lighting setup in my house to respond to voice and my mobile apps.
- Automatic window plant irrigation system.
- Bathroom radio.
- Brewing beer.
- Built an application that runs on the International Space Station.
- Built a system for real-time toll collection for a major toll highway system.
- Built our own smartphone.
- Built Web-based home alarm system on Raspberry Pi.
- Cluster of Raspberry Pis to crack encrypted office documents.
- Controlled my Parrot drone.
- Controlled the comms for 186 Wind turbines.
- Controlling my Meade Telescope with Stellarium under Linux.
- Converted my old VHS family videos, using a laptop more than ten years old.
- Created a mesh network in the subarctic.
- Created an ocean environmental sensor buoy with radio data transmitter.
- Discovered new planets.
- Fixed a jabber server in Denver, USA, while in a hotel lobby in Amman, Jordan.
- Got Linus' autograph on a Red Hat 5.0 CD.
- Hacked my coffee machine to send me a text message when the coffee is ready.
- Introduced my daughter to Lego Mindstorm EV3.
- Monitor the temp and humidity of my wine cellar and open the doors when too hot or humid.
- Replaced the controller in my hot tub with a Raspberry Pi.
- Scripted opening and closing of a co-worker's CD tray every 15 seconds for four days.
- Used an LFS system to move ACH transfers for a national gas company.
- Flushed my toilet from another city.
- Remote chicken door.
- Web-based sprinkler controller for 16 stations on a Raspberry PI (also control the pool and yard lights).
- Chaining SSH tunnels together to get from work to home via three hops due to restrictive network settings.
- Built a system that monitors a renewable energy installation with two fixed solar arrays, a two axis sun tracking solar array and a wind turbine. Production and weather data are displayed on a Web site in real time.
- Back in the days of modems, I had my computer call up my girlfriend every morning, so she would wake up and go to work.
- Used a Wii controller, through Bluetooth with my Linux computer as an Infrared Camera, to detect the movement of my daughter's Fisher Price Sit and Spin Pony, and to control a video game.
--------------------------------------------------------------------------------
via: http://www.linuxjournal.com/rc2014
作者:[Shawn Powers][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://www.linuxjournal.com/users/shawn-powers
[1]:http://www.linuxjournal.com/contact
[2]:http://www.linuxjournal.com/rc2014/coolest

View File

@ -1,38 +0,0 @@
Linus Torvalds Thanks Microsoft for a Great Black Friday Monitor Deal
================================================================================
![Linus Torvalds](http://i1-news.softpedia-static.com/images/news2/Linus-Torvalds-Thanks-Microsoft-for-a-Great-Black-Friday-Monitor-Deal-466599-2.jpg)
> The creator of the Linux kernel now has a UHD display
**Linus Torvalds is the creator of the Linux Kernel, he advocated for years against Microsoft's practices and he often talked about Windows. These are just some of the reasons why it's funny to see him thank Microsoft, even if it's probably done sarcastically.**
The rhetoric regarding the Linux vs. Windows subject has subsided a great deal in the last few years. There have been some issues with UEFI and other similar problems, but for the most part things have quieted down.
There is no one left at the Redmond campus to call Linux a cancer and no one is making fun of Windows for crashing all the time. In fact, there has been some sort of reconciliation between the two sides, which seems to benefit everyone.
It's not like Microsoft is ready to adopt the Linux kernel for their operating system, but the new management of the company talks about Linux as a friend, especially in the cloud.
They can no longer ignore it, even if they want to. The same happened with Linus Torvalds who hasn't said anything bad about Microsoft and Windows for a long time, and that is a good thing.
### Linus Torvalds saying "thanks" to Microsoft is not something you see every day ###
The creator of the Linux kernel talked about a great Black Friday deal he got from the Microsoft store, for a UHD monitor. He shared this piece of info on Google+ and some of the users also found it amusing to read that he's giving sincere thanks to Microsoft for their great deal.
"Whee. Just installed a new monitor. 3840x2160 resolution - it's the Dell 28" UHD panel - for $299 (€241) thanks to Microsoft's black Friday deal. Thanks MS! Ok, I have to admit that it's not actually a great panel: very clear color shifts off-center, 30Hz refresh etc. But still - I'm a nut for resolution, and at $299 (€241) I decided that this will carry me over until better panels start showing up at good prices," wrote Linus on [Google+][1].
In the meantime, he is also working on the latest kernel branch, 3.18, which will probably be released sometime at the end of this week. It's not clear how things will evolve after that, especially given the fact that the holidays are approaching fast, and devs might be a little sluggish when it comes to pushing patches and new features for the next 3.19 branch.
--------------------------------------------------------------------------------
via: http://news.softpedia.com/news/Linus-Torvalds-Thanks-Microsoft-for-a-Great-Black-Friday-Monitor-Deal-466599.shtml
作者:[Silviu Stahie][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://news.softpedia.com/editors/browse/silviu-stahie
[1]:https://plus.google.com/+LinusTorvalds/posts/4MwQKZhGkEr

View File

@ -1,42 +0,0 @@
Apparently This Trojan Virus May Have Infected Linux Systems For Years
================================================================================
![](http://itsfoss.itsfoss.netdna-cdn.com/wp-content/uploads/2014/12/trojan-word-cloud.jpg)
One of the first few argument in [why should you switch to Linux][1] is that Linux is secure and virus free. It is widely perceived by most of the Linux users that Linux is immune to viruses, which is true to an extent but not entirely.
Like any other OS, Linux too is not immune to malware, trojan, rootkit, virus etc. There have been several [famous Linux viruses][2]. But if you compare those to that of Windows, the number is infinitesimal. So, why am I talking about Linux viruses today then? Because a new trojan has been detected in market which might be impacting Linux systems.
### Turla infects Linux systems as well ###
Few months back a sophisticated cyber espionage program, nicknamed [Turla][3], was detected. It was supposed to be originated in Russia, allegedly with Russian government backing. The spyware program was targeting government organizations in Europe and the United States for four years.
In a recent report, researchers at [Kaspersky][4] has found that Turla was not only affecting Windows system but also Linux operating system. Kaspersky researchers have termed it the missing piece of Turla puzzle. As per the report:
> “This newly found Turla component supports Linux for broader system support at victim sites. The attack tool takes us further into the set alongside the Snake rootkit and components first associated with this actor a couple years ago. We suspect that this component was running for years at a victim site, but do not have concrete data to support that statement just yet.”
### What is this Linux module of Turla and how dangerous it is? ###
Going by the Kaspersky report,
> The Linux Turla module is a C/C++ executable statically linked against multiple libraries, greatly increasing its file size. It was stripped of symbol information, more likely intended to increase analysis effort than to decrease file size. Its functionality includes hidden network communications, arbitrary remote command execution, and remote management. Much of its code is based on public sources.
Report also mentions that this trojan doesnt require elevated privileges (read root) while running arbitrary remote commands and it cannot be discovered by commonly used administrative tools. Personally, I doubt their claims.
So, as a Linux desktop user, should you be scared? In my opinion, it is too early to go in to panic mode as we experienced with [ShellShock Linux bug][5]. Turla was originally intended for government organization, not common users. Lets wait and watch for more concrete news. Ill keep on updating this article. Till then enjoy Linux.
--------------------------------------------------------------------------------
via: http://itsfoss.com/apparently-trojan-virus-infected-linux-systems-years/
作者:[Abhishek][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://itsfoss.com/author/Abhishek/
[1]:http://itsfoss.com/reasons-switch-linux-windows-xp/
[2]:http://www.unixmen.com/meet-linux-viruses/
[3]:http://www.reuters.com/article/2014/03/07/us-russia-cyberespionage-insight-idUSBREA260YI20140307
[4]:https://securelist.com/blog/research/67962/the-penquin-turla-2/
[5]:http://itsfoss.com/linux-shellshock-check-fix/

View File

@ -32,4 +32,4 @@ via: http://www.computerworld.com/article/2857129/turla-espionage-operation-infe
[a]:http://www.computerworld.com/author/Lucian-Constantin/
[1]:http://news.techworld.com/security/3505688/invisible-russian-cyberweapon-stalked-us-and-ukraine-since-2005-new-research-reveals/
[2]:https://securelist.com/blog/research/67962/the-penquin-turla-2/
[2]:https://securelist.com/blog/research/67962/the-penquin-turla-2/

View File

@ -0,0 +1,25 @@
Git 2.2.1 Released To Fix Critical Security Issue
================================================================================
![](http://www.phoronix.com/assets/categories/freesoftware.jpg)
Git 2.2.1 was released this afternoon to fix a critical security vulnerability in Git clients. Fortunately, the vulnerability doesn't plague Unix/Linux users but rather OS X and Windows.
Today's Git vulnerability affects those using the Git client on case-insensitive file-systems. On case-insensitive platforms like Windows and OS X, committing to .Git/config could overwrite the user's .git/config and could lead to arbitrary code execution. Fortunately with most Phoronix readers out there running Linux, this isn't an issue thanks to case-sensitive file-systems.
Besides the attack vector from case insensitive file-systems, Windows and OS X's HFS+ would map some strings back to .git too if certain characters are present, which could lead to overwriting the Git config file. Git 2.2.1 addresses these issues.
More details via the [Git 2.2.1 release announcement][1] and [GitHub has additional details][2].
--------------------------------------------------------------------------------
via: http://www.phoronix.com/scan.php?page=news_item&px=MTg2ODA
作者:[Michael Larabel][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://www.michaellarabel.com/
[1]:http://article.gmane.org/gmane.linux.kernel/1853266
[2]:https://github.com/blog/1938-git-client-vulnerability-announced

View File

@ -0,0 +1,43 @@
Google Cloud offers streamlined Ubuntu for Docker use
================================================================================
> Ubuntu Core provides a minimal Lightweight Linux environment for running containers
Google has adopted for use in its cloud a streamlined version of the Canonical Ubuntu Linux distribution tweaked to run Docker and other containers.
Ubuntu Core was designed to provide only the essential components for running Linux workloads in the cloud. An [early preview edition][1] of it, which Canonical calls "Snappy," was released last week. The new edition jettisoned many of the libraries and programs usually found in general use Linux distributions that were unnecessary for cloud use.
[ [Get started with Docker][2] using this step-by-step guide to the red-hot open source framework. | Get the latest insight on the tech news that matters from [InfoWorld's Tech Watch blog][3]. ]
The Google Compute Engine (GCE) [joins Microsoft Azure][4] in supporting the fresh distribution.
According to Canonical, Ubuntu Core should provide users with an easy way to deploy Docker, an [increasingly lightweight virtualization container][5] that allows users to quickly spin up workloads and easily move them around, even across different cloud providers.
Google has been an ardent supporter of Docker and container-based virtualization itself. In June, the company [released as open source its software for managing containers][6], called Kubernetes.
The design of Ubuntu Core is similar to another Linux distribution, CoreOS, [first released a year ago][7].
Developed in part by two ex-Rackspace engineers, [CoreOS][8] is a lightweight Linux distribution designed to work in clustered, highly scalable environments favored by companies that do much or all of their business on the Web.
CoreOS was quickly adopted by many cloud providers, including Microsoft Azure, Amazon Web Services, DigitalOcean and Google Compute Engine.
Like CoreOS, Ubuntu Core offers an expedited process for updating components, reducing the amount of time that an administrator would need to manually manage them.
--------------------------------------------------------------------------------
via: http://www.infoworld.com/article/2860401/cloud-computing/google-cloud-offers-streamlined-ubuntu-for-docker-use.html
作者:[Joab Jackson][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://www.infoworld.com/author/Joab-Jackson/
[1]:http://www.ubuntu.com/cloud/tools/snappy
[2]:http://www.infoworld.com/article/2607941/linux/how-to--get-started-with-docker.html
[3]:http://www.infoworld.com/blog/infoworld-tech-watch/
[4]:http://www.ubuntu.com/cloud/tools/snappy
[5]:http://www.itworld.com/article/2695383/open-source-tools/docker-all-geared-up-for-the-enterprise.html
[6]:http://www.itworld.com/article/2695501/cloud-computing/google-unleashes-docker-management-tools.html
[7]:http://www.itworld.com/article/2696116/open-source-tools/coreos-linux-does-away-with-the-upgrade-cycle.html
[8]:https://coreos.com/using-coreos/

View File

@ -0,0 +1,28 @@
New 64-bit Linux Kernel Vulnerabilities Disclosed This Week
================================================================================
![](http://www.phoronix.com/assets/categories/linuxkernel.jpg)
For those that didn't hear the news yet, multiple Linux x86_64 vulnerabilities were made public this week.
With CVE-2014-9322 that's now public, there's a local privilege escalation issue affecting all kernel versions prior to Linux 3.17.5. CVE-2014-9322 is described as "privilege escalation due to incorrect handling of a #SS fault caused
by an IRET instruction. In particular, if IRET executes on a writeable kernel stack (this was always the case before 3.16 and is sometimes the case on 3.16 and newer), the assembly function general_protection will execute with the user's gsbase and the kernel's gsbase swapped. This is likely to be easy to exploit for privilege escalation, except on systems with SMAP or UDEREF. On those systems, assuming that the mitigation works correctly, the impact of this bug may be limited to massive memory corruption and an eventual crash or reboot."
Fortunately, it's fixed [in Linux kernel Git since late November][1]. CVE-2014-9322 is linked to CVE-2014-9090, which is also corrected by the fixes in Git.
There's also two x86_64 kernel bugs related to espfix. "The next two bugs are related to espfix. The IRET instruction has IMO a blatant design flaw: IRET to a 16-bit user stack segment will leak bits 31:16 of the kernel stack pointer. This flaw exists on 32-bit and 64-bit systems. 32-bit Linux kernels have mitigated this leak for a long time, and 64-bit Linux kernels have mitigated this leak since 3.16. The mitigation is called espfix."
Fixes for CVE-2014-8133 and CVE-2014-8134 are in KVM and Linux kernel Git as of a few days ago. More details on these x86_64 vulnerabilities via [this oss-sec posting][2]. These issues were uncovered by Andy Lutomirski at AMA Capital Management.
--------------------------------------------------------------------------------
via: http://www.phoronix.com/scan.php?page=news_item&px=MTg2NzY
作者:[Michael Larabel][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://www.michaellarabel.com/
[1]:https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/arch/x86/kernel/entry_64.S?id=6f442be2fb22be02cafa606f1769fa1e6f894441
[2]:http://seclists.org/oss-sec/2014/q4/1052

View File

@ -1,79 +0,0 @@
How To Use Steam Music Player on Ubuntu Desktop
================================================================================
![](http://www.omgubuntu.co.uk/wp-content/uploads/2014/09/steam-music.jpg)
**"Music makes the people come together," Madonna once sang. But can Steam's new music player feature mix the bourgeoisie and the rebel as well?**
If you've been living under a rock, ears pressed tight to a granite roof, word of Steam Music may have passed you by. The feature isn't entirely new. It's been in testing in some form or another since earlier this year.
But in the latest stable update of the Steam client on Windows, Mac and Linux it is now available to all. Why does a gaming client need to add a music player, you ask? To let you play your favourite music while gaming, of course.
Don't worry: playing your music over in-game music is not as bad as it sounds (har har) on paper. Steam reduces/cancels out the game soundtrack in favour of your tunes, but keeps sound effects high in the mix so you can hear the plings, boops and blams all the same.
### Using Steam Music Player ###
![Music in Big Picture Mode](http://www.omgubuntu.co.uk/wp-content/uploads/2014/10/steam-music-bpm.jpg)
Music in Big Picture Mode
Steam Music Player is available to anyone running the latest version of the client. It's a pretty simple addition: it lets you add, browse and play music from your computer.
The player element itself is accessible on the desktop and when playing in Steam's (awesome) Big Picture mode. In both instances, controlling playback is made dead simple.
As the feature is **designed for playing music while gaming** it is not pitching itself as a rival for Rhythmbox or successor to Spotify. In fact, there's no store to purchase music from and no integration with online services like Rdio, Grooveshark, etc. or the desktop. Nope, your keyboard media keys won't work with the player in Linux.
Valve say they “*…plan to add more features so you can experience Steam music in new ways. Were just getting started.*”
#### Steam Music Key Features: ####
- Plays MP3s only
- Mixes with in-game soundtrack
- Music controls available in game
- Player can run on the desktop or in Big Picture mode
- Playlist/queue based playback
**It does not integrate with the Ubuntu Sound Menu and does not currently support keyboard media keys.**
### Using Steam Music on Ubuntu ###
The first thing to do before you can play music is to add some. On Ubuntu, by default, Steam automatically adds two folders: the standard Music directory in Home, and its own Steam Music folder, where any downloadable soundtracks are stored.
Note: at present **Steam Music only plays MP3s**. If the bulk of your music is in a different file format (e.g., .aac, .m4a, etc.) it won't be added and cannot be played.
To add an additional source or scan files in those already listed:
- Head to **View > Settings > Music**.
- Click **Add** to add a folder in a different location to the two listed entries
- Hit **Start Scanning**
![](http://www.omgubuntu.co.uk/wp-content/uploads/2014/09/Tardis.jpg)
This dialog is also where you can adjust other preferences, including a scan at start. If you routinely add new music and are prone to forgetting to manually initiate a scan, tick this one on. You can also choose whether to see notifications on track change, set the default volume levels, and adjust playback behaviour when opening an app or taking a voice chat.
Once your music sources have been successfully added and scanned you are all set to browse through your entries from the **Library > Music** section of the main client.
![](http://www.omgubuntu.co.uk/wp-content/uploads/2014/09/browser.jpg)
The Steam Music section groups music by album title by default. To browse by band name you need to click the Albums header and then select Artists from the drop down menu.
![](http://www.omgubuntu.co.uk/wp-content/uploads/2014/10/steam-selection.jpg)
Steam Music works off of a queue system. You can add music to the queue by double-clicking on a track in the browser or by right-clicking and selecting Add to Queue.
![](http://www.omgubuntu.co.uk/wp-content/uploads/2014/10/steam-music-queue.jpg)
To **launch the desktop player** click the musical note emblem in the upper-right hand corner or through the **View > Music Player** menu.
![](http://www.omgubuntu.co.uk/wp-content/uploads/2014/10/steam-music.jpg)
--------------------------------------------------------------------------------
via: http://www.omgubuntu.co.uk/2014/10/use-steam-music-player-linux
作者:[Joey-Elijah Sneddon][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:https://plus.google.com/117485690627814051450/?rel=author

View File

@ -1,5 +1,3 @@
barney-ro translating
5 Awesome Open Source Backup Software For Linux and Unix-like Systems
================================================================================
A good backup plan is essential in order to have the ability to recover from

View File

@ -1,4 +1,3 @@
mdjsjdqe translating...
11 Useful Utilities To Supercharge Your Ubuntu Experience
================================================================================
**Whether youre a relative novice or a seasoned pro, we all want to get the most from our operating system. Ubuntu, like most modern OSes, has more to offer than what is presented at first blush.**

View File

@ -1,104 +0,0 @@
[Translating by Stevearzh]
NetHack
================================================================================
## The best game of all time? ##
**Its tremendously addictive. It takes a lifetime to master. And people play it for decades without completing it. Welcome to the strange world of NetHack…**
Believe it or not, its possible to be terrified by the sight of the letter D. Or ecstatic about the sight of a % character. (And the less said about ^, the better.) But before you assume weve gone totally loopy and close the tab, bear with us for a moment: those characters represent dragons, food rations and traps respectively. Welcome to NetHack, where your imagination needs to play a big role in the gameplay.
You see, NetHack is a text-mode game: it just uses the standard terminal character set to portray the player, enemies, items and surroundings. Graphical versions of the game exist, but NetHack purists tend to avoid them, and whats the point of a game if you cant play it when youre SSHed into your revived Amiga 3000 running NetBSD? In some ways, NetHack is a lot like Vi it has been ported to nigh-on every operating system in existence, and its requirements are absolutely minimal.
Now, given that it looks like utter pants when compared to modern games, what makes NetHack so appealing? Well, this dungeon exploring masterpiece is incredibly rich and detailed. There are so many items to discover, spells to cast, monsters to fight and tricks to learn and the dungeons are generated randomly. Theres so much to explore, and no two games are ever the same. People play NetHack for years and decades without completing it, still discovering new secrets each time.
Here well show you how NetHack came about, give you a guided tour of the dungeons, and show you some tricks. Note: by reading this feature, you agree to not sue us when you become addicted to NetHack and your real-life productivity is obliterated.
![The NetHack interface](http://www.linuxvoice.com/wp-content/uploads/2014/12/nh_annotated.png)
The NetHack interface
### Possibly the oldest still-developed game ###
Despite its name, NetHack isnt an online game. Its based on an earlier dungeon-exploring romp called Hack, which in turn was a descendant of an 1980 game called Rogue. NetHacks first release arrived in 1987, and although no new features have been added since version 3.4.3 in 2003, various patches, add-ons and spin-offs are still doing the rounds on the web. This makes it arguably the oldest game thats still being hacked on and played by a sizeable group of people. Go to [www.reddit.com/r/nethack][1] to see what we mean long-time NetHack players are still discussing new strategies, discoveries and tricks. Occasionally youll see gleeful messages from old timers who have finally, after many years, completed the game.
But how do you complete it? Well, NetHack is set in a large and deep dungeon. You start at the top level 1 and your goal is to keep going down until you find a hugely valuable item called the Amulet of Yendor. This is typically in level 20 or lower, but it can vary. As you traverse through and down the dungeon, youll meet all manner of monsters, traps and human characters; some will try to kill you, some will stay out of your way, and some…. well, you dont know until you get close to them.
> Theres so much to learn, and many items only work best when combined with others.
What makes NetHack so compelling is the vast range of items crammed into the game. Weapons, armour, spell books, rings, gems theres so much to learn, and many items only work best when combined with others. Monsters often drop useful items when you kill them, although some items can have very negative effects if you dont use them correctly. Youll find shops in the dungeon that are packed with potentially useful bits of kit, but dont expect the shopkeeper to give you great descriptions. Youve got to learn from experience. Some items arent much use at all, and the game is packed with humour you can even throw a cream pie in your own face.
But before you even set foot in the dungeon, NetHack asks you what kind of player you want to be. You can take your journey as a knight, a monk, a wizard or even a humble tourist, amongst many other player types. They all have their own strengths and weaknesses, and NetHack addicts love to try completing the game with the weaker types. You know, to show off to other players.
> ## Spoilers dont spoil the fun ##
> In NetHack parlance, “spoilers” provide information on monsters, items, weapons and armour. Its technically possible to complete the game without using them, but very few players ever achieve this, as the game is monumentally complex. Consequently its not regarded as bad form to use spoilers but its still more fun to try to work things out yourself first, and only consult the spoilers when you really need them.
> A great source is [www.statslab.cam.ac.uk/~eva/nethack/spoilerlist.html][2] which separates spoilers into categories. For things that happen randomly in the game, such as the effects from drinking from fountains, it gives you the odds of a certain thing happening.
### Your first dungeon crawl ###
NetHack is available for almost every major OS and Linux distribution in the world, so you should be able to grab it with “apt-get install nethack” or “yum install nethack” or whatever is appropriate for your distro. Then run it in a terminal window by just typing “nethack”. The game will ask if it should pick a player type for you but as a newcomer, its best if you choose one of the tougher characters first. So hit “n” and then hit “v” to choose the Valkyrie type, and “d” to be a dwarf.
Then NetHack will give you some plot blurb, explaining that your god seeks the Amulet of Yendor, so your goal is to retrieve it and present it to him. Hit space when youre done reading the text (and any other time you see “More” on the screen). And here we go youre in the dungeon!
As described earlier, your character is represented by a @ sign. You can see the walls of a room around you, and the dot characters depict empty space in the room. First of all, get used to the movement keys: h, j, k and l. (Yes, its just like Vim, as covered in issue 3 of Linux Voice!) These move you left, down, up and right respectively. You can also move diagonally with y, u, b and n. So walk around the room until you get used to the controls.
NetHack is turn-based, so if youre not moving or performing an action, the game stays still. This lets you plan your moves in advance. You will see a “d” or “f” character moving around the room as well: this is your pet dog or cat, which (normally) wont harm you and can assist you in killing monsters. Pets can be annoying though they occasionally eat food rations and tasty corpses before you get to them.
![Hit “i” to bring up an inventory of your currently carried items](http://www.linuxvoice.com/wp-content/uploads/2014/12/nh_inventory.png)
Hit “i” to bring up an inventory of your currently carried items
### Whats behind the door? ###
Now, lets go out of the room. There will be gaps around the edge, and possibly “+” signs. That “+” is a closed door, so go up to it and hit “o” to open. You will be asked for a direction, so if the door is to the left of you, press “h”. (And if the door is stuck, try opening it a few times.) Youll then end up in a corridor, marked by “#” symbols, so walk around it until you find another room.
On your travels youll see various items. Some, such as money (denoted by a “$” symbol) are picked up automatically; for other items, you have to press the comma key whilst standing on them. If there are multiple items, youll be given a menu, so press the appropriate keys shown in the menu and then Enter to choose what you want. At any time you can hit “i” to bring up your inventory list see the screenshot.
What happens if you see a monster? At these early stages of the game, the monsters youre likely to come across will be represented by “d”, “x” and “:” characters. To attack, simply walk into them. The game will tell you if your attacks are successful using the messages along the top and also how the monster is responding. These early monsters are simple to kill, so you shouldnt have any trouble defeating them, but keep an eye on your HP in the status line at the bottom.
> Early monsters are simple to kill, but keep an eye on your HP.
If a monster leaves behind a corpse (“%”), you can hit comma to take it and then press “e” to eat it. (Whenever youre prompted to choose an item, you can press its corresponding key from the inventory list, or “?” to bring up a mini list.) Warning! Some corpses are poisonous, and these are things youll learn on your travels.
If youre exploring a corridor and appear to come to a dead end, you can hit “s” to search until you find a door. This can take ages, however, so you can speed things up a bit: type “10” and then “s” and you will perform 10 searches in a row. This takes up 10 moves in game time, however, so if youre hungry you could get close to starvation!
Common items youll find in the top levels of the dungeon are “{” (fountains) and “!” (potions). For the former, you can stand on it and hit q to “quaff” from it the effects can vary from useful to deadly. For potions, pick them up and then use “q” to drink them. If you find a shop, you can pick up items and then hit “p” to pay before leaving. Use “d” to drop something.
![Souped-up versions of NetHack with fancy graphics are available, such as Falcons Eye](http://www.linuxvoice.com/wp-content/uploads/2014/12/falcon.jpg)
Souped-up versions of NetHack with fancy graphics are available, such as Falcons Eye
> ## Stupid ways to die ##
> A popular acronym amongst NetHack players is “YASD” Yet Another Stupid Death. It describes a situation where the player buys the farm due to his/her own silliness or lack of concentration. Weve had many of these, but our favourite goes as follows:
> We were browsing a shop, inspecting items, when a snake suddenly jumped out from behind a potion. After killing the snake, a message popped up saying that we were getting hungry, so we opted to eat the snakes corpse. Bad idea! This made us blind, so we couldnt see other characters or items in the shop. We tried to get to the exit, but instead bumped into the shopkeeper and accidentally attacked him. This made him furious; he started firing magic missiles at us. We just about managed to get into the corridor outside the shop, but died from the onslaught.
> If you come to any equally silly ends, let us know on our forums. And dont worry nobody will judge you. Dying like this is all part of growing up in the NetHack world.
### Equip yourself ###
On your travels, and especially after you kill monsters, youll find weapons and armour. Again, use comma to pick these up, and then “w” (lowercase) to wield a weapon or “W” (uppercase) to wear a piece of armour. You can use “T” to remove armour and “t” to throw weapons often handy if youre in a very sticky situation.
Sometimes its useful to examine things from a distance before getting close to them. Hit “;” (semicolon) and “Pick an object” will appear at the top of the screen. Use the movement keys until your view lands on the thing you want to inspect, and then hit “:” (colon). A description will appear at the top.
As your goal is to go further down the dungeon until you find the Amulet of Yendor, keep an eye out for “<” and “>” signs. These are stairs up and down respectively, and you can use the same keys to climb them. Note! Make sure your pet is standing in an adjacent square if you want it to follow you into the next level. If you need a break, use “S” (capital s) to save, and type #quit to exit. Next time you run NetHack, your game will be resumed.
We wont spoil whats ahead, as many of the dungeon levels have amazing designs, characters and secrets. So well leave you with three tips: if you come across an item that completely baffles you, try searching for it on the NetHack wiki at [http://nethack.wikia.com][3]. Youll also find an excellent (albeit very long) guidebook at [www.nethack.org/v343/Guidebook.html][4]. Happy exploring!
--------------------------------------------------------------------------------
via: http://www.linuxvoice.com/nethack/
作者:[Mike Saunders][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://www.linuxvoice.com/author/mike/
[1]:http://www.reddit.com/r/nethack
[2]:http://www.statslab.cam.ac.uk/~eva/nethack/spoilerlist.html
[3]:http://nethack.wikia.com/
[4]:http://www.nethack.org/v343/Guidebook.html

View File

@ -0,0 +1,48 @@
[Translating by Stevarzh]
How to Download Music from Grooveshark with a Linux OS
================================================================================
> The solution is actually much simpler than you think
![](http://i1-news.softpedia-static.com/images/news2/How-to-Download-Music-from-Grooveshark-with-a-Linux-OS-468268-2.jpg)
**Grooveshark is a great online platform for people who want to listen to music, and there are a number of ways to download music from there. Groovesquid is just one of the applications that let users get music from Grooveshark, and it's multiplatform.**
If there is a service that streams something online, then there is a way to download the stuff that you are just watching or listening. As it turns out, it's not that difficult and there are a ton of solutions, no matter the platform. For example, there are dozens of YouTube downloaders and it stands to reason that it's not all that difficult to get stuff from Grooveshark either.
Now, there is the problem of legality. Like many other applications out there, Groovesquid is not actually illegal. It's the user's fault if they do something illegal with an application. The same reasoning can be applied to apps like utorrent or Bittorrent. As long as you don't touch copyrighted material, there are no problems in using Groovesquid.
### Groovesquid is fast and efficient ###
The only problem that you could find with Groovesquid is the fact that it's based on Java and that's never a good sign. This is a good way to ensure that an application runs on all the platforms, but it's an issue when it comes to the interface. It's not great, but it doesn't really matter all that much for users, especially since the app is doing a great job.
There is one caveat though. Groovesquid is a free application, but in order to remain free, it has to display an ad on the right side of the menu. This shouldn't be a problem for most people, but it's a good idea to mention that right from the start.
From a usability point of view, the application is pretty straightforward. Users can download a single song by entering the link in the top field, but the purpose of that field can be changed by accessing the small drop-down menu to its left. From there, it's possible to change to Song, Popular, Albums, Playlist, and Artist. Some of the options provide access to things like the most popular song on Grooveshark and other options allow you to download an entire playlist, for example.
You can download Groovesquid 0.7.0
- [jar][1] File size: 3.8 MB
- [tar.gz][2] File size: 549 KB
You will get a Jar file and all you have to do is to make it executable and let Java do the rest.
![](http://i1-news.softpedia-static.com/images/news2/How-to-Download-Music-from-Grooveshark-with-a-Linux-OS-468268-3.jpg)
![](http://i1-news.softpedia-static.com/images/news2/How-to-Download-Music-from-Grooveshark-with-a-Linux-OS-468268-4.jpg)
![](http://i1-news.softpedia-static.com/images/news2/How-to-Download-Music-from-Grooveshark-with-a-Linux-OS-468268-5.jpg)
![](http://i1-news.softpedia-static.com/images/news2/How-to-Download-Music-from-Grooveshark-with-a-Linux-OS-468268-6.jpg)
--------------------------------------------------------------------------------
via: http://news.softpedia.com/news/How-to-Download-Music-from-Grooveshark-with-a-Linux-OS-468268.shtml
作者:[Silviu Stahie][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://news.softpedia.com/editors/browse/silviu-stahie
[1]:https://github.com/groovesquid/groovesquid/releases/download/v0.7.0/Groovesquid.jar
[2]:https://github.com/groovesquid/groovesquid/archive/v0.7.0.tar.gz

View File

@ -1,5 +1,3 @@
[felixonmars translating...]
Upstream and Downstream: why packaging takes time
================================================================================
Here in the KDE office in Barcelona some people spend their time on purely upstream KDE projects and some of us are primarily interested in making distros work which mean our users can get all the stuff we make. I've been asked why we don't just automate the packaging and go and do more productive things. One view of making on a distro like Kubuntu is that its just a way to package up the hard work done by others to take all the credit. I don't deny that, but there's quite a lot to the packaging of all that hard work, for a start there's a lot of it these days.

View File

@ -1,103 +0,0 @@
Why Is Huffington Post Running A Multi-Part Series To Promote The Lies Of A Guy Who Pretended To Invent Email?
================================================================================
**from the that's-just-wrong dept**
I thought this story had ended a few years ago. Back in 2012, we wrote about how The Washington Post and some other big name media outlets were claiming that a guy named V.A. Shiva Ayyadurai had "invented email" in 1978. The problem was that [it wasn't even close to true][1] and relied on a number of total misconceptions about email, software and copyright law. Ayyadurai and some of his friends have continued to play up the claim that he "invented" email, but it simply was never true, and it's reaching a level that seems truly bizarre. Ayyadurai may have done some interesting things, but his continued false insistence that he invented email is reaching really questionable levels. And, now it's gone absolutely nutty, with the Huffington Post running [a multi-part series][2] (up to five separate articles so far -- all done in the past 10 days) all playing up misleading claims saying that Ayyadurai invented email, even though even a basic understanding of the history shows he did not.
Let's take care of the basics first, and then we'll dig in on what's going on here, because it's really quite ridiculous. First off, no one denies that V.A. Shiva Ayyadurai -- an apparently very bright 14-year-old at the time -- wrote an email software program for the University of Medicine and Dentistry of New Jersey (UMDNJ) in 1978. By all accounts, it was a perfectly decent email system that allowed the UMDNJ staff to send electronic messages. Further, no one doubts that, in 1981, Ayyadurai registered the copyright on his program, which was called EMAIL. The problems are that (1) email was invented long before 1978, (2) the copyright is merely on the specific software code, not the idea of email, and (3) while Ayyadurai may have independently recreated the basics of email (and even added a nice feature), none of his work was even remotely related to what later became the standards of email. What's most sickening about this is that as part of this new PR campaign, Ayyadurai is ridiculously arguing that the reason no one believes him isn't because he's simply wrong, but because they can't stand to believe that "a dark-skinned immigrant kid, 14 years old," invented email, and that it was done in "one of the poorest cities in the US" rather than at a famous university.
Again, that might make for a nice story line if there were some factual basis behind it, but there isn't. The history of email [is well-documented][3] from [multiple sources][4] and it began way, way before 1978. And while early versions were somewhat crude, by 1978 they had basically everything that Ayyadurai claims to have invented (it is entirely believable that Ayyadurai, as a bright kid, independently came up with the same ideas, but he was hardly the first). There was a messaging system called MAILBOX at MIT in 1965. You can read [all the details of it here][5], including source code. Ray Tomlinson is frequently credited with inventing the modern concept of email for the internet by establishing the @ symbol (in 1972) as a way of determining both the user and which computer to send the email to. By 1975, there were things like email folders (invented by Larry Roberts) and some other basic email apps. As is noted, by 1976 -- two years before Ayyadurai wrote his app -- email was *75% of all ARPANET traffic*.
So, let's get to the Huffington Post trying to whitewash all of this factual history out of existence.
It started on August 20th, with an article by Larry Weber, CEO of Racepoint Global, kicking off a supposed "series" called "The History of Email." Except that the series has little to do with the history of email at all. It's just about Ayyadurai writing his particular email program in 1978. Great story. Smart kid done good. Has nothing to do with the invention of email. Weber, though, calls it [The Boy Who Invented Email][6]. At this point, it should be worth questioning why Weber suddenly decided this was such an interesting story. If you don't know, Weber is one of PR's [biggest names][7], having built one of the most successful PR companies in history. It seems odd that he "just happened" to come across Ayyadurai's fake story and decided to help create a 5-part series about it. I have reached out to both Weber and the Huffington Post to ask if Weber has any financial relationship with Ayyadurai. As I publish this, neither has responded. The post will be updated if I hear from either. None of the posts in the series disclose any such relationship. Nor does the Huffington Post indicate that this is a "sponsored" post as far as I can tell.
The [second][8] and [third][9] articles in the series are both written by Leslie Michelson, the Director of High Performance and Research Computing at Rutgers Medical School (which took over UMDNJ a while back). More importantly, in 1978 he was the Director of the Laboratory Computer Network at UMDNJ, and apparently "challenged" Ayyadurai to create an electronic interoffice mail system. The [fourth article][10] in the series is by Robert Field, a technologist at Rutgers Medical School and, in 1978, a colleague of Ayyadurai at UMDNJ. See a pattern? Huffington Post also [interviewed Ayyadurai][11] for HuffPost Live in which he mostly attacks anyone who challenges his story, comparing himself to Philo T. Farnsworth -- except in that case, Farnsworth actually invented TV before anyone else. Ayyadurai did not do that with email. Apparently there are two more in this series that are still to come.
When you look at the collection of articles, they all repeat the same basic things: Ayyadurai did create an email system and "it was recognized by the federal government." This is misleading in the extreme. It's amusing how they all use the exact same language. Larry Weber claims:
> On August 30, 1982, **the US government officially recognized V.A. Shiva Ayyadurai as the inventor of email** by awarding him the first US Copyright for "Email," "Computer Program for Electronic Mail System," for his 1978 invention. This was awarded at a time when Copyright was the only way to protect software inventions.
Leslie Michaelson says:
> On August 30, 1982, V.A. Shiva Ayyadurai **received official recognition as the inventor of email from the U.S. government**, for the work he had done in 1978.
Every article in the series includes this image of his copyright registration:
[![](https://i.imgur.com/AscOfQh.png)][12]
Except, if you know anything about copyright, you know that what they're claiming is not at all true. The registration of copyrights is about as close to a rubber-stamping process as is possible. It has nothing to do with "inventions" at all, but is rather a copyright for the specific software program. Ayyadurai received a copyright on his email program and that's it. It has absolutely nothing to do with him being the inventor of email.
Microsoft holds a copyright on Windows, but no one claims it "invented" the glass things you look outside your building with. Hell, no one even claims that Microsoft invented windowing user interfaces, because it did not. The name of the program and the fact that you can copyright it does not make you the "inventor" of the concept behind it.
Weber, Ayyadurai and his friends try to counter the "it's a copyright, not a patent" claim with an incredibly misleading response. Here's Michelson:
> On August 30, 1982, Shiva was issued the first Copyright for "Email", "Computer Program for Electronic Mail System." At that time, Copyright was the equivalent of a patent, as there was no other way to protect software inventions. Only in 1980 was the Copyright Act of 1976 amended to protect software. Patent law had not even caught up to software in 1980
Copyright was not, and has never been "the equivalent of a patent." Copyright and patents are two very different things. Copyright protects specific expression. Patents protect inventions. That's why copyright protected only the specific code that Ayyadurai wrote, rather than the concept of email. While it's true that software wasn't considered patentable by many at the time, that doesn't, in any way, mean that a copyright on a particular piece of software was the equivalent in any way, to a patent at the time.
To further their argument, both Weber and Michelson include nearly identical, but slightly different, infographics on the history of email, which (of course) start in 1978 with Ayyadurai's work. According to those charts, email was barely even a thing outside of UMDNJ until 1985 when offline email readers come about. The infographic is the work of the impressive sounding International Center for Integrative Systems. What's left out is that the "[Founder and Chairman][13]" of the International Center for Integrative Systems happens to be... a guy named V.A. Shiva Ayyadurai. The same infographic tosses in a "milestone" in email in 1995, when "Echomail" launched. Doesn't sound familiar? Echomail was a company started by... V.A. Shiva Ayyadurai.
The rest of the articles seem to just focus on attacking those who actually were involved in the invention of email and who dared to speak out against Ayyadurai's claims. The story, which includes no actual support, is that the folks at BBN decided in the early 80s that email security was a big business opportunity and rewrote history. Whether or not BBN played up their role in the history of email is debatable, but none of that changes the fact that they (and many others) were using email, and had email software, long before Ayyadurai did anything. At no point do any of them address the long history of email systems long before Ayyadurai arrived on the scene. Instead, they just talk about this grand conspiracy theory, claiming (ridiculously) that if BBN were outed as not being the inventor of email (even though no one really claims the company was the inventor of email) it would harm its business. That makes no sense at all. First of all, BBN's history of work related to the internet is long and well-detailed (there's even a [fantastic book][14] about it). Even if it had nothing to do with email, its other work is much more impressive. Second, the company is currently owned by defense contracting giant Raytheon. Does anyone honestly think Raytheon cares one way or the other who "invented email"?
All of their "debunking" claims rest entirely on a RAND report written by David Crocker in 1977, where they take two sentences totally out of context. Here's what Ayyadurai, Weber and their friends claim Crocker said:
> "At this time, no attempt is being made to emulate a full-scale, inter-organizational mail system. The fact that the system is intended for use in various organizational contexts and by users of differing expertise makes it almost impossible to build a system which responds to all users' needs."
It's telling that Ayyadurai and his friends never actually tell you the name of the report or link to it. Because actually reading what Crocker wrote would undermine their argument. The report is called "Framework and Functions of the 'MS' Personal Message System" and you can read it here. Not only do Ayyadurai and his friends take Crocker entirely out of context, the two sentences above are not even contiguous sentences. They're not even on the same page. The first sentence is on page 18 of the paper. And it just says that this particular implementation (the program called MS) is focused on certain facets, and for MS "no attempt is being made to emulate a full-scale inter-organization mail system" even though the entire point of the paper is how various email implementations are clearly replicating inter-organizational mail systems. The second sentence comes on page 21 (with lots in between) and just focuses on the fact that lots of users have very different requests and desires, and it's impossible to satisfy everyone -- and that it, alone, is beyond the scope of this project. He's not, as Ayyadurai implies, claiming that building an interoffice email system is impossible. He's claiming that creating a full system that satisfies absolutely everyone is impossible. However, he does make it clear that other components are being worked on, and when combined could create a more functional email system. Here's that part, back in context:
> To construct a fully-detailed and monolithic message processing environment requires a much larger effort than has been possible with MS. In addition, the fact that the system is intended for use in various organizational contexts and by users of differing expertise makes it almost impossible to build a system which responds to all users' needs. Consequently, important segments of a full message environment have received little or no attention and decisions have been made with the expectation that other Unix capabilities will be used to augment MS. For example, MS has fairly primitive data-base management (filing and cataloging) facilities and message folders have been implemented in a way which allows them to be modified by programs, such as text editors, which access them directly, rather than through the message system.
From the actual source documents (which, again, Ayyadurai and his friends fail to link to and totally misrepresent), it's clear that all Crocker is saying is that no single system will satisfy everyone's current interests. He's not saying it's impossible to create an interoffice email system. He's just saying that lots of different people have lots of different needs for an interoffice email system, and for the team building MS, it would be too difficult to satisfy everyone's exact requests, so they're focusing on certain features, knowing others will add other components later. And, given that people are still working to improve upon email today, it seems that's still basically true.
Back to the rest of the paper, which actually does a tremendous job undermining basically all of Ayyadurai's claims (again, which suggests why no one names or links to the full paper) -- in the very first paragraph (again, this is prior to Ayyadurai doing anything) it talks about research for "computer software" for "electronic mail." Ooops. It goes on:
> This report describes the design of one such program--the "MS" message system. Early electronic mail systems have existed on the larger computers. MS incorporates and expands upon many of the functions and concepts of such systems within an integrated package...
In other words, the very paper that Ayyadurai and his friends insist prove that there was no email prior to 1978 talks in depth about a variety of email programs. Again, remember that this was written in 1977. This is not historical revisionism. It goes on:
> One of the earliest and most popular applications of the ARPANET computer communications network has been the transfer of text messages between people using different computers. This "electronic mail" capability was originally grafted onto existing informal facilities; however, they proved inadequate. A large network greatly expands the base of potential communicators; when coupled with the communication convenience of a message system, there results a considerable expansion to the list of features desired by users. Systems which have responded to these increased user needs have resided on medium- and large-scaled computers.
In other words, lots of folks are working on email systems. Ayyadurai tries to brush all those aside by saying that his actually included things like "folders." But again, Crocker's paper notes:
> Messages reside in file "folders" and may contain any number of fields, or "components."
It actually has a whole section on folders. It also shows some sample messages at the time, showing "to," "from," "cc," "subject," and "message" fields, showing that the very basics of interoffice mail (such as "cc" -- standing for carbon copy, which was a standard bit of interoffice mail) had already moved into email. Here's a screenshot (which you can click for a larger version):
[![](https://i.imgur.com/KJW7BnAm.png)][15]
Ayyadurai has built up his entire reputation around the (entirely false) claim that he "invented" email. His bio, his Twitter feed and his website all position himself as having invented email. He didn't. It looks like he wrote an implementation of an email system in 1978, long after others were working on similar things. He may have added some nice features, including the "blind carbon copy/bcc" concept (Update: Nope, bcc was in a [1977 RFC][16]). He also appears to have potentially been ahead of others in making a full address book be a part of the email system. He may, in fact, be the first person who shortened "electronic mail" to "email" which is cool enough, and he'd have an interesting claim if that's all he claimed. Unfortunately, he's claiming much, much more than that. He's set up [an entire website][17] in which he accuses lots of folks, including Techdirt, of unfairly "attacking" him. He apparently believes that some of the attacks on him are [because][18] he spoke out against corruption in India. Or because people think only rich white people can invent stuff. None of that is accurate. There's a simple fact, and it's that Ayyadurai did not invent email.
He does not even attempt to counter any of the actual facts. The documents that are presented are misleading or out of context. He misrepresents what a copyright registration means. And his main "smoking gun," in support of his claim that people are trying to unfairly write him out of history, is presented in a misleading way, out of context, with two entirely separate sentences pushed together to pretend they say something they didn't.
He's clearly quite proud of the email software he wrote in 1978, and that's great. He should be. It may have made some incremental improvements on what else was already out there, but it is not inventing email. It's also entirely possible that he was wholly unaware of everything else that was out there. And, again, that's great. We've talked many times in the past about multiple people coming up with the same ideas around the same time. Ayyadurai should be quite proud of what he's done. But he's simply not telling the truth when he claims to have invented email. His website is full of accolades from the past, including his Westinghouse award (which is a prestigious award for high schoolers), his copyrights and his later patents. There are local newspaper clippings. That's all great. It reminds me of the folder my mother has on all the nice things that happened to me as a kid. But none of it means he invented email.
It's unclear why Huffington Post is publishing this ludicrous and disproven narrative. It's unclear why one of the biggest names in PR is involved in all of this, though you can take some guesses. But there are facts, and they include that "electronic mail" existed long before V.A. Shiva Ayyadurai wrote his program as a precocious teenager. Huffington Post is either not disclosing a paid-for series of posts (which would be a massive ethical breach) or they've been taken for a ride. Neither option speaks well of HuffPo and its journalistic integrity.
--------------------------------------------------------------------------------
via: https://www.techdirt.com/articles/20140901/07280928386/huffpo-publishes-bizarre-misleading-factually-incorrect-multi-part-series-pretending-guy-invented-email-even-though-he-didnt.shtml
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[1]:https://www.techdirt.com/articles/20120222/11132917842/how-guy-who-didnt-invent-email-got-memorialized-press-smithsonian-as-inventor-email.shtml
[2]:http://www.huffingtonpost.com/news/the-history-of-email/
[3]:http://www.nethistory.info/History%20of%20the%20Internet/email.html
[4]:http://www.ir.bbn.com/~craig/email.pdf
[5]:http://www.multicians.org/thvv/mail-history.html
[6]:http://www.huffingtonpost.com/larry-weber/the-history-of-email-boy-who-invented-email_b_5690783.html
[7]:http://en.wikipedia.org/wiki/Larry_Weber
[8]:http://www.huffingtonpost.com/leslie-p-michelson/the-history-of-email-invention-of-email_b_5707913.html
[9]:http://www.huffingtonpost.com/leslie-p-michelson/history-of-email-introduction_b_5726018.html
[10]:http://www.huffingtonpost.com/robert-field/history-of-email-first-email-system_b_5722000.html
[11]:http://www.huffingtonpost.com/2014/08/28/email-shiva-ayyadurai_n_5731606.html
[12]:https://imgur.com/AscOfQh
[13]:http://integrativesystems.org/board.asp
[14]:http://www.amazon.com/gp/product/0684832674/ref=as_li_tl?ie=UTF8&camp=1789&creative=390957&creativeASIN=0684832674&linkCode=as2&tag=techdirtcom-20&linkId=OSP5B7BVSLAG5XNX
[15]:https://imgur.com/KJW7BnA
[16]:http://tools.ietf.org/html/rfc733
[17]:http://www.inventorofemail.com/index.asp
[18]:http://gizmodo.com/5888702/corruption-lies-and-death-threats-the-crazy-story-of-the-man-who-pretended-to-invent-email

View File

@ -1,157 +0,0 @@
disylee占个坑
Docker: Present and Future
================================================================================
### Docker - the story so far ###
Docker is a toolset for Linux containers designed to build, ship and run distributed applications. It was first released as an open source project by DotCloud in March 2013. The project quickly became popular, leading to DotCloud rebranding itself as Docker Inc (and ultimately [selling off their original PaaS business][1]). [Docker 1.0][2] was released in June 2014, and the monthly release cadence that led up to the June release has been sustained since.
The 1.0 release marked the point where Docker Inc considered the platform sufficiently mature to be used in production (with the company and partners providing paid-for support options). The monthly release of point updates shows that the project is still evolving quickly, adding new features, and addressing issues as they are found. The project has however successfully decoupled ship from run, so images sourced from any version of Docker can be used with any other version (with both forward and backward compatibility), something that provides a stable foundation for Docker use despite rapid change.
The growth of Docker into one of the most popular open source projects could be perceived as hype, but there is a great deal of substance. Docker has attracted support from many brand names across the industry, including Amazon, Canonical, CenturyLink, Google, IBM, Microsoft, New Relic, Pivotal, Red Hat and VMware. This is making it almost ubiquitously available wherever Linux can be found. In addition to the big names many startups are growing up around Docker, or changing direction to be better aligned with Docker. Those partnerships (large and small) are helping to drive rapid evolution of the core project and its surrounding ecosystem.
### A brief technical overview of Docker ###
Docker makes use of Linux kernel facilities such as [cGroups][3], namespaces and [SElinux][4] to provide isolation between containers. At first Docker was a front end for the [LXC][5] container management subsystem, but release 0.9 introduced [libcontainer][6], which is a native Go language library that provides the interface between user space and the kernel.
Containers sit on top of a union file system, such as [AUFS][7], which allows for the sharing of components such as operating system images and installed libraries across multiple containers. The layering approach in the filesystem is also exploited by the [Dockerfile][8] DevOps tool, which is able to cache operations that have already completed successfully. This can greatly speed up test cycles by taking out the wait time usually taken to install operating systems and application dependencies. Shared libraries between containers can also reduce RAM footprint.
A container is started from an image, which may be locally created, cached locally, or downloaded from a registry. Docker Inc operates the [Docker Hub public registry][9], which hosts official repositories for a variety of operating systems, middleware and databases. Organisations and individuals can host public repositories for images at Docker Hub, and there are also subscription services for hosting private repositories. Since an uploaded image could contain almost anything Docker Hub provides an automated build facility (that was previously called trusted build) where images are constructed from a Dockerfile that serves as a manifest for the contents of the image.
### Containers versus VMs ###
Containers are potentially much more efficient than VMs because theyre able to share a single kernel and share application libraries. This can lead to substantially smaller RAM footprints even when compared to virtualisation systems that can make use of RAM overcommitment. Storage footprints can also be reduced where deployed containers share underlying image layers. IBMs Boden Russel has done [benchmarking][10] that illustrates these differences.
Containers also present a lower systems overhead than VMs, so the performance of an application inside a container will generally be the same or better versus the same application running within a VM. A team of IBM researchers have published a [performance comparison of virtual machines and Linux containers][11].
One area where containers are weaker than VMs is isolation. VMs can take advantage of ring -1 [hardware isolation][12] such as that provided by Intels VT-d and VT-x technologies. Such isolation prevents VMs from breaking out and interfering with each other. Containers dont yet have any form of hardware isolation, which makes them susceptible to exploits. A proof of concept attack named [Shocker][13] showed that Docker versions prior to 1.0 were vulnerable. Although Docker 1.0 fixed the particular issue exploited by Shocker, Docker CTO Solomon Hykes [stated][14], “When we feel comfortable saying that Docker out-of-the-box can safely contain untrusted uid0 programs, we will say so clearly.”. Hykess statement acknowledges that other exploits and associated risks remain, and that more work will need to be done before containers can become trustworthy.
For many use cases the choice of containers or VMs is a false dichotomy. Docker works well within a VM, which allows it to be used on existing virtual infrastructure, private clouds and public clouds. Its also possible to run VMs inside containers, which is something that Google uses as part of its cloud platform. Given the widespread availability of infrastructure as a service (IaaS) that provides VMs on demand its reasonable to expect that containers and VMs will be used together for years to come. Its also possible that container management and virtualisation technologies might be brought together to provide a best of both worlds approach; so a hardware trust anchored micro virtualisation implementation behind libcontainer could integrate with the Docker tool chain and ecosystem at the front end, but use a different back end that provides better isolation. Micro virtualisation (such as Bromiums [vSentry][15] and VMwares [Project Fargo][16]) is already used in desktop environments to provide hardware based isolation between applications, so similar approaches could be used along with libcontainer as an alternative to the container mechanisms in the Linux kernel.
### Dockerizing applications ###
Pretty much any Linux application can run inside a Docker container. There are no limitations on choice of languages or frameworks. The only practical limitation is what a container is allowed to do from an operating system perspective. Even that bar can be lowered by running containers in privileged mode, which substantially reduces controls (and correspondingly increases risk of the containerised application being able to cause damage to the host operating system).
Containers are started from images, and images can be made from running containers. There are essentially two ways to get applications into containers - manually and with a Dockerfile.
#### Manual builds ####
A manual build starts by launching a container with a base operating system image. An interactive terminal can then be used to install applications and dependencies using the package manager offered by the chosen flavour of Linux. Zef Hemel provides a walk through of the process in his article [Using Linux Containers to Support Portable Application Deployment][17]. Once the application is installed the container can be pushed to a registry (such as Docker Hub) or exported into a tar file.
#### Dockerfile ####
Dockerfile is a system for scripting the construction of Docker containers. Each Dockerfile specifies the base image to start from and then a series of commands that are run in the container and/or files that are added to the container. The Dockerfile can also specify ports to be exposed, the working directory when a container is started and the default command on startup. Containers built with Dockerfiles can be pushed or exported just like manual builds. Dockerfiles can also be used in Docker Hubs automated build system so that images are built from scratch in a system under the control of Docker Inc with the source of that image visible to anybody that might use it.
#### One process? ####
Whether images are built manually or with Dockerfile a key consideration is that only a single process is invoked when the container is launched. For a container serving a single purpose, such as running an application server, running a single process isnt an issue (and some argue that containers should only have a single process). For situations where its desirable to have multiple processes running inside a container a [supervisor][18] process must be launched that can then spawn the other desired processes. There is no init system within containers, so anything that relies on systemd, upstart or similar wont work without modification.
### Containers and microservices ###
A full description of the philosophy and benefits of using a microservices architecture is beyond the scope of this article (and well covered in the [InfoQ eMag: Microservices][19]). Containers are however a convenient way to bundle and deploy instances of microservices.
Whilst most practical examples of large scale microservices deployments to date have been on top of (large numbers of) VMs, containers offer the opportunity to deploy at a smaller scale. The ability for containers to have a shared RAM and disk footprint for operating systems, libraries, and common application code also means that deploying multiple versions of services side by side can be made very efficient.
### Connecting containers ###
Small applications will fit inside a single container, but in many cases an application will be spread across multiple containers. Dockers success has spawned a flurry of new application compositing tools, orchestration tools and platform as a service (PaaS) implementations. Behind most of these efforts is a desire to simplify the process of constructing an application from a set of interconnected containers. Many tools also help with scaling, fault tolerance, performance management and version control of deployed assets.
#### Connectivity ####
Dockers networking capabilities are fairly primitive. Services within containers can be made accessible to other containers on the same host, and Docker can also map ports onto the host operating system to make services available across a network. The officially sponsored approach to connectivity is [libchan][20], which is a library that provides Go like [channels][21] over the network. Until libchan finds its way into applications theres room for third parties to provide complementary network services. For example, [Flocker][22] has taken a proxy based approach to make services portable across hosts (along with their underlying storage).
#### Compositing ####
Docker has native mechanisms for linking containers together where metadata about a dependency can be passed into the dependent container and consumed within as environment variables and hosts entries. Application compositing tools like [Fig][23] and [geard][24] express the dependency graph inside a single file so that multiple containers can be brought together into a coherent system. CenturyLinks [Panamax][25] compositing tool takes a similar underlying approach to Fig and geard, but adds a web based user interface, and integrates directly with GitHub so that applications can be shared.
#### Orchestration ####
Orchestration systems like [Decking][26], New Relics [Centurion][27] and Googles [Kubernetes][28] all aim to help with the deployment and life cycle management of containers. There are also numerous examples (such as [Mesosphere][29]) of [Apache Mesos][30] (and particularly its [Marathon][31] framework for long running applications) being used along with Docker. By providing an abstraction between the application needs (e.g. expressed as a requirement for CPU cores and memory) and underlying infrastructure, the orchestration tools provide decoupling thats designed to simplify both application development and data centre operations. There is such a variety of orchestration systems because many have emerged from internal systems previously developed to manage large scale deployments of containers; for example Kubernetes is based on Googles [Omega][32] system thats used to manage containers across the Google estate.
Whilst there is some degree of functional overlap between the compositing tools and the orchestration tools there are also ways that they can complement each other. For example Fig might be used to describe how containers interact functionally whilst Kubernetes pods might be used to provide monitoring and scaling.
#### Platforms (as a Service) ####
A number of Docker native PaaS implementations such as [Deis][33] and [Flynn][34] have emerged to take advantage of the fact that Linux containers provide a great degree of developer flexibility (rather than being opinionated about a given set of languages and frameworks). Other platforms such as CloudFoundry, OpenShift and Apcera Continuum have taken the route of integrating Docker based functionality into their existing systems, so that applications based on Docker images (or the Dockerfiles that make them) can be deployed and managed alongside of apps using previously supported languages and frameworks.
### All the clouds ###
Since Docker can run in any Linux VM with a reasonably up to date kernel it can run in pretty much every cloud offering IaaS. Many of the major cloud providers have announced additional support for Docker and its ecosystem.
Amazon have introduced Docker into their Elastic Beanstalk system (which is an orchestration service over underlying IaaS). Google have Docker enabled managed VMs, which provide a halfway house between the PaaS of App Engine and the IaaS of Compute Engine. Microsoft and IBM have both announced services based on Kubernetes so that multi container applications can be deployed and managed on their clouds.
To provide a consistent interface to the wide variety of back ends now available the Docker team have introduced [libswarm][35], which will integrate with a multitude of clouds and resource management systems. One of the stated aims of libswarm is to avoid vendor lock-in by swapping any service out with another. This is accomplished by presenting a consistent set of services (with associated APIs) that attach to implementation specific back ends. For example the Docker server service presents the Docker remote API to a local Docker command line tool so that containers can be managed on an array of service providers.
New service types based on Docker are still in their infancy. London based Orchard Labs offered a Docker hosting service, but Docker Inc said that the service wouldnt be a priority after acquiring Orchard. Docker Inc has also sold its previous DotCloud PaaS business to cloudControl. Services based on older container management systems such as [OpenVZ][36] are already commonplace, so to a certain extent Docker needs to prove its worth to hosting providers.
### Docker and the distros ###
Docker has already become a standard feature of major Linux distributions like Ubuntu, Red Hat Enterprise Linux (RHEL) and CentOS. Unfortunately the distributions move at a different pace to the Docker project, so the versions found in a distribution can be well behind the latest available. For example Ubuntu 14.04 was released with Docker 0.9.1, and that didnt change on the point release upgrade to Ubuntu 14.04.1 (by which time Docker was at 1.1.2). There are also namespace issues in official repositories since Docker was also the name of a KDE system tray; so with Ubuntu 14.04 the package name and command line tool are both docker.io.
Things arent much different in the Enterprise Linux world. CentOS 7 comes with Docker 0.11.1, a development release that precedes Docker Incs announcement of production readiness with Docker 1.0. Linux distribution users that want the latest version for promised stability, performance and security will be better off following the [installation instructions][37] and using repositories hosted by Docker Inc rather than taking the version included in their distribution.
The arrival of Docker has spawned new Linux distributions such as [CoreOS][38] and Red Hats [Project Atomic][39] that are designed to be a minimal environment for running containers. These distributions come with newer kernels and Docker versions than the traditional distributions. They also have lower memory and disk footprints. The new distributions also come with new tools for managing large scale deployments such as [fleet][40] a distributed init system and [etcd][41] for metadata management. There are also new mechanisms for updating the distribution itself so that the latest versions of the kernel and Docker can be used. This acknowledges that one of the effects of using Docker is that it pushes attention away from the distribution and its package management solution, making the Linux kernel (and Docker subsystem using it) more important.
New distributions might be the best way of running Docker, but traditional distributions and their package managers remain very important within containers. Docker Hub hosts official images for Debian, Ubuntu, and CentOS. Theres also a semi-official repository for Fedora images. RHEL images arent available in Docker Hub, as theyre distributed directly from Red Hat. This means that the automated build mechanism on Docker Hub is only available to those using pure open source distributions (and willing to trust the provenance of the base images curated by the Docker Inc team).
Whilst Docker Hub integrates with source control systems such as GitHub and Bitbucket for automated builds the package managers used during the build process create a complex relationship between a build specification (in a Dockerfile) and the image resulting from a build. Non deterministic results from the build process isnt specifically a Docker problem - its a result of how package managers work. A build done one day will get a given version, and a build done another time may get a later version, which is why package managers have upgrade facilities. The container abstraction (caring less about the contents of a container) along with container proliferation (because of lightweight resource utilisation) is however likely to make this a pain point that gets associated with Docker.
### The future of Docker ###
Docker Inc has set a clear path on the development of core capabilities (libcontainer), cross service management (libswarm) and messaging between containers (libchan). Meanwhile the company has already shown a willingness to consume its own ecosystem with the Orchard Labs acquisition. There is however more to Docker than Docker Inc, with contributions to the project coming from big names like Google, IBM and Red Hat. With a benevolent dictator in the shape of CTO Solomon Hykes at the helm there is a clear nexus of technical leadership for both the company and the project. Over its first 18 months the project has shown an ability to move fast by using its own output, and there are no signs of that abating.
Many investors are looking at the features matrix for VMwares ESX/vSphere platform from a decade ago and figuring out where the gaps (and opportunities) lie between enterprise expectations driven by the popularity of VMs and the existing Docker ecosystem. Areas like networking, storage and fine grained version management (for the contents of containers) are presently underserved by the existing Docker ecosystem, and provide opportunities for both startups and incumbents.
Over time its likely that the distinction between VMs and containers (the run part of Docker) will become less important, which will push attention to the build and ship aspects. The changes here will make the question of what happens to Docker? much less important than what happens to the IT industry as a result of Docker?.
--------------------------------------------------------------------------------
via: http://www.infoq.com/articles/docker-future
作者:[Chris Swan][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://www.infoq.com/author/Chris-Swan
[1]:http://blog.dotcloud.com/dotcloud-paas-joins-cloudcontrol
[2]:http://www.infoq.com/news/2014/06/docker_1.0
[3]:https://www.kernel.org/doc/Documentation/cgroups/cgroups.txt
[4]:http://selinuxproject.org/page/Main_Page
[5]:https://linuxcontainers.org/
[6]:http://blog.docker.com/2014/03/docker-0-9-introducing-execution-drivers-and-libcontainer/
[7]:http://aufs.sourceforge.net/aufs.html
[8]:https://docs.docker.com/reference/builder/
[9]:https://registry.hub.docker.com/
[10]:http://bodenr.blogspot.co.uk/2014/05/kvm-and-docker-lxc-benchmarking-with.html?m=1
[11]:http://domino.research.ibm.com/library/cyberdig.nsf/papers/0929052195DD819C85257D2300681E7B/$File/rc25482.pdf
[12]:https://en.wikipedia.org/wiki/X86_virtualization#Hardware-assisted_virtualization
[13]:http://stealth.openwall.net/xSports/shocker.c
[14]:https://news.ycombinator.com/item?id=7910117
[15]:http://www.bromium.com/products/vsentry.html
[16]:http://cto.vmware.com/vmware-docker-better-together/
[17]:http://www.infoq.com/articles/docker-containers
[18]:http://docs.docker.com/articles/using_supervisord/
[19]:http://www.infoq.com/minibooks/emag-microservices
[20]:https://github.com/docker/libchan
[21]:https://gobyexample.com/channels
[22]:http://www.infoq.com/news/2014/08/clusterhq-launch-flocker
[23]:http://www.fig.sh/
[24]:http://openshift.github.io/geard/
[25]:http://panamax.io/
[26]:http://decking.io/
[27]:https://github.com/newrelic/centurion
[28]:https://github.com/GoogleCloudPlatform/kubernetes
[29]:https://mesosphere.io/2013/09/26/docker-on-mesos/
[30]:http://mesos.apache.org/
[31]:https://github.com/mesosphere/marathon
[32]:http://static.googleusercontent.com/media/research.google.com/en/us/pubs/archive/41684.pdf
[33]:http://deis.io/
[34]:https://flynn.io/
[35]:https://github.com/docker/libswarm
[36]:http://openvz.org/Main_Page
[37]:https://docs.docker.com/installation/#installation
[38]:https://coreos.com/
[39]:http://www.projectatomic.io/
[40]:https://github.com/coreos/fleet
[41]:https://github.com/coreos/etcd

View File

@ -1,48 +0,0 @@
CoreOS breaks with Docker
================================================================================
> Summary: CoreOS, a new enterprise Linux company and a Docker partner, is now proposing its own alternative to Docker's container technology.
[Docker][1] exploded out of nowhere in 2014 to make container technology white hot in cloud and datacenter technical circles. Even [Microsoft joined its open-source virtualization revolution][2]. Now, however, early Docker supporter [CoreOS][3], a new large-scale Linux distribution vendor, is turning its back on it and developing its own container technology: [Rocket][4].
![](http://cdn-static.zdnet.com/i/r/story/70/00/036331/coreos-200x77.jpg?hash=MTAvMJZ3MJ&upscale=1)
While [CoreOS][5] is relatively unknown outside of Linux circles and Silicon Valley, it's seen by those in the know as an up and coming Linux distribution for datacenters and clouds. It's not an insignificant company crying foul, because [Docker's take on virtualization has proven to be so popular][6]. Indeed, CoreOS currently requires Docker to work well, and Brandon Philips, CoreOS' co-founder and CTO, has been a top Docker contributor and was serving on the Docker governance board.
So, why is CoreOS breaking with Docker? First, because "We believe strongly in the Unix philosophy: Tools should be independently useful, but have clean integration points." However, it also said that "Docker now is building tools for launching cloud servers, systems for clustering, and a wide range of functions: Building images, running images, uploading, downloading, and eventually even overlay networking, all compiled into one monolithic binary running primarily as root on your server."
In short, instead of Docker being a Unix-style, simple reusable component, CoreOS sees Docker becoming a platform. And CoreOS has no interest in that.
Instead, with Rocket, they propose going back to the [original Docker proposal][7] for what a container should be.
CoreOS spells out that Rocket will be:
- **Composable**: All tools for downloading, installing, and running containers should be well integrated, but independent and composable.
- **Secure**: Isolation should be pluggable, and the crypto primitives for strong trust, image auditing, and application identity should exist from day one.
- **Image distribution**: Discovery of container images should be simple and facilitate a federated namespace and distributed retrieval. This opens the possibility of alternative protocols, such as BitTorrent, and deployments to private environments without the requirement of a registry.
- **Open**: The format and runtime should be well specified and developed by a community. We want independent implementations of tools to be able to run the same container consistently.
To do this, CoreOS is not forking Docker. Alex Polvi, CoreOS' CEO, wrote, "From a security and composability perspective, the Docker process model — where everything runs through a central daemon — is fundamentally flawed. To 'fix' Docker would essentially mean a rewrite of the project, while inheriting all the baggage of the existing implementation."
CoreOS already has an [alpha version of Rocket on GitHub][8], but it's still open to other ideas on how to build a Docker alternative. At the same time, however, CoreOS states that it won't be leaving Docker behind. "We will continue to make sure CoreOS is the best place to run Docker ... [and] expect Docker to continue to be fully integrated with CoreOS as it is today."
While I can understand CoreOS' concerns, I find it hard to imagine that its attempt to come up with a successful alternative to Docker will come to anything. Docker certainly isn't perfect, but in a matter of mere months, it gathered support from almost everyone in the enterprise operating system business. The only way I can see CoreOS' Rocket launching successfully will be if Docker falls flat on its face, and I just don't see that happening.
--------------------------------------------------------------------------------
via: http://www.zdnet.com/coreos-breaks-with-docker-7000036331/#ftag=RSS06bb67b
作者:[Steven J. Vaughan-Nichols][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://www.zdnet.com/meet-the-team/us/steven-j-vaughan-nichols/
[1]:https://www.docker.com/
[2]:http://www.zdnet.com/docker-container-support-coming-to-microsofts-next-windows-server-release-7000034708
[3]:https://coreos.com/
[4]:https://coreos.com/blog/rocket
[5]:http://www.zdnet.com/coreos-linux-for-the-cloud-and-the-datacenter-7000031137/
[6]:http://www.zdnet.com/what-is-docker-and-why-is-it-so-darn-popular-7000032269/
[7]:https://github.com/docker/docker/commit/0db56e6c519b19ec16c6fbd12e3cee7dfa6018c5
[8]:https://github.com/coreos/rocket

View File

@ -1,49 +0,0 @@
CoreOS Team Develops Rocket, Breaks with Docker
================================================================================
![](https://farm8.staticflickr.com/7297/12199695124_53d5323167_t.jpg)
[Docker][1] has easily emerged as one of the top open source stories of the year, and has helped many organizations [benefit from container technology][2]. As weve reported, even Google is [working closely][3] with it, and Microsoft is as well.
However, the folks behind CoreOS, a very popular Linux flavor for use in cloud deployments, are developing their own container technology, [dubbed Rocket][4], which will actually compete with Docker. Here are the details.
Rocket is a new container runtime, designed for composability, security, and speed, according to the CoreOS team. The group has released a [prototype version on GitHub][5] to begin getting community feedback.
“When Docker was first introduced to us in early 2013, the idea of a “standard container” was striking and immediately attractive: a simple component, a composable unit, that could be used in a variety of systems. The Docker repository [included a manifesto][6] of what a standard container should be. This was a rally cry to the industry, and we quickly followed. We thought Docker would become a simple unit that we can all agree on.”
“Unfortunately, a simple re-usable component is not how things are playing out. Docker now is building tools for launching cloud servers, systems for clustering, and a wide range of functions: building images, running images, uploading, downloading, and eventually even overlay networking, all compiled into one monolithic binary running primarily as root on your server. The standard container manifesto [was removed][7]. We should stop talking about Docker containers, and start talking about the Docker Platform.”
“We still believe in the original premise of containers that Docker introduced, so we are doing something about it. Rocket is a command line tool, rkt, for running App Containers. An App Container is the specification of an image format, container runtime, and a discovery mechanism.”
There is a specification coming for App Container Images (ACI). Anyone can [Read about and contribute to the ACI draft][8].
The Register also [notes this interesting aspect][9] of Rocket:
“Significantly, all of CoreOS's tools for working with App Container will be integrated, yet independent from one another. Rocket can run as a standalone tool on any flavor of Linux, not just CoreOS.”
In a [blog post][10], Docker CEO Ben Golub voiced disagreement with CoreOS's move, and he writes:
“There are technical or philosophical differences, which appears to be the case with the recent announcement regarding Rocket. We hope to address some of the technical arguments posed by the Rocket project in a subsequent post.”
It sounds like a standards skirmish is going to come of all this, but, as is often the case with standards confrontations, users may benefit from the competition.
--------------------------------------------------------------------------------
via: http://ostatic.com/blog/coreos-team-develops-rocket-breaks-with-docker
作者:[Sam Dean][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://ostatic.com/member/samdean
[1]:https://www.docker.com/
[2]:http://ostatic.com/blog/linux-containers-with-docker
[3]:http://ostatic.com/blog/google-sets-sights-squarely-on-docker-with-new-container-engine
[4]:https://coreos.com/blog/rocket/
[5]:https://github.com/coreos/rocket
[6]:https://github.com/docker/docker/commit/0db56e6c519b19ec16c6fbd12e3cee7dfa6018c5
[7]:https://github.com/docker/docker/commit/eed00a4afd1e8e8e35f8ca640c94d9c9e9babaf7
[8]:https://github.com/coreos/rocket/blob/master/app-container/SPEC.md#app-container-image
[9]:http://www.theregister.co.uk/2014/12/01/coreos_rocket_announcement/
[10]:http://blog.docker.com/2014/12/initial-thoughts-on-the-rocket-announcement/

View File

@ -1,57 +0,0 @@
Interview: Apache Software Foundation Elevates Drill to Top-Level Project
================================================================================
![](http://i1311.photobucket.com/albums/s669/webworkerdaily/tomer_zps5e1225aa.png)
The Apache Software Foundation (ASF) has [announced][1] that [Apache Drill][2] has graduated from the Apache Incubator to become a Top-Level Project (TLP).
Apache Drill is billed as the world's first schema-free SQL query engine that delivers real-time insights by removing the constraint of building and maintaining schemas before data can be analyzed.
Drill enables rapid application development on Apache Hadoop and also allows enterprise BI analysts to access Hadoop in a self-service fashion. OStatic caught up with Tomer Shiran (shown here), a member of the Drill Project Management Committee, to get his thoughts. Here they are in an interview.
**Can you provide a brief overview of what Drill is and what kinds of users it can make a difference for?**
Drill is the world's first distributed, schema-free SQL engine. Analysts and developers can use Drill to interactively explore data in Hadoop and other NoSQL databases, such as HBase and MongoDB. There's no need to explicitly define and maintain schemas, as Drill can automatically leverage the structure that's embedded in the data.
This enables self-service data exploration, which is not possible with traditional data warehouses or SQL-on-Hadoop solutions like Hive and Impala, in which DBAs must manage schemas and transform the data before it can be analyzed.
**What level of community involvement with Drill already exists?**
Drill is an Apache project, so it's not owned by any vendor. Developers in the community can contribute to Drill. MapR currently employs the largest number of contributors, but we're seeing an increasing number of contributions from other companies, and that trend has been accelerating in recent months.
For example, the MongoDB storage plugin (enabling queries on MongoDB) was contributed by developers at Intuit.
**Hadoop has a lot of momentum on the Big Data front. How can Drill help organizations leveraging Hadoop?**
Drill is the ideal interactive SQL engine for Hadoop. One of the main reasons organizations choose Hadoop is due to its flexibility and agility. Unlike traditional databases, getting data into Hadoop is easy, and users can load data in any shape or size on their own. Early attempts at SQL on Hadoop (eg, Hive, Impala) force schemas to be created and maintained even for self-describing data like JSON, Parquet and HBase tables.
These systems also require data to be transformed before it can be queried. Drill is the only SQL engine for Hadoop that doesn't force schemas to be defined before data can be queried, and doesn't require any data transformations. In other words, Drill maintains the flexibility and agility paradigms that made Hadoop popular, thus making it the natural technology for data exploration and BI on Hadoop.
**What does Drill's status as a top-level project at Apache mean for its development and future?**
Drill's graduation to a top-level project is an indication that Drill has established a strong community of users and developers. Graduation is a decision made by the Apache Software Foundation (ASF) board, and it provides confidence to Drill's potential users and contributors that the project has a strong foundation. From a governance standpoint, a top-level project has its own board (also known as PMC). The PMC Chair (Jacques Nadeau) is a VP at Apache.
**How do you think Drill will evolve over the next several years?**
Drill has a large and growing community of contributors. Drill 1.0 will be out in Q1'15. We'll see many new features over the next several years. Here are just a few examples of initiatives that are currently under way:
Drill currently supports HDFS, HBase and MongoDB. Additional data sources are being added, including Cassandra and RDBMS (all JDBC-enabled databases, including Oracle and MySQL). A single query can incorporate/join data from different sources. In the next year, Drill will become the standard SQL engine for modern datastores (which are all schema-free in nature): Hadoop, NoSQL databases - HBase/MongoDB/Cassandra, and search - Elasticsearch/Solr.
A single enterprise or cloud provider will be able to serve multiple groups/departments/organizations, each having its own workloads and SLA requirements. For example, Drill 1.0 will support user impersonation, meaning that a query can only access the data that the user is authorized to access, and this will work with all supported data sources (Hadoop, HBase, MongoDB, etc.)
Drill will support not only SELECT and CREATE TABLE ... AS SELECT (CTAS) queries, but also INSERT/UPDATE/DELETE, enabling Drill to be used for operational applications (in addition to data exploration and analytics). Drill will also support the ultra-low latency and high concurrency required for such use cases.
Full TPC-DS support. Unlike other SQL-on-Hadoop technologies, Drill is designed to support the ANSI SQL standard as opposed to a SQL-like language. This provides better support for BI and other tools. Drill will be able to run TPC-DS, unmodified, in 2015.
--------------------------------------------------------------------------------
via: http://ostatic.com/blog/interview-apache-software-foundation-elevates-drill-to-top-level-project
作者:[Sam Dean][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://ostatic.com/member/samdean
[1]:https://blogs.apache.org/foundation/entry/the_apache_software_foundation_announces66
[2]:http://drill.apache.org/

View File

@ -1,3 +1,6 @@
////translating by yupmoon
Open source all over the world
================================================================================
![](https://opensource.com/sites/default/files/styles/image-full-size/public/images/business/BUS_OpenSourceExperience_520x292_cm.png)
@ -145,4 +148,4 @@ via: https://opensource.com/business/14/12/jim-whitehurst-inspiration-open-sourc
[44]:http://jobs.redhat.com/life-at-red-hat/our-culture/
[45]:http://www.gutenberg.org/ebooks/4300
[46]:https://twitter.com/philshapiro
[47]:http://libreoffice.org/
[47]:http://libreoffice.org/

View File

@ -1,45 +0,0 @@
Was 2014 "The Year of Linux Desktop"?
================================================================================
> The Linux desktop is finally hitting all the right notes
![](http://i1-news.softpedia-static.com/images/news2/Was-2014-The-Year-of-Linux-Desktop-467036-2.jpg)
![](http://i1-news.softpedia-static.com/images/news2/Was-2014-The-Year-of-Linux-Desktop-467036-3.jpg)
![](http://i1-news.softpedia-static.com/images/news2/Was-2014-The-Year-of-Linux-Desktop-467036-4.jpg)
![](http://i1-news.softpedia-static.com/images/news2/Was-2014-The-Year-of-Linux-Desktop-467036-5.jpg)
![](http://i1-news.softpedia-static.com/images/news2/Was-2014-The-Year-of-Linux-Desktop-467036-6.jpg)
**Linux has seen a lot of changes during 2014 and many users are saying that this was finally the year that really showed some real progress, but has it been enough to call it "the year of Linux desktop"?**
This particular phrase, "the year of Linux desktop," has been recited like a mantra in the past couple of years and it's basically trying to mark all the progress registered until now in a way that makes sense. This kind of stuff hasn't happened so far and there is no precedent for the kind of growth we're witnessing, so it's easy to understand why Linux users might look at it from this perspective.
Most software and hardware domains don't usually go through this kind of fast progress and things happen at a slower pace, but things have been wild even for people who have a better insight into the industry. It's hard, if not impossible, to pinpoint a certain moment or a certain event, but Linux development exploded and changed exponentially in the course of just a couple of years.
### Year of the Linux desktop is an uncertain term ###
There is no single authority which can decree that the year of the Linux desktop has arrived or that it has passed. We can only try to deduce it from what we've seen until now and it's actually up to the users. Some are more conservative and not too many things have changed for them, and others are more progressive and they just can't get enough. It really depends on what your outlook is.
The spark that seems to have put everything in motion appears to be the launch of Steam for Linux, although we've seen some important movement of the Linux gaming scene before that became a reality. In any case, Valve is probably the catalyst of the resurgence of what we're seeing today.
The Linux desktop has been in a kind of slow evolution in the past decade and nothing really changed. There have been a lot of innovations for sure, but the market share has remained almost the same. No matter how cool the desktop became or how many features Linux had well before anyone else, things have remained largely the same, and that includes the participation of companies making proprietary software. They largely ignored Linux.
Now, more companies have shown interest in the Linux platform in the past year than they did in the last 10. Maybe it's a natural evolution and Valve had nothing to do with it, but Linux has finally reached a level where it can be used and understood by regular users, not just people fascinated by open source.
The drivers are better, game studios are porting games now on a regular basis, applications and middleware that we never thought we would see on Linux have started to show up, the Linux kernel development has an incredible pace, the installation process for most of the major distros is usually trivial, and all of these are just the tip of the iceberg.
So, when someone asks you if 2014 was the year of the Linux desktop, you can say yes. The Linux desktop totally ruled in 2014.
--------------------------------------------------------------------------------
via: http://news.softpedia.com/news/Was-2014-The-Year-of-Linux-Desktop-467036.shtml
作者:[Silviu Stahie][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://news.softpedia.com/editors/browse/silviu-stahie

View File

@ -0,0 +1,81 @@
Docker and the Integrated Open Source Company
================================================================================
Its been a long time since an open source project has gotten as much buzz and attention as Docker. The easiest way to explain the concept is, well, to look at the logo of the eponymous[1] company that created and manages the project:
![](http://2yj23r14cytosbxol4cavq337g.wpengine.netdna-cdn.com/wp-content/uploads/2014/12/docker.png)
The reference in the logo is to shipping containers, one of the most important inventions of the 20th century. Actually, the word “invention” is not quite right: the idea of putting bulk goods into consistently-sized boxes goes back at least a few hundred years.[2][1] What changed the world was the standardization of containers by a trucking magnate named Malcom McLean and Keith Tantlinger, his head engineer. Tantlinger developed much of the technology undergirding the intermodal container, especially its corner casting and Twistlock mechanism that allowed the containers to be stacked on ships, transported by trucks, and moved by crane. More importantly, Tantlinger convinced McLean to release the patented design for anyone to copy without license, knowing that the technology would only be valuable if it were deployed in every port and on every transport ship in the world. Tantlinger, to put it in software terms, open-sourced the design.
Shipping containers really are a perfect metaphor for what Docker is building: standardized containers for applications.
- Just as the idea of a container wasnt invented by Tantlinger, Docker is building on a concept that has been around for quite a while. Companies like Oracle, HP, and IBM have used containers for many years, and Google especially has a very similar implementation to Docker that they use for internal projects. Docker, though, by being open source and [community-centric][2], offers the promise of standardization
- It doesnt matter what is inside of a shipping container; the container itself will fit on any ship, truck, or crane in the world. Similarly, it doesnt matter what app (and associated files, frameworks, dependencies, etc.) is inside of a docker container; the container will run on any Linux distribution and, more importantly, just about every cloud provider including AWS, Azure, Google Cloud Platform, Rackspace, etc.
- When you move abroad, you can literally have a container brought to your house, stick in your belongings, and then have the entire thing moved to a truck to a crane to a ship to your new country. Similarly, containers allow developers to build and test an application on their local machine and have confidence that the application will behave the exact same way when it is pushed out to a server. Because everything is self-contained, the developer does not need to worry about there being different frameworks, versions, and other dependencies in the various places the application might be run
The implications of this are far-reaching: not only do containers make it easier to manage the lifecycle of an application, they also (theoretically) commoditize cloud services through the age-old hope of “write once run anywhere.” More importantly, at least for now, docker containers offer the potential of being far more efficient than virtual machines. Relative to a container, using virtual machines is like using a car transport ship to move cargo: each unique entity on the ship is self-powered, which means a lot of wasted resources (those car engines arent very useful while crossing the ocean). Similarly, each virtual machine has to deal with the overhead of its own OS; containers, on the other hand, all share the same OS resulting in huge efficiency gains.[3][4]
In short, Docker is a really big deal from a technical perspective. What excites me, though, is that the company is also innovating when it comes to their business model.
----------
The problem with monetizing open source is self-evident: if the software is freely available, what exactly is worth paying for? And, unlike media, you cant exactly stick an advertisement next to some code!
For many years the default answer has been to “be like Red Hat.” Red Hat is the creator and maintainer of the Red Hat Enterprise Linux (RHEL) distribution, which, like all Linux distributions, is freely available.[4][5] Red Hat, however, makes money by offering support, training, a certification program, etc. for enterprises looking to use their software. It is very much a traditional enterprise model make money on support! just minus the up-front license fees.
This sort of business is certainly still viable; Hortonworks is [set to IPO][3] with a similar model based on Hadoop, albeit at a much lower valuation than it received during its last VC round. That doesnt surprise me: I dont think this is a particularly great model from a business perspective.
To understand why its useful to think about there being three distinct parts of any company that is based on open source: the open source project itself, any value-added software built on top of that project, and the actual means of making money:
![](http://2yj23r14cytosbxol4cavq337g.wpengine.netdna-cdn.com/wp-content/uploads/2014/12/opensourcepaper.jpg)
*There are three parts of an open source business: the project itself, the value-added software on top of that project, and the means of monetization*
The problem with the “Red Hat” model is the complete separation of all three of these parts: Red Hat doesnt control the core project (Linux), and their value-added software (RHEL) is free, leaving their money-making support program to stand alone. To the companys credit they have pulled this model off, but I think a big reason is because utilizing Linux was so much more of a challenge back in the 90s.[5][11] I highly doubt Red Hat could successfully build a similar business from scratch today.
![](http://2yj23r14cytosbxol4cavq337g.wpengine.netdna-cdn.com/wp-content/uploads/2014/12/redhatpaper.jpg)
*The three parts of Red Hats business are separate and more difficult for the company to control and monetize*
GitHub, the repository hosting service, is exploring what is to my mind a more compelling model. GitHubs value-added software is a hosting service based on Git, an open-source project designed by Linux creator Linus Torvalds. Crucially, GitHub is seeking to monetize that hosting service directly, both through a SaaS model and through an on-premise enterprise offering[6][6]. This means that, in comparison to Red Hat, there is one less place to disintermediate GitHub: you cant get their value-added software (for private projects public is free) unless youre willing to pay.
![](http://2yj23r14cytosbxol4cavq337g.wpengine.netdna-cdn.com/wp-content/uploads/2014/12/githubpaper.jpg)
*While GitHub does not control Git, their value-added software and means of monetization are unified, making the latter much easier and more sustainable*
Docker takes the GitHub model a step further: the company controls everything from the open source project itself to the value-added software (DockerHub) built on top of that, and, just last week, [announced a monetization model][7] that is very similar to GitHubs enterprise offering. Presuming Docker continues its present momentum and finds success with this enterprise offering, they have the potential to be a fully integrated open source software company: project, value-added software, and monetization all rolled into one.
![](http://2yj23r14cytosbxol4cavq337g.wpengine.netdna-cdn.com/wp-content/uploads/2014/12/dockerpaper.jpg)
*Docker controls all the parts of their business: they are a fully integrated open source company.*
This is exciting, and, to be honest, a little scary. What is exciting is that very few movements have had such a profound effect as open source software, and not just on the tech industry. Open source products are responsible for end user products like this blog; more importantly, open source technologies have enabled exponentially more startups to get off the ground with minimal investment, vastly accelerating the rate of innovation and iteration in tech.[7][8] The ongoing challenge for any open source project, though, is funding, and Dockers business model is a potentially sustainable solution not just for Docker but for future open source technologies.
That said, if Docker is successful, over the long run commercial incentives will steer the Docker open source project in a way that benefits Docker the company, which may not be what is best for the community broadly. That is what is scary about this: might open source in the long run be subtly corrupted by this business model? The makers of CoreOS, a stripped-down Linux distribution that is a perfect complement for Docker, [argued that was the case][9] last week:
> We thought Docker would become a simple unit that we can all agree on. Unfortunately, a simple re-usable component is not how things are playing out. Docker now is building tools for launching cloud servers, systems for clustering, and a wide range of functions: building images, running images, uploading, downloading, and eventually even overlay networking, all compiled into one monolithic binary running primarily as root on your server. The standard container manifesto was removed. We should stop talking about Docker containers, and start talking about the Docker Platform. It is not becoming the simple composable building block we had envisioned.
This, I suppose, is the beauty of open source: if you disagree, fork, which is essentially what CoreOS did, launching their own “Rocket” container.[8][10] It also shows that Dockers business model and any business model that contains open source will never be completely defensible: there will always be a disintermediation point. I suspect, though, that Rocket will fail and Dockers momentum will continue: the logic of there being one true container is inexorable, and Docker has already built up quite a bit of infrastructure and just maybe a business model to make it sustainable.
--------------------------------------------------------------------------------
via: http://stratechery.com/2014/docker-integrated-open-source-company/
作者:[Ben Thompson][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://stratechery.com/category/about/
[1]:http://stratechery.com/2014/docker-integrated-open-source-company/#fn:1:1300
[2]:https://github.com/docker/docker
[3]:http://blogs.wsj.com/digits/2014/12/01/ipo-bound-hortonworks-drops-out-of-billion-dollar-startup-club/
[4]:http://stratechery.com/2014/docker-integrated-open-source-company/#fn:2:1300
[5]:http://stratechery.com/2014/docker-integrated-open-source-company/#fn:3:1300
[6]:http://stratechery.com/2014/docker-integrated-open-source-company/#fn:5:1300
[7]:http://blog.docker.com/2014/12/docker-announces-docker-hub-enterprise/
[8]:http://stratechery.com/2014/docker-integrated-open-source-company/#fn:6:1300
[9]:https://coreos.com/blog/rocket/
[10]:http://stratechery.com/2014/docker-integrated-open-source-company/#fn:7:1300
[11]:http://stratechery.com/2014/docker-integrated-open-source-company/#fn:4:1300

View File

@ -0,0 +1,74 @@
2015 will be the year Linux takes over the enterprise (and other predictions)
================================================================================
> Jack Wallen removes his rose-colored glasses and peers into the crystal ball to predict what 2015 has in store for Linux.
![](http://tr1.cbsistatic.com/hub/i/r/2014/12/15/f79d21fe-f1d1-416d-ba22-7e757dfcdb31/resize/620x485/52a10d26d34c3fc4201c5daa8ff277ff/linux2015hero.jpg)
The crystal ball has been vague and fuzzy for quite some time. Every pundit and voice has opined on what the upcoming year will mean to whatever topic it is they hold dear to their heart. In my case, we're talking Linux and open source.
In previous years, I'd don the rose-colored glasses and make predictions that would shine a fantastic light over the Linux landscape and proclaim 20** will be the year of Linux on the _____ (name your platform). Many times, those predictions were wrong, and Linux would wind up grinding on in the background.
This coming year, however, there are some fairly bold predictions to be made, some of which are sure things. Read on and see if you agree.
### Linux takes over big data ###
This should come as no surprise, considering the advancements Linux and open source has made over the previous few years. With the help of SuSE, Red Hat, and SAP Hana, Linux will hold powerful sway over big data in 2015. In-memory computing and live kernel patching will be the thing that catapults big data into realms of uptime and reliability never before known. SuSE will lead this charge like a warrior rushing into a battle it cannot possibly lose.
This rise of Linux in the world of big data will have serious trickle down over the rest of the business world. We already know how fond enterprise businesses are of Linux and big data. What we don't know is how this relationship will alter the course of Linux with regards to the rest of the business world.
My prediction is that the success of Linux with big data will skyrocket the popularity of Linux throughout the business landscape. More contracts for SuSE and Red Hat will equate to more deployments of Linux servers that handle more tasks within the business world. This will especially apply to the cloud, where OpenStack should easily become an overwhelming leader.
As the end of 2015 draws to a close, Linux will continue its take over of more backend services, which may include the likes of collaboration servers, security, and much more.
### Smart machines ###
Linux is already leading the trend for making homes and autos more intelligent. With improvements in the likes of Nest (which currently uses an embedded Linux), the open source platform is poised to take over your machines. Because 2015 should see a massive rise in smart machines, it goes without saying that Linux will be a huge part of that growth. I firmly believe more homes and businesses will take advantage of such smart controls, and that will lead to more innovations (all of which will be built on Linux).
One of the issues facing Nest, however, is that it was purchased by Google. What does this mean for the thermostat controller? Will Google continue using the Linux platform -- or will it opt to scrap that in favor of Android? Of course, a switch would set the Nest platform back a bit.
The upcoming year will see Linux lead the rise in popularity of home automation. Wink, Iris, Q Station, Staples Connect, and more (similar) systems will help to bridge Linux and home users together.
### The desktop ###
The big question, as always, is one that tends to hang over the heads of the Linux community like a dark cloud. That question is in relation to the desktop. Unfortunately, my predictions here aren't nearly as positive. I believe that the year 2015 will remain quite stagnant for Linux on the desktop. That complacency will center around Ubuntu.
As much as I love Ubuntu (and the Unity desktop), this particular distribution will continue to drag the Linux desktop down. Why?
Convergence... or the lack thereof.
Canonical has been so headstrong about converging the desktop and mobile experience that they are neglecting the current state of the desktop. The last two releases of Ubuntu (one being an LTS release) have been stagnant (at best). The past year saw two of the most unexciting releases of Ubuntu that I can recall. The reason? Because the developers of Ubuntu are desperately trying to make Unity 8/Mir and the ubiquitous Ubuntu Phone a reality. The vaporware that is the Ubuntu Phone will continue on through 2015, and Unity 8/Mir may or may not be released.
When the new iteration of the Ubuntu Unity desktop is finally released, it will suffer a serious setback, because there will be so little hardware available to truly show it off. [System76][1] will sell their outstanding [Sable Touch][2], which will probably become the flagship system for Unity 8/Mir. As for the Ubuntu Phone? How many reports have you read that proclaimed "Ubuntu Phone will ship this year"?
I'm now going on the record to predict that the Ubuntu Phone will not ship in 2015. Why? Canonical created partnerships with two OEMs over a year ago. Those partnerships have yet to produce a single shippable product. The closest thing to a shippable product is the Meizu MX4 phone. The "Pro" version of that phone was supposed to have a formal launch of Sept 25. Like everything associated with the Ubuntu Phone, it didn't happen.
Unless Canonical stops putting all of its eggs in one vaporware basket, desktop Linux will take a major hit in 2015. Ubuntu needs to release something major -- something to make heads turn -- otherwise, 2015 will be just another year where we all look back and think "we could have done something special."
Outside of Ubuntu, I do believe there are some outside chances that Linux could still make some noise on the desktop. I think two distributions, in particular, will bring something rather special to the table:
- [Evolve OS][3] -- a ChromeOS-like Linux distribution
- [Quantum OS][4] -- a Linux distribution that uses Android's Material Design specs
Both of these projects are quite exciting and offer unique, user-friendly takes on the Linux desktop. This is quickly becoming a necessity in a landscape being dragged down by out-of-date design standards (think the likes of Cinnamon, Mate, XFCE, LXCE -- all desperately clinging to the past).
This is not to say that Linux on the desktop doesn't have a chance in 2015. It does. In order to grasp the reins of that chance, it will have to move beyond the past and drop the anchors that prevent it from moving out to deeper, more viable waters.
Linux stands to make more waves in 2015 than it has in a very long time. From enterprise to home automation -- the world could be the oyster that Linux uses as a springboard to the desktop and beyond.
What are your predictions for Linux and open source in 2015? Share your thoughts in the discussion thread below.
--------------------------------------------------------------------------------
via: http://www.techrepublic.com/article/2015-will-be-the-year-linux-takes-over-the-enterprise-and-other-predictions/
作者:[Jack Wallen][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://www.techrepublic.com/search/?a=jack+wallen
[1]:https://system76.com/
[2]:https://system76.com/desktops/sable
[3]:https://evolve-os.com/
[4]:http://quantum-os.github.io/

View File

@ -0,0 +1,91 @@
A brief history of Linux malware
================================================================================
A look at some of the worms and viruses and Trojans that have plagued Linux throughout the years.
### Nobodys immune ###
![Image courtesy Shutterstock](http://images.techhive.com/images/article/2014/12/121114-linux-malware-1-100535381-orig.jpg)
Although not as common as malware targeting Windows or even OS X, security threats to Linux have become both more numerous and more severe in recent years. There are a couple of reasons for that the mobile explosion has meant that Android (which is Linux-based) is among the most attractive targets for malicious hackers, and the use of Linux as a server OS for and in the data center has also grown but Linux malware has been around in some form since well before the turn of the century. Have a look.
### Staog (1996) ###
![](http://images.techhive.com/images/article/2014/12/121114-stago-100535400-orig.gif)
The first recognized piece of Linux malware was Staog, a rudimentary virus that tried to attach itself to running executables and gain root access. It didnt spread very well, and it was quickly patched out in any case, but the concept of the Linux virus had been proved.
### Bliss (1997) ###
![](http://images.techhive.com/images/article/2014/12/121114-3new-100535402-orig.gif)
If Staog was the first, however, Bliss was the first to grab the headlines though it was a similarly mild-mannered infection, trying to grab permissions via compromised executables, and it could be deactivated with a simple shell switch. It even kept a neat little log, [according to online documentation from Ubuntu][1].
### Ramen/Cheese (2001) ###
![](http://images.techhive.com/images/article/2014/12/121114-ramen-100535404-orig.jpg)
Cheese is the malware you actually want to get certain Linux worms, like Cheese, may actually have been beneficial, patching the vulnerabilities the earlier Ramen worm used to infect computers in the first place. (Ramen was so named because it replaced web server homepages with a goofy image saying that “hackers looooove noodles.”)
### Slapper (2002) ###
![Image courtesy Wikimedia CommonsCC LicenseKevin Collins](http://images.techhive.com/images/article/2014/12/121114-linux-malware-5-100535389-orig.jpg)
The Slapper worm struck in 2002, infecting servers via an SSL bug in Apache. That predates Heartbleed by 12 years, if youre keeping score at home.
### Badbunny (2007) ###
![Image courtesy Shutterstock](http://images.techhive.com/images/article/2014/12/121114-linux-malware-6-100535384-orig.jpg)
Badbunny was an OpenOffice macro worm that carried a sophisticated script payload that worked on multiple platforms even though the only effect of a successful infection was to download a raunchy pic of a guy in a bunny suit, er, doing what bunnies are known to do.
### Snakso (2012) ###
![](http://images.techhive.com/images/article/2014/12/121114-linux-malware-7-100535385-orig.jpg)
Image courtesy [TechWorld UK][2]
The Snakso rootkit targeted specific versions of the Linux kernel to directly mess with TCP packets, injecting iFrames into traffic generated by the infected machine and pushing drive-by downloads.
### Hand of Thief (2013) ###
![](http://images.techhive.com/images/article/2014/12/121114-thief-100535405-orig.jpg)
Hand of Thief is a commercial (sold on Russian hacker forums) Linux Trojan creator that made quite a splash when it was introduced last year. RSA researchers, however, discovered soon after that [it wasnt quite as dangerous as initially thought][3].
### Windigo (2014) ###
![](http://images.techhive.com/images/article/2014/12/121114-linux-malware-9-100535390-orig.jpg)
Image courtesy [freezelight][4]
Windigo is a complex, large-scale cybercrime operation that targeted tens of thousands of Linux servers, causing them to produce spam and serve drive-by malware and redirect links. Its still out there, according to ESET security, [so admins should tread carefully][5].
### Shellshock/Mayhem (2014) ###
![Shellshock/Mayhem (2014)](http://images.techhive.com/images/article/2014/12/121114-malware-mayhem-100535406-orig.gif)
Striking at the terminal strikes at the heart of Linux, which is why the recent Mayhem attacks which targeted the so-called Shellshock vulnerabilities in Linuxs Bash command-line interpreter using a specially crafted ELF library were so noteworthy. Researchers at Yandex said that the network [had snared 1,400 victims as of July][6].
### Turla (2014) ###
![Image courtesy CW](http://images.techhive.com/images/article/2014/12/121114-linux-malware-11-100535391-orig.jpg)
A large-scale campaign of cyberespionage emanating from Russia, called Epic Turla by researchers, was found to have a new Linux-focused component earlier this week. Its apparently [based on a backdoor access program from all the way back in 2000 called cd00r][7].
--------------------------------------------------------------------------------
via: http://www.networkworld.com/article/2858742/linux/a-brief-history-of-linux-malware.html
作者:[Jon Gold][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://www.networkworld.com/author/Jon-Gold/
[1]:https://help.ubuntu.com/community/Linuxvirus
[2]:http://news.techworld.com/security/3412075/linux-users-targeted-by-mystery-drive-by-rootkit/
[3]:http://www.networkworld.com/article/2168938/network-security/dangerous-linux-trojan-could-be-sign-of-things-to-come.html
[4]:https://www.flickr.com/photos/63056612@N00/155554663
[5]:http://www.welivesecurity.com/2014/04/10/windigo-not-windigone-linux-ebury-updated/
[6]:http://www.pcworld.com/article/2825032/linux-botnet-mayhem-spreads-through-shellshock-exploits.html
[7]:http://www.computerworld.com/article/2857129/turla-espionage-operation-infects-linux-systems-with-malware.html

View File

@ -0,0 +1,143 @@
20 Linux Commands Interview Questions & Answers
================================================================================
**Q:1 How to check current run level of a linux server ?**
Ans: who -r & runlevel commands are used to check the current runlevel of a linux box.
**Q:2 How to check the default gatway in linux ?**
Ans: Using the commands “route -n” and “netstat -nr” , we can check default gateway. Apart from the default gateway info , these commands also display the current routing tables .
**Q:3 How to rebuild initrd image file on Linux ?**
Ans: In case of CentOS 5.X / RHEL 5.X , mkinitrd command is used to create initrd file , example is shown below :
# mkinitrd -f -v /boot/initrd-$(uname -r).img $(uname -r)
If you want to create initrd for a specific kernel version , then replace uname -r with desired kernel
In Case of CentOS 6.X / RHEL 6.X , dracut command is used to create initrd file example is shown below :
# dracut -f
Above command will create the initrd file for the current version. To rebuild the initrd file for a specific kernel , use below command :
# dracut -f initramfs-2.x.xx-xx.el6.x86_64.img 2.x.xx-xx.el6.x86_64
**Q:4 What is cpio command ?**
Ans: cpio stands for Copy in and copy out. Cpio copies files, lists and extracts files to and from an archive (or a single file).
**Q:5 What is patch command and where to use it ?**
Ans: As the name suggests, the patch command is used to apply changes (or patches) to a text file. The patch command generally accepts output from diff and converts older versions of files into newer versions. For example, the Linux kernel source code consists of a number of files with millions of lines, so whenever any contributor contributes changes, he/she will send only the changes instead of sending the whole source code. Then the receiver will apply the changes to the original source code with the patch command.
Create a diff file for use with patch,
# diff -Naur old_file new_file > diff_file
Where old_file and new_file are either single files or directories containing files. The r option supports recursion of a directory tree.
Once the diff file has been created, we can apply it to patch the old file into the new file:
# patch < diff_file
**Q:6 What is use of aspell ?**
Ans: As the name suggests, aspell is an interactive spelling checker in the linux operating system. The aspell command is the successor to an earlier program named ispell, and can be used, for the most part, as a drop-in replacement. While the aspell program is mostly used by other programs that require spell-checking capability, it can also be used very effectively as a stand-alone tool from the command line.
**Q:7 How to check the SPF record of domain from command line ?**
Ans: We can check SPF record of a domain using dig command. Example is shown below :
linuxtechi@localhost:~$ dig -t TXT google.com
**Q:8 How to identify which package the specified file (/etc/fstab) is associated with in linux ?**
Ans: # rpm -qf /etc/fstab
Above command will list the package which provides file “/etc/fstab”
**Q:9 Which command is used to check the status of bond0 ?**
Ans: cat /proc/net/bonding/bond0
**Q:10 What is the use of /proc file system in linux ?**
Ans: The /proc file system is a RAM based file system which maintains information about the current state of the running kernel including details on CPU, memory, partitioning, interrupts, I/O addresses, DMA channels, and running processes. This file system is represented by various files which do not actually store the information, they point to the information in the memory. The /proc file system is maintained automatically by the system.
**Q:11 How to find files larger than 10MB in size in /usr directory ?**
Ans: # find /usr -size +10M
**Q:12 How to find files in the /home directory that were modified more than 120 days ago ?**
Ans: # find /home -mtime +120
**Q:13 How to find files in the /var directory that have not been accessed in the last 90 days ?**
Ans: # find /var -atime -90
**Q:14 Search for core files in the entire directory tree and delete them as found without prompting for confirmation**
Ans: # find / -name core -exec rm {} \;
**Q:15 What is the purpose of strings command ?**
Ans: The strings command is used to extract and display the legible contents of a non-text file.
**Q:16 What is the use tee filter ?**
Ans: The tee filter is used to send an output to more than one destination. It can send one copy of the output to a file and another to the screen (or some other program) if used with pipe.
linuxtechi@localhost:~$ ll /etc | nl | tee /tmp/ll.out
In the above example, the output from ll is numbered and captured in /tmp/ll.out file. The output is also displayed on the screen.
**Q:17 What would the command export PS1="$LOGNAME@`hostname`:\$PWD: " do ?**
Ans: The export command provided will change the login prompt to display username, hostname, and the current working directory.
**Q:18 What would the command ll | awk '{print $3,"owns",$9}' do ?**
Ans: The ll command provided will display file names and their owners.
**Q:19 What is the use of at command in linux ?**
Ans: The at command is used to schedule a one-time execution of a program in the future. All submitted jobs are spooled in the /var/spool/at directory and executed by the atd daemon when the scheduled time arrives.
**Q:20 What is the role of lspci command in linux ?**
Ans: The lspci command displays information about PCI buses and the devices attached to your system. Specify -v, -vv, or -vvv for detailed output. With the -m option, the command produces more legible output.
--------------------------------------------------------------------------------
via: http://www.linuxtechi.com/20-linux-commands-interview-questions-answers/
作者:[Pradeep Kumar][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://www.linuxtechi.com/author/pradeep/
[1]:
[2]:
[3]:
[4]:
[5]:
[6]:
[7]:
[8]:
[9]:
[10]:
[11]:
[12]:
[13]:
[14]:
[15]:
[16]:
[17]:
[18]:
[19]:
[20]:

View File

@ -0,0 +1,125 @@
Defending the Free Linux World
================================================================================
![](http://www.linuxinsider.com/ai/908455/open-invention-network.jpg)
**Co-opetition is a part of open source. The Open Invention Network model allows companies to decide where they will compete and where they will collaborate, explained OIN CEO Keith Bergelt. As open source evolved, "we had to create channels for collaboration. Otherwise, we would have hundreds of entities spending billions of dollars on the same technology."**
The [Open Invention Network][1], or OIN, is waging a global campaign to keep Linux out of harm's way in patent litigation. Its efforts have resulted in more than 1,000 companies joining forces to become the largest defense patent management organization in history.
The Open Invention Network was created in 2005 as a white hat organization to protect Linux from license assaults. It has considerable financial backing from original board members that include Google, IBM, NEC, Novell, Philips, [Red Hat][2] and Sony. Organizations worldwide have joined the OIN community by signing the free OIN license.
Organizers founded the Open Invention Network as a bold endeavor to leverage intellectual property to protect Linux. Its business model was difficult to comprehend. It asked its members to take a royalty-free license and forever forgo the chance to sue other members over their Linux-oriented intellectual property.
However, the surge in Linux adoptions since then -- think server and cloud platforms -- has made protecting Linux intellectual property a critically necessary strategy.
Over the past year or so, there has been a shift in the Linux landscape. OIN is doing a lot less talking to people about what the organization is and a lot less explaining why Linux needs protection. There is now a global awareness of the centrality of Linux, according to Keith Bergelt, CEO of OIN.
"We have seen a culture shift to recognizing how OIN benefits collaboration," he told LinuxInsider.
### How It Works ###
The Open Invention Network uses patents to create a collaborative environment. This approach helps ensure the continuation of innovation that has benefited software vendors, customers, emerging markets and investors.
Patents owned by Open Invention Network are available royalty-free to any company, institution or individual. All that is required to qualify is the signer's agreement not to assert its patents against the Linux system.
OIN ensures the openness of the Linux source code. This allows programmers, equipment vendors, independent software vendors and institutions to invest in and use Linux without excessive worry about intellectual property issues. This makes it more economical for companies to repackage, embed and use Linux.
"With the diffusion of copyright licenses, the need for OIN licenses becomes more acute. People are now looking for a simpler or more utilitarian solution," said Bergelt.
OIN legal defenses are free of charge to members. Members commit to not initiating patent litigation against the software in OIN's list. They also agree to offer their own patents in defense of that software. Ultimately, these commitments result in access to hundreds of thousands of patents cross-licensed by the network, Bergelt explained.
### Closing the Legal Loopholes ###
"What OIN is doing is very essential. It offers another layer of IP protection," said Greg R. Vetter, associate professor of law at the [University of Houston Law Center][3].
Version 2 of the GPL license is thought by some to provide an implied patent license, but lawyers always feel better with an explicit license, he told LinuxInsider.
What OIN provides is something that bridges that gap. It also provides explicit coverage of the Linux kernel. An explicit patent license is not necessarily part of the GPLv2, but it was added in GPLv3, according to Vetter.
Take the case of a code writer who produces 10,000 lines of code under GPLv3, for example. Over time, other code writers contribute many more lines of code, which adds to the IP. The software patent license provisions in GPLv3 would protect the use of the entire code base under all of the participating contributors' patents, Vetter said.
### Not Quite the Same ###
Patents and licenses are overlapping legal constructs. Figuring out how the two entities work with open source software can be like traversing a minefield.
"Licenses are legal constructs granting additional rights based on, typically, patent and copyright laws. Licenses are thought to give a permission to do something that might otherwise be infringement of someone else's IP rights," Vetter said.
Many free and open source licenses (such as the Mozilla Public License, the GNU GPLv3, and the Apache Software License) incorporate some form of reciprocal patent rights clearance. Older licenses like BSD and MIT do not mention patents, Vetter pointed out.
A software license gives someone else certain rights to use the code the programmer created. Copyright to establish ownership is automatic, as soon as someone writes or draws something original. However, copyright covers only that particular expression and derivative works. It does not cover code functionality or ideas for use.
Patents cover functionality. Patent rights also can be licensed. A copyright may not protect how someone independently developed implementation of another's code, but a patent fills this niche, Vetter explained.
### Looking for Safe Passage ###
The mixing of license and patent legalities can appear threatening to open source developers. For some, even the GPL qualifies as threatening, according to William Hurley, cofounder of [Chaotic Moon Studios][4] and [IEEE][5] Computer Society member.
"Way back in the day, open source was a different world. Driven by mutual respect and a view of code as art, not property, things were far more open than they are today. I believe that many efforts set upon with the best of intentions almost always end up bearing unintended consequences," Hurley told LinuxInsider.
Surpassing the 1,000-member mark might carry a mixed message about the significance of intellectual property right protection, he suggested. It might just continue to muddy the already murky waters of today's open source ecosystem.
"At the end of the day, this shows some of the common misconceptions around intellectual property. Having thousands of developers does not decrease risk -- it increases it. The more developers licensing the patents, the more valuable they appear to be," Hurley said. "The more valuable they appear to be, the more likely someone with similar patents or other intellectual property will try to take advantage and extract value for their own financial gain."
### Sharing While Competing ###
Co-opetition is a part of open source. The OIN model allows companies to decide where they will compete and where they will collaborate, explained Bergelt.
"Many of the changes in the evolution of open source in terms of process have moved us into a different direction. We had to create channels for collaboration. Otherwise, we would have hundreds of entities spending billions of dollars on the same technology," he said.
A glaring example of this is the early evolution of the cellphone industry. Multiple standards were put forward by multiple companies. There was no sharing and no collaboration, noted Bergelt.
"That damaged our ability to access technology by seven to 10 years in the U.S. Our experience with devices was far behind what everybody else in the world had. We were complacent with GSM (Global System for Mobile Communications) while we were waiting for CDMA (Code Division Multiple Access)," he said.
### Changing Landscape ###
OIN experienced a growth surge of 400 new licensees in the last year. That is indicative of a new trend involving open source.
"The marketplace reached a critical mass where finally people within organizations recognized the need to explicitly collaborate and to compete. The result is doing both at the same time. This can be messy and taxing," Bergelt said.
However, it is a sustainable transformation driven by a cultural shift in how people think about collaboration and competition. It is also a shift in how people are embracing open source -- and Linux in particular -- as the lead project in the open source community, he explained.
One indication is that most significant new projects are not being developed under the GPLv3 license.
### Two Better Than One ###
"The GPL is incredibly important, but the reality is there are a number of licensing models being used. The relative addressability of patent issues is generally far lower in Eclipse and Apache and Berkeley licenses than it is in GPLv3," said Bergelt.
GPLv3 is a natural complement for addressing patent issues -- but the GPL is not sufficient on its own to address the issues of potential conflicts around the use of patents. So OIN is designed as a complement to copyright licenses, he added.
However, the overlap of patent and license may not do much good. In the end, patents are for offensive purposes -- not defensive -- in almost every case, Bergelt suggested.
"If you are not prepared to take legal action against others, then a patent may not be the best form of legal protection for your intellectual properties," he said. "We now live in a world where the misconceptions around software, both open and proprietary, combined with an ill-conceived and outdated patent system, leave us floundering as an industry and stifling innovation on a daily basis," he said.
### Court of Last Resort ###
It would be nice to think the presence of OIN has dampened a flood of litigation, Bergelt said, or at the very least, that OIN's presence is neutralizing specific threats.
"We are getting people to lay down their arms, so to say. At the same time, we are creating a new cultural norm. Once you buy into patent nonaggression in this model, the correlative effect is to encourage collaboration," he observed.
If you are committed to collaboration, you tend not to rush to litigation as a first response. Instead, you think in terms of how can we enable you to use what we have and make some money out of it while we use what you have, Bergelt explained.
"OIN is a multilateral solution. It encourages signers to create bilateral agreements," he said. "That makes litigation the last course of action. That is where it should be."
### Bottom Line ###
OIN is working to prevent Linux patent challenges, Bergelt is convinced. There has not been litigation in this space involving Linux.
The only thing that comes close are the mobile wars with Microsoft, which focus on elements high in the stack. Those legal challenges may be designed to raise the cost of ownership involving the use of Linux products, Bergelt noted.
Still, "these are not Linux-related law suits," he said. "They do not focus on what is core to Linux. They focus on what is in the Linux system."
--------------------------------------------------------------------------------
via: http://www.linuxinsider.com/story/Defending-the-Free-Linux-World-81512.html
作者Jack M. Germain
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[1]:http://www.openinventionnetwork.com/
[2]:http://www.redhat.com/
[3]:http://www.law.uh.edu/
[4]:http://www.chaoticmoon.com/
[5]:http://www.ieee.org/

View File

@ -0,0 +1,102 @@
Docker CTO Solomon Hykes to Devs: Have It Your Way
================================================================================
![](http://www.linuxinsider.com/ai/845971/docker-cloud.jpg)
**"We made a very conscious effort with Docker to insert the technology into an existing toolbox. We did not want to turn the developer's world upside down on the first day. ... We showed them incremental improvements so that over time the developers discovered more things they could do with Docker. So the developers could transition into the new architecture using the new tools at their own pace."**
[Docker][1] in the last two years has moved from an obscure Linux project to one of the most popular open source technologies in cloud computing.
Project developers have witnessed millions of Docker Engine downloads. Hundreds of Docker groups have formed in 40 countries. Many more companies are announcing Docker integration. Even Microsoft will ship Windows 10 with Docker preinstalled.
![](http://www.linuxinsider.com/article_images/2014/81504_330x260.jpg)
Solomon Hykes
Founder and CTO of Docker
"That caught a lot of people by surprise," Docker founder and CTO Solomon Hykes told LinuxInsider.
Docker is an open platform for developers and sysadmins to build, ship and run distributed applications. It uses a Docker engine along with a portable, lightweight runtime and packaging tool. It also needs the Docker Hub and a cloud service for sharing applications and automating workflows.
Docker provides a vehicle for developers to quickly assemble their applications from components. It eliminates the friction between development, quality assurance and production environments. Thus, IT can ship applications faster and run them unchanged on laptops, on data center virtual machines, and in any cloud.
In this exclusive interview, LinuxInsider discusses with Solomon Hykes why Docker is revitalizing Linux and the cloud.
**LinuxInsider: You have said that Docker's success is more the result of being in the right place at the right time for a trend that's much bigger than Docker. Why is that important to users?**
**Solomon Hykes**: There is always an element of being in the right place at the right time. We worked on this concept for a long time. Until recently, the market was not ready for this kind of technology. Then it was, and we were there. Also, we were very deliberate to make the technology flexible and very easy to get started using.
**LI: Is Docker a new cloud technology or merely a new way to do cloud storage?**
**Hykes**: Containers in themselves are just an enabler. The really big story is how it changes the software model enormously. Developers are creating new kinds of applications. They are building applications that do not run on only one machine. There is a need for completely new architecture. At the heart of that is independence from the machine.
The problem for the developer is to create the kind of software that can run independently on any kind of machine. You need to package it up so it can be moved around. You need to cross that line. That is what containers do.
**LI: How analogous is the software technology to traditional cargo shipping in containers?**
**Hykes**: That is a very apt example. It is the same thing for shipping containers. The innovation is not in the box. It is in how the automation handles millions of those boxes moving around. That is what is important.
**LI: How is Docker affecting the way developers build their applications?**
**Hykes**: The biggest way is it helps them structure their applications for a better distributive system. Another distributive application is Gmail. It does not run on just one application. It is distributive. Developers can package the application as a series of services. That is their style of reasoning when they design. It brings the tooling up to the level of design.
**LI: What led you to this different architecture approach?**
**Hykes**: What is interesting about this process is that we did not invent this model. It was there. If you look around, you see this trend where developers are increasingly building distributive applications where the tooling is inadequate. Many people have tried to deal with the existing tooling level. This is a new architecture. When you come up with tools that support this new model, the logical thing to do is tell the developer that the tools are out of date and are inadequate. So throw away the old tools and here are the new tools.
**LI: How much friction did you encounter from developers not wanting to throw away their old tools?**
**Hykes**: That approach sounds perfectly reasonable and logical. But in fact it is very hard to get developers to throw away their tools. And for IT departments the same thing is very true. They have legacy performance to support. So most of these attempts to move into next-generation tools have failed. They ask too much of the developers from day one.
**LI: How did you combat that reaction from developers?**
**Hykes**: We made a very conscious effort with Docker to insert the technology into an existing toolbox. We did not want to turn the developer's world upside down on the first day. Instead, we showed them incremental improvements so that over time the developers discovered more things they could do with Docker. So the developers could transition into the new architecture using the new tools at their own pace. That makes all the difference in the world.
**LI: What reaction are you seeing from this strategy?**
**Hykes**: When I ask people using Docker today how revolutionary it is, some say they are not using it in a revolutionary way. It is just a little improvement in my toolbox. That is the point. Others say that they jumped all in on the first day. Both responses are OK. Everyone can take their time moving toward that new model.
**LI: So is it a case of integrating Docker into existing platforms, or is a complete swap of technology required to get the full benefit?**
**Hykes**: Developers can go either way. There is a lot of demand for Docker native. But there is a whole ecosystem of new tools and companies competing to build brand new platforms entirely build on top of Docker. Over time the world is trending towards Docker native, but there is no rush. We totally support the idea of developers using bits and pieces of Docker in their existing platform forever. We encourage that.
**LI: What about Docker's shared Linux kernel architecture?**
**Hykes**: There are two steps involved in answering that question. What Docker does is become a layer on top of the Linux kernel. It exposes an abstraction function. It takes advantage of the underlying system. It has access to all of the Linux features. It also takes advantage of the networking stack and the storage subsystem. It uses the abstraction feature to map what developers need.
**LI: How detailed a process is this for developers?**
**Hykes**: As a developer, when I make an application I need a run-time that can run my application in a sandbox environment. I need a packaging system that makes it easy to move it around to other machines. I need a networking model that allows my application to talk to the outside world. I need storage, etc. We abstract ... the gritty details of whatever the kernel does right now.
**LI: Why does this benefit the developer?**
**Hykes**: There are two really big advantages to that. The first is simplicity. Developers can actually be productive now because that abstraction is easier for them to comprehend and is designed for that. The system APIs are designed for the system. What the developer needs is a consistent abstraction that works everywhere.
The second advantage is that over time you can support more systems. For example, early on Docker could only work on a single distribution of Linux under very narrow versions of the kernel. Over time, we expanded the surface area for the number of systems out there that Docker supports natively. So now you can run Docker on every major Linux distribution and in combination with many more networking and storage features.
**LI: Does this functionality trickle down to nondevelopers, or is the benefit solely targeting developers?**
**Hykes**: Every time we expand that surface area, every single developer that uses the Docker abstraction benefits from that too. So every application running Docker gets the added functionality every time the Docker community adds to the expansion. That is the thing that benefits all users. Without that universal expansion, every single developer would not have time to invest to update. There is just too much to support.
**LI: What about Microsoft's recent announcement that it was shipping Docker support with Windows?**
**Hykes**: If you think of Docker as a very narrow and very simple tool, then why would you roll out support for Windows? The whole point is that over time, you can expand the reach of that abstraction. Windows works very differently, obviously. But now that Microsoft has committed to adding features to Windows 10, it exposes the functionality required to run Docker. That is real exciting.
Docker still has to be ported to Windows, but Microsoft has committed to contributing in a major way to the port. Realize how far Microsoft has come in doing this. Microsoft is doing this fully upstream in a completely native, open source way. Everyone installing Windows 10 will get Docker preinstalled.
**LI: What lies ahead for growing Docker's feature set and user base?**
**Hykes**: The community has a lot of features on the drawing board. Most of them have to do with more improved tools for developers to build better distributive applications. A toolkit implies having a series of tools with each tool designed for one job.
In each of these subsystems, there is a need for new tools. In each of these areas, you will see an enormous amount of activity in the community in terms of contributions and designs. In that regard, the Docker project is enormously ambitious. The ability to address each of these areas will ensure that developers have a huge array of choices without fragmentation.
--------------------------------------------------------------------------------
via: http://www.linuxinsider.com/story/Docker-CTO-Solomon-Hykes-to-Devs-Have-It-Your-Way-81504.html
作者Jack M. Germain
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[1]:https://www.docker.com/

View File

@ -0,0 +1,120 @@
The Curious Case of the Disappearing Distros
================================================================================
![](http://www.linuxinsider.com/ai/828896/linux-distros.jpg)
"Linux is a big game now, with billions of dollars of profit, and it's the best thing since sliced bread, but corporations are taking control, and slowly but systematically, community distros are being killed," said Google+ blogger Alessandro Ebersol. "Linux is slowly becoming just like BSD, where companies use and abuse it and give very little in return."
Well the holidays are pretty much upon us at last here in the Linux blogosphere, and there's nowhere left to hide. The next two weeks or so promise little more than a blur of forced social occasions and too-large meals, punctuated only -- for the luckier ones among us -- by occasional respite down at the Broken Windows Lounge.
Perhaps that's why Linux bloggers seized with such glee upon the good old-fashioned mystery that came up recently -- delivered in the nick of time, as if on cue.
"Why is the Number of Linux Distros Declining?" is the [question][1] posed over at Datamation, and it's just the distraction so many FOSS fans have been needing.
"Until about 2011, the number of active distributions slowly increased by a few each year," wrote author Bruce Byfield. "By contrast, the last three years have seen a 12 percent decline -- a decrease too high to be likely to be coincidence.
"So what's happening?" Byfield wondered.
It would be difficult to imagine a more thought-provoking question with which to spend the Northern hemisphere's shortest days.
### 'There Are Too Many Distros' ###
![](http://www.linuxinsider.com/images/article_images/linuxgirl_bg_pinkswirl_150x245.jpg)
"That's an easy question," began blogger [Robert Pogson][2]. "There are too many distros."
After all, "if a fanatic like me can enjoy life having sampled only a dozen distros, why have any more?" Pogson explained. "If someone has a concept different from the dozen or so most common distros, that concept can likely be demonstrated by documenting the tweaks and package-lists and, perhaps, some code."
Trying to compete with some 40,000 package repositories like Debian's, however, is "just silly," he said.
"No startup can compete with such a distro," Pogson asserted. "Why try? Just use it to do what you want and tell the world about it."
### 'I Don't Distro-Hop Anymore' ###
The major existing distros are doing a good job, so "we don't need so many derivative works," Google+ blogger Kevin O'Brien agreed.
"I know I don't 'distro-hop' anymore, and my focus is on using my computer to get work done," O'Brien added.
"If my apps run fine every day, that is all that I need," he said. "Right now I am sticking with Ubuntu LTS 14.04, and probably will until 2016."
### 'The More Distros, the Better' ###
It stands to reason that "as distros get better, there will be less reasons to roll your own," concurred [Linux Rants][3] blogger Mike Stone.
"I think the modern Linux distros cover the bases of a larger portion of the Linux-using crowd, so fewer and fewer people are starting their own distribution to compensate for something that the others aren't satisfying," he explained. "Add to that the fact that corporations are more heavily involved in the development of Linux now than they ever have been, and they're going to focus their resources."
So, the decline isn't necessarily a bad thing, as it only points to the strength of the current offerings, he asserted.
At the same time, "I do think there are some negative consequences as well," Stone added. "Variation in the distros is a way that Linux grows and evolves, and with a narrower field, we're seeing less opportunity to put new ideas out there. In my mind, the more distros, the better -- hopefully the trend reverses soon."
### 'I Hope Some Diversity Survives' ###
Indeed, "the era of novelty and experimentation is over," Google+ blogger Gonzalo Velasco C. told Linux Girl.
"Linux is 20+ years old and got professional," he noted. "There is always room for experimentation, but the top 20 are here since more than a decade ago.
"Godspeed GNU/Linux," he added. "I hope some diversity survives -- especially distros without Systemd; on the other hand, some standards are reached through consensus."
### A Question of Package Managers ###
There are two trends at work here, suggested consultant and [Slashdot][4] blogger Gerhard Mack.
First, "there are fewer reasons to start a new distro," he said. "The basic nuts and bolts are mostly done, installation is pretty easy across most distros, and it's not difficult on most hardware to get a working system without having to resort to using the command line."
The second thing is that "we are seeing a reduction of distros with inferior package managers," Mack suggested. "It is clear that .deb-based distros had fewer losses and ended up with a larger overall share."
### Survival of the Fittest ###
It's like survival of the fittest, suggested consultant Rodolfo Saenz, who is certified in Linux, IBM Tivoli Storage Manager and Microsoft Active Directory.
"I prefer to see a strong Linux with less distros," Saenz added. "Too many distros dilutes development efforts and can confuse potential future users."
Fewer distros, on the other hand, "focuses development efforts into the stronger distros and also attracts new potential users with clear choices for their needs," he said.
### All About the Money ###
Google+ blogger Alessandro Ebersol also saw survival of the fittest at play, but he took a darker view.
"Linux is a big game now, with billions of dollars of profit, and it's the best thing since sliced bread," Ebersol began. "But corporations are taking control, and slowly but systematically, community distros are being killed."
It's difficult for community distros to keep pace with the ever-changing field, and cash is a necessity, he conceded.
Still, "Linux is slowly becoming just like BSD, where companies use and abuse it and give very little in return," Ebersol said. "It saddens me, but GNU/Linux's best days were 10 years ago, circa 2002 to 2004. Now, it's the survival of the fittest -- and of course, the ones with more money will prevail."
### 'Fewer Devs Care' ###
SoylentNews blogger hairyfeet focused on today's altered computing landscape.
"The reason there are fewer distros is simple: With everybody moving to the Google Playwall of Android, and Windows 10 looking to be the next XP, fewer devs care," hairyfeet said.
"Why should they?" he went on. "The desktop wars are over, MSFT won, and the mobile wars are gonna be proprietary Google, proprietary Apple and proprietary MSFT. The money is in apps and services, and with a slow economy, there just isn't time for pulling a Taco Bell and rerolling yet another distro.
"For the few that care about Linux desktops you have Ubuntu, Mint and Cent, and that is plenty," hairyfeet said.
### 'No Less Diversity' ###
Last but not least, Chris Travers, a [blogger][5] who works on the [LedgerSMB][6] project, took an optimistic view.
"Ever since I have been around Linux, there have been a few main families -- [SuSE][7], [Red Hat][8], Debian, Gentoo, Slackware -- and a number of forks of these," Travers said. "The number of major families of distros has been declining for some time -- Mandrake and Connectiva merging, for example, Caldera disappearing -- but each of these families is ending up with fewer members as well.
"I think this is a good thing," he concluded.
"The big community distros -- Debian, Slackware, Gentoo, Fedora -- are going strong and picking up a lot of the niche users that other distros catered to," he pointed out. "Many of these distros are making it easier to come up with customized variants for niche markets. So what you have is a greater connectedness within the big distros, and no less diversity."
--------------------------------------------------------------------------------
via: http://www.linuxinsider.com/story/The-Curious-Case-of-the-Disappearing-Distros-81518.html
作者Katherine Noyes
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[1]:http://www.datamation.com/open-source/why-is-the-number-of-linux-distros-declining.html
[2]:http://mrpogson.com/
[3]:http://linuxrants.com/
[4]:http://slashdot.org/
[5]:http://ledgersmbdev.blogspot.com/
[6]:http://www.ledgersmb.org/
[7]:http://www.novell.com/linux
[8]:http://www.redhat.com/

View File

@ -0,0 +1,100 @@
The Good, The Bad And The Ugly Of Linux In 2014
================================================================================
![](http://itsfoss.itsfoss.netdna-cdn.com/wp-content/uploads/2014/12/Buggest_Linux_Stories.jpeg)
Year 2014 is coming to an end and this is the time to summarize some of the **biggest Linux stories in year 2014**. All year round we have followed some good, some bad and some ugly stories related to Linux and Open Source. Let's have a quick recap of how the year 2014 went for Linux.
### The Good ###
First and foremost, lets see what were the positive stories for Linux lovers in 2014.
#### Netflix on Linux ####
![](http://itsfoss.itsfoss.netdna-cdn.com/wp-content/uploads/2014/12/netflix-linux.jpg)
Linux users have been trying several workarounds to make Netflix work on Linux, from using Wine to [using beta features in Chrome][1]. The good thing is that Netflix finally brought native support to Linux in year 2014, bringing smiles to the faces of Linux users where Netflix is available. People would still have to rely on workarounds to [use Netflix outside the US][2] (and outside other countries where Netflix is available officially).
#### Open Source/Linux adoption in European countries ####
![](http://itsfoss.itsfoss.netdna-cdn.com/wp-content/uploads/2014/12/OpenSource_World.jpg)
Give the credit to economic meltdown, if you want, but Linux and Open Source adoption has been gripping European cities. I am not talking about Linux adoption by individuals but by government and authorities. All year round we heard stories of how [French][3] and [Italian cities saved millions of Euro by switching to Linux][4] and Open Office. And the trend was not limited just to Italy and France, the same could be seen in Spain, [Switzerland][5] and [Germany][6].
#### Windows 10 takes inspiration from Linux ####
![](http://itsfoss.itsfoss.netdna-cdn.com/wp-content/uploads/2014/10/Windows10_Linux.jpg)
The upcoming release of Microsofts flagship operating system, Windows will be called Windows 10 (no Windows 9). And Windows 10 boasts of a number of new features. But these new features are new to Microsoft world only and most of those have been existing in Linux world for years. Have a look at such [Windows 10 features copied from Linux][7].
### The Bad ###
Everything was not rosy for Linux in year 2014. Some events happened that dented the image of Linux/Open Source.
#### Heartbleed ####
![](http://itsfoss.itsfoss.netdna-cdn.com/wp-content/uploads/2014/12/heartbleed-bug.jpg)
In April this year, a vulnerability was detected in [OpenSSL][8]. This bug, named [Heartbleed][9], impacted over half a million secured websites including Facebook and Google. The bug actually allowed anyone to read memory of the system and hence giving the access to the key that is used to encrypt the traffic. A [comic at xkcd explains the Heartbleed][10] in easier way. Needless to say that this vulnerability was fixed in an update to OpenSSL.
#### Shellshock ####
![](http://itsfoss.itsfoss.netdna-cdn.com/wp-content/uploads/2014/09/shellshock_Linux_check.jpeg)
As if Heartbleed was not enough, Linux world was further rocked in September with a vulnerability in Bash. The bug, named [Shellshock][11], further put Linux system at risk of remote attacks. The vulnerability was exploited by hackers to launch DDoS attacks. An update to Bash version supposedly fixed the issue.
#### Ubuntu Phone and Steam Console ####
![](http://itsfoss.itsfoss.netdna-cdn.com/wp-content/uploads/2014/12/Ubuntu_phone.png)
Promises after promises, hopes after hopes. But even in year 2014 no one saw an Ubuntu Phone or a Steam gaming console. There was lots of talk around Ubuntu Phone though. From a February 2014 release, to September, to December, it is finally (hopefully) slotted for a February 2015 release. No information on Steam consoles though. Read more for [Ubuntu Phone specification, price and release date][12].
### The Ugly ###
Things turned ugly with war over systemd adoption.
### systemd controversy ###
![](http://itsfoss.itsfoss.netdna-cdn.com/wp-content/uploads/2014/12/Systemd_everywhere.jpg)
The [init vs systemd][13] dispute has been going on for some time. But it turned ugly in 2014 as systemd was poised to replace init on several major Linux distributions including Debian, Ubuntu, OpenSUSE, Arch Linux and Fedora. It turned so ugly that it was not just limited to websites like boycottsystemd.org. Lennart Poettering (lead developer and author of systemd) claimed in a [Google Plus post][14] that anti-systemd people were “collecting bitcoins to hire a hitman to kill him”. Lennart went on to call the Open Source community “a sick place to be in”. People have taken this battle as far as forking Debian into a new OS named [Devuan][15].
### And the weird ###
Along with the good, the bad and the ugly comes the weird and that weird is none other than Microsoft.
#### Microsoft loves Linux ####
![](http://itsfoss.itsfoss.netdna-cdn.com/wp-content/uploads/2014/12/Microsoft_Loves_Linux.png)
Yes! You read it right. [Microsoft loves Linux][16]. The same Microsoft whose CEO Steve Ballmer had once said that [Linux is cancer][17]. Change in Microsoft leadership saw some changes in its approach towards Linux and Open Source when the new CEO Satya Nadella announced that Microsoft loves Linux. This new found love for Linux is actually Microsofts attempt to make [Azure][18] as a better cloud platform. For this purpose it needs Hyper-V (core of Azure) virtualization to work with Linux. This desperation has made [Microsoft, fifth biggest contributor to Linux kernel][19].
--------------------------------------------------------------------------------
via: http://itsfoss.com/biggest-linux-stories-2014/
作者:[Abhishek][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://itsfoss.com/author/Abhishek/
[1]:http://itsfoss.com/watch-netflix-in-ubuntu-14-04/
[2]:http://itsfoss.com/easiest-watch-netflix-hulu-usa/
[3]:http://itsfoss.com/french-city-toulouse-saved-1-million-euro-libreoffice/
[4]:http://itsfoss.com/italian-city-turin-open-source/
[5]:http://itsfoss.com/170-primary-public-schools-geneva-switch-ubuntu/
[6]:http://itsfoss.com/german-town-gummersbach-completes-switch-open-source/
[7]:http://itsfoss.com/windows-10-inspired-linux/
[8]:http://en.wikipedia.org/wiki/OpenSSL
[9]:http://heartbleed.com/
[10]:http://xkcd.com/1354/
[11]:http://itsfoss.com/linux-shellshock-check-fix/
[12]:http://itsfoss.com/ubuntu-phone-specification-release-date-pricing/
[13]:http://www.tecmint.com/systemd-replaces-init-in-linux/
[14]:https://plus.google.com/+LennartPoetteringTheOneAndOnly/posts/J2TZrTvu7vd
[15]:http://debianfork.org/
[16]:http://thenewstack.io/microsoft-professes-love-for-linux-adds-support-for-coreos-cloudera-and-host-of-new-features/
[17]:http://www.theregister.co.uk/2001/06/02/ballmer_linux_is_a_cancer/
[18]:http://azure.microsoft.com/en-us/
[19]:http://www.zdnet.com/article/top-five-linux-contributor-microsoft/

View File

@ -1,88 +0,0 @@
The history of Android
================================================================================
youtube视频地址
<iframe width="640" height="360" frameborder="0" src="http://www.youtube-nocookie.com/embed/e52TSXwj774?start=0&amp;wmode=transparent" type="text/html" style="display:block"></iframe>
### Android 2.0, Éclair—blowing up the GPS industry ###
Forty-one days—that was how much time passed between Android 1.6 and 2.0. The first big version number bump for Android launched in October 2009 [on the Motorola Droid][1], the first "second generation" Android device. The Droid offered huge hardware upgrades over the G1, starting with the massive (at the time) 3.7 inch, 854×480 LCD. It brought a lot more power, too: a (still single-core) 600Mhz TI OMAP Cortex A8 with 256MB of RAM.
![The Motorola Droid stares into your soul.](http://cdn.arstechnica.net/wp-content/uploads/2014/03/2181.jpg)
The Motorola Droid stares into your soul.
The most important part of the Droid, though, was the large advertising campaign around it. The Droid was the flagship device for Verizon Wireless in the US, and with that title came a ton of ad money from America's biggest carrier. Verizon licensed the word "droid" from Lucasfilm and started up the ["Droid Does" campaign][2]—a shouty, explosion-filled set of commercials that positioned the device (and by extension, Android) as the violent, ass-kicking alternative to the iPhone. The press frequently declared the T-Mobile G1 as trying to be an “iPhone Killer," but the Droid came out and owned it.
Like the G1, the Droid had a hardware keyboard that slid out from the side of the phone. The trackball was gone, but some kind of d-pad was still mandatory, so Motorola placed a five-way d-pad on the right side of the keyboard. On the front, the Droid switched from hardware buttons to capacitive touch buttons, which were just paint on the glass touchscreen. Android 2.0 also finally allowed devices to do away with the “Call" and “End" buttons. So together with the demotion of the d-pad to the keyboard tray, the front buttons could all fit in a nice, neat strip. The result of all this streamlining was the best-looking Android device yet. The T-Mobile G1 looked like a Fisher-Price toy, but the Motorola Droid looked like an industrial tool that you could cut someone with.
![The lock and home screens from 2.0 and 1.6.](http://cdn.arstechnica.net/wp-content/uploads/2014/01/intro202.png)
The lock and home screens from 2.0 and 1.6.
Photo by Ron Amadeo
Some of Verizon's grungy ad campaign leaked over to the software, where the default wallpaper was changed from a calm, watery vista to a picture of dirty concrete. The boot animation used a pulsing, red, Hal 9000 eyeball and the default notification tone shouted "[DRRRRROOOOIIIIDDDD][3]" every time you received an e-mail. Éclair was Androids angsty teenager phase.
One of the first things Android 2.0 presented to the user was a new lock screen. Slide-to-unlock was patented by Apple, so Google went with a rotary-phone-inspired arc unlock gesture. Putting your finger on the lock icon and sliding right would unlock the device, and sliding left from the volume icon would silence the phone. A thumb naturally moves in an arc, so this felt like an even more natural gesture than sliding in a straight line.
The default homescreen layout scrapped the redundant analog clock widget and introduced what is now an Android staple: a search bar at the top of the home screen. SMS Messaging and the Android Market were also given top billing in the new layout. The app drawer tab was given a sharp redesign, too.
![The app drawers and pictures of the “Add to Home" menus.](http://cdn.arstechnica.net/wp-content/uploads/2014/01/icons.png)
The app drawers and pictures of the “Add to Home" menus.
Photo by Ron Amadeo
Android was developed at such a breakneck pace in the early days that the Android Team could never really plan for future devices when making interface art. The Motorola Droid—with its 854×480 LCD—was a huge bump up in resolution over the 320×480 G1-era devices. Nearly everything needed to be redrawn. Starting from scratch with interface art would pretty much be the main theme of Android 2.0.
Google took this opportunity to redesign almost every icon in Android, going from a cartoony look with an isometric perspective to straight-on icons done in a more serious style. The only set of icons that weren't redrawn were the status bar icons, which now look very out of place compared to the rest of the OS. These icons would hang around from Android 0.9 until 2.3.
There were a few changes to the app lineup as well. Camcorder was merged into the camera, the IM app was killed, and two new Google-made apps were added: Car Home, a launcher with big buttons designed for use while driving, and Corporate Calendar, which is identical to the regular calendar except it supports Exchange instead of Google Calendar. Weirdly, Google also included two third-party apps out of the box: Facebook and Verizon's Visual VM app. (Neither works today.) The second set of pictures displays the “Add to Home screen" menu, and it received all new art, too.
![A Places page, showing the “Navigate" option, the Navigation disclaimer, the actual Navigation screen, and the traffic info screen.](http://cdn.arstechnica.net/wp-content/uploads/2014/01/nav2.png)
A Places page, showing the “Navigate" option, the Navigation disclaimer, the actual Navigation screen, and the traffic info screen.
Photo by Ron Amadeo
Beyond a redesign, the clear headline feature of Android 2.0 was Google Maps Navigation. Google updated Maps to allow for free turn-by-turn navigation, complete with a point of interest search and text to speech, which could read the names of streets aloud just like a standalone GPS unit. Turning GPS navigation from a separate product into a free smartphone feature pretty much [destroyed][4] the standalone GPS market overnight. TomToms stock dropped almost 40 percent during the week of Android 2.0s launch.
But navigation was pretty hard to get to at first. You had to open the search box, type in a place or address, and tap on the search result. Next, after tapping on the "Navigate" button, Google showed a warning stating that Navigation was in beta and should not be trusted. After tapping on "accept," you could jump in a car, and a harsh-sounding robot voice would guide you to your destination. Hidden behind the menu button was an option to check out the traffic and accidents for the entire route. This design of Navigation hung around forever. Even when the main Google Maps interface was updated in Android 4.0, the Android 2.0 stylings in the Navigation section hung around until almost Android 4.3.
Maps would also show a route overview, which contained traffic data for your route. At first it was just licensed by the usual traffic data provider, but later, Google would use information from Android and iOS phones running Google Maps to [crowd source traffic data][5]. It was the first step in Google's dominance of the mobile map game. After all, real-time traffic monitoring is really just a matter of how many points of data you have. Today, with hundreds of millions of Google Maps users across iOS and Android, Google has become the best provider of traffic data in the world.
With Maps Navigation, Android finally found its killer app. Google was offering something no one else could. There was finally an answer to the "Why should I buy this over an iPhone?" question. Google Maps didn't require PC-based updating like many GPS units did, either. It was always up-to-date thanks to the cloud, and all of those updates were free. The only downside was that you needed an Internet connection to use Google Maps.
As was greatly publicized during the [Apple Maps fiasco][6], accurate maps have become one of the most important features of a smartphone, even if no one really appreciates them when they work. Mapping the world is really only solvable with tons of person power, and today, Googles “Geo" division is the largest in the company with more than [7,000 employees][7]. For most of these people, their job is to literally drive down every road in the world with the companys camera-filled Street View cars. After eight years of data collection, Google has more than [five million miles][8] of 360-degree Street View imagery, and Google Maps is one of the biggest, most untouchable pillars of the company.
![The Car Home screen, and, because we have room, a horizontal version of Navigation.](http://cdn.arstechnica.net/wp-content/uploads/2014/01/carhome1.png)
The Car Home screen, and, because we have room, a horizontal version of Navigation.
Photo by Ron Amadeo
Along with Google Maps Navigation came "Car Home," a large-buttoned home screen designed to help you use your phone while driving. It wasn't customizable, and each button was just a shortcut to a standard app. The Motorola Droid and its official [car dock accessory][9] had special magnets that would automatically trigger Car Home. While docked, pressing the hardware home button on the Droid would open Car Home instead of the normal home screen, and an on-screen home button led to the normal home screen.
Car Home, while useful, didnt last long—it was cut in Android 3.0 and never came back. GPS systems are almost entirely used in cars while driving, but encouraging users to do so with options like “search," which would bring up a keyboard, is something that Googles lawyers probably werent very fond of. With [Apples CarPlay][10] and Googles [Open Automotive Alliance][11], car computers are seeing a resurgence these days. This time, though, there is more of a focus on safety, and government organizations like the National Highway Traffic Safety Administration are on board to help out.
----------
![Ron Amadeo](http://cdn.arstechnica.net/wp-content//uploads/authors/ron-amadeo-sq.jpg)
[Ron Amadeo][a] / Ron is the Reviews Editor at Ars Technica, where he specializes in Android OS and Google products. He is always on the hunt for a new gadget and loves to rip things apart to see how they work.
[@RonAmadeo][t]
--------------------------------------------------------------------------------
via: http://arstechnica.com/gadgets/2014/06/building-android-a-40000-word-history-of-googles-mobile-os/10/
译者:[译者ID](https://github.com/译者ID) 校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[1]:http://arstechnica.com/gadgets/2009/12/review-of-the-motorola-droid/
[2]:http://www.youtube.com/watch?v=e52TSXwj774
[3]:http://www.youtube.com/watch?v=UBL47tHrvMA
[4]:http://techcrunch.com/2009/10/28/googles-new-mobile-app-cuts-gps-nav-companies-at-the-knees/
[5]:http://googleblog.blogspot.com/2009/08/bright-side-of-sitting-in-traffic.html
[6]:http://arstechnica.com/apple/2012/09/apple-ceo-tim-cook-apologizes-for-ios-6-maps-promises-improvements/
[7]:http://www.businessinsider.com/apple-has-7000-fewer-people-working-on-maps-than-google-2012-9
[8]:https://developers.google.com/events/io/sessions/383278298
[9]:http://www.amazon.com/Motorola-Generation-Vehicle-Charger-Packaging/dp/B002Y3BYQA
[10]:http://arstechnica.com/apple/2014/03/ios-in-the-car-becomes-carplay-coming-to-select-dashboards-this-year/
[11]:http://arstechnica.com/information-technology/2014/01/open-automotive-alliance-aims-to-bring-android-inside-the-car/
[a]:http://arstechnica.com/author/ronamadeo
[t]:https://twitter.com/RonAmadeo

View File

@ -1,4 +1,3 @@
惊现译者CHINAANSHE 翻译!!
How to configure HTTP load balancer with HAProxy on Linux
================================================================================
Increased demand on web based applications and services are putting more and more weight on the shoulders of IT administrators. When faced with unexpected traffic spikes, organic traffic growth, or internal challenges such as hardware failures and urgent maintenance, your web application must remain available, no matter what. Even modern devops and continuous delivery practices can threaten the reliability and consistent performance of your web service.

View File

@ -1,143 +0,0 @@
7 Things to Do After Installing Ubuntu 14.10 Utopic Unicorn
================================================================================
After youve installed or [upgraded to Ubuntu 14.10][1], known by its codename Utopic Unicorn, there are a few things you should do to get it up and running in tip-top shape.
Whether youve performed a fresh install or upgraded an existing version, heres our biannual checklist of post-install tasks to get started with.
### 1. Get Acquainted ###
![The Ubuntu Browser](http://www.omgubuntu.co.uk/wp-content/uploads/2014/10/Screen-Shot-2014-10-23-at-20.02.54.png)
The Ubuntu Browser
The majority of changes rocking up in Ubuntu 14.10 arent immediately visible (save for some new wallpapers). That said, there are a bunch of freshly updated apps to get familiar with.
Preinstalled are the latest versions of workhorse staples **Mozilla Firefox**, **Thunderbird**, and **LibreOffice**. Dig a little deeper and youll also find Evince 3.14, and a brand new version of the “Ubuntu Web Browser” app, used for handling web-apps.
While youre getting familiar, be sure to fire up the Software Updater tool to **check for any impromptu issues Ubuntu has found and fixed** post-release. Yes, I know: you only just upgraded. But, even so — bugs dont adhere to deadlines like developers do!
### 2. Personalise The Desktop ###
![New wallpapers in 14.10](http://www.omgubuntu.co.uk/wp-content/uploads/2014/10/wallpapers-new-in-14.10.jpg)
New wallpapers in 14.10
Its your desktop PC, so dont put off making it look, feel and behave how you like.
Your first port of call might be changing the desktop wallpaper to one of the [twelve stunning new backgrounds][2] included in 14.10, ranging from retro record player to illustrated unicorn.
Wallpapers and a host of other theme and layout options are accessible from the **Appearance Settings** pane of the System Settings app. From here you can:
- Switch to a different theme
- Adjust launcher size & behaviour
- Enable workspaces & desktop icons
- Put app menus back into app windows
For some nifty new themes be sure to check out our **themes & icons category** here on the site.
### 3. Install Graphics Card Drivers ###
![](http://www.omgubuntu.co.uk/wp-content/uploads/2014/10/additional-drivers.jpg)
If you plan on playing the [latest Steam games][3], watching high-definition video or working with graphically intensive software youll want to enable the latest Linux graphics drivers available for your hardware.
Ubuntu makes this easy:
- Open up the Software & Updates tool from the Unity Dash
- Click the Additional Drivers tab
- Follow any on-screen prompts to check, install and apply changes
### 4. Enable Music & Video Codecs ###
![](http://www.omgubuntu.co.uk/wp-content/uploads/2014/10/msuci.jpg)
Games sorted, now to make **music and video files work just as well**.
Most popular formats, .mp3, .m4a, .mov, etc., will work fine in Ubuntu — after a little cajoling. Patent-encumbered codecs cannot ship in Ubuntu for legal reasons, leaving you unable to play popular audio and video formats out of the (invisible) box.
Dont panic. To play music or watch video you can install all of the codecs you need quickly, and through the Ubuntu Software Center.
- [Install Third-Party Codecs][4]
### 5. Pimp Your Privacy ###
![](http://www.omgubuntu.co.uk/wp-content/uploads/2014/10/privacy-in-ubuntu-settingd.jpg)
The Unity Dash is a great one-stop hub for finding stuff, be it a PDF file lurking on your computer or the current weather forecast in Stockholm, Sweden.
But the diversity of data surfaced through the Dash in just a few keystrokes doesnt suit everyones needs. So you may want to dial down the noise and restrict what shows up.
To stop certain files and folders from searched in the Dash and/or to disable all online results returned for a query, head to the **Privacy & Security** section in System Settings.
Here youll find all the tools, options and configuration switches you need, including options to:
- Choose what apps & files can be searched from the Dash
- Whether to require a password on waking from suspend
- Disable sending error reports to Canonical
- Turn off all online features of the Dash
### 6. Swap The Default Apps For Your Faves ###
![Make it yours](http://www.omgubuntu.co.uk/wp-content/uploads/2014/10/more-apps.jpg)
Make it yours
Ubuntu comes preloaded with a tonne of apps, including a web browser (Mozilla Firefox), e-mail client (Thunderbird), music player (Rhythmbox), office suite (LibreOffice) and instant messenger (Empathy Instant Messenger).
All well and good, theyre not everyones cup of tea. The Ubuntu Software Center is home to a slew of app alternatives, including:
- VLC Versatile media player
- Steam Games distribution platform
- [Geary — Easy-to-use desktop e-mail app][5]
- GIMP Advanced image editor similar to Photoshop
- Clementine — Stylish, fully-featured music player
- Chromium open-source version of Google Chrome (without Flash)
The Ubuntu Software Center plays host to a huge range of other apps, many of which you might not have heard of before. Since most apps are free, dont be scared to try things out!
### 7. Grab The Essentials ###
![Netflix in Chrome on Ubuntu](http://www.omgubuntu.co.uk/wp-content/uploads/2014/10/netflix-linux-working-in-chrome.jpg)
Netflix in Chrome on Ubuntu
Software Center apps aside, you may also wish to grab big-name apps like Skype, Spotify and Dropbox.
Google Chrome is also a must if you wish to watch Netflix natively on Ubuntu or benefit from the latest, safest version of Flash.
Most of these apps are available to download directly from their respective websites and can be installed on Ubuntu with a couple of clicks.
- [Download Skype for Linux][6]
- [Download Google Chrome for Linux][7]
- [Download Dropbox for Linux][8]
- [How to Install Spotify in Ubuntu][9]
Talking of Google Chrome — did you know you can (unofficially) [install and run Android apps through it?][9] Oh yes ;)
#### Finally… ####
The items above are not the only ones applicable post-upgrade. Read through and follow the ones that chime with you, and feel free to ignore those that dont.
Secondly, this is a list for those whove upgraded to or installed Ubuntu 14.10. Were not going walk you through carving it up into something that isnt Ubuntu. If Unity isnt your thing thats fine, but be logical about it; save yourself some time and install one of the official flavours or offshoots instead.
--------------------------------------------------------------------------------
via: http://www.omgubuntu.co.uk/2014/10/7-things-to-do-after-installing-ubuntu-14-10-utopic-unicorn
作者:[Joey-Elijah Sneddon][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:https://plus.google.com/117485690627814051450/?rel=author
[1]:http://www.omgubuntu.co.uk/2014/10/ubuntu-14-10-release-download-now
[2]:http://www.omgubuntu.co.uk/2014/09/ubuntu-14-10-wallpaper-contest-winners
[3]:http://www.omgubuntu.co.uk/category/gaming
[4]:https://apps.ubuntu.com/cat/applications/ubuntu-restricted-extras/
[5]:http://www.omgubuntu.co.uk/2014/09/new-shotwell-geary-stable-release-available-to-downed
[6]:http://www.skype.com/en/download-skype/skype-for-linux/
[7]:http://www.google.com/chrome
[8]:https://www.dropbox.com/install?os=lnx
[9]:http://www.omgubuntu.co.uk/2013/01/how-to-install-spotify-in-ubuntu-12-04-12-10
[10]:http://www.omgubuntu.co.uk/2014/09/install-android-apps-ubuntu-archon

View File

@ -1,5 +1,3 @@
[felixonmars translating...]
“ntpq -p” output
================================================================================
The [Gentoo][1] (and others?) [incomplete man pages for “ntpq -p”][2] merely give the description: “*Print a list of the peers known to the server as well as a summary of their state.*”

View File

@ -1,4 +1,3 @@
[felixonmars translating...]
How To Use Emoji Anywhere With Twitter's Open Source Library
================================================================================
> Embed them in webpages and other projects via GitHub.

View File

@ -1,189 +0,0 @@
johnhoow translating...
Important 10 Linux ps command Practical Examples
================================================================================
As an Operating System which inspired from Unix, Linux has a built-in tool to capture current processes on the system. This tool is available in command line interface.
### What is PS Command ###
From its manual page, PS gives a snapshot of the current processes. It will “capture” the system condition at a single point in time. If you want repetitive updates in real time, we can use the top command.
PS supports three (3) types of usage syntax styles.
1. UNIX style, which may be grouped and **must** be preceded by a dash
2. BSD style, which may be grouped and **must not be** used with a dash
3. GNU long options, which are preceded by two dashes
We can mix those styles, but conflicts can appear. In this article, we will use the UNIX style. Here are some examples of the PS command in daily use.
### 1. Run ps without any options ###
This is a very basic **ps** usage. Just type ps on your console to see its result.
![ps with no options](http://blog.linoxide.com/wp-content/uploads/2014/10/ps_no_options.png)
By default, it will show us 4 columns of information.
- PID is a Process ID of the running command (CMD)
- TTY is a place where the running command runs
- TIME tell about how much time is used by CPU while running the command
- CMD is a command that run as current process
This information is displayed in unsorted result.
### 2. Show all current processes ###
To do this, we can use the **-a** option. As we can guess, **-a stands for “all”**. Meanwhile, x will show all processes, even those not associated with any TTY (terminal).
$ ps -ax
The result might be long. To make it easier to read, combine it with the less command.
$ ps -ax | less
![ps all information](http://blog.linoxide.com/wp-content/uploads/2014/10/ps_ax.png)
### 3. Filter processes by its user ###
For some situation we may want to filter processes by user. To do this, we can use **-u** option. Let say we want to see what processes which run by user pungki. So the command will be like below
$ ps -u pungki
![filter by user](http://blog.linoxide.com/wp-content/uploads/2014/10/ps__u.png)
### 4. Filter processes by CPU or memory usage ###
Another thing that you might want to do is filter the result by CPU or memory usage. With this, you can grab information about which processes consume your resources. To do this, we can use the **aux options**. Here’s an example of it :
$ ps -aux | less
![show all information](http://blog.linoxide.com/wp-content/uploads/2014/10/ps_aux.png)
Since the result can be a long list, we can **pipe** the output of the ps command into the less command.
By default, the result will be in unsorted form. If we want to sort by particular column, we can add **--sort** option into ps command.
Sort by the highest **CPU utilization** in descending order
$ ps -aux --sort -pcpu | less
![sort by cpu usage](http://blog.linoxide.com/wp-content/uploads/2014/10/ps_aux_sort_cpu.png)
Sort by the highest **Memory utilization** in descending order
$ ps -aux --sort -pmem | less
![sort by memory usage](http://blog.linoxide.com/wp-content/uploads/2014/10/ps_aux_sort_mem.png)
Or we can combine it into a single command and display only the top ten results :
$ ps -aux --sort -pcpu,+pmem | head -n 10
### 5. Filter processes by its name or process ID ###
To do this, we can use the **-C option** followed by the keyword. Let’s say we want to show processes named getty. We can type :
$ ps -C getty
![filter by its name or process ID](http://blog.linoxide.com/wp-content/uploads/2014/10/ps_C.png)
If we want to show more detail about the result, we can add the -f option to show it in full format listing. The above command will look like below :
$ ps -f -C getty
![filter by its name or process ID](http://blog.linoxide.com/wp-content/uploads/2014/10/ps_C_f.png)
### 6. Filter processes by thread of process ###
If we need to know the thread of a particular process, we can use **-L option** followed by its Process ID (PID). Heres an example of **-L option** in action :
$ ps -L 1213
![show processes in threaded view](http://blog.linoxide.com/wp-content/uploads/2014/10/ps_L.png)
As we can see, the PID remains the same value, but the LWP column, which shows the thread IDs, shows different values.
### 7. Show processes in hierarchy ###
Sometime we want to see the processes in hierarchical form. To do this, we can use **-axjf** options.
$ps -axjf
![show in hierarchy](http://blog.linoxide.com/wp-content/uploads/2014/10/ps_axjf.png)
Or, another command which we can use is pstree.
$ pstree
![show information in hierarchy](http://blog.linoxide.com/wp-content/uploads/2014/10/pstree.png)
### 8. Show security information ###
If we want to see who is currently logged on into your server, we can see it using the ps command. There are some options that we can use to fulfill our needs. Heres some examples :
$ ps -eo pid,user,args
**Option -e** will show you all processes while **-o option** will control the output. **Pid**, **User and Args** will show you the **Process ID**, **the User who run the application** and **the running application**.
![show security information](http://blog.linoxide.com/wp-content/uploads/2014/10/ps_security_1.png)
The keyword / user-defined format that can be used with **-e option** are **args, cmd, comm, command, fname, ucmd, ucomm, lstart, bsdstart and start**.
### 9. Show every process running as root (real &amp; effective ID) in user format ###
System admin may want to see what processes are being run by root and other information related to it. Using ps command, we can do by this simple command :
$ ps -U root -u root u
The **-U parameter** will select by **real user ID (RUID)**. It selects the processes whose real user name or ID is in the userlist list. The real User ID identifies the user who created the process.
While the **-u parameter** will select by effective user ID (EUID).
The last **u** parameter will display the output in user-oriented format, which contains the **User, PID, %CPU, %MEM, VSZ, RSS, TTY, STAT, START, TIME and COMMAND** columns.
Heres the output of the above command.
![show real and effective User ID](http://blog.linoxide.com/wp-content/uploads/2014/10/ps_root_real_effective_ID.png)
### 10. Use PS in a realtime process viewer ###
ps will display a report of what happens in your system. The result will be a static report.
Let’s say we want to filter processes by CPU and memory usage as in point 4 above, and we want the report to be updated every second. We can do it by **combining the ps command with the watch command** on Linux.
Heres the command :
$ watch -n 1 'ps -aux --sort -pmem,-pcpu'
![combine ps with watch](http://blog.linoxide.com/wp-content/uploads/2014/10/ps_watch_1.png)
If you feel the report is too long, **we can limit it** by - let say - the top 20 processes. We can add **head** command to do it.
$ watch -n 1 'ps -aux --sort -pmem,-pcpu | head -20'
![combine ps with watch](http://blog.linoxide.com/wp-content/uploads/2014/10/ps_watch_2.png)
This live reporter **is not** like top or htop, of course. **But the advantage of using ps** to make a live report is that you can customize the fields. You can choose which fields you want to see.
For example, **if you need only the pungki user shown**, then you can change the command to become like this :
$ watch -n 1 'ps -aux -U pungki u --sort -pmem,-pcpu | head -20'
![combine ps with watch](http://blog.linoxide.com/wp-content/uploads/2014/10/ps_watch_3.png)
### Conclusion ###
You may use **ps** in your daily work to monitor what happens on your Linux system. Actually, you can generate various types of reports using the **ps** command with the appropriate parameters.
**Another ps advantage** is that **ps** is installed by default in any kind of Linux. So you can just start to use it.
Don't forget to see the **ps documentation** by typing **man ps** on your Linux console to explore more options.
--------------------------------------------------------------------------------
via: http://linoxide.com/how-tos/linux-ps-command-examples/
作者:[Pungki Arianto][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://linoxide.com/author/pungki/

View File

@ -1,5 +1,3 @@
forsil translating ...
Undelete Files on Linux Systems
================================================================================
Often times, a computer user will delete a needed file accidentally and not have an easy way to regain or recreate the file. Thankfully, files can be undeleted. When a user deletes a file, it is not gone, only hidden for some time. Here is how it all works. On a filesystem, the system has what is called a file allocation list. This list keeps track of what files are where on the storage unit (hard-drive, MicroSD card, flash-drive, etc.). When a file is deleted, the filesystem will perform one of two tasks on the allocation table. The file's entry in the file allocation table is marked as "free space", or the file's entry in the list is erased and then the space is marked as free. Now, if a file needs to be placed on the storage unit, the operating system will put the file in the space marked as empty. After the new file is written to the "empty space", the deleted file is now gone forever. When a deleted file is to be recovered, the user must not manipulate any files because if the "empty space" is used, then the file can never be retrieved.

View File

@ -1,146 +0,0 @@
(translating by runningwater)
How To Create A Bootable Ubuntu USB Drive For Mac In OS X
================================================================================
![](http://itsfoss.itsfoss.netdna-cdn.com/wp-content/uploads/2014/11/Create_bootable_Ubuntu_USB_Mac_OS_X.jpg)
I bought a Macbook Air yesterday after Dell lost my laptop from their service centre last month. And among the first few things I did was to dual boot Mac OS X with Ubuntu Linux. Ill cover up Linux installation on Macbook in later articles as first we need to learn **how to create a bootable Ubuntu USB drive for Mac in OS X**.
While it is fairly easy to create a bootable USB in Ubuntu or in Windows, it is not the same story in Mac OS X. This is why the official Ubuntu guide suggests using a disk rather than a USB for live Ubuntu on a Mac. Considering my Macbook Air neither has a CD drive nor do I possess a DVD, I preferred to create a live USB in Mac OS X.
### Create a Bootable Ubuntu USB Drive in Mac OS X ###
As I said earlier, creating a bootable USB in Mac OS X is a tricky procedure, be it for Ubuntu or any other bootable OS. But dont worry, following all the steps carefully will have you going. Lets see what you need to for a bootable USB:
#### Step 1: Format the USB drive ####
Apple is known for defining its own standards and no surprises that Mac OS X has its own file system type known as Mac OS Extended or [HFS Plus][1]. So the first thing you would need to do is to format your USB drive in Mac OS Extended format.
To format the USB drive, plug in the USB key. Go to **Disk Utility** program from Launchpad (A rocket symboled icon in the bottom plank).
![](http://itsfoss.itsfoss.netdna-cdn.com/wp-content/uploads/2014/11/Disk_Utility_Mac.jpg)
- In Disk Utility, from the left hand pane, select the USB drive to format.
- Click the **Partition** tab in the right side pane.
- From the drop-down menu, select **1 Partition**.
- Name this drive anything you desire.
- Next, change the **Format to Mac OS Extended (Journaled)**
The screenshot below should help you.
![](http://itsfoss.itsfoss.netdna-cdn.com/wp-content/uploads/2014/11/Format_Usb_Mac_4.jpg)
There is one last thing to do before we go with formatting the USB. Click the Options button in the right side pane and make sure that the partition scheme is **GUID Partition Table**.
![](http://itsfoss.itsfoss.netdna-cdn.com/wp-content/uploads/2014/11/Format_Usb_Mac_2.jpg)
When all is set to go, just hit the **Apply** button. It will give you a warning message about formatting the USB drive. Of course hit the Partition button to format the USB drive.
#### Step 2: Download Ubuntu ####
Of course, you need to download ISO image of Ubuntu desktop. Jump to [Ubuntu website to download your favorite Ubuntu desktop OS][2]. Since you are using a Macbook Air, I suggest you to download the 64 Bit version of whichever version you want. Ubuntu 14.04 is the latest LTS version, and this is what I would recommend to you.
#### Step 3: Convert ISO to IMG ####
The file you downloaded is in ISO format but we need it to be in IMG format. This can be easily done using [hdiutil][3] command tool. Open a terminal, either from Launchpad or from the Spotlight, and then use the following command to convert the ISO to IMG format:
hdiutil convert -format UDRW -o ~/Path-to-IMG-file ~/Path-to-ISO-file
Normally the downloaded file should be in ~/Downloads directory. So for me, the command is like this:
hdiutil convert -format UDRW -o ~/Downloads/ubuntu-14.10-desktop-amd64 ~/Downloads/ubuntu-14.10-desktop-amd64.iso
![](http://itsfoss.itsfoss.netdna-cdn.com/wp-content/uploads/2014/11/ISO_to_IMG_Convert_Mac_OS_X.jpeg)
You might notice that I did not put a IMG extension to the newly converted file. It is fine as the extension is symbolic and it is the file type that matters not the file name extension. Also, the converted file may have an additional .dmg extension added to it by Mac OS X. Dont worry, its normal.
#### Step 4: Get the device number for USB drive ####
The next thing is to get the device number for the USB drive. Run the following command in terminal:
diskutil list
It will list all the disks currently available in the system. You should be able to identify the USB disk by its size. To avoid confusion, I would suggest that you should have just one USB drive plugged in. In my case, the device number is 2 (for a USB of size 8 GB): /dev/disk2
![](http://itsfoss.itsfoss.netdna-cdn.com/wp-content/uploads/2014/11/Create_bootable_USB_Mac_OSX.jpeg)
When you got the disk number, run the following command:
diskutil unmountDisk /dev/diskN
Where N is the device number for the USB you got previously. So, in my case, the above command becomes:
diskutil unmountDisk /dev/disk2
The result should be: **Unmount of all volumes on disk2 was successful**.
#### Step 5: Creating the bootable USB drive of Ubuntu in Mac OS X ####
And finally we come to the final step of creating the bootable USB drive. We shall be using [dd command][4] which is a very powerful and must be used with caution. Therefore, do remember the correct device number of your USB drive or else you might end up corrupting Mac OS X. Use the following command in terminal:
sudo dd if=/Path-to-IMG-DMG-file of=/dev/rdiskN bs=1m
Here, we are using dd (copy and convert) to copy and convert input file (if) IMG to diskN. I hope you remember where you put the converted IMG file, in step 3. For me the command was like this:
sudo dd if=~/Downloads/ubuntu-14.10-desktop-amd64.dmg of=/dev/rdisk2 bs=1m
As we are running the above command with super user privileges (sudo), it will require you to enter the password. Similar to Linux, you wont see any asterisks or something to indicate that you have entered some keyboard input, but thats the way Unix terminal behaves.
Even after you enter the password, **you won’t see any immediate output and that’s normal**. It will take a few minutes for the process to complete.
#### Step 6: Complete the bootable USB drive process ####
Once the dd command finishes its process, you may see a dialogue box saying: **The disk you inserted was not readable by this computer**.
![](http://itsfoss.itsfoss.netdna-cdn.com/wp-content/uploads/2014/11/Bootable_USB_3.png)
Dont panic. Everything is just fine. Just **dont click either of Initialize, Ignore or Eject just now**. Go back to the terminal. Youll see some information about the last completed process. For me it was:
> 1109+1 records in
> 1109+1 records out
> 1162936320 bytes transferred in 77.611025 secs (14984164 bytes/sec)
![](http://itsfoss.itsfoss.netdna-cdn.com/wp-content/uploads/2014/11/Create_bootable_USB_Mac_OSX_1.jpeg)
Now, in the terminal use the following command to eject our USB disk:
diskutil eject /dev/diskN
N is of course the device number we have used previously which is 2 in my case:
diskutil eject /dev/disk2
Once ejected, click on **Ignore** in the dialogue box that appeared previously. Now your bootable USB disk is ready. Remove it from the system.
#### Step 7: Checking your newly created bootable USB disk ####
Once you have completed the mammoth task of creating a live USB of Ubuntu in Mac OS X, it is time to test your efforts.
- Plugin the bootable USB and reboot the system.
- At start up when the Apple tune starts up, press and hold option (or alt) key.
- This should present you with the available disks to boot in to. I presume you know what to do next.
For me it showed two EFI boot options:
![](http://itsfoss.itsfoss.netdna-cdn.com/wp-content/uploads/2014/11/Ubuntu_boot_USB_Mac_OSX_1.jpeg)
I selected the first one and it took me straight to Grub screen:
![](http://itsfoss.itsfoss.netdna-cdn.com/wp-content/uploads/2014/11/Ubuntu_boot_USB_Mac_OSX.jpeg)
I hope this guide helped you to create a bootable USB disk of Ubuntu for Mac in OS X. Well see how to dual boot Ubuntu with OS X in next article. Stay tuned.
--------------------------------------------------------------------------------
via: http://itsfoss.com/create-bootable-ubuntu-usb-drive-mac-os/
作者:[Abhishek][a]
译者:[runningwater](https://github.com/runningwater)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://itsfoss.com/author/Abhishek/
[1]:http://en.wikipedia.org/wiki/HFS_Plus
[2]:http://www.ubuntu.com/download/desktop
[3]:https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man1/hdiutil.1.html
[4]:http://en.wikipedia.org/wiki/Dd_%28Unix%29

View File

@ -1,248 +0,0 @@
SPccman translating
How to create a custom backup plan for Debian with backupninja
================================================================================
Backupninja is a powerful and highly-configurable backup tool for Debian based distributions. In the [previous tutorial][1], we explored how to install backupninja and how to set up two backup actions for the program to perform. However, we should note that those examples were only "the tip of the iceberg," so to speak. In this post we will discuss how to leverage custom handlers and helpers that allow this program to be customized in order to accomplish almost any backup need that you can think of.
And believe me - that is not an overstatement, so let's begin.
### A Quick Review of Backupninja ###
One of backupninja's distinguishing features is the fact that you can just drop plain text configuration or action files in /etc/backup.d, and the program will take care of the rest. In addition, we can write custom scripts (aka "handlers") and place them in /usr/share/backupninja to handle each type of backup action. Furthermore, we can have these scripts be executed via ninjahelper's ncurses-based interactive menus (aka "helpers") to guide us to create the configuration files we mentioned earlier, minimizing the chances of human error.
### Creating a Custom Handler and Helper ###
Our goal in this case is to create a script to handle the backup of chosen home directories into a tarball with either **gzip** or **bzip2** compression, excluding music and video files. We will simply name this script home, and place it under /usr/share/backupninja.
Although you could achieve the same objective with the default tar handler (refer to /usr/share/backupninja/tar and /usr/share/backupninja/tar.helper), we will use this approach to show how to create a useful handler script and ncurses-based helper from scratch. You can then decide how to apply the same principles depending on your specific needs.
Note that since handlers are sourced from the main script, there is no need to start with #!/bin/bash at the top.
Our proposed handler (/usr/share/backupninja/home) is as follows. It is heavily commented for clarification. The getconf function is used to read the backup action's configuration file. If you specify a value for a variable here, it will override the corresponding value present in the configuration file:
# home handler script for backupninja
# Every backup file will identify the host by its FQDN
getconf backupname
# Directory to store backups
getconf backupdir
# Default compression
getconf compress
# Include /home directory
getconf includes
# Exclude files with *.mp3 and *.mp4 extensions
getconf excludes
# Default extension for the packaged backup file
getconf EXTENSION
# Absolute path to tar binary
getconf TAR `which tar`
# Absolute path to date binary
getconf DATE `which date`
# Chosen date format
DATEFORMAT="%Y-%m-%d"
# If backupdir does not exist, exit with fatal error
if [ ! -d "$backupdir" ]
then
mkdir -p "$backupdir" || fatal "Can not make directory $backupdir"
fi
# If backupdir is not writeable, exit with fatal error as well
if [ ! -w "$backupdir" ]
then
fatal "Directory $backupdir is not writable"
fi
# Set the right tar option as per the chosen compression format
case $compress in
"gzip")
compress_option="-z"
EXTENSION="tar.gz"
;;
"bzip")
compress_option="-j"
EXTENSION="tar.bz2"
;;
"none")
compress_option=""
;;
*)
warning "Unknown compress filter ($tar_compress)"
compress_option=""
EXTENSION="tar.gz"
;;
esac
# Exclude the following file types / directories
exclude_options=""
for i in $excludes
do
exclude_options="$exclude_options --exclude $i"
done
# Debugging messages, performing backup
debug "Running backup: " $TAR -c -p -v $compress_option $exclude_options \
-f "$backupdir/$backupname-"`$DATE "+$DATEFORMAT"`".$EXTENSION" \
$includes
# Redirect standard output to a file with .list extension
# and standard error to a file with .err extension
$TAR -c -p -v $compress_option $exclude_options \
-f "$backupdir/$backupname-"`$DATE "+$DATEFORMAT"`".$EXTENSION" \
$includes \
> "$backupdir/$backupname-"`$DATE "+$DATEFORMAT"`.list \
2> "$backupdir/$backupname-"`$DATE "+$DATEFORMAT"`.err
[ $? -ne 0 ] && fatal "Tar backup failed"
Next, we will create our helper file (/usr/share/backupninja/home.helper) so that our handlers shows up as a menu in **ninjahelper**:
# Backup action's description. Separate words with underscores.
HELPERS="$HELPERS home:backup_of_home_directories"
home_wizard() {
home_title="Home action wizard"
backupname=`hostname --fqdn`
# Specify default value for the time when this backup actions is supposed to run
inputBox "$home_title" "When to run this action?" "everyday at 01"
[ $? = 1 ] && return
home_when_run="when = $REPLY"
# Specify default value for backup file name
inputBox "$home_title" "\"Name\" of backups" "$backupname"
[ $? = 1 ] && return
home_backupname="backupname = $REPLY"
backupname="$REPLY"
# Specify default directory to store the backups
inputBox "$home_title" "Directory where to store the backups" "/var/backups/home"
[ $? = 1 ] && return
home_backupdir="backupdir = $REPLY"
# Specify default values for the radiobox
radioBox "$home_title" "Compression" \
"none" "No compression" off \
"gzip" "Compress with gzip" on \
"bzip" "Compress with bzip" off
[ $? = 1 ] && return;
result="$REPLY"
home_compress="compress = $REPLY "
REPLY=
while [ -z "$REPLY" ]; do
formBegin "$home_title: Includes"
formItem "Include:" /home/gacanepa
formDisplay
[ $? = 0 ] || return 1
home_includes="includes = "
for i in $REPLY; do
[ -n "$i" ] && home_includes="$home_includes $i"
done
done
REPLY=
while [ -z "$REPLY" ]; do
formBegin "$home_title: Excludes"
formItem "Exclude:" *.mp3
formItem "Exclude:" *.mp4
# Add as many “Exclude” text boxes as needed to specify other exclude options
formItem "Exclude:"
formItem "Exclude:"
formDisplay
[ $? = 0 ] || return 1
home_excludes="excludes = "
for i in $REPLY; do
[ -n "$i" ] && home_excludes="$home_excludes $i"
done
done
# Save the config
get_next_filename $configdirectory/10.home
cat > $next_filename <<EOF
$home_when_run
$home_backupname
$home_backupdir
$home_compress
$home_includes
$home_excludes
# tar binary - have to be GNU tar
TAR `which tar`
DATE `which date`
DATEFORMAT "%Y-%m-%d"
EXTENSION tar
EOF
# Backupninja requires that configuration files be chmoded to 600
chmod 600 $next_filename
}
### Running Ninjahelper ###
Once we have created our handler script named home and the corresponding helper named home.helper, let's run ninjahelper command to create a new backup action:
# ninjahelper
And choose create a new backup action.
![](https://farm8.staticflickr.com/7467/15322605273_90edaa5bc1_z.jpg)
We will now be presented with the available action types. Let's select "backup of home directories":
![](https://farm9.staticflickr.com/8636/15754955450_f3ef82217b_z.jpg)
The next screens will display the default values as set in the helper (only 3 of them are shown here). Feel free to edit the values in the text box. Particularly, refer to the scheduling section of the documentation for the right syntax for the when variable.
![](https://farm8.staticflickr.com/7508/15941578982_24b680e1c3_z.jpg)
![](https://farm8.staticflickr.com/7562/15916429476_6e84b307aa_z.jpg)
![](https://farm8.staticflickr.com/7528/15319968994_41705b7283_z.jpg)
When you are done creating the backup action, it will show in ninjahelper's initial menu:
![](https://farm8.staticflickr.com/7534/15942239225_bb66dbdb63.jpg)
Then you can press ENTER to show the options available for this action. Feel free to experiment with them, as their description is quite straightforward.
Particularly, "run this action now" will execute the backup action in debug mode immediately regardless of the scheduled time:
![](https://farm8.staticflickr.com/7508/15754955470_9af6251096_z.jpg)
Should the backup action fail for some reason, the debug will display an informative message to help you locate the error and correct it. Consider, for example, the following error messages that were displayed after running a backup action with bugs that have not been corrected yet:
![](https://farm9.staticflickr.com/8662/15754955480_487d040fcd_z.jpg)
The image above tells you that the connection needed to complete the backup action could not be completed because the remote host seems to be down. In addition, the destination directory specified in the helper file does not exist. Once you correct the problems, re-run the backup action.
A few things to remember:
- If you create a custom script in /usr/share/backupninja (e.g., foobar) to handle a specific backup action, you also need to write a corresponding helper (e.g., foobar.helper) in order to create, through ninjahelper, a file named 10.foobar (11 and onward for further actions as well) in /etc/backup.d, which is the actual configuration file for the backup action.
- You can execute your backups at any given time via ninjahelper as explained earlier, or have them run as per the specified frequency in the when variable.
### Summary ###
In this post we have discussed how to create our own backup actions from scratch and how to add a related menu in ninjahelper to facilitate the creation of configuration files. With the previous [backupninja article][2] and the present one I hope I've given you enough good reasons to go ahead and at least try it.
--------------------------------------------------------------------------------
via: http://xmodulo.com/create-custom-backup-plan-debian.html
作者:[ Gabriel Cánepa][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://xmodulo.com/author/gabriel
[1]:http://xmodulo.com/backup-debian-system-backupninja.html
[2]:http://xmodulo.com/backup-debian-system-backupninja.html

View File

@ -1,249 +0,0 @@
文章重复
How to create a custom backup plan for Debian with backupninja
================================================================================
Backupninja is a powerful and highly-configurable backup tool for Debian based distributions. In the [previous tutorial][1], we explored how to install backupninja and how to set up two backup actions for the program to perform. However, we should note that those examples were only "the tip of the iceberg," so to speak. In this post we will discuss how to leverage custom handlers and helpers that allow this program to be customized in order to accomplish almost any backup need that you can think of.
And believe me - that is not an overstatement, so let's begin.
### A Quick Review of Backupninja ###
One of backupninja's distinguishing features is the fact that you can just drop plain text configuration or action files in /etc/backup.d, and the program will take care of the rest. In addition, we can write custom scripts (aka "handlers") and place them in /usr/share/backupninja to handle each type of backup action. Furthermore, we can have these scripts be executed via ninjahelper's ncurses-based interactive menus (aka "helpers") to guide us to create the configuration files we mentioned earlier, minimizing the chances of human error.
### Creating a Custom Handler and Helper ###
Our goal in this case is to create a script to handle the backup of chosen home directories into a tarball with either gzip or bzip2 compression, excluding music and video files. We will simply name this script home, and place it under /usr/backup/ninja.
Although you could achieve the same objective with the default tar handler (refer to /usr/share/backupninja/tar and /usr/share/backupninja/tar.helper), we will use this approach to show how to create a useful handler script and ncurses-based helper from scratch. You can then decide how to apply the same principles depending on your specific needs.
Note that since handlers are sourced from the main script, there is no need to start with #!/bin/bash at the top.
Our proposed handler (/usr/share/backupninja/home) is as follows. It is heavily commented for clarification. The getconf function is used to read the backup action's configuration file. If you specify a value for a variable here, it will override the corresponding value present in the configuration file:
# home handler script for backupninja
# Every backup file will identify the host by its FQDN
getconf backupname
# Directory to store backups
getconf backupdir
# Default compression
getconf compress
# Include /home directory
getconf includes
# Exclude files with *.mp3 and *.mp4 extensions
getconf excludes
# Default extension for the packaged backup file
getconf EXTENSION
# Absolute path to tar binary
getconf TAR `which tar`
# Absolute path to date binary
getconf DATE `which date`
# Chosen date format
DATEFORMAT="%Y-%m-%d"
# If backupdir does not exist, exit with fatal error
if [ ! -d "$backupdir" ]
then
mkdir -p "$backupdir" || fatal "Can not make directory $backupdir"
fi
# If backupdir is not writeable, exit with fatal error as well
if [ ! -w "$backupdir" ]
then
fatal "Directory $backupdir is not writable"
fi
# Set the right tar option as per the chosen compression format
case $compress in
"gzip")
compress_option="-z"
EXTENSION="tar.gz"
;;
"bzip")
compress_option="-j"
EXTENSION="tar.bz2"
;;
"none")
compress_option=""
;;
*)
warning "Unknown compress filter ($tar_compress)"
compress_option=""
EXTENSION="tar.gz"
;;
esac
# Exclude the following file types / directories
exclude_options=""
for i in $excludes
do
exclude_options="$exclude_options --exclude $i"
done
# Debugging messages, performing backup
debug "Running backup: " $TAR -c -p -v $compress_option $exclude_options \
-f "$backupdir/$backupname-"`$DATE "+$DATEFORMAT"`".$EXTENSION" \
$includes
# Redirect standard output to a file with .list extension
# and standard error to a file with .err extension
$TAR -c -p -v $compress_option $exclude_options \
-f "$backupdir/$backupname-"`$DATE "+$DATEFORMAT"`".$EXTENSION" \
$includes \
> "$backupdir/$backupname-"`$DATE "+$DATEFORMAT"`.list \
2> "$backupdir/$backupname-"`$DATE "+$DATEFORMAT"`.err
[ $? -ne 0 ] && fatal "Tar backup failed"
Next, we will create our helper file (/usr/share/backupninja/home.helper) so that our handlers shows up as a menu in ninjahelper:
# Backup action's description. Separate words with underscores.
HELPERS="$HELPERS home:backup_of_home_directories"
home_wizard() {
home_title="Home action wizard"
backupname=`hostname --fqdn`
# Specify default value for the time when this backup actions is supposed to run
inputBox "$home_title" "When to run this action?" "everyday at 01"
[ $? = 1 ] && return
home_when_run="when = $REPLY"
# Specify default value for backup file name
inputBox "$home_title" "\"Name\" of backups" "$backupname"
[ $? = 1 ] && return
home_backupname="backupname = $REPLY"
backupname="$REPLY"
# Specify default directory to store the backups
inputBox "$home_title" "Directory where to store the backups" "/var/backups/home"
[ $? = 1 ] && return
home_backupdir="backupdir = $REPLY"
# Specify default values for the radiobox
radioBox "$home_title" "Compression" \
"none" "No compression" off \
"gzip" "Compress with gzip" on \
"bzip" "Compress with bzip" off
[ $? = 1 ] && return;
result="$REPLY"
home_compress="compress = $REPLY "
REPLY=
while [ -z "$REPLY" ]; do
formBegin "$home_title: Includes"
formItem "Include:" /home/gacanepa
formDisplay
[ $? = 0 ] || return 1
home_includes="includes = "
for i in $REPLY; do
[ -n "$i" ] && home_includes="$home_includes $i"
done
done
REPLY=
while [ -z "$REPLY" ]; do
formBegin "$home_title: Excludes"
formItem "Exclude:" *.mp3
formItem "Exclude:" *.mp4
# Add as many “Exclude” text boxes as needed to specify other exclude options
formItem "Exclude:"
formItem "Exclude:"
formDisplay
[ $? = 0 ] || return 1
home_excludes="excludes = "
for i in $REPLY; do
[ -n "$i" ] && home_excludes="$home_excludes $i"
done
done
# Save the config
get_next_filename $configdirectory/10.home
cat > $next_filename <<EOF
$home_when_run
$home_backupname
$home_backupdir
$home_compress
$home_includes
$home_excludes
# tar binary - have to be GNU tar
TAR `which tar`
DATE `which date`
DATEFORMAT "%Y-%m-%d"
EXTENSION tar
EOF
# Backupninja requires that configuration files be chmoded to 600
chmod 600 $next_filename
}
### Running Ninjahelper ###
Once we have created our handler script named home and the corresponding helper named home.helper, let's run ninjahelper command to create a new backup action:
# ninjahelper
And choose create a new backup action.
![](https://farm8.staticflickr.com/7467/15322605273_90edaa5bc1_z.jpg)
We will now be presented with the available action types. Let's select "backup of home directories":
![](https://farm9.staticflickr.com/8636/15754955450_f3ef82217b_z.jpg)
The next screens will display the default values as set in the helper (only 3 of them are shown here). Feel free to edit the values in the text box. Particularly, refer to the scheduling section of the documentation for the right syntax for the when variable.
![](https://farm8.staticflickr.com/7508/15941578982_24b680e1c3_z.jpg)
![](https://farm8.staticflickr.com/7562/15916429476_6e84b307aa_z.jpg)
![](https://farm8.staticflickr.com/7528/15319968994_41705b7283_z.jpg)
When you are done creating the backup action, it will show in ninjahelper's initial menu:
![](https://farm8.staticflickr.com/7534/15942239225_bb66dbdb63.jpg)
Then you can press ENTER to show the options available for this action. Feel free to experiment with them, as their description is quite straightforward.
Particularly, "run this action now" will execute the backup action in debug mode immediately regardless of the scheduled time:
![](https://farm8.staticflickr.com/7508/15754955470_9af6251096_z.jpg)
Should the backup action fail for some reason, the debug will display an informative message to help you locate the error and correct it. Consider, for example, the following error messages that were displayed after running a backup action with bugs that have not been corrected yet:
![](https://farm9.staticflickr.com/8662/15754955480_487d040fcd_z.jpg)
The image above tells you that the connection needed to complete the backup action could not be completed because the remote host seems to be down. In addition, the destination directory specified in the helper file does not exist. Once you correct the problems, re-run the backup action.
A few things to remember:
- If you create a custom script in /usr/share/backupninja (e.g., foobar) to handle a specific backup action, you also need to write a corresponding helper (e.g., foobar.helper) in order to create, through ninjahelper, a file named 10.foobar (11 and onward for further actions as well) in /etc/backup.d, which is the actual configuration file for the backup action.
- You can execute your backups at any given time via ninjahelper as explained earlier, or have them run as per the specified frequency in the when variable.
### Summary ###
In this post we have discussed how to create our own backup actions from scratch and how to add a related menu in ninjahelper to facilitate the creation of configuration files. With the previous [backupninja article][2] and the present one I hope I've given you enough good reasons to go ahead and at least try it.
--------------------------------------------------------------------------------
via: http://xmodulo.com/create-custom-backup-plan-debian.html
作者:[Gabriel Cánepa][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://xmodulo.com/author/gabriel
[1]:http://xmodulo.com/backup-debian-system-backupninja.html
[2]:http://xmodulo.com/backup-debian-system-backupninja.html

View File

@ -0,0 +1,197 @@
Centralized Secure Storage (iSCSI) “Initiator Client” Setup on RHEL/CentOS/Fedora Part III
================================================================================
**iSCSI** Initiator are the clients which use to authenticated with iSCSI target servers to access the LUNs shared from target server. We can deploy any kind of Operating systems in those locally mounted Disks, just a single package need to be install to get authenticate with target server.
![Client Initiator Setup](http://www.tecmint.com/wp-content/uploads/2014/07/Client-Initiator-Setup.jpg)
Client Initiator Setup
#### Features ####
- Can handle any kind of file systems in locally mounted Disk.
- No need of restarting the system after partitioning using fdisk.
#### Requirements ####
- [Create Centralized Secure Storage using iSCSI Target Part 1][1]
- [Create LUNs using LVM in Target Server Part 2][2]
#### My Client Setup for Initiator ####
- Operating System CentOS release 6.5 (Final)
- iSCSI Target IP 192.168.0.50
- Ports Used : TCP 3260
**Warning**: Never stop the service while LUNs Mounted in Client machines (Initiator).
### Initiator Client Setup ###
**1.** In Client side, we need to install the package **iSCSI-initiator-utils**, search for the package using following command.
# yum search iscsi
**Sample Output**
============================= N/S Matched: iscsi ================================
iscsi-initiator-utils.x86_64 : iSCSI daemon and utility programs
iscsi-initiator-utils-devel.x86_64 : Development files for iscsi-initiator-utils
**2.** Once you locate the package, just install the initiator package using yum command as shown.
# yum install iscsi-initiator-utils.x86_64
**3.** After installing the package, we need to discover the share from **Target server**. The client side commands little hard to remember, so we can use man page to get the list of commands which required to run.
# man iscsiadm
![man iscsiadm](http://www.tecmint.com/wp-content/uploads/2014/07/man-iscsiadm.jpg)
man iscsiadm
**4.** Press **SHIFT+G** to Navigate to the Bottom of the man page and scroll little up to get the login example commands. We need to replace our **Target servers IP** address in below command Discover the Target.
# iscsiadm --mode discoverydb --type sendtargets --portal 192.168.0.200 --discover
**5.** Here we got the iSCSI (iqn) qualified name from above command execution.
192.168.0.200:3260,1 iqn.2014-07.com.tecmint:tgt1
![Discover Target](http://www.tecmint.com/wp-content/uploads/2014/07/Discover-Target.jpg)
Discover Target
**6.** To log-in use the below command to attach the LUN to our local System, this will authenticate with target server and allow us to log-in into LUN.
# iscsiadm --mode node --targetname iqn.2014-07.com.tecmint:tgt1 --portal 192.168.0.200:3260 --login
![Login To Target Server](http://www.tecmint.com/wp-content/uploads/2014/07/Login-To-Target-Server.jpg)
Login To Target Server
**Note**: Use the login command and replace login with logout at end of command.
# iscsiadm --mode node --targetname iqn.2014-07.com.tecmint:tgt1 --portal 192.168.0.200:3260 --logout
![Logout from Target Server](http://www.tecmint.com/wp-content/uploads/2014/07/Logout-from-Target-Server.jpg)
Logout from Target Server
**7.** After login to the LUN, list the records of Node using.
# iscsiadm --mode node
![List Node](http://www.tecmint.com/wp-content/uploads/2014/07/List-Node.jpg)
List Node
**8.** Display all data of a particular node.
# iscsiadm --mode node --targetname iqn.2014-07.com.tecmint:tgt1 --portal 192.168.0.200:3260
**Sample Output**
# BEGIN RECORD 6.2.0-873.10.el6
node.name = iqn.2014-07.com.tecmint:tgt1
node.tpgt = 1
node.startup = automatic
node.leading_login = No
iface.hwaddress = <empty>
iface.ipaddress = <empty>
iface.iscsi_ifacename = default
iface.net_ifacename = <empty>
iface.transport_name = tcp
iface.initiatorname = <empty>
iface.bootproto = <empty>
iface.subnet_mask = <empty>
iface.gateway = <empty>
iface.ipv6_autocfg = <empty>
iface.linklocal_autocfg = <empty>
....
**9.** Then list the drive using, fdisk will list every authenticated disks.
# fdisk -l /dev/sda
![List Disks](http://www.tecmint.com/wp-content/uploads/2014/07/List-Disks.jpg)
List Disks
**10.** Run fdisk to create a new partition.
# fdisk -cu /dev/sda
![Create New Partition](http://www.tecmint.com/wp-content/uploads/2014/07/Create-New-Partition.jpg)
Create New Partition
**Note**: After Creating a Partition using fdisk, we dont need to reboot, as we used to do in our local systems, Because this is a remote shared storage mounted locally.
**11.** Format the newly created partition.
# mkfs.ext4 /dev/sda1
![Format New Partition](http://www.tecmint.com/wp-content/uploads/2014/07/Format-New-Partition.jpg)
Format New Partition
**12.** Create a Directory and mount the formatted partition.
# mkdir /mnt/iscsi_share
# mount /dev/sda1 /mnt/iscsi_share/
# ls -l /mnt/iscsi_share/
![Mount New Partition](http://www.tecmint.com/wp-content/uploads/2014/07/Mount-New-Partition.jpg)
Mount New Partition
**13.** List the Mount Points.
# df -Th
- **-T** Prints files system types.
- **-h** Prints in human readable format eg : Megabyte or Gigabyte.
![List New Partition](http://www.tecmint.com/wp-content/uploads/2014/07/List-New-Partition.jpg)
List New Partition
**14.** If we need to permanently mount the Drive use fstab entry.
# vim /etc/fstab
**15.**Append the following Entry in fstab.
/dev/sda1 /mnt/iscsi_share/ ext4 defaults,_netdev 0 0
**Note:** Use _netdev in fstab, as this is a network device.
![Auto Mount Partition](http://www.tecmint.com/wp-content/uploads/2014/07/Auto-Mount-Partition.jpg)
Auto Mount Partition
**16.** Finally check whether our fstab entry have any error.
# mount -av
- **-a** all mount point
- **-v** Verbose
![Verify fstab Entries](http://www.tecmint.com/wp-content/uploads/2014/07/Verify-fstab-Entries.jpg)
Verify fstab Entries
We have Completed Our client side configuration Successfully. Start to use the drive as we use our local system disk.
--------------------------------------------------------------------------------
via: http://www.tecmint.com/iscsi-initiator-client-setup/
作者:[Babin Lonston][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://www.tecmint.com/author/babinlonston/
[1]:http://www.tecmint.com/create-centralized-secure-storage-using-iscsi-targetin-linux/
[2]:http://www.tecmint.com/create-luns-using-lvm-in-iscsi-target/

View File

@ -0,0 +1,148 @@
Create Centralized Secure Storage using iSCSI Target on RHEL/CentOS/Fedora Part -I
================================================================================
**iSCSI** is a block level Protocol for sharing **RAW Storage Devices** over TCP/IP Networks, Sharing and accessing Storage over iSCSI, can be used with existing IP and Ethernet networks such as NICs, Switches, Routers etc. iSCSI target is a remote hard disk presented from a remote iSCSI server (or) target.
![Install iSCSI Target in Linux](http://www.tecmint.com/wp-content/uploads/2014/07/Install-iSCSI-Target-in-Linux.jpg)
Install iSCSI Target in Linux
We dont need a high resource for stable connectivity and performance in Client sides. iSCSI Server called as Target, this shares the storage from server. iSCSI Clients called as Initiator, this will access the storage which shared from Target Server. There are iSCSI adapters available in market for Large Storage services such as SAN Storages.
**Why we need a iSCSI adapter for Large storage Area?**
Ethernet adapters (NIC) are designed to transfer packetized file level data among systems, servers and storage devices like NAS storages, they are not capable for transferring block level data over Internet.
### Features of iSCSI Target ###
- Possible to run several iSCSI targets on a single machine.
- A single machine making multiple iscsi target available on the iSCSI SAN
- The target is the Storage and makes it available for initiator (Client) over the network
- These Storages are Pooled together to make available to the network is iSCSI LUNs (Logical Unit Number).
- iSCSI supports multiple connections within the same session
- iSCSI initiator discover the targets in network then authenticating and login with LUNs, to get the remote storage locally.
- We can Install any Operating systems in those locally mounted LUNs as what we used to install in our Base systems.
### Why the need of iSCSI? ###
In Virtualization we need storage with high redundancy, stability, iSCSI provides those all in low cost. Creating a SAN Storage in low price while comparing to Fiber Channel SANs, We can use the standard equipments for building a SAN using existing hardware such as NIC, Ethernet Switched etc..
Let start to get install and configure the centralized Secure Storage using iSCSI Target. For this guide, Ive used following setups.
- We need separate systems to set up the iSCSI Target Server and the Initiator (Client).
- Multiple numbers of Hard disk can be added in large storage environment, But we here using only 1 additional drive except Base installation disk.
- Here we using only 2 drives, One for Base server installation, Other one for Storage (LUNs) which we going to create in PART-II of this series.
#### Master Server Setup ####
- Operating System CentOS release 6.5 (Final)
- iSCSI Target IP 192.168.0.200
- Ports Used : TCP 860, 3260
- Configuration file : /etc/tgt/targets.conf
## Installing iSCSI Target ##
Open terminal and use yum command to search for the package name which need to get install for iscsi target.
# yum search iscsi
#### Sample Output ####
========================== N/S matched: iscsi =======================
iscsi-initiator-utils.x86_64 : iSCSI daemon and utility programs
iscsi-initiator-utils-devel.x86_64 : Development files for iscsi-initiator-utils
lsscsi.x86_64 : List SCSI devices (or hosts) and associated information
scsi-target-utils.x86_64 : The SCSI target daemon and utility programs
We got the search result as above, choose the **Target** package and install to play around.
# yum install scsi-target-utils -y
![Install iSCSI Utils](http://www.tecmint.com/wp-content/uploads/2014/07/Install-iSCSI-in-Linux.jpg)
Install iSCSI Utils
List the installed package to know the default config, service, and man page location.
# rpm -ql scsi-target-utils.x86_64
![List All iSCSI Files](http://www.tecmint.com/wp-content/uploads/2014/07/List-All-ISCSI-Files.jpg)
List All iSCSI Files
Lets start the iSCSI Service, and check the status of Service up and running, iSCSI service named as **tgtd**.
# /etc/init.d/tgtd start
# /etc/init.d/tgtd status
![Start iSCSI Service](http://www.tecmint.com/wp-content/uploads/2014/07/Start-iSCSI-Service.jpg)
Start iSCSI Service
Now we need to configure it to start Automatically while system start-up.
# chkconfig tgtd on
Next, verify that the run level configured correctly for the tgtd service.
# chkconfig --list tgtd
![Enable iSCSI on Startup](http://www.tecmint.com/wp-content/uploads/2014/07/Enable-iSCSI-on-Startup.jpg)
Enable iSCSI on Startup
Lets use **tgtadm** to list what targets and LUNS we currently got configured in our Server.
# tgtadm --mode target --op show
The **tgtd** installed up and running, but there is no **Output** from the above command because we have not yet defined the LUNs in Target Server. For manual page, Run **man** command.
# man tgtadm
![iSCSI Man Pages](http://www.tecmint.com/wp-content/uploads/2014/07/iSCSI-Man-Pages.jpg)
iSCSI Man Pages
Finally we need to add iptables rules for iSCSI if there is iptables deployed in your target Server. First, find the Port number of iscsi target using following netstat command, The target always listens on TCP port 3260.
# netstat -tulnp | grep tgtd
![Find iSCSI Port](http://www.tecmint.com/wp-content/uploads/2014/07/Find-iSCSI-Port.jpg)
Find iSCSI Port
Next add the following rules to allow iptables to Broadcast the iSCSI target discovery.
# iptables -A INPUT -i eth0 -p tcp --dport 860 -m state --state NEW,ESTABLISHED -j ACCEPT
# iptables -A INPUT -i eth0 -p tcp --dport 3260 -m state --state NEW,ESTABLISHED -j ACCEPT
![Open iSCSI Ports](http://www.tecmint.com/wp-content/uploads/2014/07/Open-iSCSI-Ports.jpg)
Open iSCSI Ports
![Add iSCSI Ports to Iptables](http://www.tecmint.com/wp-content/uploads/2014/07/Add-iSCSI-Ports-to-Iptables.jpg)
Add iSCSI Ports to Iptables
**Note**: Rule may vary according to your **Default CHAIN Policy**. Then save the Iptables and restart the iptables.
# iptables-save
# /etc/init.d/iptables restart
![Restart iptables](http://www.tecmint.com/wp-content/uploads/2014/07/Restart-iptables.jpg)
Restart iptables
Here we have deployed a target server to share LUNs to any initiator which authenticating with target over TCP/IP, This suitable for small to large scale production environments too.
In my next upcoming articles, I will show you how to [Create LUNs using LVM in Target Server][1] and how to share LUNs on Client machines, till then stay tuned to TecMint for more such updates and dont forget to give valuable comments.
--------------------------------------------------------------------------------
via: http://www.tecmint.com/create-centralized-secure-storage-using-iscsi-targetin-linux/
作者:[Babin Lonston][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://www.tecmint.com/author/babinlonston/
[1]:http://www.tecmint.com/create-luns-using-lvm-in-iscsi-target/

View File

@ -0,0 +1,230 @@
How to Create and Setup LUNs using LVM in “iSCSI Target Server” on RHEL/CentOS/Fedora Part II
================================================================================
LUN is a Logical Unit Number, which shared from the iSCSI Storage Server. The Physical drive of iSCSI target server shares its drive to initiator over TCP/IP network. A Collection of drives called LUNs to form a large storage as SAN (Storage Area Network). In real environment LUNs are defined in LVM, if so it can be expandable as per space requirements.
![Create LUNS using LVM in Target Server](http://www.tecmint.com/wp-content/uploads/2014/07/Create-LUNS-inLVM.png)
Create LUNS using LVM in Target Server
### Why LUNS are Used? ###
LUNS used for storage purpose, SAN Storages are build with mostly Groups of LUNS to become a pool, LUNs are Chunks of a Physical disk from target server. We can use LUNS as our systems Physical Disk to install Operating systems, LUNS are used in Clusters, Virtual servers, SAN etc. The main purpose of Using LUNS in Virtual servers for OS storage purpose. LUNS performance and reliability will be according to which kind of disk we using while creating a Target storage server.
### Requirements ###
To know about creating a ISCSI Target Server follow the below link.
- [Create Centralized Secure Storage using iSCSI Target Part I][1]
#### Master Server Setup ####
System informations and Network setup are same as iSCSI Target Server as shown in Part I, As we are defining LUNs in same server.
- Operating System CentOS release 6.5 (Final)
- iSCSI Target IP 192.168.0.200
- Ports Used : TCP 860, 3260
- Configuration file : /etc/tgt/targets.conf
## Creating LUNs using LVM in iSCSI Target Server ##
First, find out the list of drives using **fdisk -l** command, this will manipulate a long list of information of every partitions on the system.
# fdisk -l
The above command only gives the drive informations of base system. To get the storage device information, use the below command to get the list of storage devices.
# fdisk -l /dev/vda && fdisk -l /dev/sda
![List Storage Drives](http://www.tecmint.com/wp-content/uploads/2014/07/1.jpg)
List Storage Drives
**NOTE**: Here **vda** is virtual machines hard drive as Im using virtual machine for demonstration, **/dev/sda** is added additionally for storage.
### Step 1: Creating LVM Drive for LUNs ###
We going to use **/dev/sda** drive for creating a LVM.
# fdisk -l /dev/sda
![List LVM Drive](http://www.tecmint.com/wp-content/uploads/2014/07/2.jpg)
List LVM Drive
Now lets Partition the drive using fdisk command as shown below.
# fdisk -cu /dev/sda
- The option **-c** switch off the DOS compatible mode.
- The option **-u** is used to listing partition tables, give sizes in sectors instead of cylinders.
Choose **n** to create a New Partition.
Command (m for help): n
Choose **p** to create a Primary partition.
Command action
e extended
p primary partition (1-4)
Give a Partition number which we need to create.
Partition number (1-4): 1
As here, we are going to setup a LVM drive. So, we need to use the default settings to use full size of Drive.
First sector (2048-37748735, default 2048):
Using default value 2048
Last sector, +sectors or +size{K,M,G} (2048-37748735, default 37748735):
Using default value 37748735
Choose the type of partition, Here we need to setup a LVM so use **8e**. Use **l** option to see the list of type.
Command (m for help): t
Choose which partition want to change the type.
Selected partition 1
Hex code (type L to list codes): 8e
Changed system type of partition 1 to 8e (Linux LVM)
After changing the type, check the changes by print (**p**) option to list the partition table.
Command (m for help): p
Disk /dev/sda: 19.3 GB, 19327352832 bytes
255 heads, 63 sectors/track, 2349 cylinders, total 37748736 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk identifier: 0x9fae99c8
Device Boot Start End Blocks Id System
/dev/sda1 2048 37748735 18873344 8e Linux LVM
Write the changes using **w** to exit from fdisk utility, Restart the system to make changes.
For your reference, Ive attached screen shot below that will give you a clear idea about creating LVM drive.
![Create LVM Partition](http://www.tecmint.com/wp-content/uploads/2014/07/3.jpg)
Create LVM Partition
After system reboot, list the Partition table using the following fdisk command.
# fdisk -l /dev/sda
![Verify LVM Partition](http://www.tecmint.com/wp-content/uploads/2014/07/4.jpg)
Verify LVM Partition
### Step 2: Creating Logical Volumes for LUNs ###
Now here, we going to create Physical volume using using pvcreate command.
# pvcreate /dev/sda1
Create a Volume group with name of iSCSI to identify the group.
# vgcreate vg_iscsi /dev/sda1
Here Im defining 4 Logical Volumes, if so there will be 4 LUNs in our iSCSI Target server.
# lvcreate -L 4G -n lv_iscsi vg_iscsi
# lvcreate -L 4G -n lv_iscsi-1 vg_iscsi
# lvcreate -L 4G -n lv_iscsi-2 vg_iscsi
# lvcreate -L 4G -n lv_iscsi-3 vg_iscsi
List the Physical volume, Volume group, logical volumes to confirm.
# pvs && vgs && lvs
# lvs
For better understanding of the above command, for your reference Ive included a screen grab below.
![Creating LVM Logical Volumes](http://www.tecmint.com/wp-content/uploads/2014/07/5.jpg)
Creating LVM Logical Volumes
![Verify LVM Logical Volumes](http://www.tecmint.com/wp-content/uploads/2014/07/6.jpg)
Verify LVM Logical Volumes
### Step 3: Define LUNs in Target Server ###
We have created Logical Volumes and ready to use with LUN, here we to define the LUNs in target configuration, if so only it will be available for client machines (Initiators).
Open and edit Targer configuration file located at /etc/tgt/targets.conf with your choice of editor.
# vim /etc/tgt/targets.conf
Append the following volume definition in target conf file. Save and close the file.
<target iqn.2014-07.com.tecmint:tgt1>
backing-store /dev/vg_iscsi/lv_iscsi
</target>
<target iqn.2014-07.com.tecmint:tgt1>
backing-store /dev/vg_iscsi/lv_iscsi-1
</target>
<target iqn.2014-07.com.tecmint:tgt1>
backing-store /dev/vg_iscsi/lv_iscsi-2
</target>
<target iqn.2014-07.com.tecmint:tgt1>
backing-store /dev/vg_iscsi/lv_iscsi-3
</target>
![Configure LUNs in Target Server](http://www.tecmint.com/wp-content/uploads/2014/07/7.jpg)
Configure LUNs in Target Server
- iSCSI qualified name (iqn.2014-07.com.tecmint:tgt1).
- Use what ever as your wish.
- Identify using target, 1st target in this Server.
- 4. LVM Shared for particular LUN.
Next, reload the configuration by starting **tgd** service as shown below.
# /etc/init.d/tgtd reload
![Reload Configuration](http://www.tecmint.com/wp-content/uploads/2014/07/8.jpg)
Reload Configuration
Next verify the available LUNs using the following command.
# tgtadm --mode target --op show
![List Available LUNs](http://www.tecmint.com/wp-content/uploads/2014/07/9.jpg)
List Available LUNs
![LUNs Information](http://www.tecmint.com/wp-content/uploads/2014/07/10.jpg)
LUNs Information
The above command will give long list of available LUNs with following information.
- iSCSI Qualified Name
- iSCSI is Ready to Use
- By Default LUN 0 will be reserved for Controller
- LUN 1, What we have Defined in the Target server
- Here i have defined 4 GB for a single LUN
- Online : Yes, Its ready to Use the LUN
Here we have defined the LUNs for target server using LVM, this can be expandable and support for many features such as snapshots. Let us see how to authenticate with Target server in PART-III and mount the remote Storage locally.
--------------------------------------------------------------------------------
via: http://www.tecmint.com/create-luns-using-lvm-in-iscsi-target/
作者:[Babin Lonston][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://www.tecmint.com/author/babinlonston/
[1]:http://www.tecmint.com/create-centralized-secure-storage-using-iscsi-targetin-linux/

View File

@ -0,0 +1,81 @@
What is good audio editing software on Linux
================================================================================
Whether you are an amateur musician or just a student recording his professor, you need to edit and work with audio recordings. If for a long time such task was exclusively attributed to Macintosh, this time is over, and Linux now has what it takes to do the job. In short, here is a non-exhaustive list of good audio editing software, fit for different tasks and needs.
### 1. Audacity ###
![](https://farm9.staticflickr.com/8572/15405018653_83ba3e718d_c.jpg)
Let's get started head on with my personal favorite. [Audacity][1] works on Windows, Mac, and Linux. It is open source. It is easy to use. You get it: Audacity is almost perfect. This program lets you manipulate the audio waveform from a clean interface. In short, you can overlay tracks, cut and edit them easily, apply effects, perform advanced sound analysis, and finally export to a plethora of formats. The reason I like it so much is that it combines both basic features with more complicated ones, but maintains an easy learning curve. However, it is not a fully optimized software for hardcore musicians, or people with professional knowledge.
### 2. Jokosher ###
![](https://farm8.staticflickr.com/7524/15998875136_82903a9b4a_c.jpg)
On a different level, [Jokosher][2] focuses more on the multi-track aspect for musical artists. Developed in Python and using the GTK+ interface with GStreamer for audio back-end, Jokosher really impressed me with its slick interface and its extensions. If the editing features are not the most advanced, the language is clear and directed to musicians. And I really like the association between tracks and instruments for example. In short, if you are starting as a musician, it might be a good place to get some experience before moving on to more complex suites.
### 3. Ardour ###
![](https://farm9.staticflickr.com/8577/16024644385_d8cd8073a3_c.jpg)
And talking about complex suites, [Ardour][3] is complete software for recording, editing, and mixing. Designed this time to appeal to all professionals, Ardour's features in terms of sound routing and plugins go way beyond my comprehension. So if you are looking for a beast and are not afraid to tame it, Ardour is probably a good pick. Again, the interface contributes to its charm, as well as its extensive documentation. I particularly appreciated the first-launch configuration tool.
### 4. Kwave ###
![](https://farm8.staticflickr.com/7557/15402389884_633a8b04c5_c.jpg)
For all KDE lovers, [KWave][4] corresponds to your idea of design and features. There are plenty of shortcuts and interesting options, like memory management. Even if the few effects are nice, we are more dealing with a simple tool to cut/paste audio together. It becomes hard not to compare it with Audacity unfortunately. And on top of that, the interface did not appeal to me that much.
### 5. Qtractor ###
![](https://farm8.staticflickr.com/7551/16022707501_68c39f37e5_c.jpg)
If Kwave is too simplistic for you but a Qt-based program really has some appeal, then [Qtractor][5] might be your option. It aims to be "simple enough for the average home user, and yet powerful enough for the professional user." Indeed the quantity of features and options is almost overwhelming. My favorite being of course customizable shortcuts. Apart from that, Qtractor is probably one of my favorite tools to deal with MIDI files.
### 6. LMMS ###
![](https://farm8.staticflickr.com/7509/15838603239_ef0ecbc8d2_c.jpg)
Standing for Linux MultiMedia Studio, LMMS is directly targeted for music production. If you do not have prior experience and do not want to spend too much time getting some, go elsewhere. LMMS is one of those complex but powerful software that only a few will truly master. The number of features and effects is simply too long to list, but if I had to pick one, I would say that the Freeboy plugin to emulate Game Boy sound system is just magical. Past that, go see their amazing documentation.
### 7. Traverso ###
![](https://farm8.staticflickr.com/7537/15838603279_70ee925057_c.jpg)
Finally, Traverso stood out to me for its unlimited track count and its direct integration with CD burning capacities. Aside from that, it appeared to me as a middle man between a simplistic software and a professional program. The interface is very KDE-like, and the keyboard configuration is always welcome. And cherry on the cake, Traverso monitors your resources and make sure that your CPU or hard drive does not go overboard.
To conclude, it is always a pleasure to see such a large diversity of applications on Linux. It makes finding the software that best fits your needs always possible. While my personal favorite stays Audacity, I was very surprised by the design of programs like LMMS or Jokosher.
Did we miss one? What do you use for audio editing on Linux? And why? Let us know in the comments.
--------------------------------------------------------------------------------
via: http://xmodulo.com/good-audio-editing-software-linux.html
作者:[Adrien Brochard][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://xmodulo.com/author/adrien
[1]:http://audacity.sourceforge.net/
[2]:https://launchpad.net/jokosher/
[3]:http://ardour.org/
[4]:http://kwave.sourceforge.net/
[5]:http://qtractor.sourceforge.net/qtractor-index.html
[6]:
[7]:
[8]:
[9]:
[10]:
[11]:
[12]:
[13]:
[14]:
[15]:
[16]:
[17]:
[18]:
[19]:
[20]:

View File

@ -0,0 +1,75 @@
(translating by runningwater)
Linux FAQs with Answers--How to install 7zip on Linux
================================================================================
> **Question**: I need to extract files from an ISO image, and for that I want to use 7zip program. How can I install 7zip on [insert your Linux distro]?
7zip is an open-source archive program originally developed for Windows, which can pack or unpack a variety of archive formats including its native format 7z as well as XZ, GZIP, TAR, ZIP and BZIP2. 7zip is also popularly used to extract RAR, DEB, RPM and ISO files. Besides simple archiving, 7zip can support AES-256 encryption as well as self-extracting and multi-volume archiving. For POSIX systems (Linux, Unix, BSD), the original 7zip program has been ported as p7zip (short for "POSIX 7zip").
Here is how to install 7zip (or p7zip) on Linux.
### Install 7zip on Debian, Ubuntu or Linux Mint ###
Debian-based distributions come with three packages related to 7zip.
- **p7zip**: contains 7zr (a minimal 7zip archive tool) which can handle its native 7z format only.
- **p7zip-full**: contains 7z which can support 7z, LZMA2, XZ, ZIP, CAB, GZIP, BZIP2, ARJ, TAR, CPIO, RPM, ISO and DEB.
- **p7zip-rar**: contains a plugin for extracting RAR files.
It is recommended to install p7zip-full package (not p7zip) since this is the most complete 7zip package which supports many archive formats. In addition, if you want to extract RAR files, you also need to install p7zip-rar package as well. The reason for having a separate plugin package is because RAR is a proprietary format.
$ sudo apt-get install p7zip-full p7zip-rar
### Install 7zip on Fedora or CentOS/RHEL ###
Red Hat-based distributions offer two packages related to 7zip.
- **p7zip**: contains 7za command which can support 7z, ZIP, GZIP, CAB, ARJ, BZIP2, TAR, CPIO, RPM and DEB.
- **p7zip-plugins**: contains 7z command and additional plugins to extend 7za command (e.g., ISO extraction).
On CentOS/RHEL, you need to enable [EPEL repository][1] before running the yum command below. On Fedora, there is no need to set up an additional repository.
$ sudo yum install p7zip p7zip-plugins
Note that unlike Debian based distributions, Red Hat based distributions do not offer a RAR plugin. Therefore you will not be able to extract RAR files using 7z command.
### Create or Extract an Archive with 7z ###
Once you installed 7zip, you can use 7z command to pack or unpack various types of archives. The 7z command uses other plugins to handle the archives.
![](https://farm8.staticflickr.com/7583/15874000610_878a85b06a_b.jpg)
To create an archive, use "a" option. Supported archive types for creation are 7z, XZ, GZIP, TAR, ZIP and BZIP2. If the specified archive file already exists, it will "add" the files to the existing archive, instead of overwriting it.
$ 7z a <archive-filename> <list-of-files>
To extract an archive, use "e" option. It will extract the archive in the current directory. Supported archive types for extraction are a lot more than those for creation. The list includes 7z, XZ, GZIP, TAR, ZIP, BZIP2, LZMA2, CAB, ARJ, CPIO, RPM, ISO and DEB.
$ 7z e <archive-filename>
Another way to unpack an archive is to use "x" option. Unlike "e" option, it will extract the content with full paths.
$ 7z x <archive-filename>
To see a list of files in an archive, use "l" option.
$ 7z l <archive-filename>
You can update or remove file(s) in an archive with "u" and "d" options, respectively.
$ 7z u <archive-filename> <list-of-files-to-update>
$ 7z d <archive-filename> <list-of-files-to-delete>
To test the integrity of an archive:
$ 7z t <archive-filename>
--------------------------------------------------------------------------------
via: http://ask.xmodulo.com/install-7zip-linux.html
译者:[runningwater](https://github.com/runningwater)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[1]:http://xmodulo.com/how-to-set-up-epel-repository-on-centos.html

View File

@ -0,0 +1,265 @@
Real-World WordPress Benchmarks with PHP5.5 PHP5.6 PHP-NG and HHVM
================================================================================
**TL;DR In a local, Vagrant-based environment HHVM lost, probably due to a bug; its still investigated with the help of the HHVM guys! However on a DigitalOcean 4GB box it beat even the latest build of PHP-NG!**
![](https://kinsta.com/wp-content/uploads/2014/07/phphhvm.jpg)
**Update: Please take a look at the results at the end of the article! They reflect the power of HHVM better (after the JIT warmup), for some reason we cannot get these results with all setups though.
The tests below were done in a Vagrant/VVV environment, the results are still interesting, it might be a bug in HHVM or the Vagrant setup thats preventing it from kicking into high speed, were investigating the issue with the HHVM guys.**
If you remember we [wrote an article a good couple of months ago][1] when WordPress 3.9 came out that HHVM was fully supported beginning with that release, and we were all happy about it. The initial benchmark results showed HHVM to be far more superior than the Zend engine thats currently powering all PHP builds. Then the problems came:
- HHVM can only be run as one user, which means less security (in shared environments)
- HHVM does not restart itself after it crashes, and unfortunately it still does that quite often
- HHVM uses a lot of memory right from the start, and yes, it per-request memory usage will be lower once you scale compared to PHP-FPM
Obviously you have to compromise based on your (or rather your sites) needs but is it worth it? How much of a performance gain can you expect by switching to HHVM?
At Kinsta we really like to test everything new and generally optimize everything to provide the best environment to our clients. Today I finally took the time to set up a test environment and do some tests to compare a couple of different builds with a fresh out of the box WordPress install and one that has a bunch of content added plus runs WooCommerce! To measure the script running time I simply added the
<?php timer_stop(1); ?>
line before the </body> tag of the footer.php files.
**Note:
Previously this section contained benchmarks made with Vagrant/Virtualbox/Ubuntu14.04 however for some reason HHVM was really underperforming, probably due to a bug or a limitation of the virtualized environment. We feel that these test results do not reflect the reality so we re-run the tests on a cloud server and consider these valid.**
Here are the exact setup details of the environment:
- DigitalOcean 4GB droplet (2 CPU cores, 4GB RAM)
- Ubuntu 14.04, MariaDB10
- Test site: Munditia Theme with Demo Content Imported, WooCommerce 2.1.12 & WordPress 3.9.1
- PHP 5.5.9, PHP 5.5.15, PHP 5.6.0 RC2, PHP-NG (20140718-git-6cc487d) and HHVM 3.2.0 (version says PHP 5.6.99-hhvm)
**Without further ado, these were my test results, the lower the better, values in seconds:**
### DigitalOcean 4GB droplet ###
Seconds, 10 runs, lower the better.
这里有一个canvas的数据发布的时候需要截一个图
It looks like that PHP-NG achieves its peak performance after the first run! HHVM needs a couple more reloads, but their performance seems to be almost equal! I cant wait until PHP-NG is merged into the master! :)
Hits in a minute, higher the better.
这里有一个canvas的数据发布的时候需要截一个图
**PHP 5.5.15 OpCache Disabled**
- Transactions: **236 hits**
- Availability: 100.00 %
- Elapsed time: 59.03 secs
- Data transferred: 2.40 MB
- Response time: 2.47 secs
- Transaction rate: 4.00 trans/sec
- Throughput: 0.04 MB/sec
- Concurrency: 9.87
- Successful transactions: 236
- Failed transactions: 0
- Longest transaction: 4.44
- Shortest transaction: 0.48
**PHP 5.5.15 OpCache Enabled**
- Transactions: **441 hits**
- Availability: 100.00 %
- Elapsed time: 59.55 secs
- Data transferred: 4.48 MB
- Response time: 1.34 secs
- Transaction rate: 7.41 trans/sec
- Throughput: 0.08 MB/sec
- Concurrency: 9.91
- Successful transactions: 441
- Failed transactions: 0
- Longest transaction: 2.19
- Shortest transaction: 0.64
**PHP 5.6 RC2 OpCache Disabled**
- Transactions: **207 hits**
- Availability: 100.00 %
- Elapsed time: 59.87 secs
- Data transferred: 2.10 MB
- Response time: 2.80 secs
- Transaction rate: 3.46 trans/sec
- Throughput: 0.04 MB/sec
- Concurrency: 9.68
- Successful transactions: 207
- Failed transactions: 0
- Longest transaction: 3.65
- Shortest transaction: 0.54
**PHP 5.6 RC2 OpCache Enabled**
- Transactions: **412 hits**
- Availability: 100.00 %
- Elapsed time: 59.03 secs
- Data transferred: 4.18 MB
- Response time: 1.42 secs
- Transaction rate: 6.98 trans/sec
- Throughput: 0.07 MB/sec
- Concurrency: 9.88
- Successful transactions: 412
- Failed transactions: 0
- Longest transaction: 1.93
- Shortest transaction: 0.34
**HHVM 3.2.0 (version says PHP 5.6.99-hhvm)**
- Transactions: **955 hits**
- Availability: 100.00 %
- Elapsed time: 59.69 secs
- Data transferred: 9.18 MB
- Response time: 0.62 secs
- Transaction rate: 16.00 trans/sec
- Throughput: 0.15 MB/sec
- Concurrency: 9.94
- Successful transactions: 955
- Failed transactions: 0
- Longest transaction: 0.85
- Shortest transaction: 0.23
**PHP-NG OpCache Enabled (built: Jul 29 2014 )**
- Transactions: **849 hits**
- Availability: 100.00 %
- Elapsed time: 59.88 secs
- Data transferred: 8.63 MB
- Response time: 0.70 secs
- Transaction rate: 14.18 trans/sec
- Throughput: 0.14 MB/sec
- Concurrency: 9.94
- Successful transactions: 849
- Failed transactions: 0
- Longest transaction: 1.06
- Shortest transaction: 0.13
----------
**Note:
These are the previous test results, theyre faulty. I left them here for future reference but please do NOT consider these values a truthful representation!**
Here are the exact setup details of the environment:
- Apple MacBook Pro mid-2011 (Intel Core i7 2 GHz 4 cores, 4GB RAM, 256GB Ocz Vertex 3 MI)
- Current Varying Vagrant Vagrants build with Ubuntu 14.04, nginx 1.6.x, mysql 5.5.x, etc.
- Test site 1: WordPress 3.9.1 bare minimum
- Test site 2: Munditia Theme with Demo Content Imported, WooCommerce 2.1.12 & WordPress 3.9.1
- PHP 5.5.9, PHP 5.5.15, PHP 5.6.0 RC2, PHP-NG (20140718-git-6cc487d) and HHVM 3.2.0 (version says PHP 5.6.99-hhvm)
**Default Theme, Default WordPress 3.9.1, PHP 5.5.9-1ubuntu4.3 (with OpCache 7.0.3)**
**Faulty results. Please read the note above!** Seconds, 10 runs, lower the better.
这里有一个canvas的数据发布的时候需要截一个图
### Munditia Theme with Demo Content Imported, WooCommerce 2.1.12 & WordPress 3.9.1 (OpCache Disabled) ###
**Faulty results. Please read the note above**! Seconds, 10 runs, lower the better.
这里有一个canvas的数据发布的时候需要截一个图
### Munditia Theme with Demo Content Imported, WooCommerce 2.1.12 & WordPress 3.9.1 (OpCache Enabled) ###
**Faulty results. Please read the note above!** Seconds, 10 runs, lower the better.
这里有一个canvas的数据发布的时候需要截一个图
**Siege
parameters: 10 concurrent users for 1 minute: siege -c 10 -b -t 1M**
**Faulty results. Please read the note above!** Hits in a minute, higher the better.
这里有一个canvas的数据发布的时候需要截一个图
**PHP5.5 OpCache Disabled (PHP 5.5.15-1+deb.sury.org~trusty+1)Faulty results. Please read the note above!**
- Transactions: 35 hits
- Availability: 100.00 %
- Elapsed time: 59.04 secs
- Data transferred: 2.03 MB
- Response time: 14.56 secs
- Transaction rate: 0.59 trans/sec
- Throughput: 0.03 MB/sec
- Concurrency: 8.63
- Successful transactions: 35
- Failed transactions: 0
- Longest transaction: 18.73
- Shortest transaction: 5.80
**HHVM 3.2.0 (version says PHP 5.6.99-hhvm)Faulty results. Please read the note above!**
- Transactions: 44 hits
- Availability: 100.00 %
- Elapsed time: 59.53 secs
- Data transferred: 0.42 MB
- Response time: 12.00 secs
- Transaction rate: 0.74 trans/sec
- Throughput: 0.01 MB/sec
- Concurrency: 8.87
- Successful transactions: 44
- Failed transactions: 0
- Longest transaction: 13.40
- Shortest transaction: 2.65
**PHP5.5 OpCache Enabled (PHP 5.5.15-1+deb.sury.org~trusty+1 with OpCache 7.0.4-dev)Faulty results. Please read the note above!**
- Transactions: 100 hits
- Availability: 100.00 %
- Elapsed time: 59.30 secs
- Data transferred: 5.81 MB
- Response time: 5.69 secs
- Transaction rate: 1.69 trans/sec
- Throughput: 0.10 MB/sec
- Concurrency: 9.60
- Successful transactions: 100
- Failed transactions: 0
- Longest transaction: 7.25
- Shortest transaction: 2.82
**PHP5.6 OpCache Enabled (PHP 5.6.0RC2 with OpCache 7.0.4-dev)Faulty results. Please read the note above!**
- Transactions: 103 hits
- Availability: 100.00 %
- Elapsed time: 59.99 secs
- Data transferred: 5.98 MB
- Response time: 5.51 secs
- Transaction rate: 1.72 trans/sec
- Throughput: 0.10 MB/sec
- Concurrency: 9.45
- Successful transactions: 103
- Failed transactions: 0
- Longest transaction: 6.87
- Shortest transaction: 2.52
**PHP-NG OpCache Enabled (20140718-git-6cc487d)Faulty results. Please read the note above!**
- Transactions: 124 hits
- Availability: 100.00 %
- Elapsed time: 59.32 secs
- Data transferred: 7.19 MB
- Response time: 4.58 secs
- Transaction rate: 2.09 trans/sec
- Throughput: 0.12 MB/sec
- Concurrency: 9.57
- Successful transactions: 124
- Failed transactions: 0
- Longest transaction: 6.86
- Shortest transaction: 2.24
**What do you think about this test? Did I miss something? What would you like to see in the next benchmarking article? Please leave your comment below!**
--------------------------------------------------------------------------------
via: https://kinsta.com/blog/real-world-wordpress-benchmarks-with-php5-5-php5-6-php-ng-and-hhvm/
作者:[Mark Gavalda][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:https://kinsta.com/blog/author/kinstadmin/
[1]:https://kinsta.com/blog/hhvm-and-wordpress/

View File

@ -0,0 +1,125 @@
4 Steps to Setup Local Repository in Ubuntu using APT-mirror
================================================================================
Today we will show you how to set up a local repository on your Ubuntu PC or Ubuntu server straight from the official Ubuntu repository. There are a lot of benefits to creating a local repository: if you have many computers that frequently need software installs, security updates and fixes, then having a local Ubuntu repository is an efficient way to serve them, because all required packages are downloaded over the fast LAN connection from your local server, which saves your Internet bandwidth and reduces the annual cost of Internet access.
You can setup a local repository of Ubuntu in your local PC or server using many tools, but we'll featuring about APT-Mirror in this tutorial. Here, we'll be mirroring packages from the default mirror to our Local Server or PC and we'll need at least **120 GB** or more free space in your local or external hard drive. It can be configured through a **HTTP** or **FTP** server to share its software packages with local system clients.
We'll need to install Apache Web Server and APT-Mirror to get our stuffs working out of the box. Here are the steps below to configure a working local repository:
### 1. Installing Required Packages ###
First of all, we are going to pull whole packages from the public repository of Ubuntu package server and save them in our local Ubuntu server hard disk.
We'll first install a web server to host our local repository. We'll install Apache web server but you can install any web server you wish, web server are necessary for the http protocol. You can additionally install FTP servers like proftpd, vsftpd,etc if you need to configure for ftp protocols and Rsync for rsync protocols.
$ sudo apt-get install apache2
And then we'll need to install apt-mirror:
$ sudo apt-get install apt-mirror
![apt-mirror-installation](http://blog.linoxide.com/wp-content/uploads/2014/12/apt-mirror-install.png)
**Note: As I have already mentioned that we'll need at least 120 GBs free space to get all the packages mirrored or download.**
### 2. Configuring APT-Mirror ###
Now create a directory on your harddisk to save all packages. For example, let us create a directory called “/linoxide”. We are going to save all packages in this directory:
$ sudo mkdir /linoxide
![repo-dir](http://blog.linoxide.com/wp-content/uploads/2014/12/mkdir-linoxide.png)
Now, open the file **/etc/apt/mirror.list** file
$ sudo nano /etc/apt/mirror.list
![apt-mirror-edit](http://blog.linoxide.com/wp-content/uploads/2014/12/edit-mirror-list-300x7.png)
Copy the below lines of configuration to mirror.list and edit as your requirements.
############# config ##################
#
set base_path /linoxide
#
# set mirror_path $base_path/mirror
# set skel_path $base_path/skel
# set var_path $base_path/var
# set cleanscript $var_path/clean.sh
# set defaultarch <running host architecture>
# set postmirror_script $var_path/postmirror.sh
# set run_postmirror 0
set nthreads 20
set _tilde 0
#
############# end config ##############
deb http://archive.ubuntu.com/ubuntu trusty main restricted universe multiverse
deb http://archive.ubuntu.com/ubuntu trusty-security main restricted universe multiverse
deb http://archive.ubuntu.com/ubuntu trusty-updates main restricted universe multiverse
#deb http://archive.ubuntu.com/ubuntu trusty-proposed main restricted universe multiverse
#deb http://archive.ubuntu.com/ubuntu trusty-backports main restricted universe multiverse
deb-src http://archive.ubuntu.com/ubuntu trusty main restricted universe multiverse
deb-src http://archive.ubuntu.com/ubuntu trusty-security main restricted universe multiverse
deb-src http://archive.ubuntu.com/ubuntu trusty-updates main restricted universe multiverse
#deb-src http://archive.ubuntu.com/ubuntu trusty-proposed main restricted universe multiverse
#deb-src http://archive.ubuntu.com/ubuntu trusty-backports main restricted universe multiverse
clean http://archive.ubuntu.com/ubuntu
![mirror-list-config](http://blog.linoxide.com/wp-content/uploads/2014/12/mirror-list-config.png)
**Note: You can replace the above official mirror server url by the nearest one, you can get your nearest server by visiting the page [Ubuntu Mirror Server][1]. If you are not in hurry and can wait for the mirroring, you can go with the default official one.**
Here, we are going to mirror package repository of the latest and greatest LTS release of Ubuntu ie. Ubuntu 14.04 LTS (Trusty Tahr) so, we have configured trusty. If you need to mirror of Saucy or other version of Ubuntu, please edit it as its codename.
Now, we'll have to run apt-mirror which will now get/mirror all the packages in the repository.
sudo apt-mirror
It will take time to download all the packages from the Ubuntu Server which depends upon the connection speed and performance with respect to you and the mirror server. I have interrupted the download as I have already done that...
![downloading-packages](http://blog.linoxide.com/wp-content/uploads/2014/12/downloading-index.png)
### 3.Configuring Web Server ###
To be able to access the repo from other computers you need a webserver. You can also do it via ftp but I choose to use a webserver as I mentioned in above step 1. So, we are now gonna configure Apache Server:
We will create a symlink from our local repo's directory to a directory ubuntu in the hosting directory of Apache ie /var/www/ubuntu
$ sudo ln -s /linoxide /var/www/ubuntu
$ sudo service apache2 start
![symlinks-apache2](http://blog.linoxide.com/wp-content/uploads/2014/12/symblink-apache2.png)
The above command will allow us to browse our Mirrored Repo from our localhost ie http://127.0.0.1 by default.
### 4. Configuring Client Side ###
Finally, we need to add repository source in other computers which will fetch the packages and repository from our computer. To do that, we'll need to edit /etc/apt/sources.list and add the following lines.
$ sudo nano /etc/apt/sources.list
Add this line in /etc/apt/sources.list and save.
deb http://192.168.0.100/ubuntu/ trusty main restricted universe
**Note: here 192.168.0.100 is the LAN IP address of our server computer, you need to replace that with yours.**
$ sudo apt-get update
Finally, we are done. Now you can install the required packages using sudo apt-get install packagename from your local Ubuntu repository with high speed download and with low bandwidth.
--------------------------------------------------------------------------------
via: http://linoxide.com/ubuntu-how-to/setup-local-repository-ubuntu/
作者:[Arun Pyasi][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://linoxide.com/author/arunp/
[1]:https://launchpad.net/ubuntu/+archivemirrors

View File

@ -0,0 +1,265 @@
translating by mtunique
5 User Space Debugging Tools in Linux
================================================================================
By definition, debugging tools are those programs which allow us to monitor ,control and correct errors in other programs while they execute. Why should we use debugging tools? To answer this, there are various situations where we get stuck while running some programs and will have the need to understand what exactly happened. For example, we might be running an application and it produces some error messages. To fix those errors, we should first figure out why and from where did the error messages come from. An application might suddenly hang and we will have to know what other processes were running at that time. We might also have to figure out what was process 'x' doing at the time of hang. In order to dissect such details, we will need the help of debugging tools. There are a few user space debugging tools and techniques in Linux which are quite useful in analysing user space problems. They are:
- **'print' statements**
- **Querying (/proc, /sys etc)**
- **Tracing (strace/ltrace)**
- **Valgrind (memwatch)**
- **GDB**
Let's go through each of them one by one.
### 1.'print' statements ###
This is a basic or primitive way of debugging a problem. We can insert print statements in the middle of a program to understand the control flow and get the value of key variables. Though it is a simple technique, it has some disadvantages to it. Programs need to be edited to add 'print 'statements which then will have to be recompiled and rerun to get the output. This is a time-consuming method if the program to be debugged is quite big.
### 2. Querying ###
In some situations, we might want to figure out in what state a running process is in the kernel or what is the memory map that it is occupying there etc. In order to obtain this type of information, we need not insert any code into the kernel. Instead, one can use the /proc filesystem.
/proc is a pseudo filesystem that gets populated with runtime system information (cpu information, amount of memory etc) once the system is up and running.
![output of 'ls /proc'](http://blog.linoxide.com/wp-content/uploads/2014/12/proc-output.png)
output of 'ls /proc'
As you can see, each process that is running in the system has an entry in the /proc filesystem in the form of its process id . Details about each of these processes can be obtained by looking into the files present in its process id directory
![output of 'ls /proc/pid'](http://blog.linoxide.com/wp-content/uploads/2014/12/proc-pid.png)
output of 'ls /proc/pid'
Explaining all the entries inside the /proc filesystem is beyond the scope of this document. Some of the useful ones are listed below:
- /proc/cmdline -> Kernel command line
- /proc/cpuinfo -> information about the processor's make, model etc
- /proc/filesystems -> filesystem information supported by the kernel
- /proc/<pid>/cmdline -> command line arguments passed to the current process
- /proc/<pid>/mem -> memory held by the process
- /proc/<pid>/status -> status of the process
### 3. Tracing ###
strace and ltrace are two of the tracing tools used in Linux to trace program execution details.
#### strace: ####
strace intercepts and records system calls within a process and the signals received by it. To the user, it displays the system calls, arguments passed to them and the return values. strace can be attached to a process that is already running or to a new process. It is useful as a diagnostic and debugging tools for developers and system administrators. It can also be used as a tool to understand how system calls work by tracing different programs. Advantage of this tool is that no source code is needed and programs need not be recompiled.
The basic syntax for using strace is:
**strace command**
There are various options that are available to be used with strace command. One can check out the man page for strace tool to get more details.
The output of strace can be quite lengthy and we may not be interested in going through each and every line that is displayed. We can use the '-e expr' option to filter the unwanted data.
Use '-p pid' option to attach it to a running process.
Output of the command can be redirected to a file using the '-o' option
![output of strace filtering only the open system call](http://blog.linoxide.com/wp-content/uploads/2014/12/strace-output.png)
output of strace filtering only the open system call
#### ltrace: ####
ltrace tracks and records the dynamic (runtime) library calls made by a process and the signals received by it. It can also track the system calls made within a process. Its usage is similar to strace.
**ltrace command**
'-i ' option prints the instruction pointer at the time of library call
'-S' option is used to display both system calls and library calls
Refer to the ltrace man page for all the available options.
![output of ltrace capturing 'strcmp' library call](http://blog.linoxide.com/wp-content/uploads/2014/12/ltrace-output.png)
output of ltrace capturing 'strcmp' library call
### 4. Valgrind ###
Valgrind is a suite of debugging and profiling tools. One of the widely used and the default tool is a memory checking tool called 'Memcheck' which intercepts calls made to malloc(), new(), free() and delete(). In other words, it is useful in detecting problems like:
- memory leaks
- double freeing
- boundary overruns
- using uninitialized memory
- using a memory after it has been freed etc.
It works directly with the executable files.
Valgrind comes with a few drawbacks as well. It can slow down your program as it increases the memory footprint. It can sometimes produce false positives and false negatives. It cannot detect out-of-range access to statically allocated arrays
In order to use it, first download it and install it on your system. ([Valgrind's download page][1]). It can be installed using the package manager for the operating system that one is using.
Installation using command line involves decompressing and untarring the downloaded file.
tar -xjvf valgrind-x.y.z.tar.bz2 (where x.y.z is the version number you are trying to install)
Get inside the newly created directory (valgrind-x.y.z)and run the following commands:
./configure
make
make install
Let's understand how valgrind works with a small program(test.c):
#include <stdio.h>
void f(void)
{
int *x = malloc(10 * sizeof(int));
x[10] = 0;
}
int main()
{
f();
return 0;
}
Compile the program:
gcc -o test -g test.c
Now we have an executable file called 'test'. We can now use valgrind to check for memory errors:
valgrind --tool=memcheck --leak-check=yes ./test
Here is the valgrind output showing the errors:
![output of valgrind showing heap block overrun and memory leak](http://blog.linoxide.com/wp-content/uploads/2014/12/Valgrind.png)
output of valgrind showing heap block overrun and memory leak
As we can see in the above message, we are trying to access the area beyond what is allocated in function f and the allocated memory is not freed.
### 5. GDB ###
GDB is a debugger from Free Software Foundation. It is useful in locating and fixing problems in the code. It gives control to the user to perform various actions when the program to be debugged is running, like:
- starting the program
- stop at specified locations
- stop on specified conditions
- examine required information
- make changes to data in the program etc.
One can also attach a core dump of a crashed program to GDB and analyse the cause of crash.
GDB provides a lot of options to debug programs. However, we will cover some important options here so that one can get a feel of how to get started with GDB.
If you do not already have GDB installed, it can be downloaded from [GDB's official website][2].
#### Compiling programs: ####
In order to debug a program using GDB, it has to be compiled using gcc with the'-g' option. This produces debugging information in the operating system's native format and GDB works with this information.
Here is a simple program (example1.c)performing divide by zero to show the usage of GDB:
#include <stdio.h>
int divide()
{
int x=5, y=0;
return x / y;
}
int main()
{
divide();
}
![An example showing usage of gdb](http://blog.linoxide.com/wp-content/uploads/2014/12/gdb-example.png)
An example showing usage of gdb
#### Invoking GDB: ####
GDB can be started by executing 'gdb' in the command-line:
![invoking gdb](http://blog.linoxide.com/wp-content/uploads/2014/12/gdb.png)
invoking gdb
Once invoked, it remains there waiting for commands from the terminal and executing them until exited .
If a process is already running and you need to attach GDB to it, it can be done by specifying the process id. Suppose a program has already crashed and one wants to analyse the cause of the problem, then attaching GDB to the core file helps.
#### Starting the program: ####
Once you are inside GDB, use the 'run' command to start the program to be debugged
#### Passing arguments to the program: ####
Use the 'set args' command to send the arguments to your program when it runs next time. 'show args' will show the arguments passed to the program.
#### Verifying the stack: ####
Whenever a program stops, first thing anyone wants to understand is why it stopped and how it stopped there. This information is called backtrace. Every function call generated by a program gets stored along with the local variables, arguments passed, call location etc in a block of data inside the stack and is called a frame. Using GDB we can examine all this data. GDB identifies these frames by giving them numbers starting from the innermost frame.
- **bt**: prints the backtrace of the entire stack
- **bt <n>** prints the backtrace of n frames
- **frame <frame number>**: switches to the specified frame and prints that frame
- **up <n>**: move 'n' frames up
- **down <n>**: move 'n' frames down. ( n is 1 by default)
#### Examining data: ####
Program's data can be examined inside GDB using the 'print' command. For example, if 'x' is a variable inside the debugging program, 'print x' prints the value of x.
#### Examining source: ####
Parts of source file can be printed within GDB. 'list' command by default prints 10 lines of code.
- **list <linenum>**: list the source file around 'linenum'
- **list <function>**: list the source from the beginning of 'function'
- **disas <function>**: displays the machine code for the function
#### Stopping and resuming the program: ####
Using GDB, we can set breakpoints, watchpoint etc in order to stop the program wherever required.
- **break <location>**: Sets up a breakpoint at 'location'. When this is hit while the program is executing, control is given to the user.
- **watch <expr>**: GDB stops when the 'expr' is written into by the program and its value changes
- **catch <event>**: GDB stops when the 'event' occurs.
- **disable <breakpoint>**: disable the specified breakpoint
- **enable <breakpoint>**: enable the specified breakpoint
- **delete <breakpoint>**: delete the breakpoint / watchpoint / catch point passed. If no arguments are passed default action is to work on all the breakpoints
- **step**: execute the program step by step
- **continue**: continue with program execution until execution is complete
#### Exiting GDB: ####
Use the 'quit' command to exit from GDB
There are many more options that are available with GDB. Use the help option once you are inside GDB for more details.
![getting help within gdb](http://blog.linoxide.com/wp-content/uploads/2014/12/gdb-help.png)
getting help within gdb
### Summary ###
In this article, we have seen different types of user space debug tools available in Linux. To summarise all of them, here is a quick guideline on when to use what:
Basic debugging, getting values of key variables - print statements
Get information about filesystems supported, available memory, cpus, status of a running program in the kernel etc - querying /proc filesystem
Initial problem diagnosis, system call or library call related issues, understanding program flow - strace / ltrace
Application space related memory problems - valgrind
To examine runtime behaviour of applications, analysing application crashes - gdb
--------------------------------------------------------------------------------
via: http://linoxide.com/linux-how-to/user-space-debugging-tools-linux/
作者:[B N Poornima][a]
译者:[mtunique](https://github.com/mtunique)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://linoxide.com/author/bnpoornima/
[1]:http://valgrind.org/downloads.html
[2]:http://www.gnu.org/software/gdb/download/

View File

@ -0,0 +1,203 @@
Translating by ZTinoZ
How to Install Bugzilla 4.4 on Ubuntu / CentOS 6.x
================================================================================
Here, we are gonna show you how we can install Bugzilla in an Ubuntu 14.04 or CentOS 6.5/7. Bugzilla is a Free and Open Source Software (FOSS) which is a web-based bug tracking tool used to log and track a defect database; its bug-tracking system allows individuals or groups of developers to effectively keep track of outstanding problems with their product. Despite being "free", Bugzilla has many features its expensive counterparts lack. Consequently, Bugzilla has quickly become a favorite of thousands of organizations across the globe.
Bugzilla is very adaptable to various situations. It is used nowadays in different IT support queues, systems administration deployment management, chip design and development problem tracking (both pre- and post-fabrication), and software and hardware bug tracking for luminaries such as Redhat, NASA, Linux-Mandrake, and VA Systems.
### 1. Installing dependencies ###
Setting up Bugzilla is fairly **easy**. This blog is specific to Ubuntu 14.04 and CentOS 6.5 ( though it might work with older versions too )
In order to get Bugzilla up and running in Ubuntu or CentOS, we are going to install Apache webserver ( SSL enabled ) , MySQL database server and also some tools that are required to install and configure Bugzilla.
To install Bugzilla in your server, you'll need to have the following components installed:
- Perl (5.8.1 or above)
- MySQL
- Apache2
- Bugzilla
- Perl modules
- Bugzilla using apache
As we have mentioned that this article explains installation of both Ubuntu 14.04 and CentOS 6.5/7, we will have 2 different sections for them.
Here are the steps you need to follow to setup Bugzilla in your Ubuntu 14.04 LTS and CentOS 7:
**Preparing the required dependency packages:**
You need to install the essential packages by running the following command:
**For Ubuntu:**
$ sudo apt-get install apache2 mysql-server libapache2-mod-perl2
libapache2-mod-perl2-dev libapache2-mod-perl2-doc perl postfix make gcc g++
**For CentOS:**
$ sudo yum install httpd mod_ssl mysql-server mysql php-mysql gcc perl* mod_perl-devel
**Note: Please run all the commands in a shell or terminal and make sure you have root access (sudo) on the machine.**
### 2. Running Apache server ###
As you have already installed the apache server from the above step, we need to now configure apache server and run it. We'll need to go for sudo or root mode to get all the commands working so, we'll gonna switch to root access.
$ sudo -s
Now, we need to open port 80 in the firewall and need to save the changes.
# iptables -I INPUT -p tcp --dport 80 -j ACCEPT
# service iptables save
Now, we need to run the service:
For CentOS:
# service httpd start
Lets make sure that Apache will restart every time you restart the machine:
# /sbin/chkconfig httpd on
For Ubuntu:
# service apache2 start
Now, as we have started our apache http server, we will be able to open apache server at IP address of 127.0.0.1 by default.
### 3. Configuring MySQL Server ###
Now, we need to start our MySQL server:
For CentOS:
# chkconfig mysqld on
# service mysqld start
For Ubuntu:
# service mysql start
![mysql](http://blog.linoxide.com/wp-content/uploads/2014/12/mysql.png)
Login with root access to MySQL and create a DB for Bugzilla. Change “mypassword” to anything you want for your mysql password. You will need it later when configuring Bugzilla too.
For Both CentOS 6.5 and Ubuntu 14.04 Trusty
# mysql -u root -p
# password: (You'll need to enter your password)
# mysql > create database bugs;
# mysql > grant all on bugs.* to root@localhost identified by "mypassword";
#mysql > quit
**Note: Please remember the DB name, passwords for mysql , we'll need it later.**
### 4. Installing and configuring Bugzilla ###
Now, as we have all the required packages set and running, we'll want to configure our Bugzilla.
So, first we'll want to download the latest Bugzilla package, here I am downloading version 4.5.2 .
To download using wget in a shell or terminal:
wget http://ftp.mozilla.org/pub/mozilla.org/webtools/bugzilla-4.5.2.tar.gz
You can also download from their official site ie. [http://www.bugzilla.org/download/][1]
**Extracting and renaming the downloaded bugzilla tarball:**
# tar zxvf bugzilla-4.5.2.tar.gz -C /var/www/html/
# cd /var/www/html/
# mv -v bugzilla-4.5.2 bugzilla
**Note**: Here, **/var/www/html/bugzilla/** is the directory where we're gonna **host Bugzilla**.
Now, we'll configure buzilla:
# cd /var/www/html/bugzilla/
# ./checksetup.pl --check-modules
![bugzilla-check-module](http://blog.linoxide.com/wp-content/uploads/2014/12/bugzilla2-300x198.png)
After the check is done, we will see some missing modules that needs to be installed And that can be installed by the command below:
# cd /var/www/html/bugzilla
# perl install-module.pl --all
This will take a bit of time to download and install all dependencies. Run the **checksetup.pl --check-modules** command again to verify there is nothing left to install.
Now we'll need to run the below command which will automatically generate a file called “localconfig” in the /var/www/html/bugzilla directory.
# ./checksetup.pl
Make sure you input the correct database name, user, and password we created earlier in the localconfig file
# nano ./localconfig
# checksetup.pl
![bugzilla-success](http://blog.linoxide.com/wp-content/uploads/2014/12/bugzilla-success.png)
If all is well, checksetup.pl should now successfully configure Bugzilla.
Now we need to add Bugzilla to our Apache config file. So, we'll need to open /etc/httpd/conf/httpd.conf (for CentOS) or /etc/apache2/apache2.conf (for Ubuntu) with a text editor:
For CentOS:
# nano /etc/httpd/conf/httpd.conf
For Ubuntu:
# nano /etc/apache2/apache2.conf
Now, we'll need to configure Apache server we'll need to add the below configuration in the config file:
<VirtualHost *:80>
DocumentRoot /var/www/html/bugzilla/
</VirtualHost>
<Directory /var/www/html/bugzilla>
AddHandler cgi-script .cgi
Options +Indexes +ExecCGI
DirectoryIndex index.cgi
AllowOverride Limit FileInfo Indexes
</Directory>
Lastly, we need to edit .htaccess file and comment out “Options -Indexes” line at the top by adding “#”
Lets restart our apache server and test our installation.
For CentOS:
# service httpd restart
For Ubuntu:
# service apache2 restart
![bugzilla-install-success](http://blog.linoxide.com/wp-content/uploads/2014/12/bugzilla_apache.png)
Finally, our Bugzilla is ready to get bug reports now in our Ubuntu 14.04 LTS and CentOS 6.5 and you can browse to bugzilla by going to the localhost page ie 127.0.0.1 or to your IP address in your web browser .
--------------------------------------------------------------------------------
via: http://linoxide.com/tools/install-bugzilla-ubuntu-centos/
作者:[Arun Pyasi][a]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://linoxide.com/author/arunp/
[1]:http://www.bugzilla.org/download/

View File

@ -0,0 +1,45 @@
Linus Torvalds发布了Linux 3.19 RC1目前为止最大的更新
================================================================================
> 新的内核开发周期开始了
![](http://i1-news.softpedia-static.com/images/news2/Linus-Torvalds-Launches-Linux-kernel-3-19-RC1-One-of-the-Biggest-So-Far-468043-2.jpg)
**首个内核候选版本在3.19分支上发布了,它看上去像目前最大的更新。这个早先发布让众人惊喜,但是很容易理解为什么。**
内核开发周期随着新的3.19分支的发布而刷新了。事实上3.18分支几周前才发布今天的发布并不完全在预期之中。假期就要来了很多开发者和维护人员可能会休息。一般来说RC版本每周发布一次但是用户可能会看到轻微的延误。
这个版本没有提到在Linux 3.18中确认的回归问题但是可以确定的是开发人员仍在努力修复中。另一方面Linus说这是一个很大的更新事实上这是目前为止最大的更新。很有可能是许多开发者想要在节日之前推送他们的补丁因此下一个RC版本会小一些。
### Linux 3.19 RC1 标志着新的一个周期的开始 ###
发布版本的大小随着更新的频率正在增加。内核的开发周期通常大约8到10周并且很少多于这个这给项目一个很好的预测。
[阅读][1] Linus Torvalds的发布声明中说“也就是说也许没有真正的落后者并且从rc1的大小来看真的已经不多了。我不仅觉得下一个版本会有更多的提交而且这比历史上的rc1都要多至少在提交数量上。我们已经有过更大的版本3.10和3.15都是很大的合并窗口导致的),但是这明显不是一个小的合并窗口。”
“在这个蓝图下这看上去只是一个常规发布。大约三分之二是驱动更新剩下的一半是架构的更新新的nios2补丁不算大头照例ARM占了架构更新的大部分新的nios2支持小于整体架构更新的10%)。”
具体关于这个RC的细节可以在官方邮件列表中找到。
#### 下载 Linux 3.19 RC1 源码包: ####
- [tar.xz (3.18.1 Stable)][3]文件大小 77.2 MB
- [tar.xz (3.19 RC1 Unstable)][4]
如果你想要测试,需要自己编译。并不建议在生产机器上测试。
--------------------------------------------------------------------------------
via: http://news.softpedia.com/news/Linus-Torvalds-Launches-Linux-kernel-3-19-RC1-One-of-the-Biggest-So-Far-468043.shtml
作者:[Silviu Stahie ][a]
译者:[geekpi](https://github.com/geekpi)
校对:[校对者ID](https://github.com/校对者ID)
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创翻译,[Linux中国](http://linux.cn/) 荣誉推出
[a]:http://news.softpedia.com/editors/browse/silviu-stahie
[1]:http://lkml.iu.edu/hypermail/linux/kernel/1412.2/02480.html
[2]:http://linux.softpedia.com/get/System/Operating-Systems/Kernels/Linux-Kernel-Development-8069.shtml
[3]:https://www.kernel.org/pub/linux/kernel/v3.x/linux-3.18.1.tar.xz
[4]:https://www.kernel.org/pub/linux/kernel/v3.x/testing/linux-3.19-rc1.tar.xz

Some files were not shown because too many files have changed in this diff Show More