Mirror of https://github.com/LCTT/TranslateProject.git (synced 2024-12-29 21:41:00 +08:00)

Commit e9bee730f4: Merge remote-tracking branch 'upstream/master'
@@ -1,2 +1,3 @@
 language: c
-script: make -s check
+script:
+  - sh ./scripts/check.sh
51 Makefile
@@ -1,51 +0,0 @@
DIR_PATTERN := (news|talk|tech)
NAME_PATTERN := [0-9]{8} [a-zA-Z0-9_.,() -]*\.md

RULES := rule-source-added \
	rule-translation-requested \
	rule-translation-completed \
	rule-translation-revised \
	rule-translation-published
.PHONY: check match $(RULES)

CHANGE_FILE := /tmp/changes

check: $(CHANGE_FILE)
	echo 'PR #$(TRAVIS_PULL_REQUEST) Changes:'
	cat $(CHANGE_FILE)
	echo
	echo 'Check for rules...'
	make -k $(RULES) 2>/dev/null | grep '^Rule Matched: '

$(CHANGE_FILE):
	git --no-pager diff $(TRAVIS_BRANCH) FETCH_HEAD --no-renames --name-status > $@

rule-source-added:
	echo 'Unmatched Files:'
	egrep -v '^A\s*"?sources/$(DIR_PATTERN)/$(NAME_PATTERN)"?' $(CHANGE_FILE) || true
	echo '[End of Unmatched Files]'
	[ $(shell egrep '^A\s*"?sources/$(DIR_PATTERN)/$(NAME_PATTERN)"?' $(CHANGE_FILE) | wc -l) -ge 1 ]
	[ $(shell egrep -v '^A\s*"?sources/$(DIR_PATTERN)/$(NAME_PATTERN)"?' $(CHANGE_FILE) | wc -l) = 0 ]
	echo 'Rule Matched: $(@)'

rule-translation-requested:
	[ $(shell egrep '^M\s*"?sources/$(DIR_PATTERN)/$(NAME_PATTERN)"?' $(CHANGE_FILE) | wc -l) = 1 ]
	[ $(shell cat $(CHANGE_FILE) | wc -l) = 1 ]
	echo 'Rule Matched: $(@)'

rule-translation-completed:
	[ $(shell egrep '^D\s*"?sources/$(DIR_PATTERN)/$(NAME_PATTERN)"?' $(CHANGE_FILE) | wc -l) = 1 ]
	[ $(shell egrep '^A\s*"?translated/$(DIR_PATTERN)/$(NAME_PATTERN)"?' $(CHANGE_FILE) | wc -l) = 1 ]
	[ $(shell cat $(CHANGE_FILE) | wc -l) = 2 ]
	echo 'Rule Matched: $(@)'

rule-translation-revised:
	[ $(shell egrep '^M\s*"?translated/$(DIR_PATTERN)/$(NAME_PATTERN)"?' $(CHANGE_FILE) | wc -l) = 1 ]
	[ $(shell cat $(CHANGE_FILE) | wc -l) = 1 ]
	echo 'Rule Matched: $(@)'

rule-translation-published:
	[ $(shell egrep '^D\s*"?translated/$(DIR_PATTERN)/$(NAME_PATTERN)"?' $(CHANGE_FILE) | wc -l) = 1 ]
	[ $(shell egrep '^A\s*"?published/$(NAME_PATTERN)' $(CHANGE_FILE) | wc -l) = 1 ]
	[ $(shell cat $(CHANGE_FILE) | wc -l) = 2 ]
	echo 'Rule Matched: $(@)'
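To make the rules concrete, here is a hypothetical `/tmp/changes` (the date and article title below are made up) as produced by the `git diff --name-status` target above, for a PR that submits one finished translation. `rule-translation-completed` is the only rule whose tests all pass: one `D` under sources/, one `A` under translated/, and a total of two changes.

```
# Hypothetical /tmp/changes contents (git diff --name-status output)
D	sources/tech/20180101 Example Article.md
A	translated/tech/20180101 Example Article.md
```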
@@ -1,6 +1,11 @@
 Introduction
 -------------------------------
 
+![pending translation](https://lctt.github.io/TranslateProject/badge/sources.svg)
+![translating](https://lctt.github.io/TranslateProject/badge/translating.svg)
+![pending proofreading](https://lctt.github.io/TranslateProject/badge/translated.svg)
+![published](https://lctt.github.io/TranslateProject/badge/published.svg)
+
 LCTT is the translation team of Linux China ([https://linux.cn/](https://linux.cn/)), responsible for translating Linux-related technology, news, and essays from outstanding foreign media.

 LCTT already has several hundred active members and welcomes more Linux volunteers to join the team.
135 published/20180105 The Best Linux Distributions for 2018.md Normal file
@@ -0,0 +1,135 @@
The Best Linux Distributions for 2018
======

![Linux distros 2018](https://www.linux.com/sites/lcom/files/styles/rendered_file/public/linux-distros-2018.jpg?itok=Z8sdx4Zu "Linux distros 2018")

> Jack Wallen shares his picks for the best Linux distributions for 2018.

It's a new year, and the world of possibilities for Linux remains endless. Many Linux distributions went through major changes in 2017, and I believe 2018 will bring them more stable systems and growing market share, on both servers and desktops.

For those looking forward to migrating to an open source platform (or those considering a switch), what are the best choices for the coming year? If you browse [Distrowatch][14], the sheer number of distributions may leave you dizzy, some climbing the rankings and others doing just the opposite.

So, which Linux distributions will be the favorites in 2018? I have my opinions. In fact, I'm going to share them with you right now.

Similar to [last year's list][15], I'll break things down to make the task easier. Typical Linux users fall into at least the following categories: sysadmins, lightweight distributions, desktops, distributions for IoT, and servers.

With that in mind, let's get to the list of the best Linux distributions for 2018.

### Best distribution for sysadmins

[Debian][16] doesn't often show up on "best of" lists. But it should; why? Once you realize that Ubuntu is built on Debian (as are many other distributions), it's easy to understand why this distribution belongs on many "best of" lists. But why is it best for administrators? I believe it comes down to two very important reasons:

  * Ease of use
  * Extreme stability

Because Debian uses the dpkg and apt package managers, the environment is very easy to work with. And because Debian offers one of the most stable Linux platforms, it makes an ideal environment for so many things: desktops, servers, testing, development. While Debian may not include the sheer number of applications found in last year's winner in this category, [Parrot Linux][17], it is very easy to add any or all of the applications you need to get the job done. And because Debian can be installed with your choice of desktop (Cinnamon, GNOME, KDE, LXDE, Mate, or Xfce), it is sure to meet your desktop needs.

![debian](https://www.linux.com/sites/lcom/files/styles/rendered_file/public/debian.jpg?itok=XkHHG692 "debian")

*Figure 1: The GNOME desktop running on Debian 9.3.*

Meanwhile, Debian ranks second on Distrowatch. Download it, install it, and then put it to work. Debian may not be flashy, but it is tremendously useful for an administrator's daily work.

### Most lightweight distribution

Lightweight distributions serve a special purpose: giving new life to older or underpowered machines. But that doesn't mean these particular distributions exist only for aging hardware. If speed is what you want, you may wonder how fast this kind of distribution can run on your modern machine.

Topping the lightweight list for 2018 is [Lubuntu][18]. Although there are plenty of choices in this category, and although Lubuntu's footprint is nearly as small as Puppy Linux's, being a member of the Ubuntu family gives it the edge in ease of use. But don't worry, Lubuntu won't make heavy demands on your hardware:

  + CPU: Pentium 4 or Pentium M or AMD K8, or better
  + RAM: 512 MB is enough for local applications; 1 GB or more is recommended for online use (Youtube, Google+, Google Drive, Facebook).

Lubuntu uses the LXDE desktop (Figure 2), which means users new to Linux will have no trouble working with this distribution. The short list of included applications (such as Abiword, Gnumeric, and Firefox) are all very lightweight and user-friendly.

![Lubuntu](https://www.linux.com/sites/lcom/files/styles/rendered_file/public/lubuntu_2.jpg?itok=BkTnh7hU "Lubuntu")

*Figure 2: The LXDE desktop.*

Lubuntu can give a computer that is ten years old or more a new lease on life.

### Best desktop distribution

[Elementary OS][19] has been my pick for best desktop distribution two years in a row. For many, [Linux Mint][20] (which is also an outstanding offshoot) leads the desktop pack. But for me, Elementary OS is very hard to beat for ease of use and reliability. Case in point: I was certain the release of [Ubuntu][21] 17.10 would pull me back to Canonical's distribution. Soon after migrating to the new GNOME-powered Ubuntu, I found myself missing the look, usability, and feel of Elementary OS (Figure 3). After two weeks with Ubuntu, I was back on Elementary OS.

![Elementary OS](https://www.linux.com/sites/lcom/files/styles/rendered_file/public/elementaros.jpg?itok=SRZC2vkg "Elementary OS")

*Figure 3: The Pantheon desktop is a work of art.*

Anyone who uses Elementary OS immediately feels right at home. The Pantheon desktop is the perfect combination of slick operation and user-friendliness. And with every update, it only gets better.

Although Elementary OS ranks sixth in Distrowatch page hits, I predict it will rise to at least third place by the end of 2018. The Elementary developers are deeply in tune with what users need. They listen and they evolve, and the current state of the distribution is so good that it seems all they can do is make it better. If you need a desktop with outstanding reliability and ease of use, Elementary OS is your distribution.

### Best distribution for proving yourself

For the longest time, [Gentoo][22] sat firmly on the "show your skills" throne. But I think it's time Gentoo handed the "prove yourself" crown to [Linux From Scratch (LFS)][23]. You may think that unfair, because LFS isn't actually a distribution but a project that helps users create their own Linux distribution. But what better way to prove your Linux knowledge than to build your own distribution? With the LFS project, you build a custom Linux system entirely from scratch, starting from source code. So if you really want to prove something, download the [Linux From Scratch book][24] and start building.

### Best distribution for IoT

[Ubuntu Core][25] wins this category for the second year in a row. Ubuntu Core is a small, transactional version of Ubuntu built specifically for embedded and IoT devices. What makes Ubuntu Core such a perfect fit for IoT is its focus on snap packages: universal packages that can be installed onto a platform without interfering with the base system. These snaps contain everything they need to run (including dependencies), so there is no worry that installing one will break the operating system (or any other installed software). Snaps are also very easy to upgrade and run in an isolated sandbox, which makes them an ideal solution for IoT.

Another area of security built into Ubuntu Core is the login mechanism. Ubuntu Core works with Ubuntu One SSH keys, so the only way to log in to the system is with the SSH keys uploaded to an [Ubuntu One account][26] (Figure 4). This provides an extra layer of security for your IoT devices.

![ Ubuntu Core](https://www.linux.com/sites/lcom/files/styles/rendered_file/public/ubuntucore.jpg?itok=Ydfq8NKH " Ubuntu Core")

*Figure 4: The Ubuntu Core screen showing that remote access is enabled via an Ubuntu One user.*

### Best server distribution

Here is where things get a bit tangled. The primary reason is support. If you need commercial support, your best option might, at first glance, be [Red Hat Enterprise Linux][27]. Red Hat has proven itself, year after year, to be not only one of the strongest enterprise server platforms on the planet, but the single most profitable open source business (with over $2 billion in annual revenue).

However, Red Hat is by no means the only server distribution. In fact, Red Hat doesn't even dominate every corner of enterprise server computing. If you look at the cloud statistics for Amazon's Elastic Compute Cloud, Ubuntu beats Red Hat Enterprise Linux. According to [The Cloud Market][28], EC2 statistics show RHEL with under 100k deployments, whereas Ubuntu has over 200k deployments.

The end result is that Ubuntu has pretty much become the leader in the cloud. Combine that with Ubuntu's ease of working with and managing containers, and it becomes clear that Ubuntu Server is the obvious winner in the server category. And if you need commercial support, Canonical has you covered with [Ubuntu Advantage][29].

The one caveat with Ubuntu Server is that it defaults to a text-only interface (Figure 5). You can install a GUI if needed, but working with the Ubuntu Server command line is very simple (and something every Linux administrator should know anyway).

![Ubuntu server](https://www.linux.com/sites/lcom/files/styles/rendered_file/public/ubuntuserver_1.jpg?itok=qtFSUlee "Ubuntu server")

*Figure 5: The Ubuntu Server login, with update notifications.*

### What's your take?

As I said before, these choices are all highly subjective, but if you're looking for a good place to start, give these distributions a try. Each one serves a very specific purpose and does so better than most. Although you may not agree with my particular picks, you will probably agree that Linux offers amazing possibilities on every front. And stay tuned for more "best distribution" picks next week.

Learn more about Linux through the free ["Introduction to Linux"][13] course from The Linux Foundation and edX.

--------------------------------------------------------------------------------

via: https://www.linux.com/blog/learn/intro-to-linux/2018/1/best-linux-distributions-2018

Author: [JACK WALLEN][a]
Translator: [dianbanjiu](https://github.com/dianbanjiu)
Proofreader: [wxy](https://github.com/wxy)

This article is an original translation by [LCTT](https://github.com/LCTT/TranslateProject), proudly presented by [Linux China](https://linux.cn/)

[a]:https://www.linux.com/users/jlwallen
[1]:https://www.linux.com/licenses/category/used-permission
[2]:https://www.linux.com/licenses/category/used-permission
[3]:https://www.linux.com/licenses/category/used-permission
[4]:https://www.linux.com/licenses/category/used-permission
[5]:https://www.linux.com/licenses/category/used-permission
[6]:https://www.linux.com/licenses/category/creative-commons-zero
[7]:https://www.linux.com/files/images/debianjpg
[8]:https://www.linux.com/files/images/lubuntujpg-2
[9]:https://www.linux.com/files/images/elementarosjpg
[10]:https://www.linux.com/files/images/ubuntucorejpg
[11]:https://www.linux.com/files/images/ubuntuserverjpg-1
[12]:https://www.linux.com/files/images/linux-distros-2018jpg
[13]:https://training.linuxfoundation.org/linux-courses/system-administration-training/introduction-to-linux
[14]:https://distrowatch.com/
[15]:https://www.linux.com/news/learn/sysadmin/best-linux-distributions-2017
[16]:https://www.debian.org/
[17]:https://www.parrotsec.org/
[18]:http://lubuntu.me/
[19]:https://elementary.io/
[20]:https://linuxmint.com/
[21]:https://www.ubuntu.com/
[22]:https://www.gentoo.org/
[23]:http://www.linuxfromscratch.org/
[24]:http://www.linuxfromscratch.org/lfs/download.html
[25]:https://www.ubuntu.com/core
[26]:https://login.ubuntu.com/
[27]:https://www.redhat.com/en/technologies/linux-platforms/enterprise-linux
[28]:http://thecloudmarket.com/stats#/by_platform_definition
[29]:https://buy.ubuntu.com/?_ga=2.177313893.113132429.1514825043-1939188204.1510782993
@@ -1,17 +1,21 @@
How writing can help you learn new skills and grow your career
======

> Learn why writing can help you learn new skills and grow your career

![](https://www.linux.com/sites/lcom/files/styles/rendered_file/public/graffiti-1281310_1920.jpg?itok=RCayfGKv)

Creative Commons Zero Pixabay

At the recent [Open Source Summit in Vancouver][1], I took part in a panel discussion called "How Writing Can Change Your Career (Even if You Don't Identify as a Writer)". The panel was moderated by Rikki Endsley, community manager and editor at Opensource.com, and included open source strategy consultant VM (Vicky) Brasseur, Alex Williams, founder and editor-in-chief of The New Stack, and Dawn Foster, consultant at The Scale Factory.

-In [this article][3], Rikki summarized some ways of writing that can please you and improve your career in unexpected ways, and my remarks at the summit were inspired by it. In the interest of disclosure: I have known Rikki for a long time; we worked at the same company for many years, raised our kids together, and remain close friends.
+In [this article][3], Rikki summarized some delightful ways of writing that can improve your career in unexpected ways, and my remarks at the summit were inspired by it. In the interest of disclosure: I have known Rikki for a long time; we worked at the same company for many years, raised our kids together, and remain close friends.

### Writing and learning

As Rikki put it in the description of the panel, "Even if you don't consider yourself to be a 'writer', you should consider writing about your open source contributions, and about your project or community." Writing is a great way to share your knowledge and get others involved in your work, and it has personal benefits too. Writing can help you meet new people, learn new skills, and improve your communication.

-I find that writing helps me figure out what I don't know about a subject. The process makes the gaps in my knowledge stand out, which motivates me to fill them through further research, reading, and asking questions.
+I find that writing helps me figure out what I don't know about a subject. The process makes the gaps in my knowledge stand out, which motivates me to fill those gaps through further research, reading, and asking questions.

Rikki says: "Writing about what you don't know can be much harder and more time consuming, but also much more fulfilling and helpful to your career. I find that writing about what I don't know helps me learn, because I have to research it thoroughly to be able to explain it to the reader."

@@ -19,12 +23,11 @@ Rikki says: "Writing about what you don't know can be much harder and more time consuming,

### Clearer communication

-
-Writing helps you practice thinking and speaking precisely, especially when you are writing (or speaking) for an international audience. For example, in [this article][5], Isabel Drost-Fromm offers several tips for removing ambiguity for speakers whose first language isn't English. Whether you are speaking at a conference or to your own team, writing also helps you organize your thoughts before a presentation.
+Writing helps you train your thinking and express yourself precisely, especially when you are writing (or speaking) for an international audience. For example, in [this article][5], Isabel Drost-Fromm offers several tips for removing ambiguity for speakers whose first language isn't English. Whether you are speaking at a conference or to your own team, writing also helps you organize your thoughts before presenting your slides.

Rikki says: "The process of writing an article helps me organize my talks and slides, and it's a great way to provide notes to attendees, while sharing the topic with a much larger international audience that wasn't at the event."

-If you are interested, I encourage you to write. I strongly recommend the articles mentioned here to start thinking about what you have to write about.  Unfortunately, our discussion at the Open Source Summit has no recording, but I hope to do another panel in the future and share more ideas.
+If you are interested, I encourage you to write. I strongly recommend the articles mentioned here to start thinking about what you have to write about. Unfortunately, our discussion at the Open Source Summit wasn't recorded, but I hope to do another panel in the future and share more ideas.

--------------------------------------------------------------------------------

@@ -33,7 +36,7 @@ via: https://www.linux.com/blog/2018/9/how-writing-can-help-you-learn-new-skills

Author: [Amber Ankerholz][a]
Selected by: [lujun9972](https://github.com/lujun9972)
Translator: [belitex](https://github.com/belitex)
-Proofreader: [校对者ID](https://github.com/校对者ID)
+Proofreader: [pityonline](https://github.com/pityonline)

This article is an original translation by [LCTT](https://github.com/LCTT/TranslateProject), proudly presented by [Linux China](https://linux.cn/)
@@ -2,19 +2,20 @@
======

![](https://fedoramagazine.org/wp-content/uploads/2018/09/tilingwindowmanagers-816x345.jpg)

-There are a variety of window managers (WMs) in the Linux desktop ecosystem. Some are developed as part of a desktop environment. Others are used as standalone programs. That's the case with tiling WMs, which offer a more lightweight, customized environment. This article presents five such tiling WMs for you to try out.
+There are a variety of window managers (WMs) in the Linux desktop ecosystem. Some are developed as part of a desktop environment. Others are used as standalone programs. That's the case with tiling window managers, which offer a more lightweight, customized environment. This article presents five such tiling window managers for you to try out.

### i3

[i3][1] is one of the most popular tiling window managers. Like most other WMs of this kind, i3 focuses on low resource consumption and customizability.

You can refer to [this Magazine article][2] for details on installing i3 and how to configure it.

### sway

[sway][3] is a tiling Wayland compositor. It has the advantage of being compatible with existing i3 configuration, so you can use it as a drop-in replacement for i3, with Wayland as the display protocol.

-You can install sway from the Fedora repository using dnf:
+You can install sway from the Fedora repository using `dnf`:

```
$ sudo dnf install sway
```

@@ -24,7 +25,7 @@

### Qtile

-[Qtile][5] is another tiling manager that also happens to be written in Python. By default, you configure Qtile in a Python script located at ~/.config/qtile/config.py. When this script does not exist, Qtile uses a default [configuration][6].
+[Qtile][5] is another tiling manager that also happens to be written in Python. By default, you configure Qtile in a Python script located at `~/.config/qtile/config.py`. When this script does not exist, Qtile uses a default [configuration][6].

One of the benefits of Qtile being written in Python is that you can write scripts to control the WM. For example, the following script prints the screen details:
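(The script itself lies outside this diff's context. The following one-liner is a hedged stand-in, assuming Qtile's circa-2018 `libqtile.command.Client` IPC API rather than quoting the article; check the documentation of your installed version.)

```
# Hypothetical stand-in, NOT the article's script: query the running Qtile
# instance over its IPC socket and print the current screen's details.
python -c "from libqtile.command import Client; print(Client().screen.info())"
```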
@@ -45,13 +46,13 @@

The [dwm][7] window manager is more focused on being lightweight. One goal of the project is to keep dwm minimal: for example, the entire code base has never exceeded 2000 lines of code. On the other hand, dwm is not easy to customize and configure. Indeed, the only way to change dwm's default configuration is to [edit the source code and recompile the program][8].

-If you want to try the default configuration, you can install dwm in Fedora using dnf:
+If you want to try the default configuration, you can install dwm in Fedora using `dnf`:

```
$ sudo dnf install dwm
```

-For those who want to change the dwm configuration, Fedora has a dwm-user package. It automatically recompiles dwm using the configuration stored in ~/.dwm/config.h in the user's home directory.
+For those who want to change the dwm configuration, Fedora has a dwm-user package. It automatically recompiles dwm using the configuration stored in `~/.dwm/config.h` in the user's home directory.

### awesome

@@ -71,7 +72,7 @@ via: https://fedoramagazine.org/5-cool-tiling-window-managers/

Author: [Clément Verna][a]
Selected by: [lujun9972](https://github.com/lujun9972)
Translator: [geekpi](https://github.com/geekpi)
-Proofreader: [校对者ID](https://github.com/校对者ID)
+Proofreader: [wxy](https://github.com/wxy)

This article is an original translation by [LCTT](https://github.com/LCTT/TranslateProject), proudly presented by [Linux China](https://linux.cn/)
@@ -1,15 +1,15 @@
16 iptables tips and tricks for sysadmins
=======

-iptables is a powerful tool for configuring and controlling the traffic that flows into and out of your system.
+> iptables is a powerful tool for configuring and controlling the traffic that flows into and out of your system.

![](https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/command_line_prompt.png?itok=wbGiJ_yg)

-Modern Linux kernels ship with a packet-filtering framework named [Netfilter][1]. Netfilter provides operations such as allowing, denying, and modifying packets to control the traffic flowing into and out of the system. The user-space command-line tool **iptables**, built on top of the Netfilter framework, provides powerful firewall configuration and lets you add rules to form a firewall policy. The rich, complex features of [iptables][2], and its baroque command syntax, can be daunting. Let's explore some of those features and work out the tips and tricks a sysadmin needs for solving certain problems.
+Modern Linux kernels ship with a packet-filtering framework named [Netfilter][1]. Netfilter provides operations such as allowing, dropping, and modifying packets to control the traffic flowing into and out of the system. The user-space command-line tool `iptables`, built on top of the Netfilter framework, provides powerful firewall configuration and lets you add rules to form a firewall policy. The rich, complex features of [iptables][2], and its baroque command syntax, can be daunting. Let's explore some of those features and work out the tips and tricks a sysadmin needs for solving certain problems.

### Avoid locking yourself out

Scenario: You are going to make changes to the firewall policy rules on your company's server, and you need to avoid locking yourself (and your colleagues) out (which costs time and money, and perhaps gets you a phone call from a certain department the moment it happens).

#### Tip #1: Take a backup of your iptables configuration before you start.

@@ -17,7 +17,6 @@

```
/sbin/iptables-save > /root/iptables-works
-
```

#### Tip #2: Even better, include a timestamp in the file name.

@@ -25,28 +24,24 @@

```
/sbin/iptables-save > /root/iptables-works-`date +%F`
-
```

This gives you a file with a name like:

```
/root/iptables-works-2018-09-11
-
```

That way, if you do something that breaks your system, you can quickly restore it from the backup:

```
/sbin/iptables-restore < /root/iptables-works-2018-09-11
-
```

-#### Tip #3: Every time you create a backup copy of the iptables configuration, create a link to the file with `latest` in its name.
+#### Tip #3: Every time you create a backup copy of the iptables configuration, create a link pointing to the most recent file.

```
ln -s /root/iptables-works-`date +%F` /root/iptables-works-latest
-
```

#### Tip #4: Put specific rules at the top of the policy and generic rules at the bottom.

@@ -55,19 +50,17 @@

```
iptables -A INPUT -p tcp --dport 22 -j DROP
-
```

-The more criteria you specify in the rule, the smaller your chance of locking yourself out. Don't use the vaguely generic rule above; use something like the following instead:
+The more criteria you specify in the rule, the smaller your chance of locking yourself out. Don't use the very generic rule above; use something like the following instead:

```
iptables -A INPUT -p tcp --dport 22 -s 10.0.0.0/8 -d 192.168.100.101 -j DROP
-
```

-This rule appends a new rule to the end of the **INPUT** chain that drops any **tcp** (**-p tcp**) packets whose source address is **10.0.0.0/8**, destination address is **192.168.100.101**, and destination port is **22** (**\--dport 22**).
+This rule appends a new rule to the end of the `INPUT` chain that drops any TCP (`-p tcp`) packets whose source address is `10.0.0.0/8`, destination address is `192.168.100.101`, and destination port is `22` (`--dport 22`).

-There are many ways to set more specific rules. For example, using **-i eth0** limits the rule to **eth0** and has no effect on **eth1**.
+There are many ways to set more specific rules. For example, using `-i eth0` limits the rule to `eth0` and has no effect on `eth1`.

#### Tip #5: Whitelist your IP address at the top of the policy rules.

@@ -75,10 +68,9 @@

```
iptables -I INPUT -s <your IP> -j ACCEPT
-
```

-You need to put this rule in the first position of the policy. **-I** inserts the rule at the head of the policy, while **-A** appends it to the end.
+You need to put this rule in the first position of the policy. `-I` inserts the rule at the head of the policy, while `-A` appends it to the end.

#### Tip #6: Understand all the rules in your current policy.

@@ -100,7 +92,7 @@

#### Tip #2: Allow the minimum set of services users need to get their work done

-The policy needs to allow workstations to get an IP address, netmask, and other information via DHCP (**-p udp --dport 67:68 -sport 67:68**). For remote management, it needs to allow SSH (**-dport 22**), mail (**--dport 25**), DNS (**--dport 53**), ping (**-p icmp**), NTP (**--dport 123 --sport 123**), and HTTP (**-dport 80**) and HTTPS (**--dport 443**).
+The policy needs to allow workstations to get an IP address, netmask, and other information via DHCP (`-p udp --dport 67:68 --sport 67:68`). For remote management, it needs to allow SSH (`--dport 22`), mail (`--dport 25`), DNS (`--dport 53`), ping (`-p icmp`), NTP (`--dport 123 --sport 123`), and HTTP (`--dport 80`) and HTTPS (`--dport 443`).

```
# Set a default policy of DROP
```

@@ -144,7 +136,7 @@ COMMIT

### Restrict an IP address range

Scenario: The CEO of your company thinks the employees are spending too much time on Facebook and mandates that something be done. The CEO tells the CIO, the CIO tells the CISO, and eventually the task lands on you. You decide to block all access to Facebook. First, find out Facebook's IP address using the `host` or `whois` command.

```
host -t a www.facebook.com

@@ -153,33 +145,33 @@ star.c10r.facebook.com has address 31.13.65.17

whois 31.13.65.17 | grep inetnum
inetnum: 31.13.64.0 - 31.13.127.255
```

-Then convert that range to CIDR notation using the [CIDR to IPv4 conversion][3] page. That gives you **31.13.64.0/18**. Enter the following command to block access to Facebook:
+Then convert that range to CIDR notation using the [CIDR to IPv4 conversion][3] page. That gives you `31.13.64.0/18`. Enter the following command to block access to Facebook:

```
iptables -A OUTPUT -p tcp -i eth0 -o eth1 -d 31.13.64.0/18 -j DROP
```

### Restrict by time - Scenario 1

-Scenario: Company employees strongly object to having all Facebook access blocked, which leads the CEO to relent a bit (that, and a reminder from his assistant that she keeps HIS Facebook page up to date). The CEO then decides to allow access to Facebook during lunch hour (noon to 1 PM). Assuming the default policy is drop, you can use iptables' time features to implement this.
+Scenario: Company employees strongly object to having all Facebook access blocked, which leads the CEO to relent a bit (that, and a reminder from his assistant that she is the one who keeps his Facebook page up to date). The CEO then decides to allow access to Facebook during lunch hour (noon to 1 PM). Assuming the default policy is drop, you can use iptables' time features to implement this.

```
iptables -A OUTPUT -p tcp -m multiport --dport http,https -i eth0 -o eth1 -m time --timestart 12:00 --timestop 13:00 -d 31.13.64.0/18 -j ACCEPT
```

-This command allows (**-j ACCEPT**) http and https (**-m multiport --dport http,https**) traffic to Facebook.com (**-d [31.13.64.0/18][5]**) between noon (**\--timestart 12:00**) and 13:00 (**\--timestop 13:00**).
+This command allows (`-j ACCEPT`) http and https (`-m multiport --dport http,https`) traffic to Facebook.com (`-d [31.13.64.0/18][5]`) between noon (`--timestart 12:00`) and 13:00 (`--timestop 13:00`).

### Restrict by time - Scenario 2

-Scenario
-Scenario: During planned system maintenance, you need to deny all TCP and UDP traffic between 2 AM and 3 AM so maintenance tasks won't be disturbed. Two iptables rules do the job:
+Scenario: During planned system maintenance, you need to deny all TCP and UDP traffic between 2 AM and 3 AM so maintenance tasks won't be disturbed. Two iptables rules do the job:

```
iptables -A INPUT -p tcp -m time --timestart 02:00 --timestop 03:00 -j DROP
iptables -A INPUT -p udp -m time --timestart 02:00 --timestop 03:00 -j DROP
```

-These rules drop (**-j DROP**) all incoming (**-A INPUT**) TCP and UDP (**-p tcp and -p udp**) packets between 2 AM (**\--timestart 02:00**) and 3 AM (**\--timestop 03:00**).
+These rules drop (`-j DROP`) all incoming (`-A INPUT`) TCP and UDP (`-p tcp` and `-p udp`) packets between 2 AM (`--timestart 02:00`) and 3 AM (`--timestop 03:00`).

### Limit the number of connections

@@ -189,11 +181,11 @@

iptables -A INPUT -p tcp --syn -m multiport --dport http,https -m connlimit --connlimit-above 20 -j REJECT --reject-with-tcp-reset
```

-Let's break that command down. If a single host makes more than 20 (**-connlimit-above 20**) new connections (**-p tcp -syn**) to your web servers (**--dport http,https**) in a minute, the server rejects (**-j REJECT**) the new connection and notifies that host that the new connection was rejected (**--reject-with-tcp-reset**).
+Let's break that command down. If a single host makes more than 20 (`-connlimit-above 20`) new connections (`-p tcp -syn`) to your web servers (`--dport http,https`) in a minute, the server rejects (`-j REJECT`) the new connection and notifies the other end that it was rejected (`--reject-with-tcp-reset`).

### Monitor iptables rules

Scenario: Since packets traverse the rules in a chain in order, and iptables follows a "first match wins" principle, frequently matched rules should be near the top of the policy and less frequently matched rules near the bottom. How do you know which rules are hit the most or the least, so they can be kept near the top or the bottom?

#### Tip #1: See how many times each rule has been hit

@@ -203,7 +195,7 @@

iptables -L -v -n --line-numbers
```

-The **-L** option lists all the rules in the chain. Since no chain was specified, all chains are listed. **-v** shows detailed information, and **-n** displays the packet and byte counters in numeric format. The number at the beginning of each rule indicates its position in the chain.
+The `-L` option lists all the rules in the chain. Since no chain was specified, all chains are listed. `-v` shows detailed information, and `-n` displays the packet and byte counters in numeric format. The number at the beginning of each rule indicates its position in the chain.

Using the packet and byte counts, you can move the most frequently hit rules to the top and the least frequently hit rules to the bottom.

@@ -215,17 +207,17 @@

iptables -nvL | grep -v "0 0"
```

-Note: it is not a tab between the two zeros; there are 5 spaces between them.
+Note: it is not a tab between the two zeros; there are **5** spaces between them.

#### Tip #3: Monitor what's going on

-You may want to watch what's going on with iptables in real time, like with **top**. Use this command to monitor the activity of iptables dynamically, showing only the rules that are actively being traversed:
+You may want to watch what's going on with iptables in real time, like with `top`. Use this command to monitor the activity of iptables dynamically, showing only the rules that are actively being traversed:

```
watch --interval=5 'iptables -nvL | grep -v "0 0"'
```

-**watch** runs **iptables -nvL | grep -v "0 0"** every 5s and displays iptables' activity, letting you watch the packet and byte counts change.
+`watch` runs `iptables -nvL | grep -v "0 0"` every 5 seconds and displays iptables' activity, letting you watch the packet and byte counts change.

### Logging
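(The body of this section falls outside the diff context above. As a hedged illustration only, using a common Netfilter pattern rather than the article's elided text, rate-limited logging placed just before a final DROP typically looks like this:)

```
# Hypothetical sketch: log (at most 5 entries per minute) before dropping
iptables -A INPUT -m limit --limit 5/min -j LOG --log-prefix "iptables-dropped: " --log-level 4
iptables -A INPUT -j DROP
```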
@@ -239,7 +231,7 @@

### Don't settle for just ACCEPT and DROP rules

-This article has covered a lot of iptables ground, from avoiding locking yourself out, to iptables firewall configuration, to monitoring the activity in iptables. You can start from here to explore iptables and pick up even more tips and tricks.
+This article has covered a lot of iptables ground, from avoiding locking yourself out, to configuring an iptables firewall, to monitoring the activity in iptables. You can start from here to explore iptables and pick up even more tips and tricks.

--------------------------------------------------------------------------------

@@ -247,8 +239,8 @@ via: https://opensource.com/article/18/10/iptables-tips-and-tricks

Author: [Gary Smith][a]
Selected by: [lujun9972](https://github.com/lujun9972)
-Translator: [jrg](https://github.com/jrglinu)
-Proofreader: [校对者ID](https://github.com/校对者ID)
+Translator: [jrg](https://github.com/jrglinux)
+Proofreader: [wxy](https://github.com/wxy)

This article is an original translation by [LCTT](https://github.com/LCTT/TranslateProject), proudly presented by [Linux China](https://linux.cn/)
@@ -1,7 +1,7 @@
-Tips for listing files with ls at the Linux command line
+Tricks for listing files with ls at the Linux command line
======

-Learn some of the Linux `ls` command's most useful variations.
+> Learn some of the Linux `ls` command's most useful variations.

![](https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/button_push_open_keyboard_file_organize.png?itok=KlAsk1gx)
@@ -0,0 +1,48 @@
11 KDE applications you never knew existed
======

> Which fun or quirky app do you need today?

![](https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/BIZ_DebucketizeOrgChart_A.png?itok=RB3WBeQQ)

The Linux desktop environment KDE celebrated its 22nd birthday on October 14 this year. The KDE community has created a gargantuan number of applications (well, not that many, but [quite a few][1]), and many of them provide fun and quirky services. We combed through the list and picked out 11 applications you might want to know about.

### 11 KDE applications you never knew existed

  1. [KTeaTime][2] is a timer for steeping tea. Select the type of tea you are brewing (green, black, herbal, and so on), and the timer will ring when the tea bag is ready to come out and the tea is ready to drink.
  2. [KTux][3] is just a screensaver... or is it? Tux flies through outer space in his green spaceship.
  3. [Blinken][4] is a memory game based on Simon Says, an electronic game released in 1978. Players are challenged to remember sequences of increasing length.
  4. [Tellico][5] is a collection manager for organizing your favorite hobby. Maybe you still collect baseball cards. Maybe you're part of a wine club. Maybe you're a serious bookworm. Maybe all three!
  5. [KRecipes][6] is **not** just a simple recipe manager. It has lots of other features: shopping lists, nutrient analysis, advanced search, recipe ratings, import/export in various formats, and more.
  6. [KHangMan][7] is based on the classic game Hangman, where you guess a word letter by letter. The game is available in several languages, which makes it useful for learning another language. It has four categories, one of which is "animals", which is great for kids.
  7. [KLettres][8] is another app that can help you learn a new language. It teaches the alphabet and challenges the user to read and pronounce syllables.
  8. [KDiamond][9] is similar to Bejeweled and other single-player puzzle games, where the goal is to build lines of a certain number of gems or objects of the same type. Here, the objects are diamonds.
  9. [KolourPaint][10] is a very simple image-editing tool that can also be used to create simple vector graphics.
  10. [Kiriki][11] is a dice game for 2-6 players, similar to Yahtzee.
  11. [RSIBreak][12] doesn't even start with a K!? It starts with "RSI", which stands for "Repetitive Strain Injury", a condition that can develop after using a mouse and keyboard for long hours, day after day. This app reminds you to take breaks and can be personalized to meet your needs.

--------------------------------------------------------------------------------

via: https://opensource.com/article/18/10/kde-applications

Author: [Opensource.com][a]
Selected by: [lujun9972][b]
Translator: [geekpi](https://github.com/geekpi)
Proofreader: [wxy](https://github.com/wxy)

This article is an original translation by [LCTT](https://github.com/LCTT/TranslateProject), proudly presented by [Linux China](https://linux.cn/)

[a]: https://opensource.com
[b]: https://github.com/lujun9972
[1]: https://www.kde.org/applications/
[2]: https://www.kde.org/applications/games/kteatime/
[3]: https://userbase.kde.org/KTux
[4]: https://www.kde.org/applications/education/blinken
[5]: http://tellico-project.org/
[6]: https://www.kde.org/applications/utilities/krecipes/
[7]: https://edu.kde.org/khangman/
[8]: https://edu.kde.org/klettres/
[9]: https://games.kde.org/game.php?game=kdiamond
[10]: https://www.kde.org/applications/graphics/kolourpaint/
[11]: https://www.kde.org/applications/games/kiriki/
[12]: https://userbase.kde.org/RSIBreak
@@ -3,30 +3,30 @@

![](http://fasterland.net/wp-content/uploads/2018/10/Arch-Linux-Boot-Menu-750x375.jpg)

-Some time ago, I wrote a tutorial about **[how to reinstall Grub][1] on Arch Linux after installing Windows.**
+Some time ago, I wrote a tutorial about [how to reinstall Grub][1] on Arch Linux after installing Windows.

-A few weeks ago, I had to reinstall **Arch Linux** from scratch on my laptop, and I found that installing **Grub** was not as simple as I thought.
+A few weeks ago, I had to reinstall Arch Linux from scratch on my laptop, and I found that installing Grub was not as simple as I thought.

-For this reason, and because **installing Grub on a UEFI bios** during a fresh **Arch Linux** installation is not so easy, I'm writing this tutorial.
+For this reason, and because installing Grub on a UEFI bios during a fresh Arch Linux installation is not so easy, I'm writing this tutorial.

### Locate the EFI partition

-The first important thing to do in order to install **Grub** on **Arch Linux** is to locate the **EFI** partition. Let's run the following command to find it:
+The first important thing to do in order to install Grub on Arch Linux is to locate the EFI partition. Let's run the following command to find it:

```
# fdisk -l
```

-We need to check for the partition marked as **EFI System**; in my case it's **/dev/sda2**.
+We need to check for the partition marked as EFI System; in my case it's `/dev/sda2`.

-After that, we need to mount this partition, for example at /boot/efi:
+After that, we need to mount this partition, for example at `/boot/efi`:

```
# mkdir /boot/efi
# mount /dev/sda2 /boot/efi
```

-Another important thing to do is to add this partition to **/etc/fstab**.
+Another important thing to do is to add this partition to `/etc/fstab`.
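(As a hedged sketch: the UUID below is a placeholder; use the value `blkid` reports for your own EFI partition.)

```
# Hypothetical /etc/fstab entry for the EFI partition
UUID=1234-ABCD  /boot/efi  vfat  defaults  0  2
```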

#### Install Grub

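(The commands for this step sit outside the diff context. As a hedged sketch of the usual Arch UEFI procedure, using standard package names and flags rather than quoting the article:)

```
# Hypothetical sketch of the usual UEFI Grub installation on Arch
pacman -Sy grub efibootmgr
grub-install --target=x86_64-efi --efi-directory=/boot/efi --bootloader-id=GRUB
```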
@@ -39,7 +39,7 @@

#### Automatically add Windows to the Grub menu

-In order to automatically add a **Windows entry to the Grub menu**, we need to install **os-prober**:
+In order to automatically add a Windows entry to the Grub menu, we need to install os-prober:

```
# pacman -Sy os-prober
```
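(The follow-up step is also outside the diff context; as a hedged sketch, regenerating the menu so os-prober can detect Windows is typically:)

```
# Hypothetical sketch: regenerate grub.cfg after installing os-prober
grub-mkconfig -o /boot/grub/grub.cfg
```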
@@ -62,7 +62,7 @@ via: http://fasterland.net/how-to-install-grub-on-arch-linux-uefi.html

Author: [Francesco Mondello][a]
Selected by: [lujun9972][b]
Translator: [geekpi](https://github.com/geekpi)
-Proofreader: [校对者ID](https://github.com/校对者ID)
+Proofreader: [wxy](https://github.com/wxy)

This article is an original translation by [LCTT](https://github.com/LCTT/TranslateProject), proudly presented by [Linux China](https://linux.cn/)
9 scripts/check.sh Normal file
@@ -0,0 +1,9 @@
#!/bin/bash
# PR check script
set -e

CHECK_DIR="$(dirname "$0")/check"
# sh "${CHECK_DIR}/check.sh" # requires extra dependencies; disabled for now
sh "${CHECK_DIR}/collect.sh"
sh "${CHECK_DIR}/analyze.sh"
sh "${CHECK_DIR}/identify.sh"
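(As a rough illustration, a hypothetical local invocation; these are the environment variables collect.sh reads:)

```
# Hypothetical local run. collect.sh compares against the merge-base with
# TRAVIS_BRANCH unless TRAVIS_PULL_REQUEST is 'false', in which case it
# falls back to HEAD^.
TRAVIS_BRANCH=master TRAVIS_PULL_REQUEST=1234 sh scripts/check.sh
```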
43 scripts/check/analyze.sh Normal file
@@ -0,0 +1,43 @@
#!/bin/sh
# Analyze PR file changes
set -e

# Load shared constants and functions
# shellcheck source=common.inc.sh
. "$(dirname "$0")/common.inc.sh"

################################################################################
# Reads:
# - /tmp/changes  # list of file changes
# Writes:
# - /tmp/stats    # statistics of the file changes
################################################################################

# Run the analysis and print the statistics to standard output
do_analyze() {
    cat /dev/null > /tmp/stats
    OTHER_REGEX='^$'
    for TYPE in 'SRC' 'TSL' 'PUB'; do
        for STAT in 'A' 'M' 'D'; do
            # Count each operation for each category
            REGEX="$(get_operation_regex "$STAT" "$TYPE")"
            OTHER_REGEX="${OTHER_REGEX}|${REGEX}"
            eval "${TYPE}_${STAT}=\"\$(grep -Ec '$REGEX' /tmp/changes)\"" || true
            eval echo "${TYPE}_${STAT}=\$${TYPE}_${STAT}"
        done
    done

    # Count the remaining operations
    OTHER="$(grep -Evc "$OTHER_REGEX" /tmp/changes)" || true
    echo "OTHER=$OTHER"

    # Count the total number of changes
    TOTAL="$(wc -l < /tmp/changes)"
    echo "TOTAL=$TOTAL"
}

echo "[analyze] Counting file changes..."
do_analyze > /tmp/stats
echo "[analyze] Statistics written:"
cat /tmp/stats
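(For example, with hypothetical contents not captured from a real run, a one-article "translation completed" PR that deletes one file from sources/ and adds one to translated/ would leave a /tmp/stats like this for identify.sh to source:)

```
# Hypothetical /tmp/stats for a one-article "translation completed" PR
SRC_A=0
SRC_M=0
SRC_D=1
TSL_A=1
TSL_M=0
TSL_D=0
PUB_A=0
PUB_M=0
PUB_D=0
OTHER=0
TOTAL=2
```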
10 scripts/check/check.sh Normal file
@@ -0,0 +1,10 @@
#!/bin/bash
# Check the status of the check scripts themselves
set -e

################################################################################
# For development use only, for now
################################################################################

shellcheck -e SC2034 -x mock/stats.sh "$(dirname "$0")"/*.sh \
    && echo '[check] ShellCheck passed'
37 scripts/check/collect.sh Normal file
@@ -0,0 +1,37 @@
#!/bin/bash
# Collect PR file changes
set -e

################################################################################
# Reads: (none)
# Writes:
# - /tmp/changes  # list of file changes
################################################################################

echo "[collect] Locating the fork point between the PR branch and the target branch..."

TARGET_BRANCH="${TRAVIS_BRANCH:-master}"
echo "[collect] Target branch set to: ${TARGET_BRANCH}"

MERGE_BASE='HEAD^'
[ "$TRAVIS_PULL_REQUEST" != 'false' ] \
    && MERGE_BASE="$(git merge-base "$TARGET_BRANCH" HEAD)"
echo "[collect] Fork point found: ${MERGE_BASE}"

echo "[collect] Change summary:"
git --no-pager show --summary "${MERGE_BASE}..HEAD"

{
    # '绕过检查' is the Chinese marker ("bypass checks") looked for in commit messages
    git --no-pager log --oneline "${MERGE_BASE}..HEAD" | grep -Eq '绕过检查' && {
        touch /tmp/bypass
        echo "[collect] Marked as bypassing the checks"
    }
} || true

echo "[collect] Writing the list of file changes..."

git diff "$MERGE_BASE" HEAD --no-renames --name-status > /tmp/changes
echo "[collect] List of file changes written:"
cat /tmp/changes
{ [ -z "$(cat /tmp/changes)" ] && echo "(no changes)"; } || true
30 scripts/check/common.inc.sh Normal file
@@ -0,0 +1,30 @@
#!/bin/sh

################################################################################
# Shared constants and functions
################################################################################

# Category directories
export SRC_DIR='sources'     # not yet translated
export TSL_DIR='translated'  # translated
export PUB_DIR='published'   # published

# Matching patterns
export CATE_PATTERN='(news|talk|tech)'                  # category
export FILE_PATTERN='[0-9]{8} [a-zA-Z0-9_.,() -]*\.md'  # file name

# Usage: get_operation_regex STAT TYPE
#
# STAT is one of:
# - A: added
# - M: modified
# - D: deleted
# TYPE is one of:
# - SRC: not yet translated
# - TSL: translated
# - PUB: published
get_operation_regex() {
    STAT="$1"
    TYPE="$2"
    echo "^${STAT}\\s+\"?$(eval echo "\$${TYPE}_DIR")/"
}
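(For instance, in a hypothetical shell session, the helper expands the `A`/`SRC` pair into the pattern the other scripts grep against `/tmp/changes`:)

```
# Hypothetical session demonstrating the generated pattern
$ . scripts/check/common.inc.sh
$ get_operation_regex A SRC
^A\s+"?sources/
```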
86 scripts/check/identify.sh Normal file
@@ -0,0 +1,86 @@
#!/bin/bash
# Match PR rules
set -e

################################################################################
# Reads:
# - /tmp/stats
# Writes: (none)
################################################################################

# Load shared constants and functions
# shellcheck source=common.inc.sh
. "$(dirname "$0")/common.inc.sh"

echo "[identify] Loading statistics..."
# Load the statistics
# shellcheck source=mock/stats.sh
. /tmp/stats

# PR rules

# Bypass: skip the PR checks
rule_bypass_check() {
    [ -f /tmp/bypass ] && echo "Matched rule: bypass the checks"
}

# Source added: add at least one source article
rule_source_added() {
    [ "$SRC_A" -ge 1 ] \
        && [ "$TOTAL" -eq "$SRC_A" ] && echo "Matched rule: added ${SRC_A} source article(s)"
}

# Translation requested: claim only one source article
rule_translation_requested() {
    [ "$SRC_M" -eq 1 ] \
        && [ "$TOTAL" -eq 1 ] && echo "Matched rule: translation requested"
}

# Translation completed: submit only one translation
rule_translation_completed() {
    [ "$SRC_D" -eq 1 ] && [ "$TSL_A" -eq 1 ] \
        && [ "$TOTAL" -eq 2 ] && echo "Matched rule: translation completed"
}

# Translation revised: proofread only one translation
rule_translation_revised() {
    [ "$TSL_M" -eq 1 ] \
        && [ "$TOTAL" -eq 1 ] && echo "Matched rule: translation revised"
}

# Translation published: publish one or more translations
rule_translation_published() {
    [ "$TSL_D" -ge 1 ] && [ "$PUB_A" -ge 1 ] && [ "$TSL_D" -eq "$PUB_A" ] \
        && [ "$TOTAL" -eq $(($TSL_D + $PUB_A)) ] \
        && echo "Matched rule: published ${PUB_A} translation(s)"
}

# Common errors

# Unknown error
error_undefined() {
    echo "Unknown error: no rule matched; please try operating on only one article at a time"
}

# Multiple translation requests
error_translation_requested_multiple() {
    [ "$SRC_M" -gt 1 ] \
        && echo "Matched error: multiple translation requests; please claim only one article at a time"
}

# Run the checks and print the matched item
do_check() {
    rule_bypass_check \
        || rule_source_added \
        || rule_translation_requested \
        || rule_translation_completed \
        || rule_translation_revised \
        || rule_translation_published \
        || {
            error_translation_requested_multiple \
                || error_undefined
            exit 1
        }
}

do_check
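(As a rough illustration, a hypothetical run rather than captured CI output: a PR that moves one article from translated/ to published/ would match like this:)

```
# Hypothetical session; assumes /tmp/changes and /tmp/stats were produced
# by collect.sh and analyze.sh for a one-article publish PR.
$ sh scripts/check/identify.sh
[identify] Loading statistics...
Matched rule: published 1 translation(s)
```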
13 scripts/check/mock/stats.sh Normal file
@@ -0,0 +1,13 @@
#!/bin/sh
# Mock statistics for ShellCheck
SRC_A=0
SRC_M=0
SRC_D=0
TSL_A=0
TSL_M=0
TSL_D=0
PUB_A=0
PUB_M=0
PUB_D=0
OTHER=0
TOTAL=0
69 sources/talk/20180623 The IBM 029 Card Punch.md Normal file
@@ -0,0 +1,69 @@
The IBM 029 Card Punch
======

Lines of code longer than 80 characters drive me crazy. I appreciate that this is pedantic. I’ve seen people on the internet make good arguments for why the 80-character limit ought to be respected even on our modern Retina-display screens, but those arguments hardly justify the visceral hatred I feel for even that one protruding 81st character.
There was once a golden era in which it was basically impossible to go over the 80-character limit. The 80-character limit was a physical reality, because there was no 81st column for an 81st character to fit in. Any programmers attempting to name a function something horrendously long and awful would discover, in a moment of delicious, slow-dawning horror, that there literally isn’t room for their whole declaration.
This golden era was the era of punch card programming. By the 1960s, IBM’s punch cards had set the standard and the standard was that punch cards had 80 columns. The 80-column standard survived into the teletype and dumb terminal era and from there embedded itself into the nooks and crannies of our operating systems. Today, when you launch your terminal emulator and open a new window, odds are it will be 80 characters wide, even though we now have plenty of screen real estate and tend to favor longer identifiers over inscrutable nonsense like `iswcntrl()`.
If questions on Quora are any indication, many people have trouble imagining what it must have been like to program computers using punch cards. I will admit that for a long time I also didn’t understand how punch card programming could have worked, because it struck me as awfully labor-intensive to punch all those holes. This was a misunderstanding; programmers never punched holes in cards the same way a train conductor does. They had card punch machines (also known as key punches), which allowed them to punch holes in cards using a typewriter-style keyboard. And card punches were hardly new technology—they were around as early as the 1890s.
One of the most widely used card punch machines was the IBM 029. It is perhaps the best remembered card punch today.
![][1]
The IBM 029 was released in 1964 as part of IBM’s System/360 rollout. System/360 was a family of computing systems and peripherals that would go on to dominate the mainframe computing market in the late 1960s. Like many of the other System/360 machines, the 029 was big. This was an era when the distinction between computing machinery and furniture was blurry—the 029 was not something you put on a table but an entire table in itself. The 029 improved upon its predecessor, the 026, by supporting new characters like parentheses and by generally being quieter. It had cool electric blue highlights and was flat and angular whereas the 026 had a 1940s rounded, industrial look. Another of its big selling points was that it could automatically left-pad numeric fields with zeros, demonstrating that JavaScript programmers were not the first programmers too lazy to do their own left-padding.
But wait, you might say—IBM released a brand-new card punch in 1964? What about that photograph of the Unix gods at Bell Labs using teletype machines in, like, 1970? Weren’t card punching machines passé by the mid- to late-1960s? Well, it might surprise you to know that the 029 was available in IBM’s catalog until as late as 1984. In fact, most programmers programmed using punch cards until well into the 1970s. This doesn’t make much sense given that there were people using teletype machines during World War II. Indeed, the teletype is almost of the same vintage as the card punch. The limiting factor, it turns out, was not the availability of teletypes but the availability of computing time. What kept people from using teletypes was that teletypes assumed an interactive, “online” model of communication with the computer. Before Unix and the invention of timesharing operating systems, your interactive session with a computer would have stopped everyone else from using it, a delay potentially costing thousands of dollars. So programmers instead wrote their programs offline using card punch machines and then fed them into mainframe computers later as batch jobs. Punch cards had the added benefit of being a cheap data store in an era where cheap, reliable data storage was hard to come by. Your programs lived in stacks of cards on your shelves rather than in files on your hard drive.
So what was it actually like using an IBM 029 card punch? That’s hard to explain without first taking a look at the cards themselves. A typical punch card had 12 rows and 80 columns. The bottom nine rows were the digit rows, numbered one through nine. These rows had the appropriate digit printed in each column. The top three rows, called the “zone” rows, consisted of two blank rows and usually a zero row. Row 12 was at the very top of the card, followed by row 11, then rows zero through nine. This somewhat confusing ordering meant that the top edge of the card was called the 12 edge while the bottom was called the nine edge. A corner of each card was usually clipped to make it easier to keep a stack of cards all turned around the right way.
![][2]
When they were first invented, punch cards were meant to be punched with circular holes, but IBM eventually realized that they could fit more columns on a card if the holes were narrow rectangles. Different combinations of holes in a column represented different characters. For human convenience, card punches like the 029 would print each column’s character at the top of the card at the same time as punching the necessary holes. Digits were represented by one punched hole in the appropriate digit row. Alphabetical and symbolic characters were represented by a hole in a zone row and then a combination of one or two holes in the digit rows. The letter A, for example, was represented by a hole in the 12 zone row and another hole in the one row. This was an encoding of sorts, sometimes called the Hollerith code, after the original inventor of the punch card machine. The encoding allowed for only a relatively small character set; lowercase letters, for example, were not represented. Some clever engineer today might wonder why punch cards didn’t just use a binary encoding—after all, with 12 rows, you could encode over 4000 characters. The Hollerith code was used instead because it ensured that no more than three holes ever appeared in a single column. This preserved the structural integrity of the card. A binary encoding would have entailed so many holes that the card would have fallen apart.
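(To make the encoding concrete, a brief hedged sketch of the standard Hollerith assignments, summarized from general knowledge of the code rather than quoted from this post:)

```
# Hollerith punch combinations (the zone rows are 12, 11, and 0):
#   digits 0-9 : a single punch in the matching digit row
#   A-I        : zone 12 + digit rows 1-9   (A = 12+1, B = 12+2, ...)
#   J-R        : zone 11 + digit rows 1-9   (J = 11+1, K = 11+2, ...)
#   S-Z        : zone 0  + digit rows 2-9   (S = 0+2,  T = 0+3,  ...)
```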
Cards came in different flavors. By the 1960s, 80 columns was the standard, but those 80 columns could be used to represent different things. The basic punch card was unlabeled, but cards meant for COBOL programming, for example, divided the 80 columns into fields. On a COBOL card, the last eight columns were reserved for an identification number, which could be used to automatically sort a stack of cards if it were dropped (apparently a perennial hazard). Another column, column seven, could be used to indicate that the statement on this card was a continuation of a statement on a previous card. This meant that if you were truly desperate you could circumvent the 80-character limit, though whether a two-card statement counts as one long line or just two is unclear. FORTRAN cards were similar but had different fields. Universities often watermarked the punch cards handed out by their computing centers, while other kinds of designs were introduced for special occasions like the [1976 bicentennial][3].
Ultimately the cards had to be read and understood by a computer. IBM sold a System/360 peripheral called the IBM 2540 which could read up to 1000 cards per minute. The IBM 2540 ran electrical brushes across the surface of each card which made contact with a plate behind the cards wherever there was a hole. Once read, the System/360 family of computers represented the characters on each punch card using an 8-bit encoding called EBCDIC, which stood for Extended Binary Coded Decimal Interchange Code. EBCDIC was a proper binary encoding, but it still traced its roots back to the punch card via an earlier encoding called BCDIC, a 6-bit encoding which used the low four bits to represent a punch card’s digit rows and the high two bits to represent the zone rows. Punch card programmers would typically hand their cards to the actual computer operators, who would feed the cards into the IBM 2540 and then hand the printed results back to the programmer. The programmer usually didn’t see the computer at all.
What the programmer did see a lot of was the card punch. The 029 was not a computer, but that doesn’t mean that it wasn’t a complicated machine. The best way to understand what it was like using the 029 is to watch [this instructional video][4] made by the computing center at the University of Michigan in 1967. I’m going to do my best to summarize it here, but if you don’t watch the video you will miss out on all the wonderful clacking and whooshing.
The 029 was built around a U-shaped track that the punch cards traveled along. On the right-hand side, at the top of the U, was the card hopper, which you would typically load with a fresh stack of cards before using the machine. The IBM 029 worked primarily with 80-column cards, but the card hopper could accommodate smaller cards if needed. Your punch cards would start in the card hopper, travel along the line of the U, and then end up in the stacker, at the top of the U on the left-hand side. The cards would accumulate there in the order that you punched them.
To turn the machine on, you flipped a switch under the desk at about the height of your knees. You then pressed the “FEED” key twice to get cards loaded into the machine. The business part of the card track, the bottom of the U, was made up of three separate stations: On the right was a kind of waiting area, in the middle was the punching station, and on the left was the reading station. Pressing the “FEED” key twice loaded one card into the punching station and one card into the waiting area behind it. A column number indicator right above the punching station told you which column you were currently punching. With every keystroke, the machine would punch the requisite holes, print the appropriate character at the top of the card, and then advance the card through the punching station by one column. If you punched all 80 columns, the card would automatically be released to the reading station and a new card would be loaded into the punching station. If you wanted this to happen before you reached the 80th column, you could press the “REL” key (for “release”).
The printed characters made it easy to spot a mistake. But fixing a mistake, as the University of Michigan video warns, is not as easy as whiting out the printed character at the top of the card and writing in a new one. The holes are all that the computer will read. Nor is it as easy as backspacing one space and typing in a new character. The holes have already been punched in the column, after all, and cannot be unpunched. Punching more holes will only produce an invalid combination not associated with any character. The IBM 029 did have a backspace button that moved the punched card backward one column, but the button was placed on the face of the machine instead of on the keyboard. This was probably done to discourage its use, since backspacing was so seldom what the user actually wanted to do.
Instead, the only way to correct a mistake was to scrap the incorrect card and punch a new one. This is where the reading station came in handy. Say you made a mistake in the 68th column of a card. To fix your mistake, you could carefully repunch the first 67 columns of a new card and then punch the correct character in the 68th column. Alternatively, you could release the incorrect card to the reading station, load a new card into the punching station, and hold down the “DUP” key (for duplicate) until the column number indicator reads 68. You could then correct your mistake by punching the correct character. The reading station and the “DUP” key together allowed IBM 029 operators to easily copy the contents of one card to the next. There were all sorts of reasons to do this, but correcting mistakes was the most common.
The “DUP” key allowed the 029’s operator to invoke the duplicate functionality manually. But the 029 could also duplicate automatically where necessary. This was particularly useful when punched cards were used to record data rather than programs. For example, you might be using each card to record information about a single undergraduate university student. On each card, you might have a field that contains the name of that student’s residence hall. Perhaps you find yourself entering data for all the students in one residence hall at one time. In that case, you’d want the 029 to automatically copy over the previous card’s residence hall field every time you reached the first column of the field.
Automated behavior like this could be programmed into the 029 by using the program drum. The drum sat upright in the middle of the U above the punching station. You programmed the 029 by punching holes in a card and wrapping that card around the program drum. The punched card allowed you to specify the automatic behavior you expected from the machine at each column of the card currently in the punching station. You could specify that a column should automatically be copied from the previous card, which is how an 029 operator might more quickly enter student records. You could also specify, say, that a particular field should contain numeric or alphabetic characters, or that a given field should be left blank and skipped altogether. The program drum made it much easier to punch schematized cards where certain column ranges had special meanings. There is another [“advanced” instructional video][5] produced by the University of Michigan that covers the program drum that is worth watching, provided, of course, that you have already mastered the basics.
Watching either of the University of Michigan videos today, what’s surprising is how easy the card punch is to operate. Correcting mistakes is tedious, but otherwise the machine seems to be less of an obstacle than I would have expected. Moving from one card to the next is so seamless that I can imagine COBOL or FORTRAN programmers forgetting that they are creating separate cards rather than one long continuous text file. On the other hand, it’s interesting to consider how card punches, even though they were only an input tool, probably limited how early programming languages evolved. Structured programming would eventually come along and encourage people to think of entire blocks of code as one unit, but I can see how punch card programming’s emphasis on each line made structured programming hard to conceive of. It’s no wonder that punch card programmers were not the ones that decided to enclose blocks with single curly braces entirely on their own lines. How wasteful that would have seemed!
So even though nobody programs using punch cards anymore, every programmer ought to [try it][6] at least once—if only to understand why COBOL and FORTRAN look the way they do, or how 80 characters somehow became everybody’s favorite character limit.
If you enjoyed this post, more like it come out every two weeks! Follow [@TwoBitHistory][7] on Twitter or subscribe to the [RSS feed][8] to make sure you know when a new post is out.

--------------------------------------------------------------------------------

via: https://twobithistory.org/2018/06/23/ibm-029-card-punch.html

Author: [Two-Bit History][a]
Selected by: [lujun9972][b]
Translator: [译者ID](https://github.com/译者ID)
Proofreader: [校对者ID](https://github.com/校对者ID)

This article is an original translation by [LCTT](https://github.com/LCTT/TranslateProject), proudly presented by [Linux China](https://linux.cn/)

[a]: https://twobithistory.org
[b]: https://github.com/lujun9972
[1]: https://twobithistory.org/images/ibm029_front.jpg
[2]: https://twobithistory.org/images/card.png
[3]: http://www.jkmscott.net/data/Punched%20card%20013.jpg
[4]: https://www.youtube.com/watch?v=kaQmAybWn-w
[5]: https://www.youtube.com/watch?v=SWD1PwNxpoU
[6]: http://www.masswerk.at/keypunch/
[7]: https://twitter.com/TwoBitHistory
[8]: https://twobithistory.org/feed.xml
@@ -0,0 +1,126 @@
Dawn of the Microcomputer: The Altair 8800
======

Subscribers to Popular Electronics were a sophisticated group. The magazine’s editor, Arthur Salsberg, felt compelled to point out as much in the editorial section of the [December 1974 issue][1]. The magazine had received letters complaining that a recent article, titled “How to Set Up a Home TV Service Shop,” would inspire a horde of amateur TV technicians to go out and undercut professional repairmen, doing great damage to everyone’s TVs in the process. Salsberg thought this concern was based on a misunderstanding about who read Popular Electronics. He explained that, according to the magazine’s own surveys, 52% of Popular Electronics subscribers were electronics professionals of some kind and 150,000 of them had repaired a TV in the last 60 days. Moreover, the average Popular Electronics subscriber had spent $470 on electronics equipment ($3578 in 2018) and possessed such necessities as VOMs, VTVMs, tube testers, transistor testers, r-f signal generators, and scopes. “Popular Electronics readers are not largely neophytes,” Salsberg concluded.
I am surprised that anyone familiar with Popular Electronics could ever have doubted its subscribers. I certainly haven’t repaired a TV in the last 60 days. My computer is a block of aluminum that I have never seen the inside of. Yet the December 1974 issue of Popular Electronics features articles such as “Standing Wave Ratio: What It Is and How to Deal with It” and “Test Scene: Uses for Your Multimeter.” Even the ads are intimidating. One of them, which seems to be for some kind of stereo system, boldly proclaims that “no piece of audio equipment is as eagerly awaited as the ‘one four-channel unit that does everything—i.e. the receiver with built-in circuitry for SQ, RM and CD-4 record decoding.’” The mere hobbyists subscribed to Popular Electronics, let alone the professionals, must have been very knowledgeable indeed.
But Popular Electronics readers were introduced to something in the [January 1975 issue][2] that they had never encountered before. Below a heading that read “PROJECT BREAKTHROUGH,” the magazine’s cover showed a large gray and black box whose front panel bore a complicated array of lights and toggles. This was the Altair 8800, the “world’s first minicomputer kit to rival commercial models,” available for under $400. Though advertised as a “minicomputer,” the Altair would actually be the first commercially successful member of a new class of computers, first known as “microcomputers” and then eventually as PCs. The Altair was small enough and cheap enough that the average family could have one at home. Its appearance in Popular Electronics magazine meant that, as Salsberg wrote in that issue, “the home computer age is here—finally.”
![January 1975 cover of Popular Electronics][3]
I have written briefly about [the Altair before][4], but I think the Altair is worth revisiting. It was not an especially powerful computer compared to others available at the time (though it cost significantly less money). Nor was it the first general-purpose computer to incorporate a microprocessor chip—at least three microprocessor-based computers preceded it. But the Altair was and is a kind of Ur-Computer for all of us. It was the first popular computer in a lineage that includes our own devices, whereas the mainframes and bulky minicomputers that predated the Altair were an entirely different kind of machine, programmed by punched card or else rarely interacted with directly. The Altair was also a radically simple computer, without any kind of operating system or even a bootloader. Unless you bought peripherals for it, the Altair was practically a bank of RAM with switches and lights on the front. The Altair’s simplicity makes learning about it a great way to reacquaint yourself with the basic concepts of computing, exactly as they were encountered by the denizens of the old analog world as they first ventured into our digital one.
|
||||
|
||||
### Roberts and Co.
|
||||
|
||||
The Altair was designed and manufactured by a company called Micro Instrumentation and Telemetry Systems (MITS), based in Albuquerque, New Mexico. MITS was run by a man named H. Edward Roberts. The company had started off making telemetry systems for model rocket kits before moving into the calculator market, which in the early 1970s was booming. Integrated circuits were bringing the cost of a calculator down dramatically and suddenly every working American professional had to have one. But the calculator market was ruthlessly competitive and, by the beginning of 1974, MITS was deeply in debt.
|
||||
|
||||
The year 1974 would prove to be an “annus mirabilis” in computing. In January, Hewlett-Packard introduced the HP-65, the world’s first programmable handheld calculator. In April, Intel released the Intel 8080, its second 8-bit microprocessor and the first microprocessor to become widely popular. Then, in July, Radio Electronics magazine advertised a build-it-yourself minicomputer called the Mark-8, which employed the Intel 8008 microprocessor that Intel had released in 1972. The Mark-8 was only the third computer ever built using a microprocessor and it was the first to appear on the cover of a magazine. The Mark-8’s appearance in Radio Electronics pushed Popular Electronics to look for a minicomputer project of their own to feature.
|
||||
|
||||
Popular Electronics subscribers actually received their copies of the January 1975 issue in the mail in December of 1974. So the announcement of the Altair closed out the “annus mirabilis” that was that year. The Altair’s introduction was so momentous because never before had such a fully capable computer been offered to the public at an affordable price. The PDP-8, one of the most popular minicomputers at the time, could only be bought for several thousand dollars. Yet the Intel 8080 chip at the heart of the Altair made it almost as capable as the PDP-8, if not more so; the 8080 supported a wider instruction set and the Altair could be expanded to have up to 64kb of memory, while the stock PDP-8 typically only had 4kb. The Altair was also more powerful than the Mark-8, which, because it was based on the Intel 8008, could only address 16kb of memory. And whereas the Mark-8 had to be built from scratch by customers with only a booklet and printed circuit boards to guide them, the Altair could be purchased fully assembled, though MITS soon became so inundated with orders that the only real way to get an Altair was to order the construction kit.
|
||||
|
||||
For many Popular Electronics readers, the Altair was their first window into the world of digital computing. The article introducing the Altair in the January 1975 issue was written by Roberts and the Altair’s co-designer, William Yates. Roberts and Yates took pains to explain, in terms familiar to the electricians and radio enthusiasts in their audience, the basic concepts underlying digital hardware and computer programming. “A computer,” they wrote, “is basically a piece of variable hardware. By changing the bit pattern stored in the memory, the hardware can be altered from one type of device to another.” Of programming, meanwhile, Roberts and Yates wrote that the basic concepts are “simple enough to master in a relatively short time,” but that becoming “an efficient programmer requires a lot of experience and a large amount of creativity,” which sounds about right to me. The article included a detailed diagram explaining all the constituent circuits of the Intel 8080 CPU, even though readers would receive at least that part fully assembled. It explained the difference between a CPU and a computer’s memory unit, the uses of a stack pointer, and the enormous advantages offered by assembly languages and higher-level languages like FORTRAN and BASIC over manual entry of machine code.
|
||||
|
||||
Popular Electronics had in fact been running a series written by Roberts for several issues before January 1975. The series was billed as a short course in “digital logic.” In the December 1974 issue, Roberts walked readers through building a “very low cost computer terminal,” which was basically an octal keypad that could input values into an 8-bit computer. In the course of describing the keypad, Roberts explained how transistor-to-transistor logic works and also how to construct a flip-flop, a kind of circuit capable of “remembering” digital values. The keypad, Roberts promised, could be used with the Altair computer, to be announced the following month.
|
||||
|
||||
It’s unclear how many Popular Electronics readers actually built the keypad, but it would have been a very useful thing to have. Without a keypad or some other input mechanism, the only way to input values into the Altair was through the switches on the front panel. The front panel had a row of 16 switches that could be used to set an address and a lower row of eight switches that could be used to control the operation of the computer. The eight right-most switches in the row of 16 could also be used to specify a value to be stored in memory. This made sense because the Intel 8080 used 16-bit values to address 8-bit words. The 16 switches on the front panel each represented a bit—the up position represented a one, while the down position represented a zero. Interacting with a computer this way is a revelation (more on that in a minute), because the Altair’s front panel is a true binary interface. It’s as close as you can get to the bare metal.
|
||||
|
||||
As alien as the Altair’s interface is to us today, it was not unusual for its time. The PDP-8, for example, had a similar binary input mechanism on its front panel, though the PDP-8’s switches were nicer and colored in that attractive orange and yellow color scheme that really ought to make a comeback. The PDP-8, however, was often paired with a paper-tape reader or a teletype machine, which made program entry much easier. These I/O devices were expensive, meaning that most Altair users in the early days were stuck with the front panel. As you might imagine, entering long programs via the switches was a chore. Eventually the Altair could be hooked up to a cassette recorder and programs could be loaded that way. Bill Gates and Paul Allen, in what would become Microsoft’s first ever commercial venture, also wrote a version of BASIC for the Altair that MITS licensed and released in the middle of 1975. Users that could afford a teletype could then [load BASIC into the Altair via paper tape][5] and interact with their Altair through text. BASIC, which had become everyone’s favorite introductory programming language in schools, would go on to become the standard interface to the machines produced in the early microcomputer era.
|
||||
|
||||
### z80pack
|
||||
|
||||
Thanks to the efforts of several internet people, in particular a person named Udo Munk, you can run a simulation of the Altair on your computer. The simulation is built on top of some software that emulates the Zilog Z80 CPU, a CPU designed to be software-compatible with the Intel 8080. The Altair simulation allows you to input programs entirely via the front panel switches like early users of the Altair had to do. Though clicking on switches does not offer the same tactile satisfaction as flipping real switches, playing with the Altair simulation is a great way to appreciate how a binary human/computer interface was both horribly inefficient and, at least in my opinion, charmingly straightforward.
|
||||
|
||||
z80pack, Udo Munk’s Z80 emulation package, can be downloaded from the z80pack website. There are instructions in [my last Altair post][4] explaining how to get it set up on Mac OS. If you are able to compile both the FrontPanel library and the `altairsim` executable, you should be able to run `altairsim` and see the following window:
|
||||
|
||||
![Simulated Altair Front Panel][6]
|
||||
|
||||
By default, at least with the version of z80pack that I am using (1.36), the Altair is configured with something called Tarbell boot ROM, which I think is used to load disk images. In practice, what this means is that you can’t write values into the first several words in RAM. If you edit the file `/altairsim/conf/system.conf`, you can instead set up a simple Altair that has 16 pages of RAM and no ROM or bootloader software at all. You can also use this configuration file to increase the size of the window the simulation runs in, which is handy.
|
||||
|
||||
The front panel of the Altair is intimidating, but in reality there isn’t that much going on. The [Altair manual][7] does a good job of explaining the many switches and status lights, as does this [YouTube video][8]. To enter and run a simple program, you only really need to know a few things. The lights labeled D0 through D7 near the top right of the Altair indicate the contents of the currently addressed word. The lights labeled A0 through A15 indicate the current address. The 16 switches below the address lights can be used to set a new address; when the “EXAMINE” switch is pressed upward, the data lights update to show the contents of the newly addressed word. In this way, you can “peek” at all the words in memory. You can also press the “EXAMINE” switch down to the “EXAMINE NEXT” position, which automatically examines the next memory address and makes peeking at sequential words significantly easier.
|
||||
|
||||
To save a bit pattern to a word, you have to set the bit pattern using the right-most eight switches labeled 0 through 7. You then press the “DEPOSIT” switch upward.
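To make the panel semantics concrete, here is a minimal sketch in C of how I think about the relationship between the switches, the current address, and RAM. The function names and the model are my own invention, not anything from z80pack or the real hardware:

```c
#include <stdint.h>
#include <stdio.h>

/* A hypothetical model of the front panel: 16-bit addresses into
 * 8-bit words, with EXAMINE setting the current address and DEPOSIT
 * writing the data switches into the currently addressed word. */
static uint8_t ram[65536];
static uint16_t addr;                      /* shown on lights A0-A15 */

void examine(uint16_t a)  { addr = a; }           /* EXAMINE        */
void examine_next(void)   { addr++; }             /* EXAMINE NEXT   */
void deposit(uint8_t val) { ram[addr] = val; }    /* DEPOSIT        */

int main(void) {
    examine(0);        /* address switches at 0, flip EXAMINE up    */
    deposit(0072);     /* data switches at 00 111 010 (LDA)         */
    examine_next();
    deposit(0200);     /* 10 000 000, the low address byte for 128  */
    printf("word 1 = %03o\n", ram[1]);     /* prints 200            */
    return 0;
}
```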
|
||||
|
||||
In the [February 1975 issue][9] of Popular Electronics, Roberts and Yates walked Altair owners through inputting a small sample program to ensure that their Altair was functioning. The program loads two integers from memory, adds them, and saves the sum back into memory. The program consists of only six instructions, but those six instructions involve 14 words of memory altogether, which takes some time to input correctly. The sample program also appears in the Altair manual in table form, which I’ve reproduced here:
|
||||
|
||||
| Address | Mnemonic  | Bit Pattern | Octal Equivalent |
| ------- | --------- | ----------- | ---------------- |
| 0       | LDA       | 00 111 010  | 0 7 2            |
| 1       | (address) | 10 000 000  | 2 0 0            |
| 2       | (address) | 00 000 000  | 0 0 0            |
| 3       | MOV B, A  | 01 000 111  | 1 0 7            |
| 4       | LDA       | 00 111 010  | 0 7 2            |
| 5       | (address) | 10 000 001  | 2 0 1            |
| 6       | (address) | 00 000 000  | 0 0 0            |
| 7       | ADD B     | 10 000 000  | 2 0 0            |
| 8       | STA       | 00 110 010  | 0 6 2            |
| 9       | (address) | 10 000 010  | 2 0 2            |
| 10      | (address) | 00 000 000  | 0 0 0            |
| 11      | JMP       | 11 000 011  | 3 0 3            |
| 12      | (address) | 00 000 000  | 0 0 0            |
| 13      | (address) | 00 000 000  | 0 0 0            |
|
||||
|
||||
If you input each word in the above table into the Altair via the switches, you end up with a program that loads the value in word 128, adds it to the value in word 129, and finally saves the sum into word 130. The addresses that accompany each instruction taking an address are given with the least-significant byte first, which is why the second byte is always zeroed out (no addresses are higher than 255). Once you’ve input the program and entered some values into words 128 and 129, you can press the “RUN” switch into the down position briefly before pushing it into the “STOP” position. Since the program loops, it repeatedly adds those values and saves the sum thousands of times a second. The sum is always the same though, so if you peek at word 130 after stopping the program, you should find the correct answer.
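Incidentally, that least-significant-byte-first convention is easy to check for yourself. This little C fragment (my own example) combines the two address bytes that follow the first LDA in the table (200 and 000 octal) into the 16-bit address 128:

```c
#include <stdint.h>
#include <stdio.h>

int main(void) {
    uint8_t low = 0200, high = 0000;   /* words 1 and 2 of the program */
    uint16_t address = (uint16_t)(low | (high << 8));
    printf("%u\n", address);           /* prints 128                   */
    return 0;
}
```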
|
||||
|
||||
I don’t know whether any regular users of the Altair ever had access to an assembler, but z80pack includes one. The z80pack assembler, `z80asm`, is meant for Z80 assembly, so it uses a different set of mnemonics altogether. But since the Z80 was designed to be compatible with software written for the Intel 8080, the opcodes are all the same, even if the mnemonics are different. So just to illustrate what it might have been like to write the same program in assembly, here is a version that can be assembled by `z80asm` and loaded into the Altair:
|
||||
|
||||
```
|
||||
ORG 0000H
|
||||
START: LD A,(80H) ;Load from address 128.
|
||||
LD B,A ;Move loaded value from accumulator (A) to reg B.
|
||||
LD A,(81H) ;Load from address 129.
|
||||
ADD A,B ;Add A and B.
|
||||
LD (82H),A ;Store A at address 130.
|
||||
JP START ;Jump to start.
|
||||
```
|
||||
|
||||
You can turn this into something called an Intel HEX file by invoking the assembler like so (after you have compiled it):
|
||||
|
||||
```
|
||||
$ ./z80asm -fh -oadd.hex add.asm
|
||||
```
|
||||
|
||||
The `-f` flag, here taking `h` as an argument, specifies that a HEX file should be output. You can then load the program into the Altair by passing the HEX file in using the `-x` flag:
|
||||
|
||||
```
|
||||
$ ./altairsim -x add.hex
|
||||
```
|
||||
|
||||
This sets up the first 14 words in memory as if you had input the values manually via the switches. Instead of doing all that again, you can just run the program by using the “RUN” switch as before. Much easier!
|
||||
|
||||
As I said, I don’t think many Altair users wrote software this way. Once Altair BASIC became available, writing BASIC programs was probably the easiest way to program the Altair. z80pack also includes several HEX files containing different versions of Altair BASIC; the one I’ve been able to get working is version 4.0 of 4K BASIC, which you can load into the simulator like so:
|
||||
|
||||
```
|
||||
$ ./altairsim -x basic4k40.hex
|
||||
```
|
||||
|
||||
If you turn the simulated machine on and hit the “RUN” switch, you should see that BASIC has started talking to you in your terminal window. It first prompts you to enter the amount of memory you have available, which should be 4000 bytes. It then asks you a few more questions before presenting you with the “OK” prompt, which Gates and Allen used instead of the standard “READY” to save memory. From there, you can just use BASIC:
|
||||
|
||||
```
|
||||
OK
|
||||
PRINT 3 + 4
|
||||
7
|
||||
```
|
||||
|
||||
Though running BASIC with only 4kb of memory didn’t give you a lot of room, you can see how it would have been a significant step up from using the front panel.
|
||||
|
||||
The Altair, of course, was nowhere near as capable as the home desktops and laptops we have available to us today. Even something like the Macintosh, released less than a decade later, seems like a quantum leap forward over the spartan Altair. But to those first Popular Electronics readers that bought the kit and assembled it, the Altair was a real, fully capable computer that they could own for themselves, all for the low cost of $400 and half the surface space of the credenza. This would have been an amazing thing for people that had thus far only been able to interact with computers by handing [a stack of cards][10] or a roll of tape to another human being entrusted with the actual operation of the computer. Subsequent microcomputers would improve upon what the Altair offered and quickly become much easier to use, but they were all, in some sense, just more complicated Altairs. The Altair—almost Brutalist in its minimalism—was the bare-essentials blueprint for all that would follow.
|
||||
|
||||
If you enjoyed this post, more like it come out every two weeks! Follow [@TwoBitHistory][11] on Twitter or subscribe to the [RSS feed][12] to make sure you know when a new post is out.
|
||||
|
||||
Previously on TwoBitHistory…
|
||||
|
||||
> "I invite you to come along with me on an exciting journey and spend the next ten minutes of your life learning about a piece of software nobody has used in the last decade." <https://t.co/R9zA5ibFMs>
|
||||
>
|
||||
> — TwoBitHistory (@TwoBitHistory) [July 7, 2018][13]
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://twobithistory.org/2018/07/22/dawn-of-the-microcomputer.html
|
||||
|
||||
作者:[Two-Bit History][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://twobithistory.org
|
||||
[b]: https://github.com/lujun9972
|
||||
[1]: https://www.americanradiohistory.com/Archive-Poptronics/70s/1974/Poptronics-1974-12.pdf
|
||||
[2]: https://www.americanradiohistory.com/Archive-Poptronics/70s/1975/Poptronics-1975-01.pdf
|
||||
[3]: https://twobithistory.org/images/jan1975-altair.jpg
|
||||
[4]: https://twobithistory.org/2017/12/02/simulating-the-altair.html
|
||||
[5]: https://www.youtube.com/watch?v=qv5b1Xowxdk
|
||||
[6]: https://www.autometer.de/unix4fun/z80pack/altair.png
|
||||
[7]: http://www.classiccmp.org/dunfield/altair/d/88opman.pdf
|
||||
[8]: https://www.youtube.com/watch?v=suyiMfzmZKs
|
||||
[9]: https://www.americanradiohistory.com/Archive-Poptronics/70s/1975/Poptronics-1975-02.pdf
|
||||
[10]: https://twobithistory.org/2018/06/23/ibm-029-card-punch.html
|
||||
[11]: https://twitter.com/TwoBitHistory
|
||||
[12]: https://twobithistory.org/feed.xml
|
||||
[13]: https://twitter.com/TwoBitHistory/status/1015647820353867776?ref_src=twsrc%5Etfw
|
116
sources/talk/20180805 Where Vim Came From.md
Normal file
116
sources/talk/20180805 Where Vim Came From.md
Normal file
@ -0,0 +1,116 @@
|
||||
Where Vim Came From
|
||||
======
|
||||
I recently stumbled across a file format known as Intel HEX. As far as I can gather, Intel HEX files (which use the `.hex` extension) are meant to make binary images less opaque by encoding them as lines of hexadecimal digits. Apparently they are used by people who program microcontrollers or need to burn data into ROM. In any case, when I opened up a HEX file in Vim for the first time, I discovered something shocking. Here was this file format that, at least to me, was deeply esoteric, but Vim already knew all about it. Each line of a HEX file is a record divided into different fields—Vim had gone ahead and colored each of the fields a different color. `set ft?` I asked, in awe. `filetype=hex`, Vim answered, triumphant.
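For the curious, the format itself is simple enough to sketch. Each record is a colon followed by hex pairs: a byte count, a 16-bit address, a record type, the data, and a checksum chosen so that all the decoded bytes sum to zero mod 256. Here is a minimal checksum verifier in C (my own sketch, with a made-up two-byte data record; it has nothing to do with how Vim implements its highlighting):

```c
#include <stdio.h>
#include <string.h>

/* Decode one hex pair, e.g. "EB" -> 0xEB. */
static unsigned hexbyte(const char *s) {
    unsigned b = 0;
    sscanf(s, "%2x", &b);
    return b;
}

/* An Intel HEX record is valid if all its bytes, including the
 * trailing checksum byte, sum to 0 modulo 256. */
int checksum_ok(const char *record) {
    if (record[0] != ':') return 0;
    unsigned sum = 0;
    for (size_t i = 1; i + 1 < strlen(record); i += 2)
        sum += hexbyte(record + i);
    return (sum & 0xFF) == 0;
}

int main(void) {
    /* Two data bytes (01 02) destined for address 0x0010. */
    const char *rec = ":020010000102EB";
    printf("%s\n", checksum_ok(rec) ? "valid" : "corrupt");
    return 0;
}
```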
|
||||
|
||||
Vim is everywhere. It is used by so many people that something like HEX file support shouldn’t be a surprise. Vim comes pre-installed on Mac OS and has a large constituency in the Linux world. It is familiar even to people that hate it, because enough popular command line tools will throw users into Vim by default that the uninitiated getting trapped in Vim has become [a meme][1]. There are major websites, including Facebook, that will scroll down when you press the `j` key and up when you press the `k` key—the unlikely high-water mark of Vim’s spread through digital culture.
|
||||
|
||||
And yet Vim is also a mystery. Unlike React, for example, which everyone knows is developed and maintained by Facebook, Vim has no obvious sponsor. Despite its ubiquity and importance, there doesn’t seem to be any kind of committee or organization that makes decisions about Vim. You could spend several minutes poking around the [Vim website][2] without getting a better idea of who created Vim or why. If you launch Vim without giving it a file argument, then you will see Vim’s startup message, which says that Vim is developed by “Bram Moolenaar et al.” But that doesn’t tell you much. Who is Bram Moolenaar and who are his shadowy confederates?
|
||||
|
||||
Perhaps more importantly, while we’re asking questions, why does exiting Vim involve typing `:wq`? Sure, it’s a “write” operation followed by a “quit” operation, but that is not a particularly intuitive convention. Who decided that copying text should instead be called “yanking”? Why is `:%s/foo/bar/gc` short for “find and replace”? Vim’s idiosyncrasies seem too arbitrary to have been made up, but then where did they come from?
|
||||
|
||||
The answer, as is so often the case, begins with that ancient crucible of computing, Bell Labs. In some sense, Vim is only the latest iteration of a piece of software—call it the “wq text editor”—that has been continuously developed and improved since the dawn of the Unix epoch.
|
||||
|
||||
### Ken Thompson Writes a Line Editor
|
||||
|
||||
In 1966, Bell Labs hired Ken Thompson. Thompson had just completed a Master’s degree in Electrical Engineering and Computer Science at the University of California, Berkeley. While there, he had used a text editor called QED, written for the Berkeley Timesharing System between 1965 and 1966. One of the first things Thompson did after arriving at Bell Labs was rewrite QED for the MIT Compatible Time-Sharing System. He would later write another version of QED for the Multics project. Along the way, he expanded the program so that users could search for lines in a file and make substitutions using regular expressions.
|
||||
|
||||
The Multics project, which like the Berkeley Timesharing System sought to create a commercially viable time-sharing operating system, was a partnership between MIT, General Electric, and Bell Labs. AT&T eventually decided the project was going nowhere and pulled out. Thompson and fellow Bell Labs researcher Dennis Ritchie, now without access to a time-sharing system and missing the “feel of interactive computing” that such systems offered, set about creating their own version, which would eventually be known as Unix. In August 1969, while his wife and young son were away on vacation in California, Thompson put together the basic components of the new system, allocating “a week each to the operating system, the shell, the editor, and the assembler.”
|
||||
|
||||
The editor would be called `ed`. It was based on QED but was not an exact re-implementation. Thompson decided to ditch certain QED features. Regular expression support was pared back so that only relatively simple regular expressions would be understood. QED allowed users to edit several files at once by opening multiple buffers, but `ed` would only work with one buffer at a time. And whereas QED could execute a buffer containing commands, `ed` would do no such thing. These simplifications may have been called for. Dennis Ritchie has said that going without QED’s advanced regular expressions was “not much of a loss.”
|
||||
|
||||
`ed` is now a part of the POSIX specification, so if you have a POSIX-compliant system, you will have it installed on your computer. It’s worth playing around with, because many of the `ed` commands are today a part of Vim. In order to write a buffer to disk, for example, you have to use the `w` command. In order to quit the editor, you have to use the `q` command. These two commands can be specified on the same line at once—hence, `wq`. Like Vim, `ed` is a modal editor; to enter input mode from command mode you would use the insert command (`i`), the append command (`a`), or the change command (`c`), depending on how you are trying to transform your text. `ed` also introduced the `s/foo/bar/g` syntax for finding and replacing, or “substituting,” text.
|
||||
|
||||
Given all these similarities, you might expect the average Vim user to have no trouble using `ed`. But `ed` is not at all like Vim in another important respect. `ed` is a true line editor. It was written and widely used in the days of the teletype printer. When Ken Thompson and Dennis Ritchie were hacking away at Unix, they looked like this:
|
||||
|
||||
![Ken Thompson interacting with a PDP-11 via teletype.][3]
|
||||
|
||||
`ed` doesn’t allow you to edit lines in place among the other lines of the open buffer, or move a cursor around, because `ed` would have to reprint the entire file every time you made a change to it. There was no mechanism in 1969 for `ed` to “clear” the contents of the screen, because the screen was just a sheet of paper and everything that had already been output had been output in ink. When necessary, you can ask `ed` to print out a range of lines for you using the list command (`l`), but most of the time you are operating on text that you can’t see. Using `ed` is thus a little like trying to find your way around a dark house with an underpowered flashlight. You can only see so much at once, so you have to try your best to remember where everything is.
|
||||
|
||||
Here’s an example of an `ed` session. I’ve added comments (after the `#` character) explaining the purpose of each line, though if these were actually entered `ed` wouldn’t recognize them as comments and would complain:
|
||||
|
||||
```
|
||||
[sinclairtarget 09:49 ~]$ ed
|
||||
i # Enter input mode
|
||||
Hello world!
|
||||
|
||||
Isn't it a nice day?
|
||||
. # Finish input
|
||||
1,2l # List lines 1 to 2
|
||||
Hello world!$
|
||||
$
|
||||
2d # Delete line 2
|
||||
,l # List entire buffer
|
||||
Hello world!$
|
||||
Isn't it a nice day?$
|
||||
s/nice/terrible/g # Substitute globally
|
||||
,l
|
||||
Hello world!$
|
||||
Isn't it a terrible day?$
|
||||
w foo.txt # Write to foo.txt
|
||||
38 # (bytes written)
|
||||
q # Quit
|
||||
[sinclairtarget 10:50 ~]$ cat foo.txt
|
||||
Hello world!
|
||||
Isn't it a terrible day?
|
||||
```
|
||||
|
||||
As you can see, `ed` is not an especially talkative program.
|
||||
|
||||
### Bill Joy Writes a Text Editor
|
||||
|
||||
`ed` worked well enough for Thompson and Ritchie. Others found it difficult to use and it acquired a reputation for being a particularly egregious example of Unix’s hostility toward the novice. In 1975, a man named George Coulouris developed an improved version of `ed` on the Unix system installed at Queen Mary’s College, London. Coulouris wrote his editor to take advantage of the video displays that he had available at Queen Mary’s. Unlike `ed`, Coulouris’ program allowed users to edit a single line in place on screen, navigating through the line keystroke by keystroke (imagine using Vim on one line at a time). Coulouris called his program `em`, or “editor for mortals,” which he had supposedly been inspired to do after Thompson paid a visit to Queen Mary’s, saw the program Coulouris had built, and dismissed it, saying that he had no need to see the state of a file while editing it.
|
||||
|
||||
In 1976, Coulouris brought `em` with him to UC Berkeley, where he spent the summer as a visitor to the CS department. This was exactly ten years after Ken Thompson had left Berkeley to work at Bell Labs. At Berkeley, Coulouris met Bill Joy, a graduate student working on the Berkeley Software Distribution (BSD). Coulouris showed `em` to Joy, who, starting with Coulouris’ source code, built out an improved version of `ed` called `ex`, for “extended `ed`.” Version 1.1 of `ex` was bundled with the first release of BSD Unix in 1978. `ex` was largely compatible with `ed`, but it added two more modes: an “open” mode, which enabled single-line editing like had been possible with `em`, and a “visual” mode, which took over the whole screen and enabled live editing of an entire file like we are used to today.
|
||||
|
||||
For the second release of BSD in 1979, an executable named `vi` was introduced that did little more than open `ex` in visual mode.
|
||||
|
||||
`ex`/`vi` (henceforth `vi`) established most of the conventions we now associate with Vim that weren’t already a part of `ed`. The video terminal that Joy was using was a Lear Siegler ADM-3A, which had a keyboard with no cursor keys. Instead, arrows were painted on the `h`, `j`, `k`, and `l` keys, which is why Joy used those keys for cursor movement in `vi`. The escape key on the ADM-3A keyboard was also where today we would find the tab key, which explains how such a hard-to-reach key was ever assigned an operation as common as exiting a mode. The `:` character that prefixes commands also comes from `vi`, which in regular mode (i.e. the mode entered by running `ex`) used `:` as a prompt. This addressed a long-standing complaint about `ed`, which, once launched, greets users with utter silence. In visual mode, saving and quitting now involved typing the classic `:wq`. “Yanking” and “putting,” marks, and the `set` command for setting options were all part of the original `vi`. The features we use in the course of basic text editing today in Vim are largely `vi` features.
|
||||
|
||||
![A Lear Siegler ADM-3A keyboard.][4]
|
||||
|
||||
`vi` was the only text editor bundled with BSD Unix other than `ed`. At the time, Emacs could cost hundreds of dollars (this was before GNU Emacs), so `vi` became enormously popular. But `vi` was a direct descendant of `ed`, which meant that the source code could not be modified without an AT&T source license. This motivated several people to create open-source versions of `vi`. STEVIE (ST Editor for VI Enthusiasts) appeared in 1987, Elvis appeared in 1990, and `nvi` appeared in 1994. Some of these clones added extra features like syntax highlighting and split windows. Elvis in particular saw many of its features incorporated into Vim, since so many Elvis users pushed for their inclusion.
|
||||
|
||||
### Bram Moolenaar Writes Vim
|
||||
|
||||
“Vim”, which now abbreviates “Vi Improved”, originally stood for “Vi Imitation.” Like many of the other `vi` clones, Vim began as an attempt to replicate `vi` on a platform where it was not available. Bram Moolenaar, a Dutch software engineer working for a photocopier company in Venlo, the Netherlands, wanted something like `vi` for his brand-new Amiga 2000. Moolenaar had grown used to using `vi` on the Unix systems at his university and it was now “in his fingers.” So in 1988, using the existing STEVIE `vi` clone as a starting point, Moolenaar began work on Vim.
|
||||
|
||||
Moolenaar had access to STEVIE because STEVIE had previously appeared on something called a Fred Fish disk. Fred Fish was an American programmer that mailed out a floppy disk every month with a curated selection of the best open-source software available for the Amiga platform. Anyone could request a disk for nothing more than the price of postage. Several versions of STEVIE were released on Fred Fish disks. The version that Moolenaar used had been released on Fred Fish disk 256. (Disappointingly, Fred Fish disks seem to have nothing to do with [Freddi Fish][5].)
|
||||
|
||||
Moolenaar liked STEVIE but quickly noticed that there were many `vi` commands missing. So, for the first release of Vim, Moolenaar made `vi` compatibility his priority. Someone else had written a series of `vi` macros that, when run through a properly `vi`-compatible editor, could solve a [randomly generated maze][6]. Moolenaar was able to get these macros working in Vim. In 1991, Vim was released for the first time on Fred Fish disk 591 as “Vi Imitation.” Moolenaar had added some features (including multi-level undo and a “quickfix” mode for compiler errors) that meant that Vim had surpassed `vi`. But Vim would remain “Vi Imitation” until Vim 2.0, released in 1993 via FTP.
|
||||
|
||||
Moolenaar, with the occasional help of various internet collaborators, added features to Vim at a steady clip. Vim 2.0 introduced support for the `wrap` option and for horizontal scrolling through long lines of text. Vim 3.0 added support for split windows and buffers, a feature inspired by the `vi` clone `nvi`. Vim also now saved each buffer to a swap file, so that edited text could survive a crash. Vimscript made its first appearance in Vim 5.0, along with support for syntax highlighting. All the while, Vim’s popularity was growing. It was ported to MS-DOS, to Windows, to Mac, and even to Unix, where it competed with the original `vi`.
|
||||
|
||||
In 2006, Vim was voted the most popular editor among Linux Journal readers. Today, according to Stack Overflow’s 2018 Developer Survey, Vim is the most popular text-mode (i.e. terminal emulator) editor, used by 25.8% of all software developers (and 40% of Sysadmin/DevOps people). For a while, during the late 1980s and throughout the 1990s, programmers waged the “Editor Wars,” which pitted Emacs users against `vi` (and eventually Vim) users. While Emacs certainly still has a following, some people think that the Editor Wars are over and that Vim won. The 2018 Stack Overflow Developer Survey suggests that this is true; only 4.1% of respondents used Emacs.
|
||||
|
||||
How did Vim become so successful? Obviously people like the features that Vim has to offer. But I would argue that the long history behind Vim illustrates that it had more advantages than just its feature set. Vim’s codebase dates back only to 1988, when Moolenaar began working on it. The “wq text editor,” on the other hand—the broader vision of how a Unix-y text editor should work—goes back a half-century. The “wq text editor” had a few different concrete expressions, but thanks in part to the unusual attention paid to backward compatibility by both Bill Joy and Bram Moolenaar, good ideas accumulated gradually over time. The “wq text editor,” in that sense, is one of the longest-running and most successful open-source projects, having enjoyed contributions from some of the greatest minds in the computing world. I don’t think the “startup-company-throws-away-all-precedents-and-creates-disruptive-new-software” approach to development is necessarily bad, but Vim is a reminder that the collaborative and incremental approach can also yield wonders.
|
||||
|
||||
If you enjoyed this post, more like it come out every two weeks! Follow [@TwoBitHistory][7] on Twitter or subscribe to the [RSS feed][8] to make sure you know when a new post is out.
|
||||
|
||||
Previously on TwoBitHistory…
|
||||
|
||||
> New post! This time we're taking a look at the Altair 8800, the very first home computer, and how to simulate it on your modern PC.<https://t.co/s2sB5njrkd>
|
||||
>
|
||||
> — TwoBitHistory (@TwoBitHistory) [July 22, 2018][9]
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://twobithistory.org/2018/08/05/where-vim-came-from.html
|
||||
|
||||
作者:[Two-Bit History][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://twobithistory.org
|
||||
[b]: https://github.com/lujun9972
|
||||
[1]: https://stackoverflow.blog/wp-content/uploads/2017/05/meme.jpeg
|
||||
[2]: https://www.vim.org/
|
||||
[3]: https://upload.wikimedia.org/wikipedia/commons/8/8f/Ken_Thompson_%28sitting%29_and_Dennis_Ritchie_at_PDP-11_%282876612463%29.jpg
|
||||
[4]: https://vintagecomputer.ca/wp-content/uploads/2015/01/LSI-ADM3A-full-keyboard.jpg
|
||||
[5]: https://en.wikipedia.org/wiki/Freddi_Fish
|
||||
[6]: https://github.com/isaacs/.vim/tree/master/macros/maze
|
||||
[7]: https://twitter.com/TwoBitHistory
|
||||
[8]: https://twobithistory.org/feed.xml
|
||||
[9]: https://twitter.com/TwoBitHistory/status/1021058552352387074?ref_src=twsrc%5Etfw
|
@ -0,0 +1,138 @@
|
||||
What Did Ada Lovelace's Program Actually Do?
|
||||
======
|
||||
The story of Microsoft’s founding is one of the most famous episodes in computing history. In 1975, Paul Allen flew out to Albuquerque to demonstrate the BASIC interpreter that he and Bill Gates had written for the Altair microcomputer. Because neither of them had a working Altair, Allen and Gates tested their interpreter using an emulator that they wrote and ran on Harvard’s computer system. The emulator was based on nothing more than the published specifications for the Intel 8080 processor. When Allen finally ran their interpreter on a real Altair—in front of the person he and Gates hoped would buy their software—he had no idea if it would work. But it did. The next month, Allen and Gates officially founded their new company.
|
||||
|
||||
Over a century before Allen and Gates wrote their BASIC interpreter, Ada Lovelace wrote and published a computer program. She, too, wrote a program for a computer that had only been described to her. But her program, unlike the Microsoft BASIC interpreter, was never run, because the computer she was targeting was never built.
|
||||
|
||||
Lovelace’s program is often called the world’s first computer program. Not everyone agrees that it should be called that. Lovelace’s legacy, it turns out, is one of computing history’s most hotly debated subjects. Walter Isaacson has written that the dispute about the extent and merit of her contributions constitutes a “minor academic specialty.” Inevitably, the fact that Lovelace was a woman has made this dispute a charged one. Historians have cited all kinds of primary evidence to argue that the credit given to Lovelace is either appropriate or undeserved. But they seem to spend less time explaining the technical details of her published writing, which is unfortunate, because the technical details are the most fascinating part of the story. Who wouldn’t want to know exactly how a program written in 1843 was supposed to work?
|
||||
|
||||
In fairness, Lovelace’s program is not easy to explain to the layperson without some hand-waving. It’s the intricacies of her program, though, that make it so remarkable. Whether or not she ought to be known as “the first programmer,” her program was specified with a degree of rigor that far surpassed anything that came before. She thought carefully about how operations could be organized into groups that could be repeated, thereby inventing the loop. She realized how important it was to track the state of variables as they changed, introducing a notation to illustrate those changes. As a programmer myself, I’m startled to see how much of what Lovelace was doing resembles the experience of writing software today.
|
||||
|
||||
So let’s take a closer look at Lovelace’s program. She designed it to calculate the Bernoulli numbers. To understand what those are, we have to go back a couple millennia to the genesis of one of mathematics’ oldest problems.
|
||||
|
||||
### Sums of Powers
|
||||
|
||||
The Pythagoreans lived on the shores of the Mediterranean and worshiped numbers. One of their pastimes was making triangles out of pebbles.
|
||||
|
||||
![][1]
|
||||
|
||||
One pebble followed by a row of two pebbles makes a triangle containing three pebbles. Add another row of three pebbles and you get a triangle containing six pebbles. You can continue like this, each time adding a row with one more pebble in it than the previous row. A triangle with six rows contains 21 pebbles. But how many pebbles does a triangle with 423 rows contain?
|
||||
|
||||
What the Pythagoreans were looking for was a way to calculate the following without doing all the addition:

$$1 + 2 + 3 + \cdots + n$$
|
||||
|
||||
They eventually realized that, if you place two triangles of the same size up against each other so that they form a rectangle, you can find the area of the rectangle and divide by two to get the number of pebbles in each of the triangles:
|
||||
|
||||
![][2]

$$1 + 2 + 3 + \cdots + n = \frac{n(n+1)}{2}$$
|
||||
|
||||
Archimedes later explored a similar problem. He was interested in the following series:

$$1^2 + 2^2 + 3^2 + \cdots + n^2$$
|
||||
|
||||
You might visualize this series by imagining a stack of progressively larger squares (made out of tiny cubes), one on top of the other, forming a pyramid. Archimedes wanted to know if there was an easy way to tell how many cubes would be needed to construct a pyramid with, say, 423 levels. He recorded a solution that also permits a geometrical interpretation.
|
||||
|
||||
Three pyramids can be fit together to form a rectangular prism with a tiny, one-cube-high extrusion at one end. That little extrusion happens to be a triangle that obeys the same rules that the Pythagoreans used to make their pebble triangles. ([This video][3] might be a more helpful explanation of what I mean.) So the volume of the whole shape is given by the following equation:

$$3\left(1^2 + 2^2 + \cdots + n^2\right) = n^2(n+1) + \frac{n(n+1)}{2}$$
|
||||
|
||||
By substituting the Pythagorean equation for the sum of the first n integers and doing some algebra, you get this:

$$1^2 + 2^2 + 3^2 + \cdots + n^2 = \frac{n(n+1)(2n+1)}{6}$$
|
||||
|
||||
In 499, the Indian mathematician and astronomer Aryabhata published a work known as the Aryabhatiya, which included a formula for calculating the sum of cubes:

$$1^3 + 2^3 + 3^3 + \cdots + n^3 = \left(\frac{n(n+1)}{2}\right)^2$$
|
||||
|
||||
A formula for the sum of the first n positive integers raised to the fourth power wasn’t published for another 500 years.
|
||||
|
||||
You might be wondering at this point if there is a general method for finding the sum of the first n integers raised to the kth power. Mathematicians were wondering too. Johann Faulhaber, a German mathematician and slightly kooky numerologist, was able to calculate formulas for sums of integers up to the 17th power, which he published in 1631. But this may have taken him years and he did not state a general solution. Blaise Pascal finally outlined a general method in 1665, though it depended on first knowing how to calculate the sum of integers raised to every lesser power. To calculate the sum of the first n positive integers raised to the sixth power, for example, you would first have to know how to calculate the sum of the first n positive integers raised to the fifth power.
|
||||
|
||||
A more practical general solution was stated in the posthumously published work of Swiss mathematician Jakob Bernoulli, who died in 1705. Bernoulli began by deriving the formulas for calculating the sums of the first n positive integers to the first, second, and third powers. These he gave in polynomial form, so they looked like the ones below:

$$\sum_{i=1}^{n} i = \frac{1}{2}n^2 + \frac{1}{2}n$$

$$\sum_{i=1}^{n} i^2 = \frac{1}{3}n^3 + \frac{1}{2}n^2 + \frac{1}{6}n$$

$$\sum_{i=1}^{n} i^3 = \frac{1}{4}n^4 + \frac{1}{2}n^3 + \frac{1}{4}n^2$$
|
||||
|
||||
Using Pascal’s Triangle, Bernoulli realized that these polynomials followed a predictable pattern. Essentially, Bernoulli broke the coefficients of each term down into two factors, one of which he could determine using Pascal’s Triangle and the other of which he could derive from the interesting property that all the coefficients in the polynomial seemed to always add to one. Figuring out the exponent that should be attached to each term was no problem, because that also followed a predictable pattern. The factor of each coefficient that had to be calculated using the sums-to-one rule formed a sequence that became known as the Bernoulli numbers.
|
||||
|
||||
Bernoulli’s discovery did not mean that it was now trivial to calculate the sum of the first n positive integers to any given power. In order to calculate the sum of the first n positive integers raised to the kth power, you would need to know every Bernoulli number up to the kth Bernoulli number. Each Bernoulli number could only be calculated if the previous Bernoulli numbers were known. But calculating a long series of Bernoulli numbers was significantly easier than deriving each sum of powers formula in turn, so Bernoulli’s discovery was a big advance for mathematics.
|
||||
|
||||
### Babbage
|
||||
|
||||
Charles Babbage was born in 1791, nearly a century after Bernoulli died. I’ve always had some vague idea that Babbage designed but did not build a mechanical computer. But I’ve never entirely understood how that computer was supposed to work. The basic ideas, as it happens, are not that difficult to grasp, which is good news. Lovelace’s program was designed to run on one of Babbage’s machines, so we need to take another quick detour here to talk about how those machines worked.
|
||||
|
||||
Babbage designed two separate mechanical computing machines. His first machine was called the Difference Engine. Before the invention of the pocket calculator, people relied on logarithmic tables to calculate the product of large numbers. (There is a good [Numberphile video][4] on how this was done.) Large logarithmic tables are not difficult to create, at least conceptually, but the sheer number of calculations that need to be done in order to create them meant that in Babbage’s time they often contained errors. Babbage, frustrated by this, sought to create a machine that could tabulate logarithms mechanically and therefore without error.
|
||||
|
||||
The Difference Engine was not a computer, because all it did was add and subtract. It took advantage of a method devised by the French mathematician Gaspard de Prony that broke the process of tabulating logarithms down into small steps. These small steps involved only addition and subtraction, meaning that a small army of people without any special mathematical aptitude or training could be employed to produce a table. De Prony’s method, known as the method of divided differences, could be used to tabulate any polynomial. Polynomials, in turn, could be used to approximate logarithmic and trigonometric functions.
|
||||
|
||||
To get a sense of how this process worked, consider the following simple polynomial function:

$$y = x^2 + 1$$
|
||||
|
||||
The method of divided differences involves finding the difference between each successive value of y for different values of x. The differences between these differences are then found, and possibly the differences between those next differences themselves, until a constant difference appears. These differences can then be used to get the next value of the polynomial simply by adding.
|
||||
|
||||
Because the above polynomial is only a second-degree polynomial, we are able to find the constant difference after only two columns of differences:
|
||||
|
||||
| x | y  | Diff 1 | Diff 2 |
| - | -- | ------ | ------ |
| 1 | 2  |        |        |
| 2 | 5  | 3      |        |
| 3 | 10 | 5      | 2      |
| 4 | 17 | 7      | 2      |
| 5 | ?  | ?      | 2      |
| … | …  | …      | …      |
|
||||
|
||||
Now, since we know that the constant difference is 2, we can find the value of y when x is 5 through addition only. If we add 2 to 7, the last entry in the “Diff 1” column, we get 9. If we add 9 to 17, the last entry in the y column, we get 26, our answer.
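Once the difference columns are seeded, in other words, everything reduces to addition. Here is the same process as a short C sketch (my own, using the example polynomial above), which tabulates the next several values of y:

```c
#include <stdio.h>

int main(void) {
    /* Seeded from the last full row of the table (x = 4). */
    long y = 17, diff1 = 7;
    const long diff2 = 2;            /* the constant difference */

    for (long x = 5; x <= 10; x++) {
        diff1 += diff2;              /* 7 -> 9 -> 11 -> ...     */
        y += diff1;                  /* 17 -> 26 -> 37 -> ...   */
        printf("x = %ld, y = %ld\n", x, y);
    }
    return 0;
}
```

Each pass of the loop is one turn of the crank: the constant difference propagates up the columns using nothing but addition.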
|
||||
|
||||
Babbage’s Difference Engine had, for each difference column in a table like the one above, a physical column of gears. Each gear was a decimal digit and one whole column was a decimal number. The Difference Engine had eight columns of gears, so it could tabulate a polynomial up to the seventh degree. The columns were initially set with values matching an early row in the difference table, worked out ahead of time. A human operator would then turn a crank shaft, causing the constant difference to propagate through the machine as the value stored on each column was added to the next.
|
||||
|
||||
Babbage was able to build a small section of the Difference Engine and use it to demonstrate his ideas at parties. But even after spending an amount of public money equal to the cost of two large warships, he never built the entire machine. Babbage could not find anyone in the early 1800s that could make the number of gears he needed with sufficient accuracy. A working Difference Engine would not be built until the 1990s, after the advent of precision machining. There is [a great video on YouTube][5] demonstrating a working Difference Engine on loan to the Computer History Museum in Mountain View, which is worth watching even just to listen to the marvelous sounds the machine makes while it runs.
|
||||
|
||||
Babbage eventually lost interest in the Difference Engine when he realized that a much more powerful and flexible machine could be built. His Analytical Engine was the machine that we know today as Babbage’s mechanical computer. The Analytical Engine was based on the same columns of gears used in the Difference Engine, but whereas the Difference Engine only had eight columns, the Analytical Engine was supposed to have many hundreds more. The Analytical Engine could be programmed using punched cards like a Jacquard Loom and could multiply and divide as well as add and subtract. In order to perform one of these operations, a section of the machine called the “mill” would rearrange itself into the appropriate configuration, read the operands off of other columns used for data storage, and then write the result back to another column.
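To get a feel for the store-and-mill arrangement, here is a toy sketch in C (entirely my own, and vastly simpler than Babbage’s design): the store is an array of columns holding numbers, and each “card” tells the mill which columns to read and which column receives the result:

```c
#include <stdio.h>

typedef enum { ADD, SUB, MUL, DIV } Op;
typedef struct { Op op; int src1, src2, dst; } Card;

int main(void) {
    long store[8] = { 0 };              /* columns of gears */
    store[1] = 41;
    store[2] = 7;

    Card program[] = {
        { MUL, 1, 2, 3 },               /* V3 <- V1 * V2    */
        { ADD, 3, 1, 4 },               /* V4 <- V3 + V1    */
    };

    for (int i = 0; i < 2; i++) {
        Card c = program[i];
        long a = store[c.src1], b = store[c.src2];
        switch (c.op) {                 /* the mill reconfigures itself */
            case ADD: store[c.dst] = a + b; break;
            case SUB: store[c.dst] = a - b; break;
            case MUL: store[c.dst] = a * b; break;
            case DIV: store[c.dst] = b ? a / b : 0; break;
        }
    }
    printf("V4 = %ld\n", store[4]);     /* prints 328       */
    return 0;
}
```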
|
||||
|
||||
Babbage called his new machine the Analytical Engine because it was powerful enough to do something resembling mathematical analysis. The Difference Engine could tabulate a polynomial, but the Analytical Engine would be able to calculate, for example, the coefficients of the polynomial expansion of another expression. It was an amazing machine, but the British government wisely declined to fund its construction. So Babbage went abroad to Italy to try to drum up support for his idea.
|
||||
|
||||
### Notes by The Translator
|
||||
|
||||
In Turin, Babbage met Italian engineer and future prime minister Luigi Menabrea. He persuaded Menabrea to write an outline of what the Analytical Engine could accomplish. In 1842, Menabrea published a paper on the topic in French. The following year, Lovelace published a translation of Menabrea’s paper into English.
|
||||
|
||||
Lovelace, then known as Ada Byron, first met Babbage at a party in 1833, when she was 17 and he was 41. Lovelace was fascinated with Babbage’s Difference Engine. She could also understand how it worked, because she had been extensively tutored in mathematics throughout her childhood. Her mother, Annabella Milbanke, had decided that a solid grounding in mathematics would ward off the wild, romantic sensibility that possessed Lovelace’s father, Lord Byron, the famous poet. After meeting in 1833, Lovelace and Babbage remained a part of the same social circle and wrote to each other frequently.
|
||||
|
||||
Ada Byron married William King in 1835. King later became the Earl of Lovelace, making Ada the Countess of Lovelace. Even after having three children, she continued her education in mathematics, employing Augustus de Morgan, who discovered De Morgan’s laws, as her tutor. Lovelace saw the potential of Babbage’s Analytical Engine immediately and was eager to work with him to promote the idea. A friend suggested that she translate Menabrea’s paper for an English audience.
|
||||
|
||||
Menabrea’s paper gave a brief overview of how the Difference Engine worked, then showed how the Analytical Engine would be a far superior machine. The Analytical Engine would be so powerful that it could “form the product of two numbers, each containing twenty figures, in *three minutes*” (emphasis in the original). Menabrea gave further examples of the machine’s capabilities, demonstrating how it could solve a simple system of linear equations and expand the product of two binomial expressions. In both cases, Menabrea provided what Lovelace called “diagrams of development,” which listed the sequence of operations that would need to be performed to calculate the correct answer. These were programs in the same sense that Lovelace’s own program was a program and they were originally published the year before. But as we will see, Menabrea’s programs were only simple examples of what was possible. All of them were trivial in the sense that they did not require any kind of branching or looping.
|
||||
|
||||
Lovelace appended a series of notes to her translation of Menabrea’s paper that together ran much longer than the original work. It was here that she made her major contributions to computing. In Note A, which Lovelace attached to Menabrea’s initial description of the Analytical Engine, Lovelace explained at some length and often in lyrical language the promise of a machine that could perform arbitrary mathematical operations. She foresaw that a machine like the Analytical Engine wasn’t just limited to numbers and could in fact act on any objects “whose mutual fundamental relations could be expressed by those of the abstract science of operations, and which should be also susceptible of adaptations to the action of the operating notation and mechanism of the engine.” She added that the machine might one day, for example, compose music. This insight was all the more remarkable given that Menabrea saw the Analytical Engine primarily as a tool for automating “long and arid computation,” which would free up the intellectual capacities of brilliant scientists for more advanced thinking. The miraculous foresight that Lovelace demonstrated in Note A is one major reason that she is celebrated today.
|
||||
|
||||
The other famous note is Note G. Lovelace begins Note G by arguing that, despite its impressive powers, the Analytical Engine cannot really be said to “think.” This part of Note G is what Alan Turing would later refer to as “Lady Lovelace’s Objection.” Nevertheless, Lovelace continues, the machine can do extraordinary things. To illustrate its ability to handle even more complex problems, Lovelace provides her program calculating the Bernoulli numbers.
|
||||
|
||||
The full program, in the expanded “diagram of development” format that Lovelace explains in Note D, can be seen [here][6]. The program is essentially a list of operations, specified using the usual mathematical symbols. It doesn’t appear that Babbage or Lovelace got as far as developing anything like a set of op codes for the Analytical Engine.
|
||||
|
||||
Though Lovelace was describing a method for computing the entire sequence of Bernoulli numbers up to some limit, the program she provided only illustrated one step of that process. Her program calculated a number that she called B7, which modern mathematicians know as the eighth Bernoulli number. Her program thus sought to solve the following equation:

$$0 = A_0 + A_1B_1 + A_3B_3 + A_5B_5 + B_7$$
|
||||
|
||||
In the above, each term represents a coefficient in the polynomial formula for the sum of integers to a particular power. Here that power is eight, since the eighth Bernoulli number first appears in the formula for the sum of positive integers to the eighth power. The B and A numbers represent the two kinds of factors that Bernoulli discovered. B1 through B7 are all different Bernoulli numbers, indexed according to Lovelace’s indexing. A0 through A5 represent the factors of the coefficients that Bernoulli could calculate using Pascal’s Triangle. The values of A0, A1, A3, and A5 appear below. Here n represents the index of the Bernoulli number in the sequence of odd-numbered Bernoulli numbers starting with the first. Lovelace’s program used n = 4.

$$A_0 = -\frac{1}{2}\cdot\frac{2n-1}{2n+1} \qquad A_1 = \frac{2n}{2}$$

$$A_3 = \frac{2n}{2}\cdot\frac{2n-1}{3}\cdot\frac{2n-2}{4} \qquad A_5 = \frac{2n}{2}\cdot\frac{2n-1}{3}\cdot\frac{2n-2}{4}\cdot\frac{2n-3}{5}\cdot\frac{2n-4}{6}$$
|
||||
|
||||
I’ve created a [translation][7] of Lovelace’s program into C, which may be easier to follow. Lovelace’s program first calculates A0 and the product B1A1. It then enters a loop that repeats twice to calculate B3A3 and B5A5, since those are formed according to an identical pattern. After each product is calculated, it is added with all the previous products, so that by the end of the program the full sum has been obtained.
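Here is a condensed sketch of the same arithmetic in C (my own, not the translation linked above, and compressed so that the B1A1 product is folded into the loop): it accumulates A0 + B1A1 + B3A3 + B5A5 and negates the result to obtain B7, with the earlier Bernoulli numbers hard-coded, just as Lovelace’s program assumed they had already been computed:

```c
#include <stdio.h>

int main(void) {
    double n = 4.0;                       /* the index Lovelace used */
    double B[] = { 1.0/6.0, -1.0/30.0, 1.0/42.0 };   /* B1, B3, B5   */

    double sum = -0.5 * (2.0*n - 1.0) / (2.0*n + 1.0);   /* A0       */
    double A = 2.0*n / 2.0;                              /* A1       */
    double num = 2.0*n - 1.0, den = 3.0;

    for (int i = 0; i < 3; i++) {
        sum += B[i] * A;                  /* add the next product    */
        /* Each A factor extends the previous one by two more terms
         * in the numerator and denominator. */
        A *= (num / den) * ((num - 1.0) / (den + 1.0));
        num -= 2.0; den += 2.0;
    }

    printf("B7 = %f\n", -sum);            /* prints -0.033333        */
    return 0;
}
```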
|
||||
|
||||
Obviously the C translation is not an exact recreation of Lovelace’s program. It declares variables on the stack, for example, whereas Lovelace’s variables were more like registers. But it makes obvious the parts of Lovelace’s program that were so prescient. The C program contains two `while` loops, one nested inside the other. Lovelace’s program did not have `while` loops exactly, but she made groups of operations and in the text of her note specified when they should repeat. The variable `v10`, in the original program and in the C translation, functions as a counter variable that decrements with each loop, a construct any programmer would be familiar with. In fact, aside from the profusion of variables with unhelpful names, the C translation of Lovelace’s program doesn’t look that alien at all.
|
||||
|
||||
The other thing worth mentioning quickly is that translating Lovelace’s program into C was not that difficult, thanks to the detail present in her diagram. Unlike Menabrea’s tables, her table includes a column labeled “Indication of change in the value on any Variable,” which makes it much easier to follow the mutation of state throughout the program. She adds a superscript index here to each variable to indicate the successive values they hold. A superscript of two, for example, means that the value being used here is the second value that has been assigned to the variable since the beginning of the program.
|
||||
|
||||
### The First Programmer?
|
||||
|
||||
After I had translated Lovelace’s program into C, I was able to run it on my own computer. To my frustration, I kept getting the wrong result. After some debugging, I finally realized that the problem wasn’t the code that I had written. The bug was in the original!
|
||||
|
||||
In her “diagram of development,” Lovelace gives the fourth operation as `v5 / v4`. But the correct ordering here is `v4 / v5`. This may well have been a typesetting error and not an error in the program that Lovelace devised. All the same, this must be the oldest bug in computing. I marveled that, for ten minutes or so, unknowingly, I had wrestled with this first ever bug.
|
||||
|
||||
Jim Randall, another blogger that has [translated Lovelace’s program into Python][8], has noted this division bug and two other issues. What does it say about Ada Lovelace that her published program contains minor bugs? Perhaps it shows that she was attempting to write not just a demonstration but a real program. After all, can you really be writing anything more than toy programs if you aren’t also writing lots of bugs?
|
||||
|
||||
One Wikipedia article calls Lovelace the first to publish a “complex program.” Maybe that’s the right way to think about Lovelace’s accomplishment. Menabrea published “diagrams of development” in his paper a year before Lovelace published her translation. Babbage also wrote more than twenty programs that he never published. So it’s not quite accurate to say that Lovelace wrote or published the first program, though there’s always room to quibble about what exactly constitutes a “program.” Even so, Lovelace’s program was miles ahead of anything else that had been published before. The longest program that Menabrea presented was 11 operations long and contained no loops or branches; Lovelace’s program contains 25 operations and a nested loop (and thus branching). Menabrea wrote the following toward the end of his paper:
|
||||
|
||||
> When once the engine shall have been constructed, the difficulty will be reduced to the making of the cards; but as these are merely the translation of algebraic formulae, it will, by means of some simple notation, be easy to consign the execution of them to a workman.
|
||||
|
||||
Neither Babbage nor Menabrea were especially interested in applying the Analytical Engine to problems beyond the immediate mathematical challenges that first drove Babbage to construct calculating machines. Lovelace saw that the Analytical Engine was capable of much more than Babbage or Menabrea could imagine. Lovelace also grasped that “the making of the cards” would not be a mere afterthought and that it could be done well or done poorly. This is hard to appreciate without understanding her program from Note G and seeing for oneself the care she put into designing it. But having done that, you might agree that Lovelace, even if she was not the very first programmer, was the first programmer to deserve the title.
|
||||
|
||||
If you enjoyed this post, more posts like it come out every two weeks! Follow [@TwoBitHistory][9] on Twitter or subscribe to the [RSS feed][10] to make sure you know when a new post is out.
|
||||
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://twobithistory.org/2018/08/18/ada-lovelace-note-g.html
|
||||
|
||||
作者:[Two-Bit History][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://twobithistory.org
|
||||
[b]: https://github.com/lujun9972
|
||||
[1]: https://twobithistory.org/images/triangular_numbers1.png
|
||||
[2]: https://twobithistory.org/images/triangular_numbers2.png
|
||||
[3]: https://www.youtube.com/watch?v=aXbT37IlyZQ
|
||||
[4]: https://youtu.be/VRzH4xB0GdM
|
||||
[5]: https://www.youtube.com/watch?v=BlbQsKpq3Ak
|
||||
[6]: https://upload.wikimedia.org/wikipedia/commons/c/cf/Diagram_for_the_computation_of_Bernoulli_numbers.jpg
|
||||
[7]: https://gist.github.com/sinclairtarget/ad18ac65d277e453da5f479d6ccfc20e
|
||||
[8]: https://enigmaticcode.wordpress.com/tag/bernoulli-numbers/
|
||||
[9]: https://twitter.com/TwoBitHistory
|
||||
[10]: https://twobithistory.org/feed.xml
|
122
sources/talk/20180916 The Rise and Demise of RSS.md
Normal file
@ -0,0 +1,122 @@
|
||||
The Rise and Demise of RSS
|
||||
======
|
||||
There are two stories here. The first is a story about a vision of the web’s future that never quite came to fruition. The second is a story about how a collaborative effort to improve a popular standard devolved into one of the most contentious forks in the history of open-source software development.
|
||||
|
||||
In the late 1990s, in the go-go years between Netscape’s IPO and the Dot-com crash, everyone could see that the web was going to be an even bigger deal than it already was, even if they didn’t know exactly how it was going to get there. One theory was that the web was about to be revolutionized by syndication. The web, originally built to enable a simple transaction between two parties—a client fetching a document from a single host server—would be broken open by new standards that could be used to repackage and redistribute entire websites through a variety of channels. Kevin Werbach, writing for Release 1.0, a newsletter influential among investors in the 1990s, predicted that syndication “would evolve into the core model for the Internet economy, allowing businesses and individuals to retain control over their online personae while enjoying the benefits of massive scale and scope.” He invited his readers to imagine a future in which fencing aficionados, rather than going directly to an “online sporting goods site” or “fencing equipment retailer,” could buy a new épée directly through e-commerce widgets embedded into their favorite website about fencing. Just like in the television world, where big networks syndicate their shows to smaller local stations, syndication on the web would allow businesses and publications to reach consumers through a multitude of intermediary sites. This would mean, as a corollary, that consumers would gain significant control over where and how they interacted with any given business or publication on the web.
|
||||
|
||||
RSS was one of the standards that promised to deliver this syndicated future. To Werbach, RSS was “the leading example of a lightweight syndication protocol.” Another contemporaneous article called RSS the first protocol to realize the potential of XML. It was going to be a way for both users and content aggregators to create their own customized channels out of everything the web had to offer. And yet, two decades later, RSS [appears to be a dying technology][1], now used chiefly by podcasters and programmers with tech blogs. Moreover, among that latter group, RSS is perhaps used as much for its political symbolism as its actual utility. Though of course some people really do have RSS readers, stubbornly adding an RSS feed to your blog, even in 2018, is a reactionary statement. That little tangerine bubble has become a wistful symbol of defiance against a centralized web increasingly controlled by a handful of corporations, a web that hardly resembles the syndicated web of Werbach’s imagining.
|
||||
|
||||
The future once looked so bright for RSS. What happened? Was its downfall inevitable, or was it precipitated by the bitter infighting that thwarted the development of a single RSS standard?
|
||||
|
||||
### Muddied Water
|
||||
|
||||
RSS was invented twice. This meant it never had an obvious owner, a state of affairs that spawned endless debate and acrimony. But it also suggests that RSS was an important idea whose time had come.
|
||||
|
||||
In 1998, Netscape was struggling to envision a future for itself. Its flagship product, the Netscape Navigator web browser—once preferred by 80% of web users—was quickly losing ground to Internet Explorer. So Netscape decided to compete in a new arena. In May, a team was brought together to start work on what was known internally as “Project 60.” Two months later, Netscape announced “My Netscape,” a web portal that would fight it out with other portals like Yahoo, MSN, and Excite.
|
||||
|
||||
The following year, in March, Netscape announced an addition to the My Netscape portal called the “My Netscape Network.” My Netscape users could now customize their My Netscape page so that it contained “channels” featuring the most recent headlines from sites around the web. As long as your favorite website published a special file in a format dictated by Netscape, you could add that website to your My Netscape page, typically by clicking an “Add Channel” button that participating websites were supposed to add to their interfaces. A little box containing a list of linked headlines would then appear.
|
||||
|
||||
![A My Netscape Network Channel][2]
|
||||
|
||||
The special file that participating websites had to publish was an RSS file. In the My Netscape Network announcement, Netscape explained that RSS stood for “RDF Site Summary.” This was somewhat of a misnomer. RDF, or the Resource Description Framework, is basically a grammar for describing certain properties of arbitrary resources. (See [my article about the Semantic Web][3] if that sounds really exciting to you.) In 1999, a draft specification for RDF was being considered by the W3C. Though RSS was supposed to be based on RDF, the example RSS document Netscape actually released didn’t use any RDF tags at all, even if it declared the RDF XML namespace. In a document that accompanied the Netscape RSS specification, Dan Libby, one of the specification’s authors, explained that “in this release of MNN, Netscape has intentionally limited the complexity of the RSS format.” The specification was given the 0.90 version number, the idea being that subsequent versions would bring RSS more in line with the W3C’s XML specification and the evolving draft of the RDF specification.
|
||||
|
||||
RSS had been cooked up by Libby and another Netscape employee, Ramanathan Guha. Guha previously worked for Apple, where he came up with something called the Meta Content Framework. MCF was a format for representing metadata about anything from web pages to local files. Guha demonstrated its power by developing an application called [HotSauce][4] that visualized relationships between files as a network of nodes suspended in 3D space. After leaving Apple for Netscape, Guha worked with a Netscape consultant named Tim Bray to produce an XML-based version of MCF, which in turn became the foundation for the W3C’s RDF draft. It’s no surprise, then, that Guha and Libby were keen to incorporate RDF into RSS. But Libby later wrote that the original vision for an RDF-based RSS was pared back because of time constraints and the perception that RDF was “‘too complex’ for the ‘average user.’”
|
||||
|
||||
While Netscape was trying to win eyeballs in what became known as the “portal wars,” elsewhere on the web a new phenomenon known as “weblogging” was being pioneered. One of these pioneers was Dave Winer, CEO of a company called UserLand Software, which developed early content management systems that made blogging accessible to people without deep technical fluency. Winer ran his own blog, [Scripting News][5], which today is one of the oldest blogs on the internet. More than a year before Netscape announced My Netscape Network, on December 15th, 1997, Winer published a post announcing that the blog would now be available in XML as well as HTML.
|
||||
|
||||
Dave Winer’s XML format became known as the Scripting News format. It was supposedly similar to Microsoft’s Channel Definition Format (a “push technology” standard submitted to the W3C in March, 1997), but I haven’t been able to find a file in the original format to verify that claim. Like Netscape’s RSS, it structured the content of Winer’s blog so that it could be understood by other software applications. When Netscape released RSS 0.90, Winer and UserLand Software began to support both formats. But Winer believed that Netscape’s format was “woefully inadequate” and “missing the key thing web writers and readers need.” It could only represent a list of links, whereas the Scripting News format could represent a series of paragraphs, each containing one or more links.
|
||||
|
||||
In June, 1999, two months after Netscape’s My Netscape Network announcement, Winer introduced a new version of the Scripting News format, called ScriptingNews 2.0b1. Winer claimed that he decided to move ahead with his own format only after trying but failing to get anyone at Netscape to care about RSS 0.90’s deficiencies. The new version of the Scripting News format added several items to the `<header>` element that brought the Scripting News format to parity with RSS. But the two formats continued to differ in that the Scripting News format, which Winer nicknamed the “fat” syndication format, could include entire paragraphs and not just links.
|
||||
|
||||
Netscape got around to releasing RSS 0.91 the very next month. The updated specification was a major about-face. RSS no longer stood for “RDF Site Summary”; it now stood for “Rich Site Summary.” All the RDF—and there was almost none anyway—was stripped out. Many of the Scripting News tags were incorporated. In the text of the new specification, Libby explained:
|
||||
|
||||
> RDF references removed. RSS was originally conceived as a metadata format providing a summary of a website. Two things have become clear: the first is that providers want more of a syndication format than a metadata format. The structure of an RDF file is very precise and must conform to the RDF data model in order to be valid. This is not easily human-understandable and can make it difficult to create useful RDF files. The second is that few tools are available for RDF generation, validation and processing. For these reasons, we have decided to go with a standard XML approach.
|
||||
|
||||
Winer was enormously pleased with RSS 0.91, calling it “even better than I thought it would be.” UserLand Software adopted it as a replacement for the existing ScriptingNews 2.0b1 format. For a while, it seemed that RSS finally had a single authoritative specification.
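For a sense of what had just been standardized, here is a minimal, hypothetical feed in the spirit of RSS 0.91, parsed with Python’s standard library. The element names follow the 0.91 specification; the content is invented:

```python
import xml.etree.ElementTree as ET

# A hypothetical feed; element names follow the RSS 0.91 spec.
feed = """
<rss version="0.91">
  <channel>
    <title>Scripting News</title>
    <link>http://www.scripting.com/</link>
    <description>An invented channel for illustration</description>
    <item>
      <title>An example headline</title>
      <link>http://www.scripting.com/example.html</link>
    </item>
  </channel>
</rss>
"""

channel = ET.fromstring(feed).find("channel")
print(channel.findtext("title"))
for item in channel.findall("item"):
    print(item.findtext("title"), "->", item.findtext("link"))
```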
|
||||
|
||||
### The Great Fork
|
||||
|
||||
A year later, the RSS 0.91 specification had become woefully inadequate. There were all sorts of things people were trying to do with RSS that the specification did not address. There were other parts of the specification that seemed unnecessarily constraining—each RSS channel could only contain a maximum of 15 items, for example.
|
||||
|
||||
By that point, RSS had been adopted by several more organizations. Other than Netscape, which seemed to have lost interest after RSS 0.91, the big players were Dave Winer’s UserLand Software; O’Reilly Net, which ran an RSS aggregator called Meerkat; and Moreover.com, which also ran an RSS aggregator focused on news. Via mailing list, representatives from these organizations and others regularly discussed how to improve on RSS 0.91. But there were deep disagreements about what those improvements should look like.
|
||||
|
||||
The mailing list in which most of the discussion occurred was called the Syndication mailing list. [An archive of the Syndication mailing list][6] is still available. It is an amazing historical resource. It provides a moment-by-moment account of how those deep disagreements eventually led to a political rupture of the RSS community.
|
||||
|
||||
On one side of the coming rupture was Winer. Winer was impatient to evolve RSS, but he wanted to change it only in relatively conservative ways. In June, 2000, he published his own RSS 0.91 specification on the UserLand website, meant to be a starting point for further development of RSS. It made no significant changes to the 0.91 specification published by Netscape. Winer claimed in a blog post that accompanied his specification that it was only a “cleanup” documenting how RSS was actually being used in the wild, which was needed because the Netscape specification was no longer being maintained. In the same post, he argued that RSS had succeeded so far because it was simple, and that by adding namespaces or RDF back to the format—some had suggested this be done in the Syndication mailing list—it “would become vastly more complex, and IMHO, at the content provider level, would buy us almost nothing for the added complexity.” In a message to the Syndication mailing list sent around the same time, Winer suggested that these issues were important enough that they might lead him to create a fork:
|
||||
|
||||
> I’m still pondering how to move RSS forward. I definitely want ICE-like stuff in RSS2, publish and subscribe is at the top of my list, but I am going to fight tooth and nail for simplicity. I love optional elements. I don’t want to go down the namespaces and schema road, or try to make it a dialect of RDF. I understand other people want to do this, and therefore I guess we’re going to get a fork. I have my own opinion about where the other fork will lead, but I’ll keep those to myself for the moment at least.
|
||||
|
||||
Arrayed against Winer were several other people, including Rael Dornfest of O’Reilly, Ian Davis (responsible for a search startup called Calaba), and a precocious, 14-year-old Aaron Swartz, who all thought that RSS needed namespaces in order to accommodate the many different things everyone wanted to do with it. On another mailing list hosted by O’Reilly, Davis proposed a namespace-based module system, writing that such a system would “make RSS as extensible as we like rather than packing in new features that over-complicate the spec.” The “namespace camp” believed that RSS would soon be used for much more than the syndication of blog posts, so namespaces, rather than being a complication, were the only way to keep RSS from becoming unmanageable as it supported more and more use cases.
|
||||
|
||||
At the root of this disagreement about namespaces was a deeper disagreement about what RSS was even for. Winer had invented his Scripting News format to syndicate the posts he wrote for his blog. Guha and Libby at Netscape had designed RSS and called it “RDF Site Summary” because in their minds it was a way of recreating a site in miniature within Netscape’s online portal. Davis, writing to the Syndication mailing list, explained his view that RSS was “originally conceived as a way of building mini sitemaps,” and that now he and others wanted to expand RSS “to encompass more types of information than simple news headlines and to cater for the new uses of RSS that have emerged over the last 12 months.” Winer wrote a prickly reply, stating that his Scripting News format was in fact the original RSS and that it had been meant for a different purpose. Given that the people most involved in the development of RSS disagreed about why RSS had even been created, a fork seems to have been inevitable.
|
||||
|
||||
The fork happened after Dornfest announced a proposed RSS 1.0 specification and formed the RSS-DEV Working Group—which would include Davis, Swartz, and several others but not Winer—to get it ready for publication. In the proposed specification, RSS once again stood for “RDF Site Summary,” because RDF had been added back in to represent metadata properties of certain RSS elements. The specification acknowledged Winer by name, giving him credit for popularizing RSS through his “evangelism.” But it also argued that just adding more elements to RSS without providing for extensibility with a module system—that is, what Winer was suggesting—“sacrifices scalability.” The specification went on to define a module system for RSS based on XML namespaces.
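A fragment makes the extensibility argument concrete. The following hypothetical RSS 1.0-style item mixes a Dublin Core element into a feed through an XML namespace, the kind of module the Working Group had in mind:

```python
import xml.etree.ElementTree as ET

# A hypothetical item mixing a Dublin Core element into RSS 1.0
# through an XML namespace.
item = ET.fromstring("""
<item xmlns="http://purl.org/rss/1.0/"
      xmlns:dc="http://purl.org/dc/elements/1.1/">
  <title>An example headline</title>
  <dc:creator>A. Hypothetical Author</dc:creator>
</item>
""")

ns = {"rss": "http://purl.org/rss/1.0/",
      "dc": "http://purl.org/dc/elements/1.1/"}
print(item.findtext("rss:title", namespaces=ns))   # core element
print(item.findtext("dc:creator", namespaces=ns))  # module-supplied element
```

New kinds of metadata could be layered onto a feed without ever touching the core element set, at the price of exactly the namespace machinery that Winer considered needless complexity.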
|
||||
|
||||
Winer was furious that the RSS-DEV Working Group had arrogated the “RSS 1.0” name for themselves. In another mailing list about decentralization, he described what the RSS-DEV Working Group had done as theft. Other members of the Syndication mailing list also felt that the RSS-DEV Working Group should not have used the name “RSS” without unanimous agreement from the community on how to move RSS forward. But the Working Group stuck with the name. Dan Brickley, another member of the RSS-DEV Working Group, defended this decision by arguing that “RSS 1.0 as proposed is solidly grounded in the original RSS vision, which itself had a long heritage going back to MCF (an RDF precursor) and related specs (CDF etc).” He essentially felt that the RSS 1.0 effort had a better claim to the RSS name than Winer did, since RDF had originally been a part of RSS. The RSS-DEV Working Group published a final version of their specification in December. That same month, Winer published his own improvement to RSS 0.91, which he called RSS 0.92, on UserLand’s website. RSS 0.92 made several small optional improvements to RSS, among which was the addition of the `<enclosure>` tag soon used by podcasters everywhere. RSS had officially forked.
|
||||
|
||||
It’s not clear to me why a better effort was not made to involve Winer in the RSS-DEV Working Group. He was a prominent contributor to the Syndication mailing list and obviously responsible for much of RSS’ popularity, as the members of the Working Group themselves acknowledged. But Tim O’Reilly, founder and CEO of O’Reilly, explained in a UserLand discussion group that Winer more or less refused to participate:
|
||||
|
||||
> A group of people involved in RSS got together to start thinking about its future evolution. Dave was part of the group. When the consensus of the group turned in a direction he didn’t like, Dave stopped participating, and characterized it as a plot by O’Reilly to take over RSS from him, despite the fact that Rael Dornfest of O’Reilly was only one of about a dozen authors of the proposed RSS 1.0 spec, and that many of those who were part of its development had at least as long a history with RSS as Dave had.
|
||||
|
||||
To this, Winer said:
|
||||
|
||||
> I met with Dale [Dougherty] two weeks before the announcement, and he didn’t say anything about it being called RSS 1.0. I spoke on the phone with Rael the Friday before it was announced, again he didn’t say that they were calling it RSS 1.0. The first I found out about it was when it was publicly announced.
|
||||
>
|
||||
> Let me ask you a straight question. If it turns out that the plan to call the new spec “RSS 1.0” was done in private, without any heads-up or consultation, or for a chance for the Syndication list members to agree or disagree, not just me, what are you going to do?
|
||||
>
|
||||
> UserLand did a lot of work to create and popularize and support RSS. We walked away from that, and let your guys have the name. That’s the top level. If I want to do any further work in Web syndication, I have to use a different name. Why and how did that happen Tim?
|
||||
|
||||
I have not been able to find a discussion in the Syndication mailing list about using the RSS 1.0 name prior to the announcement of the RSS 1.0 proposal.
|
||||
|
||||
RSS would fork again in 2003, when several developers frustrated with the bickering in the RSS community sought to create an entirely new format. These developers created Atom, a format that did away with RDF but embraced XML namespaces. Atom would eventually be specified by [a proposed IETF standard][7]. After the introduction of Atom, there were three competing versions of RSS: Winer’s RSS 0.92 (updated to RSS 2.0 in 2002 and renamed “Really Simple Syndication”), the RSS-DEV Working Group’s RSS 1.0, and Atom.
|
||||
|
||||
### Decline
|
||||
|
||||
The proliferation of competing RSS specifications may have hampered RSS in other ways that I’ll discuss shortly. But it did not stop RSS from becoming enormously popular during the 2000s. By 2004, the New York Times had started offering its headlines in RSS and had written an article explaining to the layperson what RSS was and how to use it. Google Reader, an RSS aggregator ultimately used by millions, was launched in 2005. By 2013, RSS seemed popular enough that the New York Times, in its obituary for Aaron Swartz, called the technology “ubiquitous.” For a while, before a third of the planet had signed up for Facebook, RSS was simply how many people stayed abreast of news on the internet.
|
||||
|
||||
The New York Times published Swartz’ obituary in January, 2013. By that point, though, RSS had actually turned a corner and was well on its way to becoming an obscure technology. Google Reader was shut down in July, 2013, ostensibly because user numbers had been falling “over the years.” This prompted several articles from various outlets declaring that RSS was dead. But people had been declaring that RSS was dead for years, even before Google Reader’s shuttering. Steve Gillmor, writing for TechCrunch in May, 2009, advised that “it’s time to get completely off RSS and switch to Twitter” because “RSS just doesn’t cut it anymore.” He pointed out that Twitter was basically a better RSS feed, since it could show you what people thought about an article in addition to the article itself. It allowed you to follow people and not just channels. Gillmor told his readers that it was time to let RSS recede into the background. He ended his article with a verse from Bob Dylan’s “Forever Young.”
|
||||
|
||||
Today, RSS is not dead. But neither is it anywhere near as popular as it once was. Lots of people have offered explanations for why RSS lost its broad appeal. Perhaps the most persuasive explanation is exactly the one offered by Gillmor in 2009. Social networks, just like RSS, provide a feed featuring all the latest news on the internet. Social networks took over from RSS because they were simply better feeds. They also provide more benefits to the companies that own them. Some people have accused Google, for example, of shutting down Google Reader in order to encourage people to use Google+. Google might have been able to monetize Google+ in a way that it could never have monetized Google Reader. Marco Arment, the creator of Instapaper, wrote on his blog in 2013:
|
||||
|
||||
> Google Reader is just the latest casualty of the war that Facebook started, seemingly accidentally: the battle to own everything. While Google did technically “own” Reader and could make some use of the huge amount of news and attention data flowing through it, it conflicted with their far more important Google+ strategy: they need everyone reading and sharing everything through Google+ so they can compete with Facebook for ad-targeting data, ad dollars, growth, and relevance.
|
||||
|
||||
So both users and technology companies realized that they got more out of using social networks than they did out of RSS.
|
||||
|
||||
Another theory is that RSS was always too geeky for regular people. Even the New York Times, which seems to have been eager to adopt RSS and promote it to its audience, complained in 2006 that RSS is a “not particularly user friendly” acronym coined by “computer geeks.” Before the RSS icon was designed in 2004, websites like the New York Times linked to their RSS feeds using little orange boxes labeled “XML,” which can only have been intimidating. The label was perfectly accurate though, because back then clicking the link would take a hapless user to a page full of XML. [This great tweet][8] captures the essence of this explanation for RSS’ demise. Regular people never felt comfortable using RSS; it hadn’t really been designed as a consumer-facing technology and involved too many hurdles; people jumped ship as soon as something better came along.
|
||||
|
||||
RSS might have been able to overcome some of these limitations if it had been further developed. Maybe RSS could have been extended somehow so that friends subscribed to the same channel could syndicate their thoughts about an article to each other. But whereas a company like Facebook was able to “move fast and break things,” the RSS developer community was stuck trying to achieve consensus. The Great RSS Fork only demonstrates how difficult it was to do that. So if we are asking ourselves why RSS is no longer popular, a good first-order explanation is that social networks supplanted it. If we ask ourselves why social networks were able to supplant it, then the answer may be that the people trying to make RSS succeed faced a problem much harder than, say, building Facebook. As Dornfest wrote to the Syndication mailing list at one point, “currently it’s the politics far more than the serialization that’s far from simple.”
|
||||
|
||||
So today we are left with centralized silos of information. In a way, we do have the syndicated internet that Kevin Werbach foresaw in 1999. After all, The Onion is a publication that relies on syndication through Facebook and Twitter the same way that Seinfeld relied on syndication to rake in millions after the end of its original run. But syndication on the web only happens through one of a very small number of channels, meaning that none of us “retain control over our online personae” the way that Werbach thought we would. One reason this happened is garden-variety corporate rapaciousness—RSS, an open format, didn’t give technology companies the control over data and eyeballs that they needed to sell ads, so they did not support it. But the more mundane reason is that centralized silos are just easier to design than common standards. Consensus is difficult to achieve and it takes time, but without consensus spurned developers will go off and create competing standards. The lesson here may be that if we want to see a better, more open web, we have to get better at not screwing each other over.
|
||||
|
||||
If you enjoyed this post, more posts like it come out every two weeks! Follow [@TwoBitHistory][9] on Twitter or subscribe to the [RSS feed][10] to make sure you know when a new post is out.
|
||||
|
||||
Previously on TwoBitHistory…
|
||||
|
||||
> New post: This week we're traveling back in time in our DeLorean to see what it was like learning to program on early home computers.<https://t.co/qDrwqgIuuy>
|
||||
>
|
||||
> — TwoBitHistory (@TwoBitHistory) [September 2, 2018][11]
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://twobithistory.org/2018/09/16/the-rise-and-demise-of-rss.html
|
||||
|
||||
作者:[Two-Bit History][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://twobithistory.org
|
||||
[b]: https://github.com/lujun9972
|
||||
[1]: https://trends.google.com/trends/explore?date=all&geo=US&q=rss
|
||||
[2]: https://twobithistory.org/images/mnn-channel.gif
|
||||
[3]: https://twobithistory.org/2018/05/27/semantic-web.html
|
||||
[4]: http://web.archive.org/web/19970703020212/http://mcf.research.apple.com:80/hs/screen_shot.html
|
||||
[5]: http://scripting.com/
|
||||
[6]: https://groups.yahoo.com/neo/groups/syndication/info
|
||||
[7]: https://tools.ietf.org/html/rfc4287
|
||||
[8]: https://twitter.com/mgsiegler/status/311992206716203008
|
||||
[9]: https://twitter.com/TwoBitHistory
|
||||
[10]: https://twobithistory.org/feed.xml
|
||||
[11]: https://twitter.com/TwoBitHistory/status/1036295112375115778?ref_src=twsrc%5Etfw
|
@ -1,75 +0,0 @@
|
||||
(translating by runningwater)
|
||||
CPU Power Manager – Control And Manage CPU Frequency In Linux
|
||||
======
|
||||
|
||||
![](https://www.ostechnix.com/wp-content/uploads/2018/09/Manage-CPU-Frequency-720x340.jpeg)
|
||||
|
||||
If you are a laptop user, you probably know that power management on Linux isn’t really as good as on other OSes. While there are tools like **TLP**, [**Laptop Mode Tools** and **powertop**][1] to help reduce power consumption, overall battery life on Linux isn’t as good as on Windows or macOS. Another way to reduce power consumption is to limit the frequency of your CPU. While this is something that has always been doable, it generally requires complicated terminal commands, making it inconvenient. But fortunately, there’s a GNOME extension that helps you easily set and manage your CPU’s frequency – **CPU Power Manager**. CPU Power Manager uses the **intel_pstate** frequency scaling driver (supported by almost every Intel CPU) to control and manage CPU frequency in your GNOME desktop.
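For the curious, the underlying knobs live in sysfs. The sketch below is my own illustration of the intel_pstate interface, not code from the extension, and writing these files requires root:

```python
PSTATE = "/sys/devices/system/cpu/intel_pstate"

def read_pstate(name):
    with open(f"{PSTATE}/{name}") as f:
        return f.read().strip()

def write_pstate(name, value):
    # Writing requires root privileges.
    with open(f"{PSTATE}/{name}", "w") as f:
        f.write(str(value))

print("max_perf_pct:", read_pstate("max_perf_pct"))  # ceiling, as % of max frequency
print("no_turbo:", read_pstate("no_turbo"))          # 1 means Turbo Boost is disabled
# write_pstate("max_perf_pct", 70)  # cap the CPU at 70% of its maximum
# write_pstate("no_turbo", 1)       # turn Turbo Boost off
```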
|
||||
|
||||
Another reason to use this extension is to reduce heating in your system. There are many systems out there which can get uncomfortably hot in normal usage. Limiting your CPU’s frequency could reduce heating. It will also decrease the wear and tear on your CPU and other components.
|
||||
|
||||
### Installing CPU Power Manager
|
||||
|
||||
First, go to the [**extension’s page**][2], and install the extension.
|
||||
|
||||
Once the extension is installed, you’ll see a CPU icon on the right side of the GNOME top bar. Click the icon, and you’ll get an option to complete the installation:
|
||||
|
||||
![](https://www.ostechnix.com/wp-content/uploads/2018/09/CPU-Power-Manager-icon.png)
|
||||
|
||||
If you click **“Attempt Installation”**, you’ll get a password prompt. The extension needs root privileges to add a polkit rule for controlling CPU frequency. This is what the prompt looks like:
|
||||
|
||||
![](https://www.ostechnix.com/wp-content/uploads/2018/09/CPU-Power-Manager-1.png)
|
||||
|
||||
Type in your password and click **“Authenticate”**, and that finishes the installation. The last action adds a polkit policy file – **mko.cpupower.setcpufreq.policy** – at **/usr/share/polkit-1/actions**.
|
||||
|
||||
After installation is complete, if you click the CPU icon at the top right, you’ll get something like this:
|
||||
|
||||
![](https://www.ostechnix.com/wp-content/uploads/2018/09/CPU-Power-Manager.png)
|
||||
|
||||
### Features
|
||||
|
||||
* **See the current CPU frequency:** Obviously, you can use this window to see the frequency that your CPU is running at.
|
||||
* **Set maximum and minimum frequency:** With this extension, you can set maximum and minimum frequency limits in terms of percentage of max frequency. Once these limits are set, the CPU will operate only in this range of frequencies.
|
||||
  * **Turn Turbo Boost On and Off:** This is my favorite feature. Most Intel CPUs have a “Turbo Boost” feature, whereby one of the cores of the CPU is boosted past the normal maximum frequency for extra performance. While this can make your system more performant, it also increases power consumption a lot. So if you aren’t doing anything intensive, it’s nice to be able to turn off Turbo Boost and save power. In fact, in my case, I have Turbo Boost turned off most of the time.
|
||||
  * **Make Profiles:** You can make profiles with maximum and minimum frequencies that you can then switch between easily, instead of fiddling with the frequencies each time.
|
||||
|
||||
|
||||
|
||||
### Preferences
|
||||
|
||||
You can also customize the extension via the preferences window:
|
||||
|
||||
![](https://www.ostechnix.com/wp-content/uploads/2018/09/CPU-Power-Manager-preferences.png)
|
||||
|
||||
As you can see, you can set whether the CPU frequency is displayed, and whether to display it in **MHz** or **GHz**.
|
||||
|
||||
You can also edit and create/delete profiles:
|
||||
|
||||
![](https://www.ostechnix.com/wp-content/uploads/2018/09/CPU-Power-Manager-preferences-1.png)
|
||||
|
||||
You can set maximum and minimum frequencies, and turbo boost for each profile.
|
||||
|
||||
### Conclusion
|
||||
|
||||
As I said in the beginning, power management on Linux is not the best, and many people are always looking to eke out a few more minutes from their Linux laptops. If you are one of those, check out this extension. This is an unconventional method to save power, but it does work. I certainly love this extension, and have been using it for a few months now.
|
||||
|
||||
What do you think about this extension? Put your thoughts in the comments below!
|
||||
|
||||
Cheers!
|
||||
|
||||
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://www.ostechnix.com/cpu-power-manager-control-and-manage-cpu-frequency-in-linux/
|
||||
|
||||
作者:[EDITOR][a]
|
||||
选题:[lujun9972](https://github.com/lujun9972)
|
||||
译者:[runningwater](https://github.com/runningwater)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://www.ostechnix.com/author/editor/
|
||||
[1]: https://www.ostechnix.com/improve-laptop-battery-performance-linux/
|
||||
[2]: https://extensions.gnome.org/extension/945/cpu-power-manager/
|
@ -0,0 +1,126 @@
|
||||
How Lisp Became God's Own Programming Language
|
||||
======
|
||||
When programmers discuss the relative merits of different programming languages, they often talk about them in prosaic terms as if they were so many tools in a tool belt—one might be more appropriate for systems programming, another might be more appropriate for gluing together other programs to accomplish some ad hoc task. This is as it should be. Languages have different strengths and claiming that a language is better than other languages without reference to a specific use case only invites an unproductive and vitriolic debate.
|
||||
|
||||
But there is one language that seems to inspire a peculiar universal reverence: Lisp. Keyboard crusaders who would otherwise pounce on anyone daring to suggest that some language is better than any other will concede that Lisp is on another level. Lisp transcends the utilitarian criteria used to judge other languages, because the median programmer has never used Lisp to build anything practical and probably never will, yet the reverence for Lisp runs so deep that Lisp is often ascribed mystical properties. Everyone’s favorite webcomic, xkcd, has depicted Lisp this way at least twice: in [one comic][1], a character reaches some sort of Lisp enlightenment, which appears to allow him to comprehend the fundamental structure of the universe. In [another comic][2], a robed, senescent programmer hands a stack of parentheses to his padawan, saying that the parentheses are “elegant weapons for a more civilized age,” suggesting that Lisp has all the occult power of the Force.
|
||||
|
||||
Another great example is Bob Kanefsky’s parody of a song called “God Lives on Terra.” His parody, written in the mid-1990s and called “Eternal Flame”, describes how God must have created the world using Lisp. The following is an excerpt, but the full set of lyrics can be found in the [GNU Humor Collection][3]:
|
||||
|
||||
> For God wrote in Lisp code
|
||||
> When he filled the leaves with green.
|
||||
> The fractal flowers and recursive roots:
|
||||
> The most lovely hack I’ve seen.
|
||||
> And when I ponder snowflakes,
|
||||
> never finding two the same,
|
||||
> I know God likes a language
|
||||
> with its own four-letter name.
|
||||
|
||||
I can only speak for myself, I suppose, but I think this “Lisp Is Arcane Magic” cultural meme is the most bizarre and fascinating thing ever. Lisp was concocted in the ivory tower as a tool for artificial intelligence research, so it was always going to be unfamiliar and maybe even a bit mysterious to the programming laity. But programmers now [urge each other to “try Lisp before you die”][4] as if it were some kind of mind-expanding psychedelic. They do this even though Lisp is now the second-oldest programming language in widespread use, younger only than Fortran, and even then by just one year. Imagine if your job were to promote some new programming language on behalf of the organization or team that created it. Wouldn’t it be great if you could convince everyone that your new language had divine powers? But how would you even do that? How does a programming language come to be known as a font of hidden knowledge?
|
||||
|
||||
How did Lisp get to be this way?
|
||||
|
||||
![Byte Magazine Cover, August, 1979.][5]
|
||||
The cover of Byte Magazine, August, 1979.
|
||||
|
||||
### Theory A: The Axiomatic Language
|
||||
|
||||
John McCarthy, Lisp’s creator, did not originally intend for Lisp to be an elegant distillation of the principles of computation. But, after one or two fortunate insights and a series of refinements, that’s what Lisp became. Paul Graham—we will talk about him some more later—has written that, with Lisp, McCarthy “did for programming something like what Euclid did for geometry.” People might see a deeper meaning in Lisp because McCarthy built Lisp out of parts so fundamental that it is hard to say whether he invented it or discovered it.
|
||||
|
||||
McCarthy began thinking about creating a language during the 1956 Dartmouth Summer Research Project on Artificial Intelligence. The Summer Research Project was in effect an ongoing, multi-week academic conference, the very first in the field of artificial intelligence. McCarthy, then an assistant professor of Mathematics at Dartmouth, had actually coined the term “artificial intelligence” when he proposed the event. About ten or so people attended the conference for its entire duration. Among them were Allen Newell and Herbert Simon, two researchers affiliated with the RAND Corporation and Carnegie Mellon who had just designed a language called IPL.
|
||||
|
||||
Newell and Simon had been trying to build a system capable of generating proofs in propositional calculus. They realized that it would be hard to do this while working at the level of the computer’s native instruction set, so they decided to create a language—or, as they called it, a “pseudo-code”—that would help them more naturally express the workings of their “Logic Theory Machine.” Their language, called IPL for “Information Processing Language”, was more of a high-level assembly dialect than a programming language in the sense we mean today. Newell and Simon, perhaps referring to Fortran, noted that other “pseudo-codes” then in development were “preoccupied” with representing equations in standard mathematical notation. Their language focused instead on representing sentences in propositional calculus as lists of symbolic expressions. Programs in IPL would basically leverage a series of assembly-language macros to manipulate and evaluate expressions within one or more of these lists.
|
||||
|
||||
McCarthy thought that having algebraic expressions in a language, Fortran-style, would be useful. So he didn’t like IPL very much. But he thought that symbolic lists were a good way to model problems in artificial intelligence, particularly problems involving deduction. This was the germ of McCarthy’s desire to create an algebraic list processing language, a language that would resemble Fortran but also be able to process symbolic lists like IPL.
|
||||
|
||||
Of course, Lisp today does not resemble Fortran. Over the next few years, McCarthy’s ideas about what an ideal list processing language should look like evolved. His ideas began to change in 1957, when he started writing routines for a chess-playing program in Fortran. The prolonged exposure to Fortran convinced McCarthy that there were several infelicities in its design, chief among them the awkward `IF` statement. McCarthy invented an alternative, the “true” conditional expression, which returns sub-expression A if the supplied test succeeds and sub-expression B if the supplied test fails and which also only evaluates the sub-expression that actually gets returned. During the summer of 1958, when McCarthy worked to design a program that could perform differentiation, he realized that his “true” conditional expression made writing recursive functions easier and more natural. The differentiation problem also prompted McCarthy to devise the maplist function, which takes another function as an argument and applies it to all the elements in a list. This was useful for differentiating sums of arbitrarily many terms.
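Both inventions are so thoroughly absorbed into modern languages that they are easy to sketch today. In Python, for example (my illustration, not McCarthy’s notation), the conditional expression likewise evaluates only the branch it returns, which is exactly what makes the recursive definition below terminate:

```python
# The "true" conditional evaluates only the branch it returns, so the
# recursive call below is never made once n reaches 0.
def factorial(n):
    return 1 if n == 0 else n * factorial(n - 1)

# An element-wise rendering of maplist. (McCarthy's maplist actually
# passed successive sublists to the function; this is the simplified,
# map-like idea.)
def maplist(f, xs):
    return [f(x) for x in xs]

print(factorial(5))  # 120
print(maplist(lambda term: "d/dx(" + term + ")", ["x**2", "3*x", "7"]))
```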
|
||||
|
||||
None of these things could be expressed in Fortran, so, in the fall of 1958, McCarthy set some students to work implementing Lisp. Since McCarthy was now an assistant professor at MIT, these were all MIT students. As McCarthy and his students translated his ideas into running code, they made changes that further simplified the language. The biggest change involved Lisp’s syntax. McCarthy had originally intended for the language to include something called “M-expressions,” which would be a layer of syntactic sugar that made Lisp’s syntax resemble Fortran’s. Though M-expressions could be translated to S-expressions—the basic lists enclosed by parentheses that Lisp is known for—S-expressions were really a low-level representation meant for the machine. The only problem was that McCarthy had been denoting M-expressions using square brackets, and the IBM 026 keypunch that McCarthy’s team used at MIT did not have any square bracket keys on its keyboard. So the Lisp team stuck with S-expressions, using them to represent not just lists of data but function applications too. McCarthy and his students also made a few other simplifications, including a switch to prefix notation and a memory model change that meant the language only had one real type.
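A small sketch shows why S-expressions were enough. Represented as nested tuples in Python (a toy of my own, not McCarthy’s eval), prefix notation lets a single rule evaluate every operator:

```python
import operator

OPS = {"+": operator.add, "-": operator.sub, "*": operator.mul}

def evaluate(expr):
    if not isinstance(expr, tuple):  # an atom evaluates to itself
        return expr
    op, left, right = expr           # (operator operand operand)
    return OPS[op](evaluate(left), evaluate(right))

# The S-expression (+ 1 (* 2 3)) as a nested tuple:
print(evaluate(("+", 1, ("*", 2, 3))))  # 7
```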
|
||||
|
||||
In 1960, McCarthy published his famous paper on Lisp called “Recursive Functions of Symbolic Expressions and Their Computation by Machine.” By that time, the language had been pared down to such a degree that McCarthy realized he had the makings of “an elegant mathematical system” and not just another programming language. He later wrote that the many simplifications that had been made to Lisp turned it “into a way of describing computable functions much neater than the Turing machines or the general recursive definitions used in recursive function theory.” In his paper, he therefore presented Lisp both as a working programming language and as a formalism for studying the behavior of recursive functions.
|
||||
|
||||
McCarthy explained Lisp to his readers by building it up out of only a very small collection of rules. Paul Graham later retraced McCarthy’s steps, using more readable language, in his essay [“The Roots of Lisp”][6]. Graham is able to explain Lisp using only seven primitive operators, two different notations for functions, and a half-dozen higher-level functions defined in terms of the primitive operators. That Lisp can be specified by such a small sequence of basic rules no doubt contributes to its mystique. Graham has called McCarthy’s paper an attempt to “axiomatize computation.” I think that is a great way to think about Lisp’s appeal. Whereas other languages have clearly artificial constructs denoted by reserved words like `while` or `typedef` or `public static void`, Lisp’s design almost seems entailed by the very logic of computing. This quality and Lisp’s original connection to a field as esoteric as “recursive function theory” should make it no surprise that Lisp has so much prestige today.
|
||||
|
||||
### Theory B: Machine of the Future
|
||||
|
||||
Two decades after its creation, Lisp had become, according to the famous [Hacker’s Dictionary][7], the “mother tongue” of artificial intelligence research. Early on, Lisp spread quickly, probably because its regular syntax made implementing it on new machines relatively straightforward. Later, researchers would keep using it because of how well it handled symbolic expressions, important in an era when so much of artificial intelligence was symbolic. Lisp was used in seminal artificial intelligence projects like the [SHRDLU natural language program][8], the [Macsyma algebra system][9], and the [ACL2 logic system][10].
|
||||
|
||||
By the mid-1970s, though, artificial intelligence researchers were running out of computer power. The PDP-10, in particular—everyone’s favorite machine for artificial intelligence work—had an 18-bit address space that increasingly was insufficient for Lisp AI programs. Many AI programs were also supposed to be interactive, and making a demanding interactive program perform well on a time-sharing system was challenging. The solution, originally proposed by Peter Deutsch at MIT, was to engineer a computer specifically designed to run Lisp programs. These Lisp machines, as I described in [my last post on Chaosnet][11], would give each user a dedicated processor optimized for Lisp. They would also eventually come with development environments written entirely in Lisp for hardcore Lisp programmers. Lisp machines, devised in an awkward moment at the tail of the minicomputer era but before the full flowering of the microcomputer revolution, were high-performance personal computers for the programming elite.
|
||||
|
||||
For a while, it seemed as if Lisp machines would be the wave of the future. Several companies sprang into existence and raced to commercialize the technology. The most successful of these companies was called Symbolics, founded by veterans of the MIT AI Lab. Throughout the 1980s, Symbolics produced a line of computers known as the 3600 series, which were popular in the AI field and in industries requiring high-powered computing. The 3600 series computers featured large screens, bit-mapped graphics, a mouse interface, and [powerful graphics and animation software][12]. These were impressive machines that enabled impressive programs. For example, Bob Culley, who worked in robotics research and contacted me via Twitter, was able to implement and visualize a path-finding algorithm on a Symbolics 3650 in 1985. He explained to me that bit-mapped graphics and object-oriented programming (available on Lisp machines via [the Flavors extension][13]) were very new in the 1980s. Symbolics was the cutting edge.
|
||||
|
||||
![Bob Culley's path-finding program.][14] Bob Culley’s path-finding program.
|
||||
|
||||
As a result, Symbolics machines were outrageously expensive. The Symbolics 3600 cost $110,000 in 1983. So most people could only marvel at the power of Lisp machines and the wizardry of their Lisp-writing operators from afar. But marvel they did. Byte Magazine featured Lisp and Lisp machines several times from 1979 through to the end of the 1980s. In the August, 1979 issue, a special on Lisp, the magazine’s editor raved about the new machines being developed at MIT with “gobs of memory” and “an advanced operating system.” He thought they sounded so promising that they would make the two prior years—which saw the launch of the Apple II, the Commodore PET, and the TRS-80—look boring by comparison. A half decade later, in 1985, a Byte Magazine contributor described writing Lisp programs for the “sophisticated, superpowerful Symbolics 3670” and urged his audience to learn Lisp, claiming it was both “the language of choice for most people working in AI” and soon to be a general-purpose programming language as well.
|
||||
|
||||
I asked Paul McJones, who has done lots of Lisp [preservation work][15] for the Computer History Museum in Mountain View, about when people first began talking about Lisp as if it were a gift from higher-dimensional beings. He said that the inherent properties of the language no doubt had a lot to do with it, but he also said that the close association between Lisp and the powerful artificial intelligence applications of the 1960s and 1970s probably contributed too. When Lisp machines became available for purchase in the 1980s, a few more people outside of places like MIT and Stanford were exposed to Lisp’s power and the legend grew. Today, Lisp machines and Symbolics are little remembered, but they helped keep the mystique of Lisp alive through to the late 1980s.
|
||||
|
||||
### Theory C: Learn to Program
|
||||
|
||||
In 1985, MIT professors Harold Abelson and Gerald Sussman, along with Sussman’s wife, Julie Sussman, published a textbook called Structure and Interpretation of Computer Programs. The textbook introduced readers to programming using the language Scheme, a dialect of Lisp. It was used to teach MIT’s introductory programming class for two decades. My hunch is that SICP (as the title is commonly abbreviated) about doubled Lisp’s “mystique factor.” SICP took Lisp and showed how it could be used to illustrate deep, almost philosophical concepts in the art of computer programming. Those concepts were general enough that any language could have been used, but SICP’s authors chose Lisp. As a result, Lisp’s reputation was augmented by the notoriety of this bizarre and brilliant book, which has intrigued generations of programmers (and also become [a very strange meme][16]). Lisp had always been “McCarthy’s elegant formalism”; now it was also “that language that teaches you the hidden secrets of programming.”
|
||||
|
||||
It’s worth dwelling for a while on how weird SICP really is, because I think the book’s weirdness and Lisp’s weirdness get conflated today. The weirdness starts with the book’s cover. It depicts a wizard or alchemist approaching a table, prepared to perform some sort of sorcery. In one hand he holds a set of calipers or a compass, in the other he holds a globe inscribed with the words “eval” and “apply.” A woman opposite him gestures at the table; in the background, the Greek letter lambda floats in mid-air, radiating light.
|
||||
|
||||
![The cover art for SICP.][17] The cover art for SICP.
|
||||
|
||||
Honestly, what is going on here? Why does the table have animal feet? Why is the woman gesturing at the table? What is the significance of the inkwell? Are we supposed to conclude that the wizard has unlocked the hidden mysteries of the universe, and that those mysteries consist of the “eval/apply” loop and the Lambda Calculus? It would seem so. This image alone must have done an enormous amount to shape how people talk about Lisp today.
|
||||
|
||||
But the text of the book itself is often just as weird. SICP is unlike most other computer science textbooks that you have ever read. Its authors explain in the foreword to the book that the book is not merely about how to program in Lisp—it is instead about “three foci of phenomena: the human mind, collections of computer programs, and the computer.” Later, they elaborate, describing their conviction that programming shouldn’t be considered a discipline of computer science but instead should be considered a new notation for “procedural epistemology.” Programs are a new way of structuring thought that only incidentally get fed into computers. The first chapter of the book gives a brief tour of Lisp, but most of the book after that point is about much more abstract concepts. There is a discussion of different programming paradigms, a discussion of the nature of “time” and “identity” in object-oriented systems, and at one point a discussion of how synchronization problems may arise because of fundamental constraints on communication that play a role akin to the fixed speed of light in the theory of relativity. It’s heady stuff.
|
||||
|
||||
All this isn’t to say that the book is bad. It’s a wonderful book. It discusses important programming concepts at a higher level than anything else I have read, concepts that I had long wondered about but didn’t quite have the language to describe. It’s impressive that an introductory programming textbook can move so quickly to describing the fundamental shortfalls of object-oriented programming and the benefits of functional languages that minimize mutable state. It’s mind-blowing that this then turns into a discussion of how a stream paradigm, perhaps something like today’s [RxJS][18], can give you the best of both worlds. SICP distills the essence of high-level program design in a way reminiscent of McCarthy’s original Lisp paper. The first thing you want to do after reading it is get your programmer friends to read it; if they look it up, see the cover, but then don’t read it, all they take away is that some mysterious, fundamental “eval/apply” thing gives magicians special powers over tables with animal feet. I would be deeply impressed in their shoes too.
|
||||
|
||||
But maybe SICP’s most important contribution was to elevate Lisp from curious oddity to pedagogical must-have. Well before SICP, people told each other to learn Lisp as a way of getting better at programming. The 1979 Lisp issue of Byte Magazine is testament to that fact. The same editor that raved about MIT’s new Lisp machines also explained that the language was worth learning because it “represents a different point of view from which to analyze problems.” But SICP presented Lisp as more than just a foil for other languages; SICP used Lisp as an introductory language, implicitly making the argument that Lisp is the best language in which to grasp the fundamentals of computer programming. When programmers today tell each other to try Lisp before they die, they arguably do so in large part because of SICP. After all, the language [Brainfuck][19] presumably offers “a different point of view from which to analyze problems.” But people learn Lisp instead because they know that, for twenty years or so, the Lisp point of view was thought to be so useful that MIT taught Lisp to undergraduates before anything else.
|
||||
|
||||
### Lisp Comes Back
|
||||
|
||||
The same year that SICP was released, Bjarne Stroustrup published the first edition of The C++ Programming Language, which brought object-oriented programming to the masses. A few years later, the market for Lisp machines collapsed and the AI winter began. For the next decade and change, C++ and then Java would be the languages of the future and Lisp would be left out in the cold.
|
||||
|
||||
It is of course impossible to pinpoint when people started getting excited about Lisp again. But that may have happened after Paul Graham, Y Combinator co-founder and Hacker News creator, published a series of influential essays pushing Lisp as the best language for startups. In his essay [“Beating the Averages,”][20] for example, Graham argued that Lisp macros simply made Lisp more powerful than other languages. He claimed that using Lisp at his own startup, Viaweb, helped him develop features faster than his competitors were able to. [Some programmers at least][21] were persuaded. But the vast majority of programmers did not switch to Lisp.
|
||||
|
||||
What happened instead is that more and more Lisp-y features have been incorporated into everyone’s favorite programming languages. Python got list comprehensions. C# got Linq. Ruby got… well, Ruby [is a Lisp][22]. As Graham noted even back in 2001, “the default language, embodied in a succession of popular languages, has gradually evolved toward Lisp.” Though other languages are gradually becoming like Lisp, Lisp itself somehow manages to retain its special reputation as that mysterious language that few people understand but everybody should learn. In 1980, on the occasion of Lisp’s 20th anniversary, McCarthy wrote that Lisp had survived as long as it had because it occupied “some kind of approximate local optimum in the space of programming languages.” That understates Lisp’s real influence. Lisp hasn’t survived for over half a century because programmers have begrudgingly conceded that it is the best tool for the job decade after decade; in fact, it has survived even though most programmers do not use it at all. Thanks to its origins and use in artificial intelligence research and perhaps also the legacy of SICP, Lisp continues to fascinate people. Until we can imagine God creating the world with some newer language, Lisp isn’t going anywhere.
|
||||
|
||||
If you enjoyed this post, more like it come out every two weeks! Follow [@TwoBitHistory][23] on Twitter or subscribe to the [RSS feed][24] to make sure you know when a new post is out.
|
||||
|
||||
Previously on TwoBitHistory…
|
||||
|
||||
> This week's post: A look at Chaosnet, the network that gave us the "CH" DNS class.<https://t.co/dC7xqPYzi5>
|
||||
>
|
||||
> — TwoBitHistory (@TwoBitHistory) [September 30, 2018][25]
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://twobithistory.org/2018/10/14/lisp.html
|
||||
|
||||
作者:[Two-Bit History][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://twobithistory.org
|
||||
[b]: https://github.com/lujun9972
|
||||
[1]: https://xkcd.com/224/
|
||||
[2]: https://xkcd.com/297/
|
||||
[3]: https://www.gnu.org/fun/jokes/eternal-flame.en.html
|
||||
[4]: https://www.reddit.com/r/ProgrammerHumor/comments/5c14o6/xkcd_lisp/d9szjnc/
|
||||
[5]: https://twobithistory.org/images/byte_lisp.jpg
|
||||
[6]: http://languagelog.ldc.upenn.edu/myl/llog/jmc.pdf
|
||||
[7]: https://en.wikipedia.org/wiki/Jargon_File
|
||||
[8]: https://hci.stanford.edu/winograd/shrdlu/
|
||||
[9]: https://en.wikipedia.org/wiki/Macsyma
|
||||
[10]: https://en.wikipedia.org/wiki/ACL2
|
||||
[11]: https://twobithistory.org/2018/09/30/chaosnet.html
|
||||
[12]: https://youtu.be/gV5obrYaogU?t=201
|
||||
[13]: https://en.wikipedia.org/wiki/Flavors_(programming_language)
|
||||
[14]: https://twobithistory.org/images/symbolics.jpg
|
||||
[15]: http://www.softwarepreservation.org/projects/LISP/
|
||||
[16]: https://knowyourmeme.com/forums/meme-research/topics/47038-structure-and-interpretation-of-computer-programs-hugeass-image-dump-for-evidence
|
||||
[17]: https://twobithistory.org/images/sicp.jpg
|
||||
[18]: https://rxjs-dev.firebaseapp.com/
|
||||
[19]: https://en.wikipedia.org/wiki/Brainfuck
|
||||
[20]: http://www.paulgraham.com/avg.html
|
||||
[21]: https://web.archive.org/web/20061004035628/http://wiki.alu.org/Chris-Perkins
|
||||
[22]: http://www.randomhacks.net/2005/12/03/why-ruby-is-an-acceptable-lisp/
|
||||
[23]: https://twitter.com/TwoBitHistory
|
||||
[24]: https://twobithistory.org/feed.xml
|
||||
[25]: https://twitter.com/TwoBitHistory/status/1046437600658169856?ref_src=twsrc%5Etfw
|
@ -0,0 +1,71 @@
|
||||
translating by belitex
|
||||
|
||||
What is an SRE and how does it relate to DevOps?
|
||||
======
|
||||
The SRE role is common in large enterprises, but smaller businesses need it, too.
|
||||
|
||||
![](https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/toolbox-learn-draw-container-yearbook.png?itok=xDbwz1pP)
|
||||
|
||||
Even though the site reliability engineer (SRE) role has become prevalent in recent years, many people—even in the software industry—don't know what it is or does. This article aims to clear that up by explaining what an SRE is, how it relates to DevOps, and how an SRE works when your entire engineering organization can fit in a coffee shop.
|
||||
|
||||
### What is site reliability engineering?
|
||||
|
||||
[Site Reliability Engineering: How Google Runs Production Systems][1], written by a group of Google engineers, is considered the definitive book on site reliability engineering. Google vice president of engineering Ben Treynor Sloss [coined the term][2] back in the early 2000s. He defined it as: "It's what happens when you ask a software engineer to design an operations function."
|
||||
|
||||
Sysadmins have been writing code for a long time, but for many of those years, a team of sysadmins managed many machines manually. Back then, "many" may have been dozens or hundreds, but when you scale to thousands or hundreds of thousands of hosts, you simply can't continue to throw people at the problem. When the number of machines gets that large, the obvious solution is to use code to manage hosts (and the software that runs on them).
|
||||
|
||||
Also, until fairly recently, the operations team was completely separate from the developers. The skillsets for each job were considered completely different. The SRE role tries to bring both jobs together.
|
||||
|
||||
Before we dig deeper into what makes an SRE and how SREs work with the development team, we need to understand how site reliability engineering works within the DevOps paradigm.
|
||||
|
||||
### Site reliability engineering and DevOps
|
||||
|
||||
At its core, site reliability engineering is an implementation of the DevOps paradigm. There seems to be a wide array of ways to [define DevOps][3]. The traditional model, where the development ("devs") and operations ("ops") teams were separated, led to the team that writes the code not being responsible for how it works when customers start using it. The development team would "throw the code over the wall" to the operations team to install and support.
|
||||
|
||||
This situation can lead to a significant amount of dysfunction. The goals of the dev and ops teams are constantly at odds—a developer wants customers to use the "latest and greatest" piece of code, but the operations team wants a steady system with as little change as possible. Their premise is that any change can introduce instability, while a system with no changes should continue to behave in the same manner. (It's worth noting that minimizing change on the software side is not the only factor in preventing instability. For example, if your web application stays exactly the same, but the number of customers grows by 10x, your application may break in many different ways.)
|
||||
|
||||
The premise of DevOps is that by merging these two distinct jobs into one, you eliminate contention. If the "dev" wants to deploy new code all the time, they have to deal with any fallout the new code creates. As Amazon's [Werner Vogels said][4], "you build it, you run it" (in production). But developers already have a lot to worry about. They are continually pushed to develop new features for their employer's products. Asking them to understand the infrastructure, including how to deploy, configure, and monitor their service, may be asking a little too much from them. This is where an SRE steps in.
|
||||
|
||||
When a web application is developed, there are often many people who contribute. There are user interface designers, graphic designers, frontend engineers, backend engineers, and a whole host of other specialties (depending on the technologies used). The requirements also include how the code gets managed (e.g., deployed, configured, and monitored), and these are the SRE's areas of specialty. And just as an engineer developing a nice look and feel for an application benefits from knowledge of the backend engineer's job (e.g., how data is fetched from a database), the SRE understands how the deployment system works and how to adapt it to the specific needs of that particular codebase or project.
|
||||
|
||||
So, an SRE is not just "an ops person who codes." Rather, the SRE is another member of the development team with a different set of skills, particularly around deployment, configuration management, monitoring, and metrics. But, just as an engineer developing a nice look and feel for an application must know how data is fetched from a data store, an SRE is not solely responsible for these areas. The entire team works together to deliver a product that can be easily updated, managed, and monitored.
|
||||
|
||||
The need for an SRE naturally comes about when a team is implementing DevOps but realizes they are asking too much of the developers and need a specialist for what the ops team used to handle.
|
||||
|
||||
### How the SRE works at a startup
|
||||
|
||||
This model works well when there are hundreds of employees (let alone when you are the size of Google or Facebook). Large companies have SRE teams that are split up and embedded into each development team. But a startup doesn't have those economies of scale, and engineers often wear many hats. So, where does the "SRE hat" sit in a small company? One approach is to fully adopt DevOps and have the developers be responsible for the typical tasks an SRE would perform at a larger company. On the other side of the spectrum, you hire specialists — a.k.a. SREs.
|
||||
|
||||
The most obvious advantage of trying to put the SRE hat on a developer's head is it scales well as your team grows. Also, the developer will understand all the quirks of the application. But many startups use a wide variety of SaaS products to power their infrastructure. The most obvious is the infrastructure platform itself. Then you add in metrics systems, site monitoring, log analysis, containers, and more. While these technologies solve some problems, they create an additional complexity cost. The developer would need to understand all those technologies and services in addition to the core technologies (e.g., languages) the application uses. In the end, keeping on top of all of that technology can be overwhelming.
|
||||
|
||||
The other option is to hire a specialist to handle the SRE job. Their responsibility would be to focus on deployment, configuration, monitoring, and metrics, freeing up the developer's time to write the application. The disadvantage is that the SRE would have to split their time between multiple, different applications (i.e., the SRE needs to support the breadth of applications throughout engineering). This likely means they may not have the time to gain any depth of knowledge of any of the applications; however, they would be in a position to see how all the different pieces fit together. This "30,000-foot view" can help prioritize the weak spots to fix in the system as a whole.
|
||||
|
||||
There is one key piece of information I am ignoring: your other engineers. They may have a deep desire to understand how deployment works and how to use the metrics system to the best of their ability. Also, hiring an SRE is not an easy task. You are looking for a mix of sysadmin skills and software engineering skills. (I am specific about software engineers, vs. just "being able to code," because software engineering involves more than just writing code [e.g., writing good tests or documentation].)
|
||||
|
||||
Therefore, in some cases, it may make more sense for the "SRE hat" to live on a developer's head. If so, keep an eye on the amount of complexity in both the code and the infrastructure (SaaS or internal). At some point, the complexity on either end will likely push toward more specialization.
|
||||
|
||||
### Conclusion
|
||||
|
||||
An SRE team is one of the most efficient ways to implement the DevOps paradigm in a startup. I have seen a couple of different approaches, but I believe that hiring a dedicated SRE (pretty early) at your startup will free up time for the developers to focus on their specific challenges. The SRE can focus on improving the tools (and processes) that make the developers more productive. Also, an SRE will focus on making sure your customers have a product that is reliable and secure.
|
||||
|
||||
Craig Sebenik will present [SRE (and DevOps) at a Startup][5] at [LISA18][6], October 29-31 in Nashville, Tennessee.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://opensource.com/article/18/10/sre-startup
|
||||
|
||||
作者:[Craig Sebenik][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://opensource.com/users/craig5
|
||||
[b]: https://github.com/lujun9972
|
||||
[1]: http://shop.oreilly.com/product/0636920041528.do
|
||||
[2]: https://landing.google.com/sre/interview/ben-treynor.html
|
||||
[3]: https://opensource.com/resources/devops
|
||||
[4]: https://queue.acm.org/detail.cfm?id=1142065
|
||||
[5]: https://www.usenix.org/conference/lisa18/presentation/sebenik
|
||||
[6]: https://www.usenix.org/conference/lisa18
|
@ -0,0 +1,213 @@
|
||||
What MMORPGs can teach us about leveling up a heroic developer team
|
||||
======
|
||||
The team-building skills that make winning gaming guilds also produce successful work teams.
|
||||
![](https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/team-game-play-inclusive-diversity-collaboration.png?itok=8sUXV7W1)
|
||||
|
||||
For the better part of a decade, I have been leading guilds in massively multiplayer role-playing games (MMORPGs). Currently, I lead a guild in [Guild Wars 2][1], and before that, I led progression raid teams in [World of Warcraft][2], while also maintaining a career as a software engineer. As I made the transition into software development, it became clear that the skills I gained in building successful raid groups translated well to building successful tech teams.
|
||||
|
||||
|
||||
![Guild Wars 2 guild members after an event.][4]
|
||||
|
||||
Guild Wars 2 guild members after an event.
|
||||
|
||||
### Identify your problem
|
||||
|
||||
The first step to building a successful team, whether in software or MMORPGs, is to recognize your problem. In video games, it's obvious: the monster. If you don't take it down, it will take you down. In tech, it's a product or service you want to deliver to solve your users' problems. In both situations, this is a problem you are unlikely to solve by yourself. You need a team.
|
||||
|
||||
In MMORPGs, the goal is to create a "progression" raid team that improves over time for faster and smoother tackling of objectives together, allowing it to push its goals further and further. You will not reach the second objective in a raid without tackling the initial one first.
|
||||
|
||||
In this article, I'll share how you can build, improve, and maintain your own progression software and/or systems teams. I'll cover assembling our team, leading the team, optimizing for success, continuously improving, and keeping morale high.
|
||||
|
||||
### Assemble your team
|
||||
|
||||
In MMORPGs, progression teams commonly have different levels of commitment, summed up into three tiers: hardcore, semi-hardcore, and casuals. These commitment levels translate to what players value in their raiding experience.
|
||||
|
||||
You may have heard of the concept of "cultural fit" vs "value fit." One of the most important things in assembling your team is making sure everyone aligns with your concrete values and goals. Creating teams based on cultural fit is problematic because culture is hard to define. Matching new recruits based on their culture will also result in homogenous groups.
|
||||
|
||||
Hardcore teams value dedication, mastery, and achievements. Semi-hardcore teams value efficiency, balance, and empathy. Casual teams balance fun above all else. If you put a casual player in a hardcore raid group, the casual player is probably going to tell the hardcore players they're taking things too seriously, while the hardcore players will tell the casual player they aren't taking things seriously enough (then remove them promptly).
|
||||
|
||||
#### Values-driven team building
|
||||
|
||||
A mismatch in values results in a negative experience for everyone. You need to build your team on a shared foundation of what is important, and each member should align with your team's values and goals. What is important to your team? What do you want your team's driving values to be? If you cannot easily answer those questions, take a moment right away and define them with your team.
|
||||
|
||||
The values you define should influence which new members you recruit. In building raid teams, each potential member should be assessed not only on their skills but also their values. One of my previous employers had a "value fit" interview that a person must pass after their skills assessment to be considered for hiring. It doesn't matter if you're a "ninja" or a "rockstar" if you don't align with the company's values.
|
||||
|
||||
#### Diversify your team
|
||||
|
||||
When looking for new positions, I want a team that has a strong emphasis on delivering a quality product while understanding that work/life balance should be weighed more heavily on the life side ("life/work balance"). I steer away from companies with meager, two-week PTO policies, commitments over 40 hours, or rigid schedules. When interviews with companies show less emphasis on technical collaboration, I know there is a values mismatch.
|
||||
|
||||
While values are important to share, the same skills, experience, and roles are not. Ten tanks might be able to get a boss down, eventually, but it is certainly more effective to have diversity. You need people who are skilled and trained in their specific roles to work together, with everyone focusing on what they do best.
|
||||
|
||||
In MMORPGs, there are always considerably more people who want to play damage roles because they get all the glory. However, you're not going to down the boss without at least a tank and a healer. The tank and the healer mitigate the damage so that the damage classes can do what they do. We need to be respectful of the roles we each play and realize we're much better when we work together. There shouldn't be developers vs. operators when working together helps us deliver more effectively.
|
||||
|
||||
Diversity in roles is important, but so is diversity within roles. If you take 10 necromancers to a raid, you'll quickly find there are problems you can't solve with your current ability pool. You need to throw in some elementalists, thieves, and mesmers, too. It's the same with developers; if everyone comes from the same background, with the same abilities and experience, you're going to face unnecessary challenges.
|
||||
|
||||
It's better to take the inexperienced person who is willing to learn than the experienced person unwilling to take criticism. If a developer doesn't have hundreds of open source commits, it doesn't necessarily mean they are less skilled. Everyone has to learn somewhere. Senior developers and operators don't appear out of nowhere. Teams often only look for "experienced" people, spending more time with less manpower than if they had just trained an inexperienced recruit.
|
||||
|
||||
Experience helps people pick things up faster, but no one starts out knowing exactly what to do, and you'd be surprised how seemingly unrelated skills translate well when applied to new experiences (like raid leadership!). **Hire and support junior technologists.** Keep in mind that a team comprised of a high percentage of inexperienced people will take considerably more time to achieve their objectives. It's important to find a good balance, weighed more heavily with experienced people available to mentor.
|
||||
|
||||
Every member of a team comes with strengths we need to utilize. In raids, we become obsessed with the "meta," which is a build for a class that is dubbed most efficient. We become so obsessed with what is "the best" that we forget about what "just works." In reality, forcing someone to dramatically change their playstyle because someone else determined this other playstyle to be slightly better will not be as efficient as just letting a player play what they have expertise in.
|
||||
|
||||
We get so excited about the latest and greatest in tech that we don't always think about the toll it takes. It's OK to choose "boring" technology and adopt new technologies as they become standard. What's "the best" is always changing, so focus on what's best for your team. Sometimes the best is what people are the most comfortable with. **Trust in your team's expertise rather than the tools.**
|
||||
|
||||
### Take the lead
|
||||
|
||||
You need a strong leader to lead a team and guide the overall direction, working for the team. Servant leadership is the idea that we serve our entire team and their individual needs before our own, and it is the leadership philosophy I have found most successful. Growth should be promoted at the contributor level to encourage growth at the macro level. As leaders, we want to work with each individual to identify their strengths and weaknesses. We want to keep morale high and keep everyone excited and focused so that they can succeed.
|
||||
|
||||
Above all, a leader wants to keep the team working together. Sometimes this means resolving conflicts or holding meetings. Often this means breaking down communication barriers and improving team communication.
|
||||
|
||||
![Guild Wars 2 raid team encountering Samarog.][6]
|
||||
|
||||
Guild Wars 2 raid team encountering Samarog.
|
||||
|
||||
#### Communicate effectively
|
||||
|
||||
As companies move towards the remote/distributed model, optimizing communication and information access has become more critical than ever. How do you make sure everyone is on the same page?
|
||||
|
||||
During my World of Warcraft years, we used voice-over-IP software called Ventrilo. It was important for each team member to be able to hear my instructions, so whenever too many people started talking, someone would say "Clear Vent!" to silence the channel. You want the important information to be immediately accessible. In remote teams, this is usually achieved by a zero-noise "#announcements" channel in Slack where only need-to-know information is present.
|
||||
|
||||
A central knowledge base is also crucial. Guild Wars 2 has a /wiki command built into the game, which brings up a player-maintained wiki in the browser to look up information as needed without bothering other players. In most companies where I've worked, information is stored across various repositories, wikis, and documents, making it difficult and time-consuming to seek a source of truth. A central, searchable wiki, like Guild Wars 2 has, would relieve this issue. Treat knowledge sharing as an important component of your company!
|
||||
|
||||
### Optimize for what works
|
||||
|
||||
When you have your team assembled and are communicating effectively, you're prepared to take on your objectives. You need to think about it strategically, whether it's a monster or a system, breaking it down into steps and necessary roles. It's going to feel like you don't know what you're doing—but it's a starting point. The monster is going to die as long as you deplete its health pool, despite how messy the encounter may be at first. Your product can start making money with the minimum. Only once you have achieved the minimum can you move the goalpost.
|
||||
|
||||
Your team learns what works and how to improve when they have the freedom to experiment. Trying something and failing is OK if it's a learning experience. It can even help identify overlooked weaknesses in your systems or processes.
|
||||
|
||||
![Deaths during the Samarog encounter.][8]
|
||||
|
||||
Deaths during the Samarog encounter.
|
||||
|
||||
We live in the information age where there are various strategies at our disposal, but what works for others might not work for your team. While there is no one way to do anything, some ways are definitely better than others. Perform educated experiments based on the experience of others. Don't go in without a basic strategy unless absolutely necessary.
|
||||
|
||||
Your team needs to feel comfortable making mistakes. The only true failures are when nothing can be salvaged and nothing was learned. For your team to feel comfortable experimenting, you need to foster a culture where people are held accountable but not punished for their mistakes. When your team fears retaliation, they will be hesitant to try something unfamiliar. Worse, they might hide the mistakes they've made, and you'll find out too late to recover.
|
||||
|
||||
Large-scale failures are rarely the result of one person. They are an accumulation of mistakes and oversights by different people combined with things largely outside the team's control. Tank healer went down? OK, another healer will cover. Someone is standing in a ring of fire. Your only remaining healer is overloaded, everything's on cooldown, and now your tank's block missed thanks to a random number generator outside her control. It's officially reached the point of failure, and the raid has wiped.
|
||||
|
||||
Is it the tank healer's fault we wiped? They went down first and caused some stress on the other healer, sure. But there were enough people alive to keep going. It was an accumulation of everything.
|
||||
|
||||
In systems, there are recovery protocols and hopefully automation around failures. Someone on-call will step in to provide coverage. Failures are more easily prevented when we become better aware of our systems.
|
||||
|
||||
#### Measure success (or failures) with metrics
|
||||
|
||||
How do you become more aware? Analysis of logs and metrics. Monitoring and observability.
|
||||
|
||||
Logs, metrics, and analysis are as important in raids as they are around your systems and applications. After objectives, we review damage output, support uptime, time to completion, and failed mechanics.
|
||||
|
||||
Your teams need to collect similar metrics. You need baseline metrics to compare and ensure progress has been made. In systems and applications, you care about speed, health, and overall output, too. Without being able to see these logs and metrics, you have limited measures of success.
|
||||
|
||||
![Boon uptime stats][10]
|
||||
|
||||
Boon uptime stats for my healer, Lullaby of Thix.
|
||||
|
||||
### Continuously improve
|
||||
|
||||
A team is a sum of its parts, with an ultimate goal of being coordinated at both the individual and group levels. You want people comfortable in their roles and who can make decisions in the best interest of the whole team; people who know how to step in when needed and seamlessly return to their original role after recovery. This is not easy, and many teams never reach this level of systemization.
|
||||
|
||||
One of the ways we can improve coordination is to help people grow where they are struggling, whether by extending additional educational resources or working with them directly to strengthen their skills. Simply telling someone to "get good" (a phrase rampant in gaming culture) is not going to help. Constructive feedback with working points and pairing will, though.
|
||||
|
||||
Make sure you're measuring progress properly. You can't compare a healer's damage output to that of a dedicated damage class. Recognize that if someone's performance looks different from another's, it could be that they are taking on roles that others are neglecting, like reviewing code or harder-than-average tickets.
|
||||
|
||||
If one person isn't carrying their weight and the team notices, you have to address it. Start positively, give them a chance to improve: resources, assistance, or whatever they need (within reason). If they still show no interest in improvement, it's time to let them go to keep your team happy and running smoothly.
|
||||
|
||||
### Maintain happiness
|
||||
|
||||
Happiness is important for team longevity. After the honeymoon phase is over, what makes them stay?
|
||||
|
||||
#### Safety
|
||||
|
||||
One of the core, foundational needs of maintaining happiness is maintaining safety. **People stay where they feel safe.**
|
||||
|
||||
In a game, it's easy to hide your identity and try to blend in with the perceived status quo. When people are accepted for who they are, they are comfortable enough to stay. And because they stay, a diverse community is built.
|
||||
|
||||
One way to create this sense of safety is to use a Code of Conduct (CoC) that, as explicitly as possible, maps out boundaries and the consequences of violating them. It serves as a definitive guide to acceptable behavior and lets people have minimal doubts as to what is and is not allowed. While having a CoC is a good start, **it is meaningless if it is not actively enforced.**
|
||||
|
||||
I've had to cite CoC violations to remove gaming members from our community a few times. Thankfully this doesn't happen very often, because we review our values and CoC as part of the interview process. I have turned people away because they weren't sure they could commit to it. Your values and CoC serve as a filter in your recruiting process, preventing some potential conflicts.
|
||||
|
||||
#### Inclusion
|
||||
|
||||
Once people feel safe, they want to feel included and a sense of belonging. In raids, people who are constantly considered substitutes are going to find a different team where they are appreciated. If hero worship is rampant in your team's culture, you will have a difficult time fostering inclusion. No one likes feeling like they are constantly in the shadows. Everyone has something to bring to the table when given the chance.
|
||||
|
||||
#### Reputation management
|
||||
|
||||
Maintaining team happiness also means maintaining the team's reputation. Having toxic members representing you damages your reputation.
|
||||
|
||||
In Guild Wars 2, a few members belonging to the same guild wanted the achievements and rewards that come from winning a player vs. player (PvP) tournament, so they purchased a tournament win—essentially, skilled PvP players played as them and won the tournament. ArenaNet, the maker of Guild Wars 2, found out and reprimanded them. The greater community found out and lost respect for the entire guild, despite only a tiny percent of the guild being the offenders. You don't want people to lose faith in your team because of bad actors.
|
||||
|
||||
Having a positive impact on the greater community also carries a positive impact on your image. In games, we do this by hosting events, helping newcomers, and just being friendly in our interactions with people outside our guilds. In business, maybe you do this by sponsoring things you agree with or open sourcing your core software products.
|
||||
|
||||
If you have a good reputation, earned by both how you treat your members and how you treat your community, recruiting new talent and retaining the talent you have will be much easier.
|
||||
|
||||
Recruiting and retraining take significantly more effort than letting people just relax from time to time. If your team members burn out, they are going to leave. When you're constantly retraining new people, you have more and more opportunities for mistakes. New people to your team generally lack knowledge about the deep internals of your system or product. **High turnover leads to high failure.**
|
||||
|
||||
#### Avoid burnout
|
||||
|
||||
Burnout happens in gaming, too. Everyone needs a break. Time off is good for everyone! You need to balance your team's goals and health. While we may feel like cogs in a machine, we are not machines. Sprint after sprint is really just a full-speed marathon.
|
||||
|
||||
#### Celebrate wins
|
||||
|
||||
Relieve some pressure by celebrating your team's success. This stuff is hard! Recognize and reward your teams. Were you working on a monster encounter for weeks and finally got it down? Have a /dance party! Finally tackled a bug that plagued you for months? Send everyone a cupcake!
|
||||
|
||||
![Guild Wars 2 dance party][12]
|
||||
|
||||
A dance party after a successful Keep Construct encounter in Guild Wars 2.
|
||||
|
||||
### Always evolve
|
||||
|
||||
To thrive as a team, you need to evolve with your market, your company, and your community. Change is inevitable. Embrace it. Grow. I truly believe that the worst thing you can say is, "We've always done it this way, and we're not going to change."
|
||||
|
||||
Building, maintaining, and growing a heroic team is an arduous process that needs constant evolution, but the benefits are infinite.
|
||||
|
||||
Aly Fulton will present [It's Dangerous to Go Alone: Leveling Up a Heroic Team][13] at [LISA18][14], October 29-31 in Nashville, Tenn.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://opensource.com/article/18/10/what-mmorpgs-can-teach-us
|
||||
|
||||
作者:[Aly Fulton][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://opensource.com/users/sinthetix
|
||||
[b]: https://github.com/lujun9972
|
||||
[1]: https://www.guildwars2.com/
|
||||
[2]: https://worldofwarcraft.com/
|
||||
[3]: /file/412396
|
||||
[4]: https://opensource.com/sites/default/files/uploads/lime_southsun_cove.png (Guild Wars 2 guild members after an event.)
|
||||
[5]: /file/412401
|
||||
[6]: https://opensource.com/sites/default/files/uploads/lime_samarog_readycheck.png (Guild Wars 2 raid team encountering Samarog.)
|
||||
[7]: /file/412406
|
||||
[8]: https://opensource.com/sites/default/files/uploads/lime_samarog_deaths.png (Deaths during the Samarog encounter.)
|
||||
[9]: /file/412411
|
||||
[10]: https://opensource.com/sites/default/files/uploads/boon_uptime.png (Boon uptime stats)
|
||||
[11]: /file/412416
|
||||
[12]: https://opensource.com/sites/default/files/uploads/lime_keep_construct_trophy_dance_party.png (Guild Wars 2 dance party)
|
||||
[13]: https://www.usenix.org/conference/lisa18/presentation/fulton
|
||||
[14]: https://www.usenix.org/conference/lisa18
|
@ -1,639 +0,0 @@
|
||||
BriFuture is translating this article
|
||||
|
||||
# Compiling Lisp to JavaScript From Scratch in 350 LOC
|
||||
|
||||
In this article we will look at a from-scratch implementation of a compiler from a simple LISP-like calculator language to JavaScript. The complete source code can be found [here][7].
|
||||
|
||||
We will:
|
||||
|
||||
1. Define our language and write a simple program in it
|
||||
|
||||
2. Implement a simple parser combinator library
|
||||
|
||||
3. Implement a parser for our language
|
||||
|
||||
4. Implement a pretty printer for our language
|
||||
|
||||
5. Define a subset of JavaScript for our usage
|
||||
|
||||
6. Implement a code translator to the JavaScript subset we defined
|
||||
|
||||
7. Glue it all together
|
||||
|
||||
Let's start!
|
||||
|
||||
### 1\. Defining the language
|
||||
|
||||
The main attraction of lisps is that their syntax already represents a tree, which is why they are so easy to parse. We'll see that soon. But first, let's define our language. Here's a BNF description of our language's syntax:
|
||||
|
||||
```
|
||||
program ::= expr
|
||||
expr ::= <integer> | <name> | ([<expr>])
|
||||
```
|
||||
|
||||
Basically, our language lets us define one expression at the top level, which it will evaluate. An expression is either an integer, for example `5`, a variable, for example `x`, or a list of expressions, for example `(add x 1)`.
|
||||
|
||||
An integer evaluates to itself, a variable evaluates to whatever it is bound to in the current environment, and a list evaluates to a function call where the first element is the function and the rest are its arguments.
|
||||
|
||||
We have some built-in special forms in our language so we can do more interesting stuff:
|
||||
|
||||
* A let expression lets us introduce new variables into the environment of the let's body. The syntax is:
|
||||
|
||||
```
|
||||
let ::= (let ([<letarg>]) <body>)
|
||||
letarg ::= (<name> <expr>)
|
||||
body ::= <expr>
|
||||
```
|
||||
|
||||
* A lambda expression evaluates to an anonymous function definition. The syntax is:
|
||||
|
||||
```
|
||||
lambda ::= (lambda ([<name>]) <body>)
|
||||
```
|
||||
|
||||
We also have a few built-in functions: `add`, `mul`, `sub`, `div`, and `print`.
|
||||
|
||||
Let's see a quick example of a program written in our language:
|
||||
|
||||
```
|
||||
(let
|
||||
((compose
|
||||
(lambda (f g)
|
||||
(lambda (x) (f (g x)))))
|
||||
(square
|
||||
(lambda (x) (mul x x)))
|
||||
(add1
|
||||
(lambda (x) (add x 1))))
|
||||
(print ((compose square add1) 5)))
|
||||
```
|
||||
|
||||
This program defines three functions, `compose`, `square`, and `add1`, and then prints the result of the computation `((compose square add1) 5)`.
|
||||
|
||||
I hope this is enough information about the language. Let's start implementing it!
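One housekeeping note before the code: the article doesn't show a module header, so here is a sketch of the extension and imports the snippets below appear to assume (my reconstruction from the functions they use, not part of the original):

```
{-# LANGUAGE LambdaCase #-}

module Main where

-- Reconstructed imports (an assumption; the article omits its module header):
import Control.Applicative (Alternative (empty, (<|>)))
import Control.Arrow ((***))        -- used in transLet's letParams
import Data.Bifunctor (first)       -- used in the Functor instance for Parser
import Data.Bool (bool)             -- used in the pretty printer
import Data.List (intercalate)
import System.Environment (getArgs)
```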
|
||||
|
||||
We can define the language in Haskell like this:
|
||||
|
||||
```
|
||||
type Name = String
|
||||
|
||||
data Expr
|
||||
= ATOM Atom
|
||||
| LIST [Expr]
|
||||
deriving (Eq, Read, Show)
|
||||
|
||||
data Atom
|
||||
= Int Int
|
||||
| Symbol Name
|
||||
deriving (Eq, Read, Show)
|
||||
```
|
||||
|
||||
We can parse programs in the language we defined into an `Expr`. Also, we are giving the new data types `Eq`, `Read`, and `Show` instances to aid in testing and debugging. You'll be able to use those in the REPL, for example, to verify that all this actually works.
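To make the representation concrete, here is the `(add x 1)` sub-expression from the example program written out as an `Expr` value (the name `addX1` is mine, for illustration):

```
-- (add x 1), represented as an Expr value:
addX1 :: Expr
addX1 = LIST [ATOM (Symbol "add"), ATOM (Symbol "x"), ATOM (Int 1)]
```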
|
||||
|
||||
The reason we did not define `lambda`, `let` and the other built-in functions as part of the syntax is because we can get away with it in this case. These functions are just a more specific case of a `LIST`. So I decided to leave this to a later phase.
|
||||
|
||||
Usually, you would like to define these special cases in the abstract syntax - to improve error messages, to enable static analysis and optimizations, and such - but we won't do that here, so this is enough for us.
|
||||
|
||||
Another thing you would usually like to do is add some annotation to the syntax. For example, the location: which file did this `Expr` come from, and which row and column in that file? You can use this in later stages to print the location of errors, even when the errors are found after the parsing stage.
|
||||
|
||||
* _Exercise 1_ : Add a `Program` data type to include multiple `Expr` sequentially
|
||||
|
||||
* _Exercise 2_ : Add location annotation to the syntax tree.
|
||||
|
||||
### 2\. Implement a simple parser combinator library
|
||||
|
||||
The first thing we are going to do is define an embedded domain-specific language (or EDSL) which we will use to define our language's parser. This is often referred to as a parser combinator library. We are doing this strictly for learning purposes; Haskell has great parsing libraries and you should definitely use them when building real software, or even when just experimenting. One such library is [megaparsec][8].
|
||||
|
||||
First, let's talk about the idea behind our parser library implementation. In its essence, our parser is a function that takes some input, might consume some or all of that input, and returns the value it managed to parse along with the rest of the input it hasn't parsed yet, or throws an error if it failed. Let's write that down.
|
||||
|
||||
```
|
||||
newtype Parser a
|
||||
= Parser (ParseString -> Either ParseError (a, ParseString))
|
||||
|
||||
data ParseString
|
||||
= ParseString Name (Int, Int) String
|
||||
|
||||
data ParseError
|
||||
= ParseError ParseString Error
|
||||
|
||||
type Error = String
|
||||
|
||||
```
|
||||
|
||||
Here we defined three main new types.
|
||||
|
||||
First, `Parser a`, is the parsing function we described before.
|
||||
|
||||
Second, `ParseString` is our input or state we carry along. It has three significant parts:
|
||||
|
||||
* `Name`: This is the name of the source
|
||||
|
||||
* `(Int, Int)`: This is the current location in the source
|
||||
|
||||
* `String`: This is the remaining string left to parse
|
||||
|
||||
Third, `ParseError` contains the current state of the parser and an error message.
|
||||
|
||||
Now we want our parser to be flexible, so we will define a few instances for common type classes for it. These instances will allow us to combine small parsers to make bigger parsers (hence the name 'parser combinators').
|
||||
|
||||
The first one is a `Functor` instance. We want a `Functor` instance because we want to be able to define a parser using another parser simply by applying a function on the parsed value. We will see an example of this when we define the parser for our language.
|
||||
|
||||
```
|
||||
instance Functor Parser where
|
||||
fmap f (Parser parser) =
|
||||
Parser (\str -> first f <$> parser str)
|
||||
```
|
||||
|
||||
The second instance is an `Applicative` instance. One common use case for this instance is to lift a pure function over multiple parsers.
|
||||
|
||||
```
|
||||
instance Applicative Parser where
|
||||
pure x = Parser (\str -> Right (x, str))
|
||||
(Parser p1) <*> (Parser p2) =
|
||||
Parser $
|
||||
\str -> do
|
||||
(f, rest) <- p1 str
|
||||
(x, rest') <- p2 rest
|
||||
pure (f x, rest')
|
||||
|
||||
```
|
||||
|
||||
(Note: _We will also implement a Monad instance so we can use do notation here._ )
|
||||
|
||||
The third instance is an `Alternative` instance. We want to be able to supply an alternative parser in case one fails.
|
||||
|
||||
```
|
||||
instance Alternative Parser where
|
||||
empty = Parser (`throwErr` "Failed consuming input")
|
||||
(Parser p1) <|> (Parser p2) =
|
||||
Parser $
|
||||
\pstr -> case p1 pstr of
|
||||
Right result -> Right result
|
||||
Left _ -> p2 pstr
|
||||
```
|
||||
|
||||
The fourth instance is a `Monad` instance, so we'll be able to chain parsers.
|
||||
|
||||
```
|
||||
instance Monad Parser where
|
||||
(Parser p1) >>= f =
|
||||
Parser $
|
||||
\str -> case p1 str of
|
||||
Left err -> Left err
|
||||
Right (rs, rest) ->
|
||||
case f rs of
|
||||
Parser parser -> parser rest
|
||||
|
||||
```
|
||||
|
||||
Next, let's define a way to run a parser and a utility function for failure:
|
||||
|
||||
```
|
||||
|
||||
runParser :: String -> String -> Parser a -> Either ParseError (a, ParseString)
|
||||
runParser name str (Parser parser) = parser $ ParseString name (0,0) str
|
||||
|
||||
throwErr :: ParseString -> String -> Either ParseError a
|
||||
throwErr ps@(ParseString name (row,col) _) errMsg =
|
||||
Left $ ParseError ps $ unlines
|
||||
[ "*** " ++ name ++ ": " ++ errMsg
|
||||
, "* On row " ++ show row ++ ", column " ++ show col ++ "."
|
||||
]
|
||||
|
||||
```
|
||||
|
||||
Now we'll start implementing the combinators which are the API and heart of the EDSL.
|
||||
|
||||
First, we'll define `oneOf`. `oneOf` will succeed if one of the characters in the list supplied to it is the next character of the input and will fail otherwise.
|
||||
|
||||
```
|
||||
oneOf :: [Char] -> Parser Char
|
||||
oneOf chars =
|
||||
Parser $ \case
|
||||
ps@(ParseString name (row, col) str) ->
|
||||
case str of
|
||||
[] -> throwErr ps "Cannot read character of empty string"
|
||||
(c:cs) ->
|
||||
if c `elem` chars
|
||||
then Right (c, ParseString name (row, col+1) cs)
|
||||
else throwErr ps $ unlines ["Unexpected character " ++ [c], "Expecting one of: " ++ show chars]
|
||||
```
|
||||
|
||||
`optional` will stop a parser from throwing an error. It will just return `Nothing` on failure.
|
||||
|
||||
```
|
||||
optional :: Parser a -> Parser (Maybe a)
|
||||
optional (Parser parser) =
|
||||
Parser $
|
||||
\pstr -> case parser pstr of
|
||||
Left _ -> Right (Nothing, pstr)
|
||||
Right (x, rest) -> Right (Just x, rest)
|
||||
```
|
||||
|
||||
`many` will try to run a parser repeatedly until it fails. When it does, it'll return a list of successful parses. `many1` will do the same, but will throw an error if it fails to parse at least once.
|
||||
|
||||
```
|
||||
many :: Parser a -> Parser [a]
|
||||
many parser = go []
|
||||
where go cs = (parser >>= \c -> go (c:cs)) <|> pure (reverse cs)
|
||||
|
||||
many1 :: Parser a -> Parser [a]
|
||||
many1 parser =
|
||||
(:) <$> parser <*> many parser
|
||||
|
||||
```
|
||||
|
||||
These next few parsers use the combinators we defined to make more specific parsers:
|
||||
|
||||
```
|
||||
char :: Char -> Parser Char
|
||||
char c = oneOf [c]
|
||||
|
||||
string :: String -> Parser String
|
||||
string = traverse char
|
||||
|
||||
space :: Parser Char
|
||||
space = oneOf " \n"
|
||||
|
||||
spaces :: Parser String
|
||||
spaces = many space
|
||||
|
||||
spaces1 :: Parser String
|
||||
spaces1 = many1 space
|
||||
|
||||
withSpaces :: Parser a -> Parser a
|
||||
withSpaces parser =
|
||||
spaces *> parser <* spaces
|
||||
|
||||
parens :: Parser a -> Parser a
|
||||
parens parser =
|
||||
(withSpaces $ char '(')
|
||||
*> withSpaces parser
|
||||
<* (spaces *> char ')')
|
||||
|
||||
sepBy :: Parser a -> Parser b -> Parser [b]
|
||||
sepBy sep parser = do
|
||||
frst <- optional parser
|
||||
rest <- many (sep *> parser)
|
||||
pure $ maybe rest (:rest) frst
|
||||
|
||||
```
|
||||
|
||||
Now we have everything we need to start defining a parser for our language.
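As a quick sanity check before moving on, here is what a GHCi session with these combinators might look like (hypothetical output; it assumes you also add `deriving Show` to `ParseString`, which the article leaves out):

```
-- >>> runParser "test" "abc" (string "ab")
-- Right ("ab", ParseString "test" (0,2) "c")
--
-- >>> runParser "test" "hello world" (many1 (oneOf ['a'..'z']))
-- Right ("hello", ParseString "test" (0,5) " world")
```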
|
||||
|
||||
* _Exercise_ : implement an EOF (end of file/input) parser combinator.
|
||||
|
||||
### 3\. Implementing a parser for our language
|
||||
|
||||
To define our parser, we'll use the top-down method.
|
||||
|
||||
```
|
||||
parseExpr :: Parser Expr
|
||||
parseExpr = fmap ATOM parseAtom <|> fmap LIST parseList
|
||||
|
||||
parseList :: Parser [Expr]
|
||||
parseList = parens $ sepBy spaces1 parseExpr
|
||||
|
||||
parseAtom :: Parser Atom
|
||||
parseAtom = parseSymbol <|> parseInt
|
||||
|
||||
parseSymbol :: Parser Atom
|
||||
parseSymbol = fmap Symbol parseName
|
||||
|
||||
```
|
||||
|
||||
Notice that these four functions are a very high-level description of our language. This demonstrates why Haskell is so nice for parsing. Still, after defining the high-level parts, we need to define the lower-level `parseName` and `parseInt`.
|
||||
|
||||
What characters can we use as names in our language? Let's decide to use lowercase letters, digits and underscores, where the first character must be a letter.
|
||||
|
||||
```
|
||||
parseName :: Parser Name
|
||||
parseName = do
|
||||
c <- oneOf ['a'..'z']
|
||||
cs <- many $ oneOf $ ['a'..'z'] ++ "0123456789" ++ "_"
|
||||
pure (c:cs)
|
||||
```
|
||||
|
||||
For integers, we want a sequence of digits, optionally preceded by '-':
|
||||
|
||||
```
|
||||
parseInt :: Parser Atom
|
||||
parseInt = do
|
||||
sign <- optional $ char '-'
|
||||
num <- many1 $ oneOf "0123456789"
|
||||
let result = read $ maybe num (:num) sign
|
||||
pure $ Int result
|
||||
```
|
||||
|
||||
Lastly, we'll define a function to run a parser and get back an `Expr` or an error message.
|
||||
|
||||
```
|
||||
runExprParser :: Name -> String -> Either String Expr
|
||||
runExprParser name str =
|
||||
case runParser name str (withSpaces parseExpr) of
|
||||
Left (ParseError _ errMsg) -> Left errMsg
|
||||
Right (result, _) -> Right result
|
||||
```
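Trying it out in GHCi (a hypothetical session, relying on the derived `Show` instances from earlier):

```
-- >>> runExprParser "repl" "(add x 1)"
-- Right (LIST [ATOM (Symbol "add"),ATOM (Symbol "x"),ATOM (Int 1)])
```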
|
||||
|
||||
* _Exercise 1_ : Write a parser for the `Program` type you defined in the first section
|
||||
|
||||
* _Exercise 2_ : Rewrite `parseName` in Applicative style
|
||||
|
||||
* _Exercise 3_ : Find a way to handle the overflow case in `parseInt` instead of using `read`.
|
||||
|
||||
### 4\. Implement a pretty printer for our language
|
||||
|
||||
One more thing we'd like to do is be able to print our programs as source code. This is useful for better error messages.
|
||||
|
||||
```
|
||||
printExpr :: Expr -> String
|
||||
printExpr = printExpr' False 0
|
||||
|
||||
printAtom :: Atom -> String
|
||||
printAtom = \case
|
||||
Symbol s -> s
|
||||
Int i -> show i
|
||||
|
||||
printExpr' :: Bool -> Int -> Expr -> String
|
||||
printExpr' doindent level = \case
|
||||
ATOM a -> indent (bool 0 level doindent) (printAtom a)
|
||||
LIST (e:es) ->
|
||||
indent (bool 0 level doindent) $
|
||||
concat
|
||||
[ "("
|
||||
, printExpr' False (level + 1) e
|
||||
, bool "\n" "" (null es)
|
||||
, intercalate "\n" $ map (printExpr' True (level + 1)) es
|
||||
, ")"
|
||||
]
|
||||
|
||||
indent :: Int -> String -> String
|
||||
indent tabs e = concat (replicate tabs " ") ++ e
|
||||
```
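For example, pretty printing the `addX1` value from earlier yields nested, indented output (traced by hand from the definitions above, so treat it as a sketch):

```
-- >>> putStrLn (printExpr addX1)
-- (add
--   x
--   1)
```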
|
||||
|
||||
* _Exercise_ : Write a pretty printer for the `Program` type you defined in the first section
|
||||
|
||||
Okay, so far we've written around 200 lines of what's typically called the front end of the compiler. We have around 150 more lines to go and three more tasks: we need to define a subset of JS for our usage, define the translator from our language to that subset, and glue the whole thing together. Let's go!
|
||||
|
||||
### 5\. Define a subset of JavaScript for our usage
|
||||
|
||||
First, we'll define the subset of JavaScript we are going to use:
|
||||
|
||||
```
|
||||
data JSExpr
|
||||
= JSInt Int
|
||||
| JSSymbol Name
|
||||
| JSBinOp JSBinOp JSExpr JSExpr
|
||||
| JSLambda [Name] JSExpr
|
||||
| JSFunCall JSExpr [JSExpr]
|
||||
| JSReturn JSExpr
|
||||
deriving (Eq, Show, Read)
|
||||
|
||||
type JSBinOp = String
|
||||
```
|
||||
|
||||
This data type represents a JavaScript expression. We have two atoms, `JSInt` and `JSSymbol`, to which we'll translate our language's `Atom`s. We have `JSBinOp` to represent a binary operation such as `+` or `*`, and `JSLambda` for anonymous functions, same as our lambda expressions. We have `JSFunCall`, which we'll use both for calling functions and for introducing new names as in `let`, and we have `JSReturn` to return values from functions, as that's required in JavaScript.
|
||||
|
||||
This `JSExpr` type is an **abstract representation** of a JavaScript expression. We will translate our own `Expr`, which is an abstract representation of our language's expressions, to `JSExpr`, and from there to JavaScript. But in order to do that, we need to take this `JSExpr` and produce JavaScript code from it. We'll do that by pattern matching on `JSExpr` recursively and emitting JS code as a `String`. This is basically the same thing we did in `printExpr`. We'll also track the scoping of elements so we can indent the generated code in a nice way.
|
||||
|
||||
```
|
||||
printJSOp :: JSBinOp -> String
|
||||
printJSOp op = op
|
||||
|
||||
printJSExpr :: Bool -> Int -> JSExpr -> String
|
||||
printJSExpr doindent tabs = \case
|
||||
JSInt i -> show i
|
||||
JSSymbol name -> name
|
||||
JSLambda vars expr -> (if doindent then indent tabs else id) $ unlines
|
||||
["function(" ++ intercalate ", " vars ++ ") {"
|
||||
,indent (tabs+1) $ printJSExpr False (tabs+1) expr
|
||||
] ++ indent tabs "}"
|
||||
JSBinOp op e1 e2 -> "(" ++ printJSExpr False tabs e1 ++ " " ++ printJSOp op ++ " " ++ printJSExpr False tabs e2 ++ ")"
|
||||
JSFunCall f exprs -> "(" ++ printJSExpr False tabs f ++ ")(" ++ intercalate ", " (fmap (printJSExpr False tabs) exprs) ++ ")"
|
||||
JSReturn expr -> (if doindent then indent tabs else id) $ "return " ++ printJSExpr False tabs expr ++ ";"
|
||||
```
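Here is the emitter at work on two small values (hand-traced, hypothetical GHCi output):

```
-- >>> putStrLn (printJSExpr False 0 (JSBinOp "+" (JSInt 1) (JSInt 2)))
-- (1 + 2)
--
-- >>> putStrLn (printJSExpr False 0 (JSFunCall (JSSymbol "f") [JSInt 1, JSInt 2]))
-- (f)(1, 2)
```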
|
||||
|
||||
* _Exercise 1_ : Add a `JSProgram` type that will hold multiple `JSExpr` and create a function `printJSExprProgram` to generate code for it.
|
||||
|
||||
* _Exercise 2_ : Add a new type of `JSExpr` - `JSIf`, and generate code for it.
|
||||
|
||||
### 6\. Implement a code translator to the JavaScript subset we defined
|
||||
|
||||
We are almost there. In this section we'll create a function to translate `Expr` to `JSExpr`.
|
||||
|
||||
The basic idea is simple: we'll translate `ATOM` to `JSSymbol` or `JSInt`, and `LIST` to either a function call or one of the special cases we'll translate later.
|
||||
|
||||
```
|
||||
type TransError = String
|
||||
|
||||
translateToJS :: Expr -> Either TransError JSExpr
|
||||
translateToJS = \case
|
||||
ATOM (Symbol s) -> pure $ JSSymbol s
|
||||
ATOM (Int i) -> pure $ JSInt i
|
||||
LIST xs -> translateList xs
|
||||
|
||||
translateList :: [Expr] -> Either TransError JSExpr
|
||||
translateList = \case
|
||||
[] -> Left "translating empty list"
|
||||
ATOM (Symbol s):xs
|
||||
| Just f <- lookup s builtins ->
|
||||
f xs
|
||||
f:xs ->
|
||||
JSFunCall <$> translateToJS f <*> traverse translateToJS xs
|
||||
|
||||
```
|
||||
|
||||
`builtins` is a list of special cases to translate, like `lambda` and `let`. Every case gets its list of arguments, verifies that it's syntactically valid, and translates it to the equivalent `JSExpr`.
|
||||
|
||||
```
|
||||
type Builtin = [Expr] -> Either TransError JSExpr
|
||||
type Builtins = [(Name, Builtin)]
|
||||
|
||||
builtins :: Builtins
|
||||
builtins =
|
||||
[("lambda", transLambda)
|
||||
,("let", transLet)
|
||||
,("add", transBinOp "add" "+")
|
||||
,("mul", transBinOp "mul" "*")
|
||||
,("sub", transBinOp "sub" "-")
|
||||
,("div", transBinOp "div" "/")
|
||||
,("print", transPrint)
|
||||
]
|
||||
|
||||
```
|
||||
|
||||
In our case, we treat built-in special forms as special and not first class, so we will not be able to use them as first-class functions and such.
|
||||
|
||||
We'll translate a Lambda to an anonymous function:
|
||||
|
||||
```
|
||||
transLambda :: [Expr] -> Either TransError JSExpr
|
||||
transLambda = \case
|
||||
[LIST vars, body] -> do
|
||||
vars' <- traverse fromSymbol vars
|
||||
JSLambda vars' <$> (JSReturn <$> translateToJS body)
|
||||
|
||||
vars ->
|
||||
Left $ unlines
|
||||
["Syntax error: unexpected arguments for lambda."
|
||||
,"expecting 2 arguments, the first is the list of vars and the second is the body of the lambda."
|
||||
,"In expression: " ++ show (LIST $ ATOM (Symbol "lambda") : vars)
|
||||
]
|
||||
|
||||
fromSymbol :: Expr -> Either String Name
|
||||
fromSymbol (ATOM (Symbol s)) = Right s
|
||||
fromSymbol e = Left $ "cannot bind value to non symbol type: " ++ show e
|
||||
|
||||
```
|
||||
|
||||
We'll translate let to a definition of a function with the relevant named arguments and call it with the values, thus introducing the variables into that scope:
|
||||
|
||||
```
|
||||
transLet :: [Expr] -> Either TransError JSExpr
|
||||
transLet = \case
|
||||
[LIST binds, body] -> do
|
||||
(vars, vals) <- letParams binds
|
||||
vars' <- traverse fromSymbol vars
|
||||
JSFunCall . JSLambda vars' <$> (JSReturn <$> translateToJS body) <*> traverse translateToJS vals
|
||||
where
|
||||
letParams :: [Expr] -> Either Error ([Expr],[Expr])
|
||||
letParams = \case
|
||||
[] -> pure ([],[])
|
||||
LIST [x,y] : rest -> ((x:) *** (y:)) <$> letParams rest
|
||||
x : _ -> Left ("Unexpected argument in let list in expression:\n" ++ printExpr x)
|
||||
|
||||
vars ->
|
||||
Left $ unlines
|
||||
["Syntax error: unexpected arguments for let."
|
||||
,"expecting 2 arguments, the first is the list of var/val pairs and the second is the let body."
|
||||
,"In expression:\n" ++ printExpr (LIST $ ATOM (Symbol "let") : vars)
|
||||
]
|
||||
```
|
||||
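For example, assuming the JS printer shown earlier, an expression like `(let ((x 1)) (add x 1))` should compile to something along these lines:

```
(function(x) {
    return (x + 1);
})(1)
```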
|
||||
We'll translate an operation that can work on multiple arguments to a chain of binary operations. For example: `(add 1 2 3)` will become `((1 + 2) + 3)`, since `foldl1` associates to the left.
|
||||
|
||||
```
|
||||
transBinOp :: Name -> Name -> [Expr] -> Either TransError JSExpr
|
||||
transBinOp f _ [] = Left $ "Syntax error: '" ++ f ++ "' expected at least 1 argument, got: 0"
|
||||
transBinOp _ _ [x] = translateToJS x
|
||||
transBinOp _ f list = foldl1 (JSBinOp f) <$> traverse translateToJS list
|
||||
```
|
||||
|
||||
And we'll translate a `print` to a call to `console.log`:
|
||||
|
||||
```
|
||||
transPrint :: [Expr] -> Either TransError JSExpr
|
||||
transPrint [expr] = JSFunCall (JSSymbol "console.log") . (:[]) <$> translateToJS expr
|
||||
transPrint xs = Left $ "Syntax error. print expected 1 argument, got: " ++ show (length xs)
|
||||
|
||||
```
|
||||
|
||||
Notice that we could have skipped verifying the syntax if we'd parsed those as special cases of `Expr`.
|
||||
|
||||
* _Exercise 1_ : Translate `Program` to `JSProgram`
|
||||
|
||||
  * _Exercise 2_ : add a special case for `if Expr Expr Expr` and translate it to the `JSIf` case you implemented in the last exercise (a possible sketch follows this list)
|
||||
|
||||
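For the second exercise, a possible builtin is sketched below. It assumes the hypothetical `JSIf` constructor from the earlier exercise and would be registered in `builtins` as `("if", transIf)`; this is my own sketch, not the article's code:

```
-- Verify the arity and translate the three sub-expressions
-- of (if cond then else).
transIf :: [Expr] -> Either TransError JSExpr
transIf = \case
  [cond, t, f] ->
    JSIf <$> translateToJS cond <*> translateToJS t <*> translateToJS f
  xs ->
    Left $ "Syntax error: if expected 3 arguments, got: " ++ show (length xs)
```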
### 7\. Glue it all together
|
||||
|
||||
Finally, we are going to glue this all together. We'll:
|
||||
|
||||
1. Read a file
|
||||
|
||||
2. Parse it to `Expr`
|
||||
|
||||
3. Translate it to `JSExpr`
|
||||
|
||||
4. Emit JavaScript code to the standard output
|
||||
|
||||
We'll also enable a few flags for testing:
|
||||
|
||||
* `--e` will parse and print the abstract representation of the expression (`Expr`)
|
||||
|
||||
* `--pp` will parse and pretty print
|
||||
|
||||
* `--jse` will parse, translate and print the abstract representation of the resulting JS (`JSExpr`)
|
||||
|
||||
* `--ppc` will parse, pretty print and compile
|
||||
|
||||
```
|
||||
main :: IO ()
|
||||
main = getArgs >>= \case
|
||||
[file] ->
|
||||
printCompile =<< readFile file
|
||||
["--e",file] ->
|
||||
either putStrLn print . runExprParser "--e" =<< readFile file
|
||||
["--pp",file] ->
|
||||
either putStrLn (putStrLn . printExpr) . runExprParser "--pp" =<< readFile file
|
||||
["--jse",file] ->
|
||||
either print (either putStrLn print . translateToJS) . runExprParser "--jse" =<< readFile file
|
||||
["--ppc",file] ->
|
||||
either putStrLn (either putStrLn putStrLn) . fmap (compile . printExpr) . runExprParser "--ppc" =<< readFile file
|
||||
_ ->
|
||||
putStrLn $ unlines
|
||||
["Usage: runghc Main.hs [ --e, --pp, --jse, --ppc ] <filename>"
|
||||
,"--e print the Expr"
|
||||
,"--pp pretty print Expr"
|
||||
,"--jse print the JSExpr"
|
||||
,"--ppc pretty print Expr and then compile"
|
||||
]
|
||||
|
||||
printCompile :: String -> IO ()
|
||||
printCompile = either putStrLn putStrLn . compile
|
||||
|
||||
compile :: String -> Either Error String
|
||||
compile str = printJSExpr False 0 <$> (translateToJS =<< runExprParser "compile" str)
|
||||
|
||||
```
|
||||
|
||||
That's it. We have a compiler from our language to JS. Again, you can view the full source file [here][9].
|
||||
|
||||
Running our compiler with the example from the first section yields this JavaScript code:
|
||||
|
||||
```
|
||||
$ runhaskell Lisp.hs example.lsp
|
||||
(function(compose, square, add1) {
|
||||
return (console.log)(((compose)(square, add1))(5));
|
||||
})(function(f, g) {
|
||||
return function(x) {
|
||||
return (f)((g)(x));
|
||||
};
|
||||
}, function(x) {
|
||||
return (x * x);
|
||||
}, function(x) {
|
||||
return (x + 1);
|
||||
})
|
||||
```
|
||||
|
||||
If you have Node.js installed on your computer, you can run this code by running:
|
||||
|
||||
```
|
||||
$ runhaskell Lisp.hs example.lsp | node -p
|
||||
36
|
||||
undefined
|
||||
```
|
||||
|
||||
* _Final exercise_ : instead of compiling an expression, compile a program of multiple expressions.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://gilmi.me/blog/post/2016/10/14/lisp-to-js
|
||||
|
||||
作者:[Gil Mizrahi][a]
|
||||
选题:[oska874][b]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://gilmi.me/home
|
||||
[b]:https://github.com/oska874
|
||||
[1]:https://gilmi.me/blog/authors/Gil
|
||||
[2]:https://gilmi.me/blog/tags/compilers
|
||||
[3]:https://gilmi.me/blog/tags/fp
|
||||
[4]:https://gilmi.me/blog/tags/haskell
|
||||
[5]:https://gilmi.me/blog/tags/lisp
|
||||
[6]:https://gilmi.me/blog/tags/parsing
|
||||
[7]:https://gist.github.com/soupi/d4ff0727ccb739045fad6cdf533ca7dd
|
||||
[8]:https://mrkkrp.github.io/megaparsec/
|
||||
[9]:https://gist.github.com/soupi/d4ff0727ccb739045fad6cdf533ca7dd
|
||||
[10]:https://gilmi.me/blog/post/2016/10/14/lisp-to-js
|
@ -1,4 +1,4 @@
|
||||
[haoqixu翻译中]Writing a Time Series Database from Scratch
|
||||
Writing a Time Series Database from Scratch
|
||||
============================================================
|
||||
|
||||
|
||||
|
@ -1,158 +0,0 @@
|
||||
The 5 Best Linux Distributions for Development
|
||||
============================================================
|
||||
|
||||
![Linux distros for devs](https://www.linux.com/sites/lcom/files/styles/rendered_file/public/king-penguins_1920.jpg?itok=qmy8htw6 "Linux distros for devs")
|
||||
Jack Wallen looks at some of the best Linux distributions for development efforts.[Creative Commons Zero][6]
|
||||
|
||||
When considering Linux, there are so many variables to take into account. What package manager do you wish to use? Do you prefer a modern or old-standard desktop interface? Is ease of use your priority? How flexible do you want your distribution? What task will the distribution serve?
|
||||
|
||||
It is that last question which should often be considered first. Is the distribution going to work as a desktop or a server? Will you be doing network or system audits? Or will you be developing? If you’ve spent much time considering Linux, you know that for every task there are several well-suited distributions. This certainly holds true for developers. Even though Linux, by design, is an ideal platform for developers, there are certain distributions that rise above the rest as great operating systems for developers.
|
||||
|
||||
I want to share what I consider to be some of the best distributions for your development efforts. Although each of these five distributions can be used for general purpose development (with maybe one exception), they each serve a specific purpose. You may or may not be surprised by the selections.
|
||||
|
||||
With that said, let’s get to the choices.
|
||||
|
||||
### Debian
|
||||
|
||||
The [Debian][14] distribution winds up on the top of many a Linux list. With good reason. Debian is the distribution on which so many others are based. It is for this reason that many developers choose Debian. When you develop a piece of software on Debian, chances are very good that package will also work on [Ubuntu][15], [Linux Mint][16], [Elementary OS][17], and a vast collection of other distributions.
|
||||
|
||||
Beyond that obvious answer, Debian also has a very large amount of applications available, by way of the default repositories (Figure 1).
|
||||
|
||||
![Debian apps](https://www.linux.com/sites/lcom/files/styles/rendered_file/public/devel_1.jpg?itok=3mpkS3Kp "Debian apps")
|
||||
Figure 1: Available applications from the standard Debian repositories.[Used with permission][1]
|
||||
|
||||
To make matters even more programmer-friendly, those applications (and their dependencies) are simple to install. Take, for instance, the build-essential package (which can be installed on any distribution derived from Debian). This package includes the likes of dpkg-dev, g++, gcc, hurd-dev, libc-dev, and make: all tools necessary for the development process. The build-essential package can be installed with the command `sudo apt install build-essential`.
|
||||
|
||||
There are hundreds of other developer-specific applications available from the standard repositories, tools such as:
|
||||
|
||||
* Autoconf—configure script builder
|
||||
|
||||
* Autoproject—creates a source package for a new program
|
||||
|
||||
* Bison—general purpose parser generator
|
||||
|
||||
* Bluefish—powerful GUI editor, targeted towards programmers
|
||||
|
||||
* Geany—lightweight IDE
|
||||
|
||||
* Kate—powerful text editor
|
||||
|
||||
* Eclipse—helps builders independently develop tools that integrate with other people’s tools
|
||||
|
||||
The list goes on and on.
|
||||
|
||||
Debian is also as rock-solid a distribution as you’ll find, so there’s very little concern you’ll lose precious work, by way of the desktop crashing. As a bonus, all programs included with Debian have met the [Debian Free Software Guidelines][18], which adheres to the following “social contract”:
|
||||
|
||||
* Debian will remain 100% free.
|
||||
|
||||
* We will give back to the free software community.
|
||||
|
||||
* We will not hide problems.
|
||||
|
||||
  * Our priorities are our users and free software.
|
||||
|
||||
* Works that do not meet our free software standards are included in a non-free archive.
|
||||
|
||||
Also, if you’re new to developing on Linux, Debian has a handy [Programming section in their user manual][19].
|
||||
|
||||
### openSUSE Tumbleweed
|
||||
|
||||
If you’re looking to develop with a cutting-edge, rolling release distribution, [openSUSE][20] offers one of the best in [Tumbleweed][21]. Not only will you be developing with the most up-to-date software available, you’ll be doing so with the help of openSUSE’s amazing administrator tools, which include YaST. If you’re not familiar with YaST (Yet another Setup Tool), it’s an incredibly powerful piece of software that allows you to manage the whole of the platform from one convenient location. From within YaST, you can also install using RPM Groups. Open YaST, click on RPM Groups (software grouped together by purpose), and scroll down to the Development section to see the large number of groups available for installation (Figure 2).
|
||||
|
||||
|
||||
![openSUSE](https://www.linux.com/sites/lcom/files/styles/rendered_file/public/devel_2.jpg?itok=EeCjn1cx "openSUSE")
|
||||
Figure 2: Installing package groups in openSUSE Tumbleweed.[Creative Commons Zero][2]
|
||||
|
||||
openSUSE also allows you to quickly install all the necessary devtools with the simple click of a weblink. Head over to the [rpmdevtools install site][22] and click the link for Tumbleweed. This will automatically add the necessary repository and install rpmdevtools.
|
||||
|
||||
By developing with a rolling release distribution, you know you’re working with the most recent releases of installed software.
|
||||
|
||||
### CentOS
|
||||
|
||||
Let’s face it, [Red Hat Enterprise Linux][23] (RHEL) is the de facto standard for enterprise businesses. If you’re looking to develop for that particular platform, and you can’t afford a RHEL license, you cannot go wrong with [CentOS][24]—which is, effectively, a community version of RHEL. You will find many of the packages found on CentOS to be the same as in RHEL—so once you’re familiar with developing on one, you’ll be fine on the other.
|
||||
|
||||
If you’re serious about developing on an enterprise-grade platform, you cannot go wrong starting with CentOS. And because CentOS is a server-specific distribution, you can more easily develop for a web-centric platform. Instead of developing your work and then migrating it to a server (hosted on a different machine), you can easily have CentOS set up to serve as an ideal host for both developing and testing.
|
||||
|
||||
Looking for software to meet your development needs? You need only open the CentOS Application Installer, where you’ll find a Developer section that includes a dedicated sub-section for Integrated Development Environments (IDEs - Figure 3).
|
||||
|
||||
![CentOS](https://www.linux.com/sites/lcom/files/styles/rendered_file/public/devel_3.jpg?itok=0oe4zj9j "CentOS")
|
||||
Figure 3: Installing a powerful IDE is simple in CentOS.[Used with permission][3]
|
||||
|
||||
CentOS also includes Security Enhanced Linux (SELinux), which makes it easier for you to test your software’s ability to integrate with the same security platform found in RHEL. SELinux can often cause headaches for poorly designed software, so having it at the ready can be a real boon for ensuring your applications work on the likes of RHEL. If you’re not sure where to start with developing on CentOS 7, you can read through the [RHEL 7 Developer Guide][25].
|
||||
|
||||
### Raspbian
|
||||
|
||||
Let’s face it, embedded systems are all the rage. One easy means of working with such systems is via the Raspberry Pi—a tiny footprint computer that has become incredibly powerful and flexible. In fact, the Raspberry Pi has become the hardware used by DIYers all over the planet. Powering those devices is the [Raspbian][26] operating system. Raspbian includes tools like [BlueJ][27], [Geany][28], [Greenfoot][29], [Sense HAT Emulator][30], [Sonic Pi][31], [Thonny Python IDE][32], [Python][33], and [Scratch][34], so you won’t want for the necessary development software. Raspbian also includes a user-friendly desktop UI (Figure 4), to make things even easier.
|
||||
|
||||
![Raspbian](https://www.linux.com/sites/lcom/files/styles/rendered_file/public/devel_4.jpg?itok=VLoYak6L "Raspbian")
|
||||
Figure 4: The Raspbian main menu, showing pre-installed developer software.[Used with permission][4]
|
||||
|
||||
For anyone looking to develop for the Raspberry Pi platform, Raspbian is a must have. If you’d like to give Raspbian a go, without the Raspberry Pi hardware, you can always install it as a VirtualBox virtual machine, by way of the ISO image found [here][35].
|
||||
|
||||
### Pop!_OS
|
||||
|
||||
Don’t let the name fool you, [System76][36]’s [Pop!_OS][37] entry into the world of operating systems is serious. And although what System76 has done to this Ubuntu derivative may not be readily obvious, it is something special.
|
||||
|
||||
The goal of System76 is to create an operating system specific to the developer, maker, and computer science professional. With a newly-designed GNOME theme, Pop!_OS is beautiful (Figure 5) and as highly functional as you would expect from both the hardware maker and desktop designers.
|
||||
|
||||
|
||||
|
||||
![Pop!_OS](https://www.linux.com/sites/lcom/files/styles/rendered_file/public/devel_5.jpg?itok=n4K7k7Gd "Pop!_OS")
|
||||
Figure 5: The Pop!_OS Desktop.[Used with permission][5]
|
||||
|
||||
But what makes Pop!_OS special is the fact that it is being developed by a company dedicated to Linux hardware. This means, when you purchase a System76 laptop, desktop, or server, you know the operating system will work seamlessly with the hardware—on a level no other company can offer. I would predict that, with Pop!_OS, System76 will become the Apple of Linux.
|
||||
|
||||
### Time for work
|
||||
|
||||
In their own way, each of these distributions serves a specific purpose. You have a stable desktop (Debian), a cutting-edge desktop (openSUSE Tumbleweed), a server (CentOS), an embedded platform (Raspbian), and a distribution that seamlessly melds with hardware (Pop!_OS). With the exception of Raspbian, any one of these distributions would serve as an outstanding development platform. Get one installed and start working on your next project with confidence.
|
||||
|
||||
_Learn more about Linux through the free ["Introduction to Linux" ][13]course from The Linux Foundation and edX._
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://www.linux.com/blog/learn/intro-to-linux/2018/1/5-best-linux-distributions-development
|
||||
|
||||
作者:[JACK WALLEN ][a]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://www.linux.com/users/jlwallen
|
||||
[1]:https://www.linux.com/licenses/category/used-permission
|
||||
[2]:https://www.linux.com/licenses/category/creative-commons-zero
|
||||
[3]:https://www.linux.com/licenses/category/used-permission
|
||||
[4]:https://www.linux.com/licenses/category/used-permission
|
||||
[5]:https://www.linux.com/licenses/category/used-permission
|
||||
[6]:https://www.linux.com/licenses/category/creative-commons-zero
|
||||
[7]:https://www.linux.com/files/images/devel1jpg
|
||||
[8]:https://www.linux.com/files/images/devel2jpg
|
||||
[9]:https://www.linux.com/files/images/devel3jpg
|
||||
[10]:https://www.linux.com/files/images/devel4jpg
|
||||
[11]:https://www.linux.com/files/images/devel5jpg
|
||||
[12]:https://www.linux.com/files/images/king-penguins1920jpg
|
||||
[13]:https://training.linuxfoundation.org/linux-courses/system-administration-training/introduction-to-linux
|
||||
[14]:https://www.debian.org/
|
||||
[15]:https://www.ubuntu.com/
|
||||
[16]:https://linuxmint.com/
|
||||
[17]:https://elementary.io/
|
||||
[18]:https://www.debian.org/social_contract
|
||||
[19]:https://www.debian.org/doc/manuals/debian-reference/ch12.en.html
|
||||
[20]:https://www.opensuse.org/
|
||||
[21]:https://en.opensuse.org/Portal:Tumbleweed
|
||||
[22]:https://software.opensuse.org/download.html?project=devel%3Atools&package=rpmdevtools
|
||||
[23]:https://www.redhat.com/en/technologies/linux-platforms/enterprise-linux
|
||||
[24]:https://www.centos.org/
|
||||
[25]:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/pdf/developer_guide/Red_Hat_Enterprise_Linux-7-Developer_Guide-en-US.pdf
|
||||
[26]:https://www.raspberrypi.org/downloads/raspbian/
|
||||
[27]:https://www.bluej.org/
|
||||
[28]:https://www.geany.org/
|
||||
[29]:https://www.greenfoot.org/
|
||||
[30]:https://www.raspberrypi.org/blog/sense-hat-emulator/
|
||||
[31]:http://sonic-pi.net/
|
||||
[32]:http://thonny.org/
|
||||
[33]:https://www.python.org/
|
||||
[34]:https://scratch.mit.edu/
|
||||
[35]:http://rpf.io/x86iso
|
||||
[36]:https://system76.com/
|
||||
[37]:https://system76.com/pop
|
@ -1,100 +0,0 @@
|
||||
translating by dianbanjiu
|
||||
Download an OS with GNOME Boxes
|
||||
======
|
||||
|
||||
![](https://fedoramagazine.org/wp-content/uploads/2018/06/boxes-install-os-816x345.jpg)
|
||||
|
||||
Boxes is the GNOME application for running virtual machines. Recently Boxes added a new feature that makes it easier to run different Linux distributions. You can now automatically install these distros in Boxes, as well as operating systems like FreeBSD and FreeDOS. The list even includes Red Hat Enterprise Linux. The Red Hat Developer Program includes a [no-cost subscription to Red Hat Enterprise Linux][1]. With a [Red Hat Developer][2] account, Boxes can automatically set up a RHEL virtual machine entitled to the Developer Suite subscription. Here’s how it works.
|
||||
|
||||
### Red Hat Enterprise Linux
|
||||
|
||||
To create a Red Hat Enterprise Linux virtual machine, launch Boxes and click New. Select Download an OS from the source selection list. At the top, pick Red Hat Enterprise Linux. This opens a web form at [developers.redhat.com][2]. Sign in with an existing Red Hat Developer Account, or create a new one.
|
||||
|
||||
![][3]
|
||||
|
||||
If this is a new account, Boxes requires some additional information before continuing. This step is required to enable the Developer Subscription on the account. Be sure to [accept the Terms & Conditions][4] now too. This saves a step later during registration.
|
||||
|
||||
![][5]
|
||||
|
||||
Click Submit and the installation disk image starts to download. The download can take a while, depending on your Internet connection. This is a great time to go fix a cup of tea or coffee!
|
||||
|
||||
![][6]
|
||||
|
||||
Once the media has downloaded (conveniently to ~/Downloads), Boxes offers to perform an Express Install. Fill in the account and password information and click Continue. Click Create after you verify the virtual machine details. The Express Install automatically performs the entire installation! (Now is a great time to enjoy a second cup of tea or coffee, if so inclined.)
|
||||
|
||||
![][7]
|
||||
|
||||
![][8]
|
||||
|
||||
![][9]
|
||||
|
||||
Once the installation is done, the virtual machine reboots and logs directly into the desktop. Inside the virtual machine, launch the Red Hat Subscription Manager via the Applications menu, under System Tools. Enter the root password to launch the utility.
|
||||
|
||||
![][10]
|
||||
|
||||
Click the Register button and follow the steps through the registration assistant. Log in with your Red Hat Developers account when prompted.
|
||||
|
||||
![][11]
|
||||
|
||||
![][12]
|
||||
|
||||
Now you can download and install updates through any normal update method, such as yum or GNOME Software.
|
||||
|
||||
![][13]
|
||||
|
||||
### FreeDOS anyone?
|
||||
|
||||
Boxes can install a lot more than just Red Hat Enterprise Linux, too. As a front end to KVM and qemu, Boxes supports a wide variety of operating systems. Using [libosinfo][14], Boxes can automatically download (and in some cases, install) quite a few different ones.
|
||||
|
||||
![][15]
|
||||
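If you are curious what else is available, the same database can be inspected from a terminal, assuming the libosinfo command line tools are installed on your system:

```
$ osinfo-query os
```

This lists every operating system in the local osinfo database.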
|
||||
To install an OS from the list, select it and finish creating the new virtual machine. Some OSes, like FreeDOS, do not support an Express Install. In those cases the virtual machine boots from the installation media. You can then manually install.
|
||||
|
||||
![][16]
|
||||
|
||||
![][17]
|
||||
|
||||
### Popular operating systems on Boxes
|
||||
|
||||
These are just a few of the popular choices available in Boxes today.
|
||||
|
||||
![][18]![][19]![][20]![][21]![][22]![][23]
|
||||
|
||||
Fedora updates its osinfo-db package regularly. Be sure to check back frequently for new OS options.
|
||||
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://fedoramagazine.org/download-os-gnome-boxes/
|
||||
|
||||
作者:[Link Dupont][a]
|
||||
选题:[lujun9972](https://github.com/lujun9972)
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://fedoramagazine.org/author/linkdupont/
|
||||
[1]:https://developers.redhat.com/blog/2016/03/31/no-cost-rhel-developer-subscription-now-available/
|
||||
[2]:http://developers.redhat.com
|
||||
[3]:https://fedoramagazine.org/wp-content/uploads/2018/05/Screenshot-from-2018-05-25-14-33-13.png
|
||||
[4]:https://www.redhat.com/wapps/tnc/termsack?event%5B%5D=signIn
|
||||
[5]:https://fedoramagazine.org/wp-content/uploads/2018/05/Screenshot-from-2018-05-25-14-34-37.png
|
||||
[6]:https://fedoramagazine.org/wp-content/uploads/2018/05/Screenshot-from-2018-05-25-14-37-27.png
|
||||
[7]:https://fedoramagazine.org/wp-content/uploads/2018/05/Screenshot-from-2018-05-25-15-09-11.png
|
||||
[8]:https://fedoramagazine.org/wp-content/uploads/2018/05/Screenshot-from-2018-05-25-15-15-19-1024x815.png
|
||||
[9]:https://fedoramagazine.org/wp-content/uploads/2018/05/Screenshot-from-2018-05-25-15-21-53-1024x815.png
|
||||
[10]:https://fedoramagazine.org/wp-content/uploads/2018/05/Screenshot-from-2018-05-25-15-26-29-1024x815.png
|
||||
[11]:https://fedoramagazine.org/wp-content/uploads/2018/05/Screenshot-from-2018-05-25-15-30-48-1024x815.png
|
||||
[12]:https://fedoramagazine.org/wp-content/uploads/2018/05/Screenshot-from-2018-05-25-15-31-17-1024x815.png
|
||||
[13]:https://fedoramagazine.org/wp-content/uploads/2018/05/Screenshot-from-2018-05-25-15-32-29-1024x815.png
|
||||
[14]:https://libosinfo.org
|
||||
[15]:https://fedoramagazine.org/wp-content/uploads/2018/05/Screenshot-from-2018-05-25-20-02-56.png
|
||||
[16]:https://fedoramagazine.org/wp-content/uploads/2018/05/Screenshot-from-2018-05-25-15-40-25.png
|
||||
[17]:https://fedoramagazine.org/wp-content/uploads/2018/05/Screenshot-from-2018-05-25-15-43-02-1024x815.png
|
||||
[18]:https://fedoramagazine.org/wp-content/uploads/2018/05/Screenshot-from-2018-05-25-16-55-20-1024x815.png
|
||||
[19]:https://fedoramagazine.org/wp-content/uploads/2018/05/Screenshot-from-2018-05-25-16-28-28-1024x815.png
|
||||
[20]:https://fedoramagazine.org/wp-content/uploads/2018/05/Screenshot-from-2018-05-25-16-11-43-1024x815.png
|
||||
[21]:https://fedoramagazine.org/wp-content/uploads/2018/05/Screenshot-from-2018-05-25-16-58-09-1024x815.png
|
||||
[22]:https://fedoramagazine.org/wp-content/uploads/2018/05/Screenshot-from-2018-05-25-17-46-38-1024x815.png
|
||||
[23]:https://fedoramagazine.org/wp-content/uploads/2018/05/Screenshot-from-2018-05-25-18-34-11-1024x815.png
|
@ -1,188 +0,0 @@
|
||||
translating by Flowsnow
|
||||
|
||||
How To Rename Multiple Files At Once In Linux
|
||||
======
|
||||
|
||||
![](https://www.ostechnix.com/wp-content/uploads/2018/06/Rename-Multiple-Files-720x340.png)
|
||||
|
||||
As you may already know, we use the **mv** command to rename or move files and directories in Unix-like operating systems. But the mv command doesn’t support renaming multiple files at once. Worry not. In this tutorial, we are going to learn to rename multiple files at once using the **mmv** command in Linux. This command is used to move, copy, append and rename files in bulk using standard wildcards in Unix-like operating systems.
|
||||
|
||||
### Rename Multiple Files At Once In Linux
|
||||
|
||||
The mmv utility is available in the default repositories of Debian-based systems. To install it on Debian, Ubuntu, or Linux Mint, run the following command:
|
||||
```
|
||||
$ sudo apt-get install mmv
|
||||
|
||||
```
|
||||
|
||||
Let us say, you have the following files in your current directory.
|
||||
```
|
||||
$ ls
|
||||
a1.txt a2.txt a3.txt
|
||||
|
||||
```
|
||||
|
||||
Now you want to rename all files that start with the letter “a” to “b”. Of course, you could do this manually in a few seconds. But just think: what if you had hundreds of files to rename? It would be quite a time-consuming process. This is where the **mmv** command comes in handy.
|
||||
|
||||
To rename all files starting with the letter “a” to “b”, simply run:
|
||||
```
|
||||
$ mmv a\* b\#1
|
||||
|
||||
```
|
||||
|
||||
Let us check if the files have been renamed or not.
|
||||
```
|
||||
$ ls
|
||||
b1.txt b2.txt b3.txt
|
||||
|
||||
```
|
||||
|
||||
As you can see, all files starting with the letter “a” (i.e. a1.txt, a2.txt, a3.txt) have been renamed to b1.txt, b2.txt, b3.txt.
|
||||
|
||||
**Explanation**
|
||||
|
||||
In the above example, the first parameter (a\\*) is the ‘from’ pattern and the second parameter (b\\#1) is the ‘to’ pattern. mmv will look for any filenames starting with the letter ‘a’ and rename the matched files according to the second parameter, i.e. the ‘to’ pattern. We use wildcards, such as ‘*’, ‘?’ and ‘[]’, to match one or more arbitrary characters. Please be mindful that you must escape the wildcard characters, otherwise they will be expanded by the shell and mmv won’t see them.
|
||||
|
||||
The ‘#1’ in the ‘to’ pattern is a wildcard index. It matches the first wildcard found in the ‘from’ pattern. A ‘#2’ in the ‘to’ pattern would match the second wildcard, and so on. In our example, we have only one wildcard (the asterisk), so we write #1. The hash sign should be escaped as well. Alternatively, you can enclose the patterns in quotes, which avoids the escaping.
|
||||
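As an illustration of wildcard indexes, a pattern with two wildcards can swap the two matched parts of a filename. The file names here are made up for the example:

```
$ ls
foo_bar.txt
$ mmv '*_*.txt' '#2_#1.txt'
$ ls
bar_foo.txt
```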
|
||||
You can even rename all files with a certain extension to a different extension. For example, to rename all **.txt** files to **.doc** file format in the current directory, simply run:
|
||||
```
|
||||
$ mmv \*.txt \#1.doc
|
||||
|
||||
```
|
||||
|
||||
Here is an another example. Let us say you have the following files.
|
||||
```
|
||||
$ ls
|
||||
abcd1.txt abcd2.txt abcd3.txt
|
||||
|
||||
```
|
||||
|
||||
You want to replace the first occurrence of **abc** with **xyz** in all files in the current directory. How would you do that?
|
||||
|
||||
Simple.
|
||||
```
|
||||
$ mmv '*abc*' '#1xyz#2'
|
||||
|
||||
```
|
||||
|
||||
Please note that in the above example, I have enclosed the patterns in single quotes.
|
||||
|
||||
Let us check if “abc” is actually replaced with “xyz” or not.
|
||||
```
|
||||
$ ls
|
||||
xyzd1.txt xyzd2.txt xyzd3.txt
|
||||
|
||||
```
|
||||
|
||||
See? The files **abcd1.txt** , **abcd2.txt** , and **abcd3.txt** have been renamed to **xyzd1.txt** , **xyzd2.txt** , and **xyzd3.txt**.
|
||||
|
||||
Another notable feature of the mmv command is that you can simply print the result instead of renaming the files, using the **-n** option like below.
|
||||
```
|
||||
$ mmv -n a\* b\#1
|
||||
a1.txt -> b1.txt
|
||||
a2.txt -> b2.txt
|
||||
a3.txt -> b3.txt
|
||||
|
||||
```
|
||||
|
||||
This way you can verify what the mmv command would actually do before renaming the files.
|
||||
|
||||
For more details, refer to the man page:
|
||||
```
|
||||
$ man mmv
|
||||
|
||||
```
|
||||
|
||||
**Update:**
|
||||
|
||||
The **Thunar file manager** has a built-in **Bulk Rename** option by default. If you’re using Thunar, renaming files is much easier than with the mmv command.
|
||||
|
||||
Thunar is available in the default repositories of most Linux distributions.
|
||||
|
||||
To install it on Arch-based systems, run:
|
||||
```
|
||||
$ sudo pacman -S thunar
|
||||
|
||||
```
|
||||
|
||||
On RHEL, CentOS:
|
||||
```
|
||||
$ sudo yum install thunar
|
||||
|
||||
```
|
||||
|
||||
On Fedora:
|
||||
```
|
||||
$ sudo dnf install thunar
|
||||
|
||||
```
|
||||
|
||||
On openSUSE:
|
||||
```
|
||||
$ sudo zypper install thunar
|
||||
|
||||
```
|
||||
|
||||
On Debian, Ubuntu, Linux Mint:
|
||||
```
|
||||
$ sudo apt-get install thunar
|
||||
|
||||
```
|
||||
|
||||
Once installed, you can launch the Bulk Rename utility from the menu or from the application launcher. To launch it from the Terminal, use the following command:
|
||||
```
|
||||
$ thunar -B
|
||||
|
||||
```
|
||||
|
||||
This is what Bulk Rename looks like.
|
||||
|
||||
![][2]
|
||||
|
||||
Click the plus sign and choose the list of files you want to rename. Bulk rename can rename the name of the files, the suffix of the files or both the name and the suffix of the files. Thunar currently supports the following Bulk Renamers:
|
||||
|
||||
* Insert Date or Time
|
||||
|
||||
* Insert or Overwrite
|
||||
|
||||
* Numbering
|
||||
|
||||
* Remove Characters
|
||||
|
||||
* Search & Replace
|
||||
|
||||
* Uppercase / Lowercase
|
||||
|
||||
|
||||
|
||||
|
||||
When you select one of these criteria from the picklist, you will see a preview of your changes in the New Name column, as shown in the below screenshot.
|
||||
|
||||
![][3]
|
||||
|
||||
Once you choose the criteria, click on **Rename Files** option to rename the files.
|
||||
|
||||
You can also open bulk renamer from within Thunar by selecting two or more files. After choosing the files, press F2 or right click and choose **Rename**.
|
||||
|
||||
And, that’s all for now. Hope this was useful. More good stuffs to come. Stay tuned!
|
||||
|
||||
Cheers!
|
||||
|
||||
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://www.ostechnix.com/how-to-rename-multiple-files-at-once-in-linux/
|
||||
|
||||
作者:[SK][a]
|
||||
选题:[lujun9972](https://github.com/lujun9972)
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://www.ostechnix.com/author/sk/
|
||||
|
||||
[2]:http://www.ostechnix.com/wp-content/uploads/2018/06/bulk-rename.png
|
||||
[3]:http://www.ostechnix.com/wp-content/uploads/2018/06/bulk-rename-1.png
|
312
sources/tech/20180707 Version Control Before Git with CVS.md
Normal file
@ -0,0 +1,312 @@
|
||||
Version Control Before Git with CVS
|
||||
======
|
||||
Github was launched in 2008. If your software engineering career, like mine, is no older than Github, then Git may be the only version control software you have ever used. While people sometimes grouse about its steep learning curve or unintuitive interface, Git has become everyone’s go-to for version control. In Stack Overflow’s 2015 developer survey, 69.3% of respondents used Git, almost twice as many as used the second-most-popular version control system, Subversion. After 2015, Stack Overflow stopped asking developers about the version control systems they use, perhaps because Git had become so popular that the question was uninteresting.
|
||||
|
||||
Git itself is not much older than Github. Linus Torvalds released the first version of Git in 2005. Though today younger developers might have a hard time conceiving of a world where the term “version control software” didn’t more or less just mean Git, such a world existed not so long ago. There were lots of alternatives to choose from. Open source developers preferred Subversion, enterprises and video game companies used Perforce (some still do), while the Linux kernel project famously relied on a version control system called BitKeeper.
|
||||
|
||||
Some of these systems, particularly BitKeeper, might feel familiar to a young Git user transported back in time. Most would not. BitKeeper aside, the version control systems that came before Git worked according to a fundamentally different paradigm. In a taxonomy offered by Eric Sink, author of Version Control by Example, Git is a third-generation version control system, while most of Git’s predecessors, the systems popular in the 1990s and early 2000s, are second-generation version control systems. Where third-generation version control systems are distributed, second-generation version control systems are centralized. You have almost certainly heard Git described as a “distributed” version control system before. I never quite understood the distributed/centralized distinction, at least not until I installed and experimented with a centralized second-generation version control system myself.
|
||||
|
||||
The system I installed was CVS. CVS, short for Concurrent Versions System, was the very first second-generation version control system. It was also the most popular version control system for about a decade until it was replaced in 2000 by Subversion. Even then, Subversion was supposed to be “CVS but better,” which only underscores how dominant CVS had become throughout the 1990s.
|
||||
|
||||
CVS was first developed in 1986 by a Dutch computer scientist named Dick Grune, who was looking for a way to collaborate with his students on a compiler project. CVS was initially little more than a collection of shell scripts wrapping RCS (Revision Control System), a first-generation version control system that Grune wanted to improve. RCS works according to a pessimistic locking model, meaning that no two programmers can work on a single file at once. In order to edit a file, you have to first ask RCS for an exclusive lock on the file, which you keep until you are finished editing. If someone else is already editing a file you need to edit, you have to wait. CVS improved on RCS and ushered in the second generation of version control systems by trading the pessimistic locking model for an optimistic one. Programmers could now edit the same file at the same time, merging their edits and resolving any conflicts later. (Brian Berliner, an engineer who later took over the CVS project, wrote a very readable [paper][1] about CVS’ innovations in 1990.)
|
||||
|
||||
In that sense, CVS wasn’t all that different from Git, which also works according to an optimistic model. But that’s where the similarities end. In fact, when Linus Torvalds was developing Git, one of his guiding principles was WWCVSND, or “What Would CVS Not Do.” Whenever he was in doubt about a decision, he strove to choose the option that had not been chosen in the design of CVS. So even though CVS predates Git by over a decade, it influenced Git as a kind of negative template.
|
||||
|
||||
I’ve really enjoyed playing around with CVS. I think there’s no better way to understand why Git’s distributed nature is such an improvement on what came before. So I invite you to come along with me on an exciting journey and spend the next ten minutes of your life learning about a piece of software nobody has used in the last decade. (See correction.)
|
||||
|
||||
### Getting Started with CVS
|
||||
|
||||
Instructions for installing CVS can be found on the [project’s homepage][2]. On MacOS, you can install CVS using Homebrew.
|
||||
|
||||
Since CVS is centralized, it distinguishes between the client-side universe and the server-side universe in a way that something like Git does not. The distinction is not so pronounced that there are different executables. But in order to start using CVS, even on your own machine, you’ll have to set up the CVS backend.
|
||||
|
||||
The CVS backend, the central store for all your code, is called the repository. Whereas in Git you would typically have a repository for every project, in CVS the repository holds all of your projects. There is one central repository for everything, though there are ways to work with only one project at a time.
|
||||
|
||||
To create a local repository, you run the `init` command. You would do this somewhere global like your home directory.
|
||||
|
||||
```
|
||||
$ cvs -d ~/sandbox init
|
||||
```
|
||||
|
||||
CVS allows you to pass options to either the `cvs` command itself or to the `init` subcommand. Options that appear after the `cvs` command are global in nature, while options that appear after the subcommand are specific to the subcommand. In this case, the `-d` flag is global. Here it happens to tell CVS where we want to create our repository, but in general the `-d` flag points to the location of the repository we want to use for any given action. It can be tedious to supply the `-d` flag all the time, so the `CVSROOT` environment variable can be set instead.
|
||||
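For example, you could set the variable once in your shell session and then omit the flag from this and subsequent commands:

```
$ export CVSROOT=~/sandbox
$ cvs init
```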
|
||||
Since we’re working locally, we’ve just passed a path for our `-d` argument, but we could also have included a hostname.
|
||||
|
||||
The command creates a directory called `sandbox` in your home directory. If you list the contents of `sandbox`, you’ll find that it contains another directory called `CVSROOT`. This directory, not to be confused with the environment variable, holds administrative files for the repository.
|
||||
|
||||
Congratulations! You’ve just created your first CVS repository.
|
||||
|
||||
### Checking In Code
|
||||
|
||||
Let’s say that you’ve decided to keep a list of your favorite colors. You are an artistically inclined but extremely forgetful person. You type up your list of colors and save it as a file called `favorites.txt`:
|
||||
|
||||
```
|
||||
blue
|
||||
orange
|
||||
green
|
||||
|
||||
definitely not yellow
|
||||
```
|
||||
|
||||
Let’s also assume that you’ve saved your file in a new directory called `colors`. Now you’d like to put your favorite color list under version control, because fifty years from now it will be interesting to look back and see how your tastes changed through time.
|
||||
|
||||
In order to do that, you will have to import your directory as a new CVS project. You can do that using the `import` command:
|
||||
|
||||
```
|
||||
$ cvs -d ~/sandbox import -m "" colors colors initial
|
||||
N colors/favorites.txt
|
||||
|
||||
No conflicts created by this import
|
||||
```
|
||||
|
||||
Here we are specifying the location of our repository with the `-d` flag again. The remaining arguments are passed to the `import` subcommand. We have to provide a message, but here we don’t really need one, so we’ve left it blank. The next argument, `colors`, specifies the name of our new directory in the repository; here we’ve just used the same name as the directory we are in. The last two arguments specify the vendor tag and the release tag respectively. We’ll talk more about tags in a minute.
|
||||
|
||||
You’ve just pulled your “colors” project into the CVS repository. There are a couple different ways to go about bringing code into CVS, but this is the method recommended by [Pragmatic Version Control Using CVS][3], the Pragmatic Programmer book about CVS. What makes this method a little awkward is that you then have to check out your work fresh, even though you’ve already got an existing `colors` directory. Instead of using that directory, you’re going to delete it and then check out the version that CVS already knows about:
|
||||
|
||||
```
|
||||
$ cvs -d ~/sandbox co colors
|
||||
cvs checkout: Updating colors
|
||||
U colors/favorites.txt
|
||||
```
|
||||
|
||||
This will create a new directory, also called `colors`. In this directory you will find your original `favorites.txt` file along with a directory called `CVS`. The `CVS` directory is basically CVS’ equivalent of the `.git` directory in every Git repository.
|
||||
|
||||
### Making Changes
|
||||
|
||||
Get ready for a trip.
|
||||
|
||||
Just like Git, CVS has a `status` subcommand:
|
||||
|
||||
```
|
||||
$ cvs status
|
||||
cvs status: Examining .
|
||||
===================================================================
|
||||
File: favorites.txt Status: Up-to-date
|
||||
|
||||
Working revision: 1.1.1.1 2018-07-06 19:27:54 -0400
|
||||
Repository revision: 1.1.1.1 /Users/sinclairtarget/sandbox/colors/favorites.txt,v
|
||||
Commit Identifier: fD7GYxt035GNg8JA
|
||||
Sticky Tag: (none)
|
||||
Sticky Date: (none)
|
||||
Sticky Options: (none)
|
||||
```
|
||||
|
||||
This is where things start to look alien. CVS doesn’t have commit objects. In the above, there is something called a “Commit Identifier,” but this might be only a relatively recent addition; no mention of a “Commit Identifier” appears in Pragmatic Version Control Using CVS, which was published in 2003. (The last update to CVS was released in 2008.)
|
||||
|
||||
Whereas with Git you’d talk about the version of a file associated with commit `45de392`, in CVS files are versioned separately. The first version of your file is version 1.1, the next version is 1.2, and so on. When branches are involved, extra numbers are appended, so you might end up with something like the `1.1.1.1` above, which appears to be the default in our case even though we haven’t created any branches.
|
||||
|
||||
If you were to run `cvs log` (equivalent to `git log`) in a project with lots of files and commits, you’d see an individual history for each file. You might have a file at version 1.2 and a file at version 1.14 in the same project.
|
||||
|
||||
Let’s go ahead and make a change to version 1.1 of our `favorites.txt` file:
|
||||
|
||||
```
|
||||
blue
|
||||
orange
|
||||
green
|
||||
+cyan
|
||||
|
||||
definitely not yellow
|
||||
```
|
||||
|
||||
Once we’ve made the change, we can run `cvs diff` to see what CVS thinks we’ve done:
|
||||
|
||||
```
|
||||
$ cvs diff
|
||||
cvs diff: Diffing .
|
||||
Index: favorites.txt
|
||||
===================================================================
|
||||
RCS file: /Users/sinclairtarget/sandbox/colors/favorites.txt,v
|
||||
retrieving revision 1.1.1.1
|
||||
diff -r1.1.1.1 favorites.txt
|
||||
3a4
|
||||
> cyan
|
||||
```
|
||||
|
||||
CVS recognizes that we added a new line containing the color “cyan” to the file. (Actually, it says we’ve made changes to the “RCS” file; you can see that CVS never fully escaped its original association with RCS.) The diff we are being shown is the diff between the copy of `favorites.txt` in our working directory and the 1.1.1.1 version stored in the repository.
|
||||
|
||||
In order to update the version stored in the repository, we have to commit the change. In Git, this would be a multi-step process. We’d have to stage the change so that it appears in our index. Then we’d commit the change. Finally, to make the change visible to anyone else, we’d have to push the commit up to the origin repository.
|
||||
|
||||
In CVS, all of these things happen when you run `cvs commit`. CVS just bundles up all the changes it can find and puts them in the repository:
|
||||
|
||||
```
|
||||
$ cvs commit -m "Add cyan to favorites."
|
||||
cvs commit: Examining .
|
||||
/Users/sinclairtarget/sandbox/colors/favorites.txt,v <-- favorites.txt
|
||||
new revision: 1.2; previous revision: 1.1
|
||||
```
|
||||
|
||||
I’m so used to Git that this strikes me as terrifying. Without an opportunity to stage changes, any old thing that you’ve touched in your working directory might end up as part of the public repository. Did you passive-aggressively rewrite a coworker’s poorly implemented function out of cathartic necessity, never intending for him to know? Too bad, he now thinks you’re a dick. You also can’t edit your commits before pushing them, since a commit is a push. Do you enjoy spending 40 minutes repeatedly running `git rebase -i` until your local commit history flows like the derivation of a mathematical proof? Sorry, you can’t do that here, and everyone is going to find out that you don’t actually write your tests first.
|
||||
|
||||
But I also now understand why so many people find Git needlessly complicated. If `cvs commit` is what you were used to, then I’m sure staging and pushing changes would strike you as a pointless chore.
|
||||
|
||||
When people talk about Git being a “distributed” system, this is primarily the difference they mean. In CVS, you can’t make commits locally. A commit is a submission of code to the central repository, so it’s not something you can do without a connection. All you’ve got locally is your working directory. In Git, you have a full-fledged local repository, so you can make commits all day long even while disconnected. And you can edit those commits, revert, branch, and cherry pick as much as you want, without anybody else having to know.
|
||||
|
||||
Since commits were a bigger deal, CVS users often made them infrequently. Commits would contain as many changes as today we might expect to see in a ten-commit pull request. This was especially true if commits triggered a CI build and an automated test suite.
|
||||
|
||||
If we now run `cvs status`, we can see that we have a new version of our file:
|
||||
|
||||
```
|
||||
$ cvs status
|
||||
cvs status: Examining .
|
||||
===================================================================
|
||||
File: favorites.txt Status: Up-to-date
|
||||
|
||||
Working revision: 1.2 2018-07-06 21:18:59 -0400
|
||||
Repository revision: 1.2 /Users/sinclairtarget/sandbox/colors/favorites.txt,v
|
||||
Commit Identifier: pQx5ooyNk90wW8JA
|
||||
Sticky Tag: (none)
|
||||
Sticky Date: (none)
|
||||
Sticky Options: (none)
|
||||
```
|
||||
|
||||
### Merging
|
||||
|
||||
As mentioned above, in CVS you can edit a file that someone else is already editing. That was CVS’ big improvement on RCS. What happens when you need to bring your changes back together?
|
||||
|
||||
Let’s say that you have invited some friends to add their favorite colors to your list. While they are adding their colors, you decide that you no longer like the color green and remove it from the list.
|
||||
|
||||
When you go to commit your changes, you might discover that CVS notices a problem:
|
||||
|
||||
```
|
||||
$ cvs commit -m "Remove green"
|
||||
cvs commit: Examining .
|
||||
cvs commit: Up-to-date check failed for `favorites.txt'
|
||||
cvs [commit aborted]: correct above errors first!
|
||||
```
|
||||
|
||||
It looks like your friends committed their changes first. So your version of `favorites.txt` is not up-to-date with the version in the repository. If you run `cvs status`, you’ll see that your local copy of `favorites.txt` is version 1.2 with some local changes, but the repository version is 1.3:
|
||||
|
||||
```
|
||||
$ cvs status
|
||||
cvs status: Examining .
|
||||
===================================================================
|
||||
File: favorites.txt Status: Needs Merge
|
||||
|
||||
Working revision: 1.2 2018-07-07 10:42:43 -0400
|
||||
Repository revision: 1.3 /Users/sinclairtarget/sandbox/colors/favorites.txt,v
|
||||
Commit Identifier: 2oZ6n0G13bDaldJA
|
||||
Sticky Tag: (none)
|
||||
Sticky Date: (none)
|
||||
Sticky Options: (none)
|
||||
```
|
||||
|
||||
You can run `cvs diff` to see exactly what the differences between 1.2 and 1.3 are:
|
||||
|
||||
```
|
||||
$ cvs diff -r HEAD favorites.txt
|
||||
Index: favorites.txt
|
||||
===================================================================
|
||||
RCS file: /Users/sinclairtarget/sandbox/colors/favorites.txt,v
|
||||
retrieving revision 1.3
|
||||
diff -r1.3 favorites.txt
|
||||
3d2
|
||||
< green
|
||||
7,10d5
|
||||
<
|
||||
< pink
|
||||
< hot pink
|
||||
< bubblegum pink
|
||||
```
|
||||
|
||||
It seems that our friends really like pink. In any case, they’ve edited a different part of the file than we have, so the changes are easy to merge. CVS can do that for us when we run `cvs update`, which is similar to `git pull`:
|
||||
|
||||
```
|
||||
$ cvs update
|
||||
cvs update: Updating .
|
||||
RCS file: /Users/sinclairtarget/sandbox/colors/favorites.txt,v
|
||||
retrieving revision 1.2
|
||||
retrieving revision 1.3
|
||||
Merging differences between 1.2 and 1.3 into favorites.txt
|
||||
M favorites.txt
|
||||
```
|
||||
|
||||
If you now take a look at `favorites.txt`, you’ll find that it has been modified to include the changes that your friends made to the file. Your changes are still there too. Now you are free to commit the file:
|
||||
|
||||
```
|
||||
$ cvs commit
|
||||
cvs commit: Examining .
|
||||
/Users/sinclairtarget/sandbox/colors/favorites.txt,v <-- favorites.txt
|
||||
new revision: 1.4; previous revision: 1.3
|
||||
```
|
||||
|
||||
The end result is what you’d get in Git by running `git pull --rebase`. Your changes have been added on top of your friends’ changes. There is no “merge commit.”
|
||||
|
||||
Sometimes, changes to the same file might be incompatible. If your friends had changed “green” to “olive,” for example, that would have conflicted with your change removing “green” altogether. In the early days of CVS, this was exactly the kind of case that caused people to worry that CVS wasn’t safe; RCS’ pessimistic locking ensured that such a case could never arise. But CVS guarantees safety by making sure that nobody’s changes get overwritten automatically. You have to tell CVS which change you want to keep going forward, so when you run `cvs update`, CVS marks up the file with both changes in the same way that Git does when Git detects a merge conflict. You then have to manually edit the file and pick the change you want to keep.
|
||||
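In our example, the marked-up region of `favorites.txt` would look something like this; the file name and revision number in the markers depend on your own history:

```
blue
orange
<<<<<<< favorites.txt
=======
olive
>>>>>>> 1.4
```

Everything between `<<<<<<<` and `=======` is your version (here, nothing, since we deleted the line), and everything between `=======` and `>>>>>>>` is the repository's version.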
|
||||
The interesting thing to note here is that merge conflicts have to be fixed before you can commit. This is another consequence of CVS’ centralized nature. In Git, you don’t have to worry about resolving merges until you push the commits you’ve got locally.
|
||||
|
||||
Since CVS doesn’t have easily addressable commit objects, the only way to group a collection of changes is to mark a particular working directory state with a tag.
|
||||
|
||||
Creating a tag is easy:
|
||||
|
||||
```
|
||||
$ cvs tag VERSION_1_0
|
||||
cvs tag: Tagging .
|
||||
T favorites.txt
|
||||
```
|
||||
|
||||
You’ll later be able to return files to this state by running `cvs update` and passing the tag to the `-r` flag:
|
||||
|
||||
```
|
||||
$ cvs update -r VERSION_1_0
|
||||
cvs update: Updating .
|
||||
U favorites.txt
|
||||
```
|
||||
|
||||
Because you need a tag to rewind to an earlier working directory state, CVS encourages a lot of preemptive tagging. Before major refactors, for example, you might create a `BEFORE_REFACTOR_01` tag that you could later use if the refactor went wrong. People also used tags if they wanted to generate project-wide diffs. Basically, all the things we routinely do today with commit hashes have to be anticipated and planned for with CVS, since you needed to have the tags available already.
|
||||
|
||||
Branches can be created in CVS, sort of. Branches are just a special kind of tag:
|
||||
|
||||
```
|
||||
$ cvs rtag -b TRY_EXPERIMENTAL_THING colors
|
||||
cvs rtag: Tagging colors
|
||||
```
|
||||
|
||||
That only creates the branch (in full view of everyone, by the way), so you still need to switch to it using `cvs update`:
|
||||
|
||||
```
|
||||
$ cvs update -r TRY_EXPERIMENTAL_THING
|
||||
```
|
||||
|
||||
The above commands switch onto the new branch in your current working directory, but Pragmatic Version Control Using CVS actually advises that you create a new directory to hold your new branch. Presumably its authors found switching directories easier than switching branches in CVS.
|
||||
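That is, rather than switching your existing working directory onto the branch, you might check the branch out into a directory of its own, along these lines (the second `-d` here is the `checkout` subcommand's directory option, not the global repository flag):

```
$ cvs -d ~/sandbox checkout -r TRY_EXPERIMENTAL_THING -d colors-experimental colors
```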
|
||||
Pragmatic Version Control Using CVS also advises against creating branches off of an existing branch. They recommend only creating branches off of the mainline branch, which in Git is known as `master`. In general, branching was considered an “advanced” CVS skill. In Git, you might start a new branch for almost any trivial reason, but in CVS branching was typically used only when really necessary, such as for releases.
|
||||
|
||||
A branch could later be merged back into the mainline using `cvs update` and the `-j` flag:
|
||||
|
||||
```
|
||||
$ cvs update -j TRY_EXPERIMENTAL_THING
|
||||
```
|
||||
|
||||
### Thanks for the Commit Histories
|
||||
|
||||
In 2007, Linus Torvalds gave [a talk][4] about Git at Google. Git was very new then, so the talk was basically an attempt to persuade a roomful of skeptical programmers that they should use Git, even though Git was so different from anything then available. If you haven’t already seen the talk, I highly encourage you to watch it. Linus is an entertaining speaker, even if he never fails to be his brash self. He does an excellent job of explaining why the distributed model of version control is better than the centralized one. A lot of his criticism is reserved for CVS in particular.
|
||||
|
||||
Git is a [complex tool][5]. Learning it can be a frustrating experience. But I’m also continually amazed at the things that Git can do. In comparison, CVS is simple and straightforward, though often unable to do many of the operations we now take for granted. Going back and using CVS for a while is an excellent way to find yourself with a new appreciation for Git’s power and flexibility. It illustrates well why understanding the history of software development can be so beneficial—picking up and re-examining obsolete tools will teach you volumes about the why behind the tools we use today.
|
||||
|
||||
If you enjoyed this post, more like it come out every two weeks! Follow [@TwoBitHistory][6] on Twitter or subscribe to the [RSS feed][7] to make sure you know when a new post is out.
|
||||
|
||||
#### Correction
|
||||
|
||||
I’ve been told that there are many organizations, particularly risk-averse organizations that do things like make medical device software, that still use CVS. Programmers in these organizations have developed little tricks for working around CVS’ limitations, such as making a new branch for almost every change to avoid committing directly to `HEAD`. (Thanks to Michael Kohne for pointing this out.)
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://twobithistory.org/2018/07/07/cvs.html
|
||||
|
||||
作者:[Two-Bit History][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://twobithistory.org
|
||||
[b]: https://github.com/lujun9972
|
||||
[1]: https://docs.freebsd.org/44doc/psd/28.cvs/paper.pdf
|
||||
[2]: https://www.nongnu.org/cvs/
|
||||
[3]: http://shop.oreilly.com/product/9780974514000.do
|
||||
[4]: https://www.youtube.com/watch?v=4XpnKHJAok8
|
||||
[5]: https://xkcd.com/1597/
|
||||
[6]: https://twitter.com/TwoBitHistory
|
||||
[7]: https://twobithistory.org/feed.xml
|
@ -1,58 +0,0 @@
|
||||
translating by dianbanjiu
|
||||
6 places to host your git repository
|
||||
======
|
||||
|
||||
![](https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/house_home_colors_live_building.jpg?itok=HLpsIfIL)
|
||||
|
||||
Perhaps you're one of the few people who didn't notice, but a few months back, [Microsoft bought GitHub][1]. Nothing against either company. Microsoft has become a vocal supporter of open source in recent years, and GitHub has been the de facto code repository for a heaping large number of open source projects almost since its inception.
|
||||
|
||||
However, the recent(-ish) purchase may have gotten you a little itchy. After all, there's nothing quite like a corporate buy-out to make you realize you've had your open source code sitting on a commercial platform. Maybe you're not quite ready to jump ship just yet, but it would at least be helpful to know your options. Let's have a look around the web and see what's available.
|
||||
|
||||
### Option 1: GitHub
|
||||
|
||||
Seriously, this is a valid option. [GitHub][2] doesn't have a history of acting in bad faith, and Microsoft certainly has been smiling on open source of late. There's nothing wrong with keeping your project on GitHub and taking a wait-and-see perspective. It's still the largest community website for software development, and it still has some of the best tools for issue tracking, code review, continuous integration, and general code management. And its underpinnings are still on Git, everyone's favorite open source distributed version control system. Your code is still your code. There's nothing wrong with leaving things where they are if nothing is broken.
|
||||
|
||||
### Option 2: GitLab
|
||||
|
||||
[GitLab][3] is probably the leading contender when it comes to alternative code platforms. It's fully open source. You can host your code right on GitLab's site much like you would on GitHub, but you can also choose to self-host a GitLab instance of your own on your own server and have full control over who has access to everything there and how things are managed. GitLab pretty much has feature parity with GitHub, and some folks might even say its continuous integration and testing tools are superior. Although the community of developers on GitLab is certainly smaller than the one on GitHub, it's still nothing to sneeze at. And it's possible that you'll find more like-minded developers among the population there.
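
If self-hosting appeals to you, GitLab also publishes an official Docker image, which makes spinning up a trial instance nearly a one-liner. A quick sketch, assuming Docker is installed (the published ports here are just a common choice; consult GitLab's documentation for a production setup):

```
$ sudo docker run --detach \
    --publish 80:80 --publish 443:443 --publish 2222:22 \
    --name gitlab \
    gitlab/gitlab-ce:latest    # then browse to http://localhost to finish setup
```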
|
||||
|
||||
### Option 3: Bitbucket
|
||||
|
||||
[Bitbucket][4] has been around for many years. In some ways, it could serve as a looking glass into the future of GitHub. Bitbucket was acquired by a larger corporation (Atlassian) eight years ago and has already been through some of that change-over process. It's still a commercial platform like GitHub, but it's far from being a startup, and it's on pretty stable footing, organizationally speaking. Bitbucket shares most of the features available on GitHub and GitLab, plus a few novel features of its own, like native support for [Mercurial][5] repositories.
|
||||
|
||||
### Option 4: SourceForge
|
||||
|
||||
The granddaddy of open source code repository sites is [SourceForge][6]. It used to be that if you had an open source project, SourceForge was the place to host your code and share your releases. It took a little while to migrate to Git for version control, and it had its own rash of commercial acquiring and re-acquiring events, coupled with a few unfortunate bundling decisions for a few open source projects. That said, SourceForge seems to have recovered since then, and the site is still a place where quite a few open source projects live. A lot of folks still feel a bit burned, though, and some people aren't huge fans of its various attempts to monetize the platform, so be sure you go in with open eyes.
|
||||
|
||||
### Option 5: Roll your own
|
||||
|
||||
If you want full control of your project's destiny (and no one to blame but yourself), then doing it all yourself may be the best option for you. It is a good alternative for both large and small projects. Git is open source, so it's easily self-hosted. If you want issue tracking and code review, you can run an instance of GitLab or [Phabricator][7]. For continuous integration, you can set up your own instance of the [Jenkins][8] automation server. Yes, you'll need to take responsibility for your own infrastructure overhead and the associated security requirements. However, it's not that hard to get yourself set up. And if you want a sure-fire way to avoid being beholden to the whims of anyone else's platform, this is the way to do it.
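
To give a sense of how little the bare-bones version requires: a bare repository on any machine you can SSH into is already a working Git host. A minimal sketch (the hostname and paths are placeholders):

```
# on the server: create an empty, bare repository
$ git init --bare /srv/git/myproject.git

# on your workstation: point an existing project at it and push
$ git remote add origin user@git.example.com:/srv/git/myproject.git
$ git push -u origin master
```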
|
||||
|
||||
### Option 6: All of the above
|
||||
|
||||
Here's the beauty of all of this: Despite the proprietary drapery strewn over some of these platforms, they're still built on top of solid open source technology. And not just open source, but explicitly designed to be distributed across multiple nodes on a large network (like the internet). You're not required to use just one. You can use a couple… or all of them. Roll your own setup as a guaranteed home base using GitLab and have clone repositories on GitHub and Bitbucket for issue tracking and continuous integration. Keep your main codebase on GitHub but have "backup" clones sitting on GitLab for your own peace of mind.
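
Git makes the all-of-the-above approach almost trivial, because a repository can have any number of remotes. A minimal sketch (the remote names and URLs are placeholders):

```
$ git remote add github https://github.com/you/project.git
$ git remote add gitlab https://gitlab.com/you/project.git
$ git push github master
$ git push gitlab master
```

You can also attach several push URLs to a single remote with `git remote set-url --add --push`, so that one `git push` updates every mirror at once.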
|
||||
|
||||
The key thing is you have options. And we have those options thanks to open source licensing on very useful and powerful projects. The future is bright.
|
||||
|
||||
Of course, I'm bound to have missed some of the open source options available out there. Feel free to pipe up with your favorites. Are you using multiple platforms? What's your setup? Let everyone know in the comments!
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://opensource.com/article/18/8/github-alternatives
|
||||
|
||||
作者:[Jason van Gumster][a]
|
||||
选题:[lujun9972](https://github.com/lujun9972)
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://opensource.com/users/mairin
|
||||
[1]: https://www.theverge.com/2018/6/4/17422788/microsoft-github-acquisition-official-deal
|
||||
[2]: https://github.com/
|
||||
[3]: https://gitlab.com
|
||||
[4]: https://bitbucket.org
|
||||
[5]: https://www.mercurial-scm.org/wiki/Repository
|
||||
[6]: https://sourceforge.net
|
||||
[7]: https://phacility.com/phabricator/
|
||||
[8]: https://jenkins.io
|
@ -1,168 +0,0 @@
|
||||
Flameshot – A Simple, Yet Powerful Feature-rich Screenshot Tool
|
||||
======
|
||||
|
||||
![](https://www.ostechnix.com/wp-content/uploads/2018/09/Flameshot-720x340.png)
|
||||
|
||||
Capturing screenshots is part of my job. I have been using the Deepin screenshot tool for taking screenshots. It’s a simple, lightweight, and quite neat screenshot tool. It comes with options such as smart window identification, keyboard shortcut support, image editing, delayed screenshots, social sharing, smart saving, and image resolution adjustment. Today, I stumbled upon yet another screenshot tool that ships with many features. Say hello to **Flameshot**, a simple, powerful, feature-rich screenshot tool for Unix-like operating systems. It is easy to use, customizable, and has an option to upload your screenshots to **imgur**, an online image sharing website. Flameshot also has a CLI version, so you can take screenshots from the command line as well. Flameshot is a completely free and open source tool. In this guide, we will see how to install Flameshot and how to take screenshots using it.
|
||||
|
||||
### Install Flameshot
|
||||
|
||||
**On Arch Linux:**
|
||||
|
||||
Flameshot is available in the [community] repository in Arch Linux. Make sure you have enabled the community repository, then install Flameshot using pacman as shown below.
|
||||
```
|
||||
$ sudo pacman -S flameshot
|
||||
|
||||
```
|
||||
|
||||
It is also available in [**AUR**][1], so you can install it using any AUR helper program, for example [**Yay**][2], on Arch-based systems.
|
||||
```
|
||||
$ yay -S flameshot-git
|
||||
|
||||
```
|
||||
|
||||
**On Fedora:**
|
||||
```
|
||||
$ sudo dnf install flameshot
|
||||
|
||||
```
|
||||
|
||||
On **Debian 10+** and **Ubuntu 18.04+**, install it using the APT package manager.
|
||||
```
|
||||
$ sudo apt install flameshot
|
||||
|
||||
```
|
||||
|
||||
**On openSUSE:**
|
||||
```
|
||||
$ sudo zypper install flameshot
|
||||
|
||||
```
|
||||
|
||||
On other distributions, compile and install it from source code. The compilation requires **Qt version 5.3** or higher and **GCC 4.9.2** or higher.
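
A source build typically looks something like the following (the repository URL reflects where Flameshot was hosted at the time of writing, and the project used a qmake-based build; check the project's README for current instructions):

```
$ git clone https://github.com/lupoDharkael/flameshot.git
$ cd flameshot
$ mkdir build && cd build
$ qmake ../ && make    # needs Qt >= 5.3 and GCC >= 4.9.2
$ sudo make install
```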
|
||||
|
||||
### Usage
|
||||
|
||||
Launch Flameshot from the menu or application launcher. On the MATE desktop environment, it is usually found under **Applications -> Graphics**.
|
||||
|
||||
Once you open it, you will see the Flameshot systray icon in your system’s panel.
|
||||
|
||||
**Note:**
|
||||
|
||||
If you are using GNOME, you need to install the [TopIcons][3] extension in order to see the system tray icon.
|
||||
|
||||
Right-click on the tray icon and you’ll see menu items to open the configuration window, open the information window, or quit the application.
|
||||
|
||||
To capture a screenshot, just click on the tray icon. You will see a help window that explains how to use Flameshot. Choose an area to capture and hit the **ENTER** key to capture the screen. Right-click to show the color picker, and hit the spacebar to view the side panel. You can increase or decrease the pointer’s thickness by using the mouse scroll wheel.
|
||||
|
||||
Flameshot comes with quite a good set of features, such as:
|
||||
|
||||
* Free hand writing
|
||||
* Line drawing
|
||||
* Rectangle / Circle drawing
|
||||
* Rectangle selection
|
||||
* Arrows
|
||||
* Marker to highlight important points
|
||||
* Add text
|
||||
* Blur the image/text
|
||||
* Show the dimension of the image
|
||||
* Undo/Redo the changes while editing images
|
||||
* Copy the selection to the clipboard
|
||||
* Save the selection
|
||||
* Leave the capture screen
|
||||
* Choose an app to open images
|
||||
* Upload the selection to imgur site
|
||||
* Pin image to desktop
|
||||
|
||||
|
||||
|
||||
Here is a sample demo:
|
||||
|
||||
<http://www.ostechnix.com/wp-content/uploads/2018/09/Flameshot-demo.mp4>
|
||||
|
||||
**Keyboard shortcuts**
|
||||
|
||||
Flameshot supports keyboard shortcuts. Right-click on the Flameshot tray icon and open the **Information** window to see all the available shortcuts in the graphical capture mode. Here is the list of available keyboard shortcuts in GUI mode.
|
||||
|
||||
| Keys | Description |
|
||||
|------------------------|------------------------------|
|
||||
| ←, ↓, ↑, → | Move selection 1px |
|
||||
| Shift + ←, ↓, ↑, → | Resize selection 1px |
|
||||
| Esc | Quit capture |
|
||||
| Ctrl + C | Copy to clipboard |
|
||||
| Ctrl + S | Save selection as a file |
|
||||
| Ctrl + Z | Undo the last modification |
|
||||
| Right Click | Show color picker |
|
||||
| Mouse Wheel | Change the tool’s thickness |
|
||||
|
||||
Shift + drag a handle of the selection area: resizes the selection symmetrically, mirroring the change in the opposite handle.
|
||||
|
||||
**Command line options**
|
||||
|
||||
Flameshot also has a set of command line options to delay the screenshots and save images in custom paths.
|
||||
|
||||
To capture screen with Flameshot GUI, run:
|
||||
```
|
||||
$ flameshot gui
|
||||
|
||||
```
|
||||
|
||||
To capture screen with GUI and save it in a custom path of your choice:
|
||||
```
|
||||
$ flameshot gui -p ~/myStuff/captures
|
||||
|
||||
```
|
||||
|
||||
To open GUI with a delay of 2 seconds:
|
||||
```
|
||||
$ flameshot gui -d 2000
|
||||
|
||||
```
|
||||
|
||||
To capture fullscreen with custom save path (no GUI) with a delay of 2 seconds:
|
||||
```
|
||||
$ flameshot full -p ~/myStuff/captures -d 2000
|
||||
|
||||
```
|
||||
|
||||
To capture fullscreen with custom save path copying to clipboard:
|
||||
```
|
||||
$ flameshot full -c -p ~/myStuff/captures
|
||||
|
||||
```
|
||||
|
||||
To capture the screen containing the mouse and print the image (bytes) in **PNG** format:
|
||||
```
|
||||
$ flameshot screen -r
|
||||
|
||||
```
|
||||
|
||||
To capture the screen number 1 and copy it to the clipboard:
|
||||
```
|
||||
$ flameshot screen -n 1 -c
|
||||
|
||||
```
|
||||
|
||||
What more do you need? Flameshot has almost all the features you could want: capturing pictures, adding annotations, editing images, blurring or highlighting important points, and a lot more. I think I will stick with Flameshot for a while, as I find it the best replacement for my current screenshot tool. Give it a try and you won’t be disappointed.
|
||||
|
||||
And that’s all for now. More good stuff to come. Stay tuned!
|
||||
|
||||
Cheers!
|
||||
|
||||
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://www.ostechnix.com/flameshot-a-simple-yet-powerful-feature-rich-screenshot-tool/
|
||||
|
||||
作者:[SK][a]
|
||||
选题:[lujun9972](https://github.com/lujun9972)
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://www.ostechnix.com/author/sk/
|
||||
[1]: https://aur.archlinux.org/packages/flameshot-git
|
||||
[2]: https://www.ostechnix.com/yay-found-yet-another-reliable-aur-helper/
|
||||
[3]: https://extensions.gnome.org/extension/1031/topicons/
|
184
sources/tech/20180902 Learning BASIC Like It-s 1983.md
Normal file
@ -0,0 +1,184 @@
|
||||
Learning BASIC Like It's 1983
|
||||
======
|
||||
I was not yet alive in 1983. This is something that I occasionally regret. I am especially sorry that I did not experience the 8-bit computer era as it was happening, because I think the people that first encountered computers when they were relatively simple and constrained have a huge advantage over the rest of us.
|
||||
|
||||
Today, (almost) everyone knows how to use a computer, but very few people, even in the computing industry, grasp all of what is going on inside of any single machine. There are now [so many layers of software][1] doing so many different things that one struggles to identify the parts that are essential. In 1983, though, home computers were unsophisticated enough that a diligent person could learn how a particular computer worked through and through. That person is today probably less mystified than I am by all the abstractions that modern operating systems pile on top of the hardware. I expect that these layers of abstractions were easy to understand one by one as they were introduced; today, new programmers have to try to understand them all by working top to bottom and backward in time.
|
||||
|
||||
Many famous programmers, particularly in the video game industry, started programming games in childhood on 8-bit computers like the Apple II and the Commodore 64. John Romero, Richard Garriott, and Chris Roberts are all examples. It’s easy to see how this happened. In the 8-bit computer era, many games were available only as printed BASIC listings in computer magazines and [books][2]. If you wanted to play one of those games, you had to type in the whole program by hand. Inevitably, you would get something wrong, so you would have to debug your program. By the time you got it working, you knew enough about how the program functioned to start modifying it yourself. If you were an avid gamer, you became a good programmer almost by necessity.
|
||||
|
||||
I also played computer games throughout my childhood. But the games I played came on CD-ROMs. I sometimes found myself having to google how to fix a crashing installer, which would involve editing the Windows Registry or something like that. This kind of minor troubleshooting may have made me comfortable enough with computers to consider studying computer science in college. But it never taught me anything crucial about how computers worked or how to control them.
|
||||
|
||||
Now, of course, I tell computers what to do for a living. All the same, I can’t help feeling that I missed out on some fundamental insight afforded only to those that grew up programming simpler computers. What would it have been like to encounter computers for the first time in the early 1980s? How would that have been different from the experience of using a computer today?
|
||||
|
||||
This post is going to be a little different from the usual Two-Bit History post because I’m going to try to imagine an answer to these questions.
|
||||
|
||||
### 1983
|
||||
|
||||
It was just last week that you saw [the Commodore 64 ad][3] on TV. Now that M*A*S*H was over, you were in the market for something new to do on Monday nights. This Commodore 64 thing looked even better than the Apple II that Rudy’s family had in their basement. Plus, the ad promised that the new computer would soon bring friends “knocking down” your door. You knew several people at school that would rather be hanging out at your house than Rudy’s anyway, if only they could play Zork there.
|
||||
|
||||
So you persuaded your parents to buy one. Your mother said that they would consider it only if having a home computer meant that you stayed away from the arcade. You reluctantly agreed. Your father thought he would start tracking the family’s finances in MultiPlan, the spreadsheet program he had heard about, which is why the computer got put in the living room. A year later, though, you would be the only one still using it. You were finally allowed to put it on the desk in your bedroom, right under your Police poster.
|
||||
|
||||
(Your sister protested this decision, but it was 1983 and computers [weren’t for her][4].)
|
||||
|
||||
Dad picked it up from [ComputerLand][5] on the way home from work. The two of you laid the box down next to the TV and opened it. “WELCOME TO THE WORLD OF FRIENDLY COMPUTING,” said the packaging. Twenty minutes later, you weren’t convinced—the two of you were still trying to connect the Commodore to the TV set and wondering whether the TV’s antenna cable was the 75-ohm or 300-ohm coax type. But eventually you were able to turn your TV to channel 3 and see a grainy, purple image.
|
||||
|
||||
![Commodore 64 startup screen][6]
|
||||
|
||||
`READY`, the computer reported. Your father pushed the computer toward you, indicating that you should be the first to give it a try. `HELLO`, you typed, carefully hunting for each letter. The computer’s response was baffling.
|
||||
|
||||
![Commodore 64 syntax error][7]
|
||||
|
||||
You tried typing in a few different words, but the response was always the same. Your father said that you had better read through the rest of the manual. That would be no mean feat—[the manual that came with the Commodore 64][8] was a small book. But that didn’t bother you, because the introduction to the manual foreshadowed wonders.
|
||||
|
||||
The Commodore 64, it claimed, had “the most advanced picture maker in the microcomputer industry,” which would allow you “to design your own pictures in four different colors, just like the ones you see on arcade type video games.” The Commodore 64 also had “built-in music and sound effects that rival many well known music synthesizers.” All of these tools would be put in your hands, because the manual would walk you through it all:
|
||||
|
||||
> Just as important as all the available hardware is the fact that this USER’S GUIDE will help you develop your understanding of computers. It won’t tell you everything there is to know about computers, but it will refer you to a wide variety of publications for more detailed information about the topics presented. Commodore wants you to really enjoy your new COMMODORE 64. And to have fun, remember: programming is not the kind of thing you can learn in a day. Be patient with yourself as you go through the USER’S GUIDE.
|
||||
|
||||
That night, in bed, you read through the entire first three chapters—”Setup,” “Getting Started,” and “Beginning BASIC Programming”—before finally succumbing to sleep with the manual splayed across your chest.
|
||||
|
||||
### Commodore BASIC
|
||||
|
||||
Now, it’s Saturday morning and you’re eager to try out what you’ve learned. One of the first things the manual teaches you how to do is change the colors on the display. You follow the instructions, pressing `CTRL-9` to enter reverse type mode and then holding down the space bar to create long lines. You swap between colors using `CTRL-1` through `CTRL-8`, reveling in your sudden new power over the TV screen.
|
||||
|
||||
![Commodore 64 color bands][9]
|
||||
|
||||
As cool as this is, you realize it doesn’t count as programming. In order to program the computer, you learned last night, you have to speak to it in a language called BASIC. To you, BASIC seems like something out of Star Wars, but BASIC is, by 1983, almost two decades old. It was invented by two Dartmouth professors, John Kemeny and Tom Kurtz, who wanted to make computing accessible to undergraduates in the social sciences and humanities. It was widely available on minicomputers and popular in college math classes. It then became standard on microcomputers after Bill Gates and Paul Allen wrote the MicroSoft BASIC interpreter for the Altair. But the manual doesn’t explain any of this and you won’t learn it for many years.
|
||||
|
||||
One of the first BASIC commands the manual suggests you try is the `PRINT` command. You type in `PRINT "COMMODORE 64"`, slowly, since it takes you a while to find the quotation mark symbol above the `2` key. You hit `RETURN` and this time, instead of complaining, the computer does exactly what you told it to do and displays “COMMODORE 64” on the next line.
|
||||
|
||||
Now you try using the `PRINT` command on all sorts of different things: two numbers added together, two numbers multiplied together, even several decimal numbers. You stop typing out `PRINT` and instead use `?`, since the manual has advised you that `?` is an abbreviation for `PRINT` often used by expert programmers. You feel like an expert already, but then you remember that you haven’t even made it to chapter three, “Beginning BASIC Programming.”
|
||||
|
||||
You get there soon enough. The chapter begins by prompting you to write your first real BASIC program. You type in `NEW` and hit `RETURN`, which gives you a clean slate. You then type your program in:
|
||||
|
||||
```
|
||||
10 ?"COMMODORE 64"
|
||||
20 GOTO 10
|
||||
```
|
||||
|
||||
The 10 and the 20, the manual explains, are line numbers. They order the statements for the computer. They also allow the programmer to refer to other lines of the program in certain commands, just like you’ve done here with the `GOTO` command, which directs the program back to line 10. “It is good programming practice,” the manual opines, “to number lines in increments of 10—in case you need to insert some statements later on.”
|
||||
|
||||
You type `RUN` and stare as the screen clogs with “COMMODORE 64,” repeated over and over.
|
||||
|
||||
![Commodore 64 showing result of printing "Commodore 64" repeatedly][10]
|
||||
|
||||
You’re not certain that this isn’t going to blow up your computer. It takes you a second to remember that you are supposed to hit the `RUN/STOP` key to break the loop.
|
||||
|
||||
The next few sections of the manual teach you about variables, which the manual tells you are like “a number of boxes within the computer that can each hold a number or a string of text characters.” Variables that end in a `%` symbol are whole numbers, while variables ending in a `$` symbol are strings of characters. All other variables are something called “floating point” variables. The manual warns you to be careful with variable names because only the first two letters of the name are actually recognized by the computer, even though nothing stops you from making a name as long as you want it to be. (This doesn’t particularly bother you, but you could see how 30 years from now this might strike someone as completely insane.)
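
A quick illustration of that last quirk (my own example, not from the manual): because only the first two letters are significant, two long names that begin with the same two letters silently refer to the same variable.

```
10 ABCD = 1
20 ABXY = 2
30 PRINT ABCD : REM PRINTS 2, SINCE ONLY "AB" IS SIGNIFICANT
```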
|
||||
|
||||
You then learn about the `IF... THEN...` and `FOR... NEXT...` constructs. With all these new tools, you feel equipped to tackle the next big challenge the manual throws at you. “If you’re the ambitious type,” it goads, “type in the following program and see what happens.” The program is longer and more complicated than any you have seen so far, but you’re dying to know what it does:
|
||||
|
||||
```
|
||||
10 REM BOUNCING BALL
|
||||
20 PRINT "{CLR/HOME}"
|
||||
25 FOR X = 1 TO 10 : PRINT "{CRSR/DOWN}" : NEXT
|
||||
30 FOR BL = 1 TO 40
|
||||
40 PRINT " ●{CRSR LEFT}";:REM (● is a Shift-Q)
|
||||
50 FOR TM = 1 TO 5
|
||||
60 NEXT TM
|
||||
70 NEXT BL
|
||||
75 REM MOVE BALL RIGHT TO LEFT
|
||||
80 FOR BL = 40 TO 1 STEP -1
|
||||
90 PRINT " {CRSR LEFT}{CRSR LEFT}●{CRSR LEFT}";
|
||||
100 FOR TM = 1 TO 5
|
||||
110 NEXT TM
|
||||
120 NEXT BL
|
||||
130 GOTO 20
|
||||
```
|
||||
|
||||
The program above takes advantage of one of the Commodore 64’s coolest features. Non-printable command characters, when passed to the `PRINT` command as part of a string, just do the action they usually perform instead of printing to the screen. This allows you to replay arbitrary chains of commands by printing strings from within your programs.
|
||||
|
||||
It takes you a long time to type in the above program. You make several mistakes and have to re-enter some of the lines. But eventually you are able to type `RUN` and behold a masterpiece:
|
||||
|
||||
![Commodore 64 bouncing ball][11]
|
||||
|
||||
You think that this is a major contender for the coolest thing you have ever seen. You forget about it almost immediately though, because once you’ve learned about BASIC’s built-in functions like `RND` (which returns a random number) and `CHR$` (which returns the character matching a given number code), the manual shows you a program that many years from now will still be famous enough to be made the title of an [essay anthology][12]:
|
||||
|
||||
```
|
||||
10 PRINT "{CLR/HOME}"
|
||||
20 PRINT CHR$(205.5 + RND(1));
|
||||
40 GOTO 20
|
||||
```
|
||||
|
||||
When run, the above program produces a random maze:
|
||||
|
||||
![Commodore 64 maze program][13]
|
||||
|
||||
This is definitely the coolest thing you have ever seen.
|
||||
|
||||
### PEEK and POKE
|
||||
|
||||
You’ve now made it through the first four chapters of the Commodore 64 manual, including the chapter titled “Advanced BASIC,” so you’re feeling pretty proud of yourself. You’ve learned a lot this Saturday morning. But this afternoon (after a quick lunch break), you’re going to learn something that will make this magical machine in your living room much less mysterious.
|
||||
|
||||
The next chapter in the manual is titled “Advanced Color and Graphic Commands.” It starts off by revisiting the colored bars that you were able to type out first thing this morning and shows you how you can do the same thing from a program. It then teaches you how to change the background colors of the screen.
|
||||
|
||||
In order to do this, you need to use the BASIC `PEEK` and `POKE` commands. Those commands allow you to, respectively, examine and write to a memory address. The Commodore 64 has a main background color and a border color. Each is controlled by a specially designated memory address. You can write any color value you would like to those addresses to make the background or border that color.
|
||||
|
||||
The manual explains:
|
||||
|
||||
> Just as variables can be thought of as a representation of “boxes” within the machine where you placed your information, you can also think of some specially defined “boxes” within the computer that represent specific memory locations.
|
||||
>
|
||||
> The Commodore 64 looks at these memory locations to see what the screen’s background and border color should be, what characters are to be displayed on the screen—and where—and a host of other tasks.
|
||||
|
||||
You write a program to cycle through all the available combinations of background and border color:
|
||||
|
||||
```
|
||||
10 FOR BA = 0 TO 15
|
||||
20 FOR BO = 0 TO 15
|
||||
30 POKE 53280, BA
|
||||
40 POKE 53281, BO
|
||||
50 FOR X = 1 TO 500 : NEXT X
|
||||
60 NEXT BO : NEXT BA
|
||||
```
|
||||
|
||||
While the `POKE` commands, with their big operands, looked intimidating at first, now you see that the actual value of the number doesn’t matter that much. Obviously, you have to get the number right, but all the number represents is a “box” that Commodore just happened to store at address 53280. This box has a special purpose: Commodore uses it to determine what color the screen’s border should be, while its neighbor at address 53281 controls the background.
|
||||
|
||||
![Commodore 64 changing background colors][14]
|
||||
|
||||
You think this is pretty neat. Just by writing to a special-purpose box in memory, you can control a fundamental property of the computer. You aren’t sure how the Commodore 64’s circuitry takes the value you write in memory and changes the color of the screen, but you’re okay not knowing that. At least you understand everything up to that point.
|
||||
|
||||
### Special Boxes
|
||||
|
||||
You don’t get through the entire manual that Saturday, since you are now starting to run out of steam. But you do eventually read all of it. In the process, you learn about many more of the Commodore 64’s special-purpose boxes. There are boxes you can write to control what is on screen—one box, in fact, for every place a character might appear. In chapter six, “Sprite Graphics,” you learn about the special-purpose boxes that allow you to define images that can be moved around and even scaled up and down. In chapter seven, “Creating Sound,” you learn about the boxes you can write to in order to make your Commodore 64 sing “Michael Row the Boat Ashore.” The Commodore 64, it turns out, has very little in the way of what you would later learn is called an API. Controlling the Commodore 64 mostly involves writing to memory addresses that have been given special meaning by the circuitry.
|
||||
|
||||
The many years you ultimately spend writing to those special boxes stick with you. Even many decades later, when you find yourself programming a machine with an extensive graphics or sound API, you know that, behind the curtain, the API is ultimately writing to those boxes or something like them. You will sometimes wonder about younger programmers that have only ever used APIs, and wonder what they must think the API is doing for them. Maybe they think that the API is calling some other, hidden API. But then what do they think that hidden API is calling? You will pity those younger programmers, because they must be very confused indeed.
|
||||
|
||||
If you enjoyed this post, more like it come out every two weeks! Follow [@TwoBitHistory][15] on Twitter or subscribe to the [RSS feed][16] to make sure you know when a new post is out.
|
||||
|
||||
Previously on TwoBitHistory…
|
||||
|
||||
> Have you ever wondered what a 19th-century computer program would look like translated into C?
|
||||
>
|
||||
> This week's post: A detailed look at how Ada Lovelace's famous program worked and what it was trying to do.<https://t.co/BizR2Zu7nt>
|
||||
>
|
||||
> — TwoBitHistory (@TwoBitHistory) [August 19, 2018][17]
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://twobithistory.org/2018/09/02/learning-basic.html
|
||||
|
||||
作者:[Two-Bit History][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://twobithistory.org
|
||||
[b]: https://github.com/lujun9972
|
||||
[1]: https://www.youtube.com/watch?v=kZRE7HIO3vk
|
||||
[2]: https://en.wikipedia.org/wiki/BASIC_Computer_Games
|
||||
[3]: https://www.youtube.com/watch?v=ZekAbt2o6Ms
|
||||
[4]: https://www.npr.org/sections/money/2014/10/21/357629765/when-women-stopped-coding
|
||||
[5]: https://www.youtube.com/watch?v=MA_XtT3VAVM
|
||||
[6]: https://twobithistory.org/images/c64_startup.png
|
||||
[7]: https://twobithistory.org/images/c64_error.png
|
||||
[8]: ftp://www.zimmers.net/pub/cbm/c64/manuals/C64_Users_Guide.pdf
|
||||
[9]: https://twobithistory.org/images/c64_colors.png
|
||||
[10]: https://twobithistory.org/images/c64_print_loop.png
|
||||
[11]: https://twobithistory.org/images/c64_ball.gif
|
||||
[12]: http://10print.org/
|
||||
[13]: https://twobithistory.org/images/c64_maze.gif
|
||||
[14]: https://twobithistory.org/images/c64_background.gif
|
||||
[15]: https://twitter.com/TwoBitHistory
|
||||
[16]: https://twobithistory.org/feed.xml
|
||||
[17]: https://twitter.com/TwoBitHistory/status/1030974776821665793?ref_src=twsrc%5Etfw
|
@ -1,173 +0,0 @@
|
||||
Translating by way-ww
|
||||
|
||||
Why Linux users should try Rust
|
||||
======
|
||||
|
||||
![](https://images.idgesg.net/images/article/2018/09/rust-rusted-metal-100773678-large.jpg)
|
||||
|
||||
Rust is a fairly young and modern programming language with a lot of features that make it incredibly flexible and very secure. It's also becoming quite popular, having won first place for the "most loved programming language" in the Stack Overflow Developer Survey three years in a row — [2016][1], [2017][2], and [2018][3].
|
||||
|
||||
Rust is also an _open-source_ language with a suite of special features that allow it to be adapted to many different programming projects. It grew out of what was a personal project of a Mozilla employee back in 2006, was picked up as a special project by Mozilla a few years later (2009), and then announced for public use in 2010.
|
||||
|
||||
Rust programs run incredibly fast, prevent segfaults, and guarantee thread safety. These attributes make the language tremendously appealing to developers focused on application security. Rust is also a very readable language and one that can be used for anything from simple programs to very large and complex projects.
|
||||
|
||||
Rust is:
|
||||
|
||||
* Memory safe — Rust will not suffer from dangling pointers, buffer overflows, or other memory-related errors. And it provides memory safety without garbage collection.
|
||||
* General purpose — Rust is an appropriate language for any type of programming.
|
||||
* Fast — Rust is comparable in performance to C/C++ but with far better security features.
|
||||
* Efficient — Rust is built to facilitate concurrent programming.
|
||||
* Project-oriented — Rust has a built-in dependency and build management system called Cargo.
|
||||
* Well supported — Rust has an impressive [support community][4].
|
||||
|
||||
|
||||
|
||||
Rust also enforces RAII (Resource Acquisition Is Initialization). That means when an object goes out of scope, its destructor will be called and its resources will be freed, providing a shield against resource leaks. It provides functional abstractions and a great [type system][5] together with speed and mathematical soundness.
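
Here is a tiny sketch of RAII in action (my own illustration, not from the Rust documentation):

```
fn main() {
    {
        let greeting = String::from("hello"); // heap buffer acquired here
        println!("{}", greeting);
    } // greeting goes out of scope: its destructor runs and the buffer is freed

    // No garbage collector and no manual free() -- the compiler inserts the cleanup.
}
```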
|
||||
|
||||
In short, Rust is an impressive systems programming language with features that most other languages lack, making it a serious competitor to languages like C, C++, and Objective-C that have been used for years.
|
||||
|
||||
### Installing Rust
|
||||
|
||||
Installing Rust is a fairly simple process.
|
||||
|
||||
```
|
||||
$ curl https://sh.rustup.rs -sSf | sh
|
||||
```
|
||||
|
||||
Once Rust is installed, calling rustc with the **\--version** argument displays version information, while the **which** command shows where the rustc binary was installed.
|
||||
|
||||
```
|
||||
$ which rustc
|
||||
/home/shs/.cargo/bin/rustc
|
||||
$ rustc --version
|
||||
rustc 1.27.2 (58cc626de 2018-07-18)
|
||||
```
|
||||
|
||||
### Getting started with Rust
|
||||
|
||||
The simplest code example is not all that different from what you'd enter if you were using one of many scripting languages.
|
||||
|
||||
```
|
||||
$ cat hello.rs
|
||||
fn main() {
|
||||
// Print a greeting
|
||||
println!("Hello, world!");
|
||||
}
|
||||
```
|
||||
|
||||
In these lines, we are setting up a function (main), adding a comment describing the function, and using the println! macro to produce output. You can compile and then run a program like this using the commands shown below.
|
||||
|
||||
```
|
||||
$ rustc hello.rs
|
||||
$ ./hello
|
||||
Hello, world!
|
||||
```
|
||||
|
||||
Alternatively, you might create a "project" (generally used only for more complex programs than this one!) to keep your code organized.
|
||||
|
||||
```
|
||||
$ mkdir ~/projects
|
||||
$ cd ~/projects
|
||||
$ mkdir hello_world
|
||||
$ cd hello_world
|
||||
```
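
These days the more idiomatic way to start a project is to let Cargo, Rust's built-in build tool and package manager, lay out the directory for you. A brief sketch:

```
$ cargo new hello_world    # creates Cargo.toml and a src/main.rs stub
$ cd hello_world
$ cargo run                # compiles and runs in one step
Hello, world!
```

(Cargo's own compile messages are omitted from the transcript above.)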
|
||||
|
||||
Notice that even a simple program, once compiled, becomes a fairly large executable.
|
||||
|
||||
```
|
||||
$ ./hello
|
||||
Hello, world!
|
||||
$ ls -l hello*
|
||||
-rwxrwxr-x 1 shs shs 5486784 Sep 23 19:02 hello <== executable
|
||||
-rw-rw-r-- 1 shs shs 68 Sep 23 15:25 hello.rs
|
||||
```
|
||||
|
||||
And, of course, that's just a start — the traditional "Hello, world!" program. The Rust language has a suite of features to get you moving quickly to advanced levels of programming skill.
|
||||
|
||||
### Learning Rust
|
||||
|
||||
![rust programming language book cover][6]
|
||||
No Starch Press
|
||||
|
||||
The Rust Programming Language book by Steve Klabnik and Carol Nichols (2018) provides one of the best ways to learn Rust. Written by two members of the core development team, this book is available in print from [No Starch Press][7] or in ebook format at [rust-lang.org][8]. It has earned its reputation as "the book" among the Rust developer community.
|
||||
|
||||
Among the many topics covered, you will learn about these advanced topics:
|
||||
|
||||
* Ownership and borrowing
|
||||
* Safety guarantees
|
||||
* Testing and error handling
|
||||
* Smart pointers and multi-threading
|
||||
* Advanced pattern matching
|
||||
* Using Cargo (the built-in package manager)
|
||||
* Using Rust's advanced compiler
|
||||
|
||||
|
||||
|
||||
#### Table of Contents
|
||||
|
||||
The table of contents is shown below.
|
||||
|
||||
```
|
||||
Foreword by Nicholas Matsakis and Aaron Turon
|
||||
Acknowledgements
|
||||
Introduction
|
||||
Chapter 1: Getting Started
|
||||
Chapter 2: Guessing Game
|
||||
Chapter 3: Common Programming Concepts
|
||||
Chapter 4: Understanding Ownership
|
||||
Chapter 5: Structs
|
||||
Chapter 6: Enums and Pattern Matching
|
||||
Chapter 7: Modules
|
||||
Chapter 8: Common Collections
|
||||
Chapter 9: Error Handling
|
||||
Chapter 10: Generic Types, Traits, and Lifetimes
|
||||
Chapter 11: Testing
|
||||
Chapter 12: An Input/Output Project
|
||||
Chapter 13: Iterators and Closures
|
||||
Chapter 14: More About Cargo and Crates.io
|
||||
Chapter 15: Smart Pointers
|
||||
Chapter 16: Concurrency
|
||||
Chapter 17: Is Rust Object Oriented?
|
||||
Chapter 18: Patterns
|
||||
Chapter 19: More About Lifetimes
|
||||
Chapter 20: Advanced Type System Features
|
||||
Appendix A: Keywords
|
||||
Appendix B: Operators and Symbols
|
||||
Appendix C: Derivable Traits
|
||||
Appendix D: Macros
|
||||
Index
|
||||
|
||||
```
|
||||
|
||||
[The Rust Programming Language][7] takes you from basic installation and language syntax to complex topics, such as error handling, crates (synonymous with a ‘library’ or ‘package’ in other languages), modules (allowing you to partition your code within the crate itself), lifetimes, and more.
|
||||
|
||||
Probably the most important thing to say is that the book can move you from basic programming skills to building and compiling complex, secure and very useful programs.
|
||||
|
||||
### Wrap-up
|
||||
|
||||
If you're ready to get into some serious programming with a language that's well worth the time and effort to study and is becoming increasingly popular, Rust is a good bet!
|
||||
|
||||
Join the Network World communities on [Facebook][9] and [LinkedIn][10] to comment on topics that are top of mind.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://www.networkworld.com/article/3308162/linux/why-you-should-try-rust.html
|
||||
|
||||
作者:[Sandra Henry-Stocker][a]
|
||||
选题:[lujun9972](https://github.com/lujun9972)
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://www.networkworld.com/author/Sandra-Henry_Stocker/
|
||||
[1]: https://insights.stackoverflow.com/survey/2016#technology-most-loved-dreaded-and-wanted
|
||||
[2]: https://insights.stackoverflow.com/survey/2017#technology-most-loved-dreaded-and-wanted-languages
|
||||
[3]: https://insights.stackoverflow.com/survey/2018#technology-most-loved-dreaded-and-wanted-languages
|
||||
[4]: https://www.rust-lang.org/en-US/community.html
|
||||
[5]: https://doc.rust-lang.org/reference/type-system.html
|
||||
[6]: https://images.idgesg.net/images/article/2018/09/rust-programming-language_book-cover-100773679-small.jpg
|
||||
[7]: https://nostarch.com/Rust
|
||||
[8]: https://doc.rust-lang.org/book/2018-edition/index.html
|
||||
[9]: https://www.facebook.com/NetworkWorld/
|
||||
[10]: https://www.linkedin.com/company/network-world
|
118
sources/tech/20180930 A Short History of Chaosnet.md
Normal file
@ -0,0 +1,118 @@
|
||||
A Short History of Chaosnet
|
||||
======
|
||||
If you fire up `dig` and run a DNS query for `google.com`, you will get a response somewhat like the following:
|
||||
|
||||
```
|
||||
$ dig google.com
|
||||
|
||||
; <<>> DiG 9.10.6 <<>> google.com
|
||||
;; global options: +cmd
|
||||
;; Got answer:
|
||||
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 27120
|
||||
;; flags: qr rd ra; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 1
|
||||
|
||||
;; OPT PSEUDOSECTION:
|
||||
; EDNS: version: 0, flags:; udp: 512
|
||||
;; QUESTION SECTION:
|
||||
;google.com. IN A
|
||||
|
||||
;; ANSWER SECTION:
|
||||
google.com. 194 IN A 216.58.192.206
|
||||
|
||||
;; Query time: 23 msec
|
||||
;; SERVER: 8.8.8.8#53(8.8.8.8)
|
||||
;; WHEN: Fri Sep 21 16:14:48 CDT 2018
|
||||
;; MSG SIZE rcvd: 55
|
||||
```
|
||||
|
||||
The output contains both a section describing the “question” you asked (“What is the IP address of `google.com`?”) and a section describing the answer you received. In the answer section, we see that `dig` found a single record with what looks to be five fields. The record’s type is indicated by the `A` in the fourth field from the left—this is an “address” record. To the right of the `A`, in the fifth field, we can see that the IP address for `google.com` is `216.58.192.206`. The `194` value in the second field specifies how long in seconds this particular record can be cached.
|
||||
|
||||
What does the `IN` field tell us? For an embarrassingly long time, I thought `IN` functioned as a preposition, so that every DNS record was saying something like “`google.com` is in `A` and has IP address `216.58.192.206`.” It turns out that `IN` actually stands for “internet.” The `IN` part of a DNS record tells us the record’s class.
|
||||
|
||||
Why might a DNS record have a class other than “internet”? What would that even mean? How do you search for a host that isn’t on the internet? It would seem that `IN` is the only value that could possibly make sense here. Indeed, when you try to ask for the address of `google.com` while specifying that you expect a record with a class other than `IN`, the DNS server you are asking will probably complain. In the below, when we try to ask for the IP address of `google.com` using the `HS` class, the name server at `8.8.8.8` (Google Public DNS) returns a status of `SERVFAIL`:
|
||||
|
||||
```
|
||||
$ dig -c HS google.com
|
||||
|
||||
; <<>> DiG 9.10.6 <<>> -c HS google.com
|
||||
;; global options: +cmd
|
||||
;; Got answer:
|
||||
;; ->>HEADER<<- opcode: QUERY, status: SERVFAIL, id: 31517
|
||||
;; flags: qr rd ra; QUERY: 1, ANSWER: 0, AUTHORITY: 0, ADDITIONAL: 1
|
||||
|
||||
;; OPT PSEUDOSECTION:
|
||||
; EDNS: version: 0, flags:; udp: 512
|
||||
;; QUESTION SECTION:
|
||||
;google.com. HS A
|
||||
|
||||
;; Query time: 34 msec
|
||||
;; SERVER: 8.8.8.8#53(8.8.8.8)
|
||||
;; WHEN: Tue Sep 25 14:48:10 CDT 2018
|
||||
;; MSG SIZE rcvd: 39
|
||||
```
|
||||
|
||||
So classes other than `IN` aren’t widely supported. But they do exist. In addition to `IN`, DNS records can have the `HS` class (as we’ve just seen) or the `CH` class. The `HS` class is reserved for use by a system called [Hesiod][1] that stores and distributes simple textual data using the Domain Name System. It is typically used in local environments as a stand-in for [LDAP][2]. The `CH` class is reserved for something called Chaosnet.
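
Amusingly, the `CH` class does retain one niche, unofficial use today: BIND name servers answer `TXT` queries in the `CH` class for pseudo-names such as `version.bind`, which report the server's version string (many administrators override or disable this). A quick sketch of such a query, with a placeholder server name and a hypothetical version in the output:

```
$ dig +short -c CH -t TXT version.bind @ns.example.com
"9.10.6"
```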
|
||||
|
||||
Today, the world belongs to TCP/IP. Those two protocols (together with UDP) govern most of the remote communication that happens between computers. But I think it’s wonderful that you can still find, hidden in the plumbing of the internet, traces of this other, long-extinct, evocatively named system. What was Chaosnet? And why did it go the way of the dinosaurs?
|
||||
|
||||
### A Machine Room at MIT
|
||||
|
||||
Chaosnet was developed in the 1970s by researchers at the MIT Artificial Intelligence Lab. It was created as a part of a larger effort to design and build a machine that could run the Lisp programming language more efficiently than a general-purpose computer.
|
||||
|
||||
Lisp was the brainchild of MIT professor John McCarthy, who pioneered the field of artificial intelligence. He first described Lisp to the world in [a paper][3] published in 1960. By 1962, an interpreter and a compiler had been written. Lisp introduced an astounding number of features that today we consider standard for many programming languages. It was the first language to have a garbage collector. It was the first to have a REPL. And it was the first to support dynamic typing. It found favor among programmers working in artificial intelligence and—to name just one example—was used to develop the famous [SHRDLU][4] demonstration, which allowed a human to dictate simple actions involving toy blocks to a computer in natural language.
|
||||
|
||||
The problem with Lisp was that it could be slow. Simple operations could take twice as long to execute as was typical with other languages because Lisp variables were type-checked at runtime and not just during compilation. Lisp’s garbage collector was known to take up to an entire second to run on the IBM 7090 at MIT. These performance issues were especially unwelcome because the AI researchers using Lisp were trying to build applications like SHRDLU that interacted with users in real time. In the late 1970s, a group of MIT Artificial Intelligence Lab researchers decided to address these problems by building machines specifically designed to run Lisp programs. These “Lisp machines” had more memory and a compact instruction set better-suited to Lisp. Type-checking would be done by dedicated circuitry, speeding it up by orders of magnitude. And unlike most computer systems at the time, Lisp machines would not be time-shared, since ambitious Lisp programs needed all the resources a computer had available. Each user would be assigned his or her own CPU. In a memo, the Lisp Machine Group at MIT described how this would make Lisp programming significantly easier:
|
||||
|
||||
> The Lisp Machine is a personal computer. Personal computing means that the processor and main memory are not time-division multiplexed, instead each person gets his own. The personal computation system consists of a pool of processors, each with its own main memory, and its own disk for swapping. When a user logs in, he is assigned a processor, and he has exclusive use of it for the duration of the session. When he logs out, the processor is returned to the pool, for the next person to use. This way, there is no competition from other users for memory; the pages the user is frequently referring to remain in core, and so swapping overhead is considerably reduced. Thus the Lisp Machine solves a basic problem of the time-sharing Lisp system.
|
||||
|
||||
The Lisp machine would be a personal computer in a different sense than the one we think of today. As the Lisp Machine Group originally envisioned it, users would sit down in their offices not in front of their own Lisp machines but in front of terminals. The terminals would be connected to the actual Lisp machine, which would be elsewhere. Even though each user would be assigned his or her own processor, the processors would still be “kept off in a machine room,” since they would make noise and take up space and thus be “unwelcome office companions.” The processors would share access to a file system and to devices like printers via a high-speed local network “with completely distributed control.” That network was Chaosnet.
|
||||
|
||||
Chaosnet is both a hardware standard and a software protocol. The hardware standard resembles Ethernet, and in fact the Chaosnet software protocol was eventually run over Ethernet. The software protocol, which specifies both network-layer and transport-layer interactions, was, unlike TCP/IP, always meant to govern a local network. In another memo released by the MIT Artificial Intelligence Lab, David Moon, a member of the Lisp Machine Group, explained that Chaosnet “contains no special provisions for things such as low-speed links, noisy links, multiple paths, and long-distance links with significant transit time.” The focus was instead on designing a protocol that could outperform other protocols on a small network.
|
||||
|
||||
Speed was important because Chaosnet sat between each Lisp processor and the file system. Network delays would significantly slow rudimentary operations like viewing the contents of a text document. To be fast enough, Chaosnet incorporated several improvements over the Network Control Program then in use on Arpanet. According to Moon, “it was important to design out bottlenecks such as are found in Arpanet, for instance the control-link which is shared between multiple connections and the need to acknowledge each message before the next message is sent.” The Chaosnet protocol batches packet acknowledgments in much the same way that TCP does today and so reduced the number of packets that needed to be transmitted by a half to a third.
|
||||
|
||||
Chaosnet could also get away with a relatively simple routing algorithm, since most hosts on the Lisp machine network were probably connected by a single, short wire. Moon wrote that the Chaosnet routing scheme “is predicated on the assumption that the network geometry is simple, there are few multiple paths, and the length of any path is quite short. This makes more sophisticated schemes unnecessary.” The simplicity of the algorithm meant that implementing the Chaosnet protocol was easy. The implementation program was supposedly half the size of the Arpanet Network Control Program.
|
||||
|
||||
The Chaosnet protocol has other idiosyncrasies. A Chaosnet address is only 16 bits, half the size of an IPv4 address, which makes sense given that Chaosnet was only ever meant to work on a local network. Chaosnet also doesn’t use port numbers; instead, a process that wants to connect to another process on a different machine first makes a connection request that specifies a target “contact name.” That contact name is often just the name of a particular service. For example, one host may try to connect to another host using the contact name `TELNET`. In practice, I assume this works more or less just like TCP, since something well-known like port 80 might as well have the contact name `HTTP`.
|
||||
|
||||
The Chaosnet DNS class was added to the Domain Name System by [RFC 973][5] in 1986. It replaced another class that had been available early on, the `CSNET` class, which was there to support a network called the Computer Science Network. I haven’t been able to figure out why Chaosnet was picked out for special treatment by the Domain Name System. There were other protocol families that could have been added but never were. For example, Paul Mockapetris, one of the principal architects of the Domain Name System, has written that he originally imagined that DNS would include a class for Xerox’s network protocol. That never happened. Chaosnet may have been added just because so much of the early work on Arpanet and the internet happened at Bolt, Beranek and Newman in Cambridge, Massachusetts, whose employees were often connected in some way with MIT. Chaosnet was probably well-known among the then relatively small group of people working on computer networks.
|
||||
|
||||
Usage of Chaosnet presumably waned as Lisp machines became less and less popular. Though Lisp machines were for a short time commercially viable products—sold by companies such as Symbolics and Lisp Machines Inc. during the 1980s—they were soon displaced by cheaper microcomputers that could run Lisp just as quickly without special-purpose circuitry. TCP/IP also fixed many of the issues with the original Arpanet protocols that Chaosnet had been created to circumvent.
|
||||
|
||||
### Ghost in the Shell
|
||||
|
||||
There unfortunately isn’t a huge amount of information still around about Chaosnet. RFC 675, which was essentially the first draft of TCP/IP, was published in 1974. Chaosnet was first developed in 1975. TCP/IP eventually conquered the world, but Chaosnet seems to have been a technological dead end. Though it’s possible that Chaosnet influenced subsequent work on TCP/IP, I haven’t found any specific examples of that happening.
|
||||
|
||||
The only really visible remnant of Chaosnet is the `CH` DNS class. There’s something about that fact that I find strangely fascinating. The `CH` class is a vestigial ghost of an alternative network protocol in a world that has long since settled on TCP/IP. It’s exciting, at least to me, to know that the last traces of Chaosnet still lurk out there in the infrastructure of our networked society. The `CH` DNS class is a fun artifact of digital archaeology. But it’s also a living reminder that the internet was not born fully formed, that TCP/IP is not the only way to connect computers to each other, and that “the internet” is far from the coolest name we could have had for our global communication system.
|
||||
|
||||
If you enjoyed this post, more like it come out every two weeks! Follow [@TwoBitHistory][6] on Twitter or subscribe to the [RSS feed][7] to make sure you know when a new post is out.
|
||||
|
||||
Previously on TwoBitHistory…
|
||||
|
||||
> Where did RSS come from? Why are there so many competing formats? Why don't people seem to use it that much anymore?
|
||||
>
|
||||
> Answers to these questions and many more in this week's post about RSS:<https://t.co/BsCN5GQidR>
|
||||
>
|
||||
> — TwoBitHistory (@TwoBitHistory) [September 17, 2018][8]
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://twobithistory.org/2018/09/30/chaosnet.html
|
||||
|
||||
作者:[Two-Bit History][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://twobithistory.org
|
||||
[b]: https://github.com/lujun9972
|
||||
[1]: https://en.wikipedia.org/wiki/Hesiod_(name_service)
|
||||
[2]: https://en.wikipedia.org/wiki/Lightweight_Directory_Access_Protocol
|
||||
[3]: http://www-formal.stanford.edu/jmc/recursive.pdf
|
||||
[4]: https://en.wikipedia.org/wiki/SHRDLU
|
||||
[5]: https://tools.ietf.org/html/rfc973
|
||||
[6]: https://twitter.com/TwoBitHistory
|
||||
[7]: https://twobithistory.org/feed.xml
|
||||
[8]: https://twitter.com/TwoBitHistory/status/1041485204802756608?ref_src=twsrc%5Etfw
|
@ -0,0 +1,75 @@
|
||||
qhwdw is translating
|
||||
|
||||
|
||||
Greg Kroah-Hartman Explains How the Kernel Community Is Securing Linux
|
||||
============================================================
|
||||
|
||||
|
||||
![](https://www.linux.com/sites/lcom/files/styles/rendered_file/public/kernel-security_0.jpg?itok=hOaTQwWV)
|
||||
Kernel maintainer Greg Kroah-Hartman talks about how the kernel community is hardening Linux against vulnerabilities. [Creative Commons Zero][2]
|
||||
|
||||
As Linux adoption expands, it’s increasingly important for the kernel community to improve the security of the world’s most widely used technology. Security is vital not only for enterprise customers but also for consumers, as 80 percent of mobile devices are powered by Linux. In this article, Linux kernel maintainer Greg Kroah-Hartman provides a glimpse into how the kernel community deals with vulnerabilities.
|
||||
|
||||
### There will be bugs
|
||||
|
||||
|
||||
![Greg Kroah-Hartman](https://www.linux.com/sites/lcom/files/styles/floated_images/public/greg-k-h.png?itok=p4fREYuj "Greg Kroah-Hartman")
|
||||
|
||||
Greg Kroah-Hartman | [The Linux Foundation][1]
|
||||
|
||||
As Linus Torvalds once said, most security holes are bugs, and bugs are part of the software development process. As long as the software is being written, there will be bugs.
|
||||
|
||||
“A bug is a bug. We don’t know if a bug is a security bug or not. There is a famous bug that I fixed and then three years later Red Hat realized it was a security hole,” said Kroah-Hartman.
|
||||
|
||||
There is not much the kernel community can do to eliminate bugs, but it can do more testing to find them. The kernel community now has its own security team that’s made up of kernel developers who know the core of the kernel.
|
||||
|
||||
“When we get a report, we involve the domain owner to fix the issue. In some cases it’s the same people, so we made them part of the security team to speed things up,” Kroah-Hartman said. But he also stressed that all parts of the kernel have to be aware of these security issues because the kernel is a trusted environment and they have to protect it.
|
||||
|
||||
“Once we fix things, we can put them in our stack analysis rules so that they are never reintroduced,” he said.
|
||||
|
||||
Besides fixing bugs, the community also continues to add hardening to the kernel. “We have realized that we need to have mitigations. We need hardening,” said Kroah-Hartman.
|
||||
|
||||
Huge efforts have been made by Kees Cook and others to take the hardening features that have traditionally lived outside of the kernel and merge or adapt them for the kernel. With every kernel release, Cook provides a summary of all the new hardening features. But hardening the kernel is not enough; vendors have to enable the new features and take advantage of them. That’s not happening.
|
||||
|
||||
Kroah-Hartman [releases a stable kernel every week][5], and companies pick one to support for a longer period so that device manufacturers can take advantage of it. However, Kroah-Hartman has observed that, aside from the Google Pixel, most Android phones don’t include the additional hardening features, meaning all those phones are vulnerable. “People need to enable this stuff,” he said.
|
||||
|
||||
“I went out and bought all the top of the line phones based on kernel 4.4 to see which one actually updated. I found only one company that updated their kernel,” he said. “I'm working through the whole supply chain trying to solve that problem because it's a tough problem. There are many different groups involved -- the SoC manufacturers, the carriers, and so on. The point is that they have to push the kernel that we create out to people.”
|
||||
|
||||
The good news is that unlike with consumer electronics, the big vendors like Red Hat and SUSE keep the kernel updated even in the enterprise environment. Modern systems with containers, pods, and virtualization make this even easier. It’s effortless to update and reboot with no downtime. It is, in fact, easier to keep things secure than it used to be.
|
||||
|
||||
### Meltdown and Spectre
|
||||
|
||||
No security discussion is complete without the mention of Meltdown and Spectre. The kernel community is still working on fixes as new flaws are discovered. However, Intel has changed its approach in light of these events.
|
||||
|
||||
“They are reworking on how they approach security bugs and how they work with the community because they know they did it wrong,” Kroah-Hartman said. “The kernel has fixes for almost all of the big Spectre issues, but there is going to be a long tail of minor things.”
|
||||
|
||||
The good news is that these Intel vulnerabilities proved that things are getting better for the kernel community. “We are doing more testing. With the latest round of security patches, we worked on our own for four months before releasing them to the world because we were embargoed. But once they hit the real world, it made us realize how much we rely on the infrastructure we have built over the years to do this kind of testing, which ensures that we don’t have bugs before they hit other people,” he said. “So things are certainly getting better.”
|
||||
|
||||
The increasing focus on security is also creating more job opportunities for talented people. Since security is an area that attracts attention, it is a good place to get started for those who want to build a career in kernel space.
|
||||
|
||||
“If there are people who want a job to do this type of work, we have plenty of companies who would love to hire them. I know some people who have started off fixing bugs and then got hired,” Kroah-Hartman said.
|
||||
|
||||
You can hear more in the video below:
|
||||
|
||||
[视频](https://youtu.be/jkGVabyMh1I)
|
||||
|
||||
_Check out the schedule of talks for Open Source Summit Europe and sign up to receive updates:_
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://www.linux.com/blog/2018/10/greg-kroah-hartman-explains-how-kernel-community-securing-linux-0
|
||||
|
||||
作者:[SWAPNIL BHARTIYA][a]
|
||||
选题:[oska874][b]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://www.linux.com/users/arnieswap
|
||||
[b]:https://github.com/oska874
|
||||
[1]:https://www.linux.com/licenses/category/linux-foundation
|
||||
[2]:https://www.linux.com/licenses/category/creative-commons-zero
|
||||
[3]:https://www.linux.com/files/images/greg-k-hpng
|
||||
[4]:https://www.linux.com/files/images/kernel-securityjpg-0
|
||||
[5]:https://www.kernel.org/category/releases.html
|
@ -0,0 +1,126 @@
|
||||
qhwdw is translating
|
||||
|
||||
LinuxBoot for Servers: Enter Open Source, Goodbye Proprietary UEFI
|
||||
============================================================
|
||||
|
||||
[LinuxBoot][13] is an Open Source [alternative][14] to Proprietary [UEFI][15] firmware. It was released last year and is now being increasingly preferred by leading hardware manufacturers as default firmware. Last year, LinuxBoot was warmly [welcomed][16] into the Open Source family by The Linux Foundation.
|
||||
|
||||
This project was an initiative by Ron Minnich, author of LinuxBIOS and lead of [coreboot][17] at Google, in January 2017.
|
||||
|
||||
Google, Facebook, [Horizon Computing Solutions][18], and [Two Sigma][19] collaborated to develop the [LinuxBoot project][20] (formerly called [NERF][21]) for server machines based on Linux.
|
||||
|
||||
Its openness allows Server users to easily customize their own boot scripts, fix issues, build their own [runtimes][22] and [reflash their firmware][23] with their own keys. They do not need to wait for vendor updates.
|
||||
|
||||
Following is a video of [Ubuntu Xenial][24] booting for the first time with NERF BIOS:
|
||||
|
||||
[视频](https://youtu.be/HBkZAN3xkJg)
|
||||
|
||||
Let’s talk about some other advantages by comparing it to UEFI in terms of Server hardware.
|
||||
|
||||
### Advantages of LinuxBoot over UEFI
|
||||
|
||||
![LinuxBoot vs UEFI](https://4bds6hergc-flywheel.netdna-ssl.com/wp-content/uploads/2018/10/linuxboot-uefi.png)
|
||||
|
||||
Here are some of the major advantages of LinuxBoot over UEFI:
|
||||
|
||||
### Significantly faster startup
|
||||
|
||||
It can boot up Server boards in less than twenty seconds, versus multiple minutes on UEFI.
|
||||
|
||||
### Significantly more flexible
|
||||
|
||||
LinuxBoot can make use of any devices, filesystems and protocols that Linux supports.
|
||||
|
||||
### Potentially more secure
|
||||
|
||||
Linux device drivers and filesystems receive significantly more scrutiny than their UEFI counterparts.
|
||||
|
||||
We can argue that UEFI is partly open with [EDK II][25] and LinuxBoot is partly closed. But it has been [pointed out][26] that even the EDK II code does not go through the same level of inspection and correctness checking as the [Linux Kernel][27], and there is a huge amount of other Closed Source components within UEFI development.
|
||||
|
||||
On the other hand, LinuxBoot ships a significantly smaller set of binaries, taking up only a few hundred KB compared to the 32 MB of UEFI binaries.
|
||||
|
||||
To be precise, LinuxBoot fits a whole lot better into the [Trusted Computing Base][28], unlike UEFI.
|
||||
|
||||
|
||||
|
||||
LinuxBoot has a [kexec][30]-based bootloader, which does not support booting Windows or other non-Linux kernels, but that is insignificant since most clouds are Linux-based Servers.
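For a sense of how a kexec-based bootloader works, here is a rough sketch of the underlying mechanism on an ordinary Linux box; the kernel and initramfs paths and the kernel command line are illustrative, not LinuxBoot's actual configuration:

```
# Stage a new kernel and initramfs into memory...
sudo kexec -l /boot/vmlinuz-4.18.0 \
     --initrd=/boot/initramfs-4.18.0.img \
     --command-line="root=/dev/sda1 ro"
# ...then jump straight into it, skipping firmware re-initialization.
sudo kexec -e
```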
|
||||
|
||||
### LinuxBoot adoption
|
||||
|
||||
In 2011, the [Open Compute Project][31] was started by [Facebook][32], which [open-sourced][33] the designs of some of its Servers, built to make its data centers more efficient. LinuxBoot has been tested on several pieces of Open Compute hardware, listed below:
|
||||
|
||||
* Winterfell
|
||||
|
||||
* Leopard
|
||||
|
||||
* Tioga Pass
|
||||
|
||||
More [OCP][34] hardware is described briefly [here][35]. The OCP Foundation runs a dedicated project on firmware through [Open System Firmware][36].
|
||||
|
||||
Some other devices that support LinuxBoot are:
|
||||
|
||||
* [QEMU][9] emulated [Q35][10] systems
|
||||
|
||||
* [Intel S2600wf][11]
|
||||
|
||||
* [Dell R630][12]
|
||||
|
||||
At the end of last month, [Equus Compute Solutions][37] [announced][38] the release of its [WHITEBOX OPEN™][39] M2660 and M2760 Servers, as part of its custom, cost-optimized Open-Hardware Server and storage platforms. Both of them support LinuxBoot, which lets users customize the Server BIOS for flexibility and improved security, and creates a blazingly fast booting experience.
|
||||
|
||||
### What do you think of LinuxBoot?
|
||||
|
||||
LinuxBoot is quite well documented [on GitHub][40]. Do you like the features that set it apart from UEFI? Would you prefer using LinuxBoot rather than UEFI for starting up Servers, owing to the former’s open-ended development and future? Let us know in the comments below.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://itsfoss.com/linuxboot-uefi/
|
||||
|
||||
作者:[ Avimanyu Bandyopadhyay][a]
|
||||
选题:[oska874][b]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://itsfoss.com/author/avimanyu/
|
||||
[b]:https://github.com/oska874
|
||||
[1]:https://itsfoss.com/linuxboot-uefi/#
|
||||
[2]:https://itsfoss.com/linuxboot-uefi/#
|
||||
[3]:https://itsfoss.com/linuxboot-uefi/#
|
||||
[4]:https://itsfoss.com/linuxboot-uefi/#
|
||||
[5]:https://itsfoss.com/linuxboot-uefi/#
|
||||
[6]:https://itsfoss.com/linuxboot-uefi/#
|
||||
[7]:https://itsfoss.com/author/avimanyu/
|
||||
[8]:https://itsfoss.com/linuxboot-uefi/#comments
|
||||
[9]:https://en.wikipedia.org/wiki/QEMU
|
||||
[10]:https://wiki.qemu.org/Features/Q35
|
||||
[11]:https://trmm.net/S2600
|
||||
[12]:https://trmm.net/NERF#Installing_on_a_Dell_R630
|
||||
[13]:https://www.linuxboot.org/
|
||||
[14]:https://www.phoronix.com/scan.php?page=news_item&px=LinuxBoot-OSFC-2018-State
|
||||
[15]:https://itsfoss.com/check-uefi-or-bios/
|
||||
[16]:https://www.linuxfoundation.org/blog/2018/01/system-startup-gets-a-boost-with-new-linuxboot-project/
|
||||
[17]:https://en.wikipedia.org/wiki/Coreboot
|
||||
[18]:http://www.horizon-computing.com/
|
||||
[19]:https://www.twosigma.com/
|
||||
[20]:https://trmm.net/LinuxBoot_34c3
|
||||
[21]:https://trmm.net/NERF
|
||||
[22]:https://trmm.net/LinuxBoot_34c3#Runtimes
|
||||
[23]:http://www.tech-faq.com/flashing-firmware.html
|
||||
[24]:https://itsfoss.com/features-ubuntu-1604/
|
||||
[25]:https://www.tianocore.org/
|
||||
[26]:https://media.ccc.de/v/34c3-9056-bringing_linux_back_to_server_boot_roms_with_nerf_and_heads
|
||||
[27]:https://medium.com/@bhumikagoyal/linux-kernel-development-cycle-52b4c55be06e
|
||||
[28]:https://en.wikipedia.org/wiki/Trusted_computing_base
|
||||
[29]:https://itsfoss.com/adobe-alternatives-linux/
|
||||
[30]:https://en.wikipedia.org/wiki/Kexec
|
||||
[31]:https://en.wikipedia.org/wiki/Open_Compute_Project
|
||||
[32]:https://github.com/facebook
|
||||
[33]:https://github.com/opencomputeproject
|
||||
[34]:https://www.networkworld.com/article/3266293/lan-wan/what-is-the-open-compute-project.html
|
||||
[35]:http://hyperscaleit.com/ocp-server-hardware/
|
||||
[36]:https://www.opencompute.org/projects/open-system-firmware
|
||||
[37]:https://www.equuscs.com/
|
||||
[38]:http://www.dcvelocity.com/products/Software_-_Systems/20180924-equus-compute-solutions-introduces-whitebox-open-m2660-and-m2760-servers/
|
||||
[39]:https://www.equuscs.com/servers/whitebox-open/
|
||||
[40]:https://github.com/linuxboot/linuxboot
|
@ -1,60 +0,0 @@
|
||||
translating---geekpi
|
||||
|
||||
Happy birthday, KDE: 11 applications you never knew existed
|
||||
======
|
||||
Which fun or quirky app do you need today?
|
||||
![](https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/BIZ_DebucketizeOrgChart_A.png?itok=RB3WBeQQ)
|
||||
|
||||
The Linux desktop environment KDE celebrates its 22nd anniversary on October 14 this year. There are a gazillion* applications created by the KDE community of users, many of which provide fun and quirky services. We perused the list and picked out 11 applications you might like to know exist.
|
||||
|
||||
*Not really, but [there are a lot][1].
|
||||
|
||||
### 11 KDE applications you never knew existed
|
||||
|
||||
1\. [KTeaTime][2] is a timer for steeping tea. Set it by choosing the type of tea you are drinking—green, black, herbal, etc.—and the timer will ding when it's time to remove the tea bag and drink.
|
||||
|
||||
2\. [KTux][3] is just a screensaver... or is it? Tux is flying in outer space in his green spaceship.
|
||||
|
||||
3\. [Blinken][4] is a memory game based on Simon Says, an electronic game released in 1978. Players are challenged to remember sequences of increasing length.
|
||||
|
||||
4\. [Tellico][5] is a collection manager for organizing your favorite hobby. Maybe you still collect baseball cards. Maybe you're part of a wine club. Maybe you're a serious bookworm. Maybe all three!
|
||||
|
||||
5\. [KRecipes][6] is **not** a simple recipe manager. It's got a lot going on! Shopping lists, nutrient analysis, advanced search, recipe ratings, import/export various formats, and more.
|
||||
|
||||
6\. [KHangMan][7] is based on the classic game Hangman where you guess the word letter by letter. This game is available in several languages, and it can be used to improve your learning of another language. It has four categories, one of which is "animals," which is great for kids.
|
||||
|
||||
7\. [KLettres][8] is another app that may help you learn a new language. It teaches the alphabet and challenges the user to read and pronounce syllables.
|
||||
|
||||
8\. [KDiamond][9] is similar to Bejeweled or other single player puzzle games where the goal of the game is to build lines of a certain number of the same type of jewel or object. In this case, diamonds.
|
||||
|
||||
9\. [KolourPaint][10] is a very simple editing tool for your images, or an app for creating simple vectors.
|
||||
|
||||
10\. [Kiriki][11] is a dice game for 2-6 players similar to Yahtzee.
|
||||
|
||||
11\. [RSIBreak][12] doesn't start with a K. What!? It starts with an "RSI" for "Repetitive Strain Injury," which can occur from working for long hours, day in and day out, with a mouse and keyboard. This app reminds you to take breaks and can be personalized to meet your needs.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://opensource.com/article/18/10/kde-applications
|
||||
|
||||
作者:[Opensource.com][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://opensource.com
|
||||
[b]: https://github.com/lujun9972
|
||||
[1]: https://www.kde.org/applications/
|
||||
[2]: https://www.kde.org/applications/games/kteatime/
|
||||
[3]: https://userbase.kde.org/KTux
|
||||
[4]: https://www.kde.org/applications/education/blinken
|
||||
[5]: http://tellico-project.org/
|
||||
[6]: https://www.kde.org/applications/utilities/krecipes/
|
||||
[7]: https://edu.kde.org/khangman/
|
||||
[8]: https://edu.kde.org/klettres/
|
||||
[9]: https://games.kde.org/game.php?game=kdiamond
|
||||
[10]: https://www.kde.org/applications/graphics/kolourpaint/
|
||||
[11]: https://www.kde.org/applications/games/kiriki/
|
||||
[12]: https://userbase.kde.org/RSIBreak
|
@ -1,3 +1,5 @@
|
||||
translating---geekpi
|
||||
|
||||
How To Lock Virtual Console Sessions On Linux
|
||||
======
|
||||
|
||||
|
@ -1,3 +1,5 @@
|
||||
Translating by way-ww
|
||||
|
||||
How to Enable or Disable Services on Boot in Linux Using chkconfig and systemctl Command
|
||||
======
|
||||
This is an important topic for Linux admins (and a wonderful one at that), so everyone should be aware of it and practice using it efficiently.
|
||||
|
@ -1,84 +0,0 @@
|
||||
translating---geekpi
|
||||
|
||||
Running Linux containers as a non-root with Podman
|
||||
======
|
||||
|
||||
![](https://fedoramagazine.org/wp-content/uploads/2018/10/podman-816x345.jpg)
|
||||
|
||||
Linux containers are processes with certain isolation features provided by a Linux kernel — including filesystem, process, and network isolation. Containers help with portability — applications can be distributed in container images along with their dependencies, and run on virtually any Linux system with a container runtime.
|
||||
|
||||
Although container technologies have existed for a very long time, Linux containers were widely popularized by Docker. The word “Docker” can refer to several different things, including the container technology and tooling, the community around that, or the Docker Inc. company. However, in this article, I’ll be using it to refer to the technology and the tooling that manages Linux containers.
|
||||
|
||||
### What is Docker
|
||||
|
||||
[Docker][1] is a daemon that runs on your system as root, and manages running containers by leveraging features of the Linux kernel. Apart from running containers, it also makes it easy to manage container images — interacting with container registries, storing images, managing container versions, etc. It basically supports all the operations you need to run individual containers.
|
||||
|
||||
But even though Docker is a very handy tool for managing Linux containers, it has two drawbacks: it is a daemon that needs to run on your system, and it needs to run with root privileges, which might have certain security implications. Both of those, however, are being addressed by Podman.
|
||||
|
||||
### Introducing Podman
|
||||
|
||||
[Podman][2] is a container runtime that provides features very similar to Docker's. And as already hinted, it doesn’t require any daemon to run on your system, and it can also run without root privileges. So let’s have a look at some examples of using Podman to run Linux containers.
|
||||
|
||||
#### Running containers with Podman
|
||||
|
||||
One of the simplest examples could be running a Fedora container, printing “Hello world!” in the command line:
|
||||
|
||||
```
|
||||
$ podman run --rm -it fedora:28 echo "Hello world!"
|
||||
```
|
||||
|
||||
Building an image using the common Dockerfile works the same way as it does with Docker:
|
||||
|
||||
```
|
||||
$ cat Dockerfile
|
||||
FROM fedora:28
|
||||
RUN dnf -y install cowsay
|
||||
|
||||
$ podman build . -t hello-world
|
||||
... output omitted ...
|
||||
|
||||
$ podman run --rm -it hello-world cowsay "Hello!"
|
||||
```
|
||||
|
||||
To build containers, Podman calls another tool called Buildah in the background. You can read a recent [post about building container images with Buildah][3] — not just using the typical Dockerfile.
|
||||
|
||||
Apart from building and running containers, Podman can also interact with container registries. To log in to a container registry, for example the widely used Docker Hub, run:
|
||||
|
||||
```
|
||||
$ podman login docker.io
|
||||
```
|
||||
|
||||
To push the image I just built, I just need to tag it so it refers to the specific container registry and my personal namespace, and then simply push it.
|
||||
|
||||
```
|
||||
$ podman tag hello-world docker.io/asamalik/hello-world
|
||||
$ podman push docker.io/asamalik/hello-world
|
||||
```
|
||||
|
||||
By the way, have you noticed how I run everything as a non-root user? Also, there is no big fat daemon running on my system!
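If you want to verify the rootless setup yourself, here is a quick check, reusing the `fedora:28` image from above. The unprivileged host user shows up as root inside the container's user namespace:

```
$ id -u                             # a regular, unprivileged user on the host
1000
$ podman run --rm fedora:28 id -u   # mapped to root inside the container
0
```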
|
||||
|
||||
#### Installing Podman
|
||||
|
||||
Podman is available by default on [Silverblue][4] — a new generation of Linux Workstation for container-based workflows. To install it on any Fedora release, simply run:
|
||||
|
||||
```
|
||||
$ sudo dnf install podman
|
||||
```
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://fedoramagazine.org/running-containers-with-podman/
|
||||
|
||||
作者:[Adam Šamalík][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://fedoramagazine.org/author/asamalik/
|
||||
[b]: https://github.com/lujun9972
|
||||
[1]: https://docs.docker.com/
|
||||
[2]: https://podman.io/
|
||||
[3]: https://fedoramagazine.org/daemon-less-container-management-buildah/
|
||||
[4]: https://silverblue.fedoraproject.org/
|
@ -1,80 +0,0 @@
|
||||
translating---geekpi
|
||||
|
||||
Turn Your Old PC into a Retrogaming Console with Lakka Linux
|
||||
======
|
||||
**If you have an old computer gathering dust, you can turn it into a PlayStation-like retrogaming console with the Lakka Linux distribution.**
|
||||
|
||||
You probably already know that there are [Linux distributions specially crafted for reviving older computers][1]. But did you know about a Linux distribution that is created for the sole purpose of turning your old computer into a retro-gaming console?
|
||||
|
||||
![Lakka is a Linux distribution specially for retrogaming][2]
|
||||
|
||||
Meet [Lakka][3], a lightweight Linux distribution that will transform your old or low-end computer (like a Raspberry Pi) into a complete retrogaming console.
|
||||
|
||||
When I say retrogaming console, I am serious about the console part. If you have ever used a PlayStation or Xbox, you know what a typical console interface looks like.
|
||||
|
||||
Lakka provides a similar interface and a similar experience. I’ll talk about the ‘experience’ later. Have a look at the interface first.
|
||||
|
||||
<https://itsfoss.com/wp-content/uploads/2018/10/lakka-linux-gaming-console.webm>
|
||||
Lakka Retrogaming interface
|
||||
|
||||
### Lakka: Linux distributions for retrogaming
|
||||
|
||||
Lakka is the official Linux distribution of [RetroArch][4] and the [Libretro][5] ecosystem.
|
||||
|
||||
RetroArch is a frontend for retro game emulators and game engines. The interface you saw in the video above is nothing but RetroArch. If you just want to play retro games, you can simply install RetroArch in your current Linux distribution.
|
||||
|
||||
Lakka provides the Libretro cores along with RetroArch. So you get a preconfigured operating system that you can install, or boot from a live USB, and start playing games.
|
||||
|
||||
Lakka is lightweight and you can install it on most old systems or single board computers like Raspberry Pi.
|
||||
|
||||
It supports a huge number of emulators. You just need to download the ROMs on your system and Lakka will play the games from these ROMs. You can find the list of supported emulators and hardware [here][6].
|
||||
|
||||
It enables you to run classic games on a wide range of computers and consoles through its slick graphical interface. Settings are also unified so configuration is done once and for all.
|
||||
|
||||
Let me summarize the main features of Lakka:
|
||||
|
||||
* PlayStation like interface with RetroArch
|
||||
* Support for a number of retro game emulators
|
||||
* Supports up to 5 players gaming on the same system
|
||||
* Savestates allow you to save your progress at any moment in the game
|
||||
* You can improve the look of your old games with various graphical filters
|
||||
* You can join multiplayer games over the network
|
||||
* Out of the box support for a number of joypads like XBOX360, Dualshock 3, and 8bitdo
|
||||
* Unlock trophies and badges by connecting to [RetroAchievements][7]
|
||||
|
||||
|
||||
|
||||
### Getting Lakka
|
||||
|
||||
Before you go on to install Lakka, you should know that it is still under development, so expect a few bugs here and there.
|
||||
|
||||
Keep in mind that Lakka only supports MBR partitioning. So if it doesn’t read your hard drive while installing, this could be the reason (see the check below).
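You can check which partitioning scheme a disk uses before you start. A quick sketch, assuming the target disk is /dev/sda:

```
# "dos" means an MBR partition table, which Lakka supports;
# "gpt" means you will need to repartition the disk first.
sudo fdisk -l /dev/sda | grep 'Disklabel type'
```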
|
||||
|
||||
The [FAQ section of the project][8] answers the common doubts, so please refer to it for any further questions.
|
||||
|
||||
[Get Lakka][9]
|
||||
|
||||
Do you like playing retro games? What emulators do you use? Have you ever used Lakka before? Share your views with us in the comments section.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://itsfoss.com/lakka-retrogaming-linux/
|
||||
|
||||
作者:[Abhishek Prakash][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://itsfoss.com/author/abhishek/
|
||||
[b]: https://github.com/lujun9972
|
||||
[1]: https://itsfoss.com/lightweight-linux-beginners/
|
||||
[2]: https://4bds6hergc-flywheel.netdna-ssl.com/wp-content/uploads/2018/10/lakka-retrogaming-linux.jpeg
|
||||
[3]: http://www.lakka.tv/
|
||||
[4]: https://www.retroarch.com/
|
||||
[5]: https://www.libretro.com/
|
||||
[6]: http://www.lakka.tv/powerful/
|
||||
[7]: https://retroachievements.org/
|
||||
[8]: http://www.lakka.tv/doc/FAQ/
|
||||
[9]: http://www.lakka.tv/disclaimer/
|
@ -0,0 +1,176 @@
|
||||
How To Determine Which System Manager Is Running On Linux System
|
||||
======
|
||||
We have all heard this term many times, but only a few of us know what it actually is. We will show you how to identify which system manager is running on your system.
|
||||
|
||||
I will try my best to explain it. Most of us know about the System V and systemd system managers. System V (SysV) is the old, traditional init system and system manager for older systems.
|
||||
|
||||
systemd is a new init system and system manager that has been adopted by most of the major distributions.
|
||||
|
||||
There are three major init systems available in Linux that are very well known and still in use. Most Linux distributions use one of the init systems below.
|
||||
|
||||
### What is init System Manager?
|
||||
|
||||
In Linux/Unix-based operating systems, init (short for initialization) is the first process started by the kernel during system boot.
|
||||
|
||||
It holds process ID (PID) 1 and keeps running in the background until the system is shut down.
|
||||
|
||||
init looks at the `/etc/inittab` file to decide the Linux run level, then starts all other processes and applications in the background according to that run level.
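For illustration, this is roughly what the relevant entry of a SysV-style `/etc/inittab` looks like (an illustrative excerpt, not a complete file):

```
# /etc/inittab excerpt: the initdefault entry tells init
# which runlevel to enter at boot (here, runlevel 3).
id:3:initdefault:
```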
|
||||
|
||||
The BIOS, MBR, GRUB, and kernel stages all run before the init process as part of the Linux boot process.
|
||||
|
||||
Below are the available run levels for Linux (there are seven runlevels, from zero to six); you can check the current one from the command line, as shown after the list.
|
||||
|
||||
* **`0:`** halt
|
||||
* **`1:`** Single user mode
|
||||
* **`2:`** Multiuser, without NFS
|
||||
* **`3:`** Full multiuser mode
|
||||
* **`4:`** Unused
|
||||
* **`5:`** X11 (GUI – Graphical User Interface)
|
||||
* **`6:`** reboot
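On a SysV-style system, two standard commands report the runlevel you are currently in:

```
$ runlevel      # prints the previous and current runlevel, e.g. "N 3"
$ who -r        # alternative that also shows when the runlevel was entered
```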
|
||||
|
||||
|
||||
|
||||
The three init systems below are widely used in Linux.
|
||||
|
||||
* **`System V (Sys V):`** System V (Sys V) is one of the first and most traditional init systems for Unix-like operating systems.
|
||||
* **`Upstart:`** Upstart is an event-based replacement for the /sbin/init daemon.
|
||||
* **`systemd:`** systemd is a new init system and system manager that has been adopted by all the major Linux distributions in place of the traditional SysV init.
|
||||
|
||||
|
||||
|
||||
### What is System V (Sys V)?
|
||||
|
||||
System V (Sys V) is one of the first and most traditional init systems for Unix-like operating systems. init is the first process started by the kernel during system boot, and it is the parent process of everything.
|
||||
|
||||
Most Linux distributions initially used the traditional init system called System V (Sys V). Over the years, several replacement init systems were released to address design limitations in it, such as launchd, the Service Management Facility, systemd, and Upstart.
|
||||
|
||||
But systemd has been adopted by several major Linux distributions in place of the traditional SysV init.
|
||||
|
||||
### How to identify the System V (Sys V) system manager on Linux
|
||||
|
||||
Run the following commands to check whether your system is running the System V (Sys V) system manager.
|
||||
|
||||
### Method-1: Using ps command
|
||||
|
||||
ps – report a snapshot of the current processes. ps displays information about a selection of the active processes.
|
||||
This output doesn’t distinguish between System V (SysV) and Upstart, so I would suggest using one of the other methods to confirm.
|
||||
|
||||
```
|
||||
# ps -p1 | grep "init\|upstart\|systemd"
|
||||
1 ? 00:00:00 init
|
||||
```
|
||||
|
||||
### Method-2: Using rpm command
|
||||
|
||||
RPM (`Red Hat Package Manager`) is a powerful command-line [Package Management][1] utility for RPM-based distributions such as RHEL, CentOS, Fedora, openSUSE, and Mageia. It allows you to install, upgrade, remove, query, and verify software on your Linux system/server. RPM files come with the `.rpm` extension.
|
||||
RPM packages are built with the required libraries and dependencies and will not conflict with other packages installed on your system.
|
||||
|
||||
```
|
||||
# rpm -qf /sbin/init
|
||||
SysVinit-2.86-17.el5
|
||||
```
|
||||
|
||||
### What is Upstart?
|
||||
|
||||
Upstart is an event-based replacement for the /sbin/init daemon which handles starting of tasks and services during boot, stopping them during shutdown and supervising them while the system is running.
|
||||
|
||||
It was originally developed for the Ubuntu distribution, but is intended to be suitable for deployment in all Linux distributions as a replacement for the venerable System-V init.
|
||||
|
||||
It was used in Ubuntu from 9.10 to 14.10 and in RHEL 6-based systems, after which it was replaced with systemd.
|
||||
|
||||
### How to identify the Upstart system manager on Linux
|
||||
|
||||
Run the following commands to check whether your system is running the Upstart system manager.
|
||||
|
||||
### Method-1: Using ps command
|
||||
|
||||
ps – report a snapshot of the current processes. ps displays information about a selection of the active processes.
|
||||
This output doesn’t distinguish between System V (SysV) and Upstart, so I would suggest using one of the other methods to confirm.
|
||||
|
||||
```
|
||||
# ps -p1 | grep "init\|upstart\|systemd"
|
||||
1 ? 00:00:00 init
|
||||
```
|
||||
|
||||
### Method-2: Using rpm command
|
||||
|
||||
RPM (`Red Hat Package Manager`) is a powerful command-line Package Management utility for RPM-based distributions such as RHEL, CentOS, Fedora, openSUSE, and Mageia. The [RPM Command][2] allows you to install, upgrade, remove, query, and verify software on your Linux system/server. RPM files come with the `.rpm` extension.
|
||||
RPM packages are built with the required libraries and dependencies and will not conflict with other packages installed on your system.
|
||||
|
||||
```
|
||||
# rpm -qf /sbin/init
|
||||
upstart-0.6.5-16.el6.x86_64
|
||||
```
|
||||
|
||||
### Method-3: Using /sbin/init file
|
||||
|
||||
The `/sbin/init` program loads or switches the root file system from memory to the hard disk.
|
||||
This is the main part of the boot process. The runlevel at the start of this process is “N” (none). The /sbin/init program initializes the system following the description in the /etc/inittab configuration file.
|
||||
|
||||
```
|
||||
# /sbin/init --version
|
||||
init (upstart 0.6.5)
|
||||
Copyright (C) 2010 Canonical Ltd.
|
||||
|
||||
This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
|
||||
```
|
||||
|
||||
### What is systemd?
|
||||
|
||||
systemd is a new init system and system manager that has been adopted by all the major Linux distributions in place of the traditional SysV init.
|
||||
|
||||
systemd is compatible with SysV and LSB init scripts. It can work as a drop-in replacement for the sysvinit system. systemd is the first process started by the kernel and holds PID 1.
|
||||
|
||||
It is the parent process of everything, and Fedora 15 was the first distribution to adopt systemd instead of Upstart. [systemctl][3] is the command-line utility and primary tool to manage the systemd daemons/services (start, restart, stop, enable, disable, reload, and status).
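As a quick illustration of systemctl in action (the service name is just an example; substitute any unit on your system):

```
# Check, enable at boot, and restart a service with systemctl.
systemctl status sshd.service
sudo systemctl enable sshd.service
sudo systemctl restart sshd.service
```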
|
||||
|
||||
systemd uses .service files instead of the bash scripts that SysVinit uses. systemd sorts all daemons into their own Linux cgroups, and you can see the system hierarchy by exploring the `/cgroup/systemd` directory.
|
||||
|
||||
### How to identify the systemd system manager on Linux
|
||||
|
||||
Run the following commands to check whether your system is running the systemd system manager.
|
||||
|
||||
### Method-1: Using ps command
|
||||
|
||||
ps – report a snapshot of the current processes. ps displays information about a selection of the active processes.
|
||||
|
||||
```
|
||||
# ps -p1 | grep "init\|upstart\|systemd"
|
||||
1 ? 00:18:09 systemd
|
||||
```
|
||||
|
||||
### Method-2: Using rpm command
|
||||
|
||||
RPM (`Red Hat Package Manager`) is a powerful command-line Package Management utility for RPM-based distributions such as RHEL, CentOS, Fedora, openSUSE, and Mageia. It allows you to install, upgrade, remove, query, and verify software on your Linux system/server. RPM files come with the `.rpm` extension.
|
||||
RPM packages are built with the required libraries and dependencies and will not conflict with other packages installed on your system.
|
||||
|
||||
```
|
||||
# rpm -qf /sbin/init
|
||||
systemd-219-30.el7_3.9.x86_64
|
||||
```
|
||||
|
||||
### Method-3: Using /sbin/init file
|
||||
|
||||
The `/sbin/init` program loads or switches the root file system from memory to the hard disk.
|
||||
This is the main part of the boot process. The runlevel at the start of this process is “N” (none). The /sbin/init program initializes the system following the description in the /etc/inittab configuration file.
|
||||
|
||||
```
|
||||
# file /sbin/init
|
||||
/sbin/init: symbolic link to `../lib/systemd/systemd'
|
||||
```
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://www.2daygeek.com/how-to-determine-which-init-system-manager-is-running-on-linux-system/
|
||||
|
||||
作者:[Prakash Subramanian][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://www.2daygeek.com/author/prakash/
|
||||
[b]: https://github.com/lujun9972
|
||||
[1]: https://www.2daygeek.com/category/package-management/
|
||||
[2]: https://www.2daygeek.com/rpm-command-examples/
|
||||
[3]: https://www.2daygeek.com/how-to-check-all-running-services-in-linux/
|
@ -1,3 +1,5 @@
|
||||
translating---geekpi
|
||||
|
||||
MidnightBSD Hits 1.0! Checkout What’s New
|
||||
======
|
||||
A couple days ago, Lucas Holt announced the release of MidnightBSD 1.0. Let’s take a quick look at what is included in this new release.
|
||||
|
@ -0,0 +1,111 @@
|
||||
The case for open source classifiers in AI algorithms
|
||||
======
|
||||
Machine bias is a widespread problem with potentially serious human consequences, but it's not unmanageable.
|
||||
|
||||
![](https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/brain_data.png?itok=RH6NA32X)
|
||||
|
||||
Dr. Carol Reiley's achievements are too long to list. She co-founded [Drive.ai][1], a self-driving car startup that [raised $50 million][2] in its second round of funding last year. Forbes magazine named her one of "[20 Incredible Women in AI][3]," and she built intelligent robot systems as a PhD candidate at Johns Hopkins University.
|
||||
|
||||
But when she built a voice-activated human-robot interface, her own creation couldn't recognize her voice.
|
||||
|
||||
Dr. Reiley used Microsoft's speech recognition API to build her interface. But since the API was built mostly by young men, it hadn't been exposed to enough voice variations. After some failed attempts to lower her voice so the system would recognize her, Dr. Reiley [enlisted a male graduate][4] to lead demonstrations of her work.
|
||||
|
||||
Did Microsoft train its API to recognize only male voices? Probably not. It's more likely that the dataset used to train this API didn't have a wide range of voices with diverse accents, inflections, etc.
|
||||
|
||||
AI-powered products learn from the data they're trained on. If Microsoft's API was exposed only to male voices within a certain age range, it wouldn't know how to recognize a female voice—even if a female built the product.
|
||||
|
||||
This is an example of machine bias at work—and it's a more widespread problem than we think.
|
||||
|
||||
### What is machine bias?
|
||||
|
||||
[According to Gartner research][5] (available for clients), "Machine bias arises when an algorithm unfairly prefers a particular group or unjustly discriminates against another when making predictions and drawing conclusions." This bias takes one of two forms:
|
||||
|
||||
* **Direct bias** occurs when models make predictions based on sensitive or prohibited attributes. These attributes include race, religion, gender, and sexual orientation.
|
||||
* **Indirect bias** is a byproduct of non-sensitive attributes that correlate with sensitive attributes. This is the more common form of machine bias. It's also the tougher form of bias to detect.
|
||||
|
||||
|
||||
|
||||
### The human impact of machine bias
|
||||
|
||||
In my [lightning talk][6] at Open Source Summit North America in August, I shared the Correctional Offender Management Profiling for Alternative Sanctions ([COMPAS][7]) algorithm as an example of indirect bias. Judges in more than 12 U.S. states use this algorithm to predict a defendant's likelihood to recommit crimes.
|
||||
|
||||
Unfortunately, [research from ProPublica][8] found that the COMPAS algorithm made incorrect predictions due to indirect bias based on race. The algorithm was two times more likely to incorrectly cite black defendants as high risks for recommitting crimes and two times more likely to incorrectly cite white defendants as low risks for recommitting crimes.
|
||||
|
||||
How did this happen? The COMPAS algorithm's predictions correlated with race (a sensitive/prohibited attribute). To confirm whether indirect bias exists within a dataset, the outcomes from one group are compared with another group's. If the difference exceeds some agreed-upon threshold, the model is considered unacceptably biased.
|
||||
|
||||
This isn't a "What if?" scenario: COMPAS's results impacted defendants' prison sentences, including the length of those sentences and whether defendants were released on parole.
|
||||
|
||||
Based partially on COMPAS's recommendation, a Wisconsin judge [denied probation][9] to a man named Eric Loomis. Instead, the judge gave Loomis a six-year prison sentence for driving a car that had been used in a recent shooting.
|
||||
|
||||
To make matters worse, we can't confirm how COMPAS reached its conclusions: The manufacturer refused to disclose how it works, which made it [a black-box algorithm][10]. But when Loomis took his case to the Supreme Court, the justices refused to give it a hearing.
|
||||
|
||||
This choice signaled that most Supreme Court justices condoned the algorithm's use without knowing how it reached (often incorrect) conclusions. This sets a dangerous legal precedent, especially as confusion about how AI works [shows no signs of slowing down][11].
|
||||
|
||||
### Why you should open source your AI algorithms
|
||||
|
||||
The open source community discussed this subject during a Birds of a Feather (BoF) session at Open Source Summit North America in August. During this discussion, some developers made cases for keeping machine learning algorithms private.
|
||||
|
||||
Along with proprietary concerns, these black-box algorithms are built on endless neurons that each have their own biases. Since these algorithms learn from the data they're trained on, they're at risk of manipulation by bad actors. One program manager at a major tech firm said his team is constantly on guard to protect their work from those with ill intent.
|
||||
|
||||
In spite of these reasons, there's a strong case in favor of making the datasets used to train machine learning algorithms open where possible. And a series of open source tools is helping developers solve this problem.
|
||||
|
||||
Local Interpretable Model-Agnostic Explanations (LIME) is an open source Python toolkit from the University of Washington. It doesn't try to dissect every factor influencing algorithms' decisions. Instead, it treats every model as a black box.
|
||||
|
||||
LIME uses a pick-step to select a representative set of predictions or conclusions to explain. Then it approximates the model closest to those predictions. It manipulates the inputs to the model and then measures how predictions change.
|
||||
|
||||
The image below, from [LIME's website][12], shows a classifier from text classification. The tool's researchers took two classes—Atheism and Christian—that are difficult to distinguish since they share so many words. Then, they [trained a forest with 500 trees][13] and got a test accuracy of 92.4%. If accuracy was your core measure of trust, you'd be able to trust this algorithm.
|
||||
|
||||
![](https://opensource.com/sites/default/files/uploads/classifier.png)
|
||||
|
||||
Projects like LIME prove that while machine bias is unavoidable, it's not unmanageable. If you add bias testing to your product development lifecycles, you can decrease the risk of bias within datasets that are used to train AI-powered products built on machine learning.
|
||||
|
||||
### Avoid algorithm aversion
|
||||
|
||||
When we don't know how algorithms make decisions, we can't fully trust them. In the near future, companies will have no choice but to be more transparent about how their creations work.
|
||||
|
||||
We're already seeing legislation in Europe that would fine large tech companies for not revealing how their algorithms work. And extreme as this might sound, it's what users want.
|
||||
|
||||
Research from the University of Chicago and the University of Pennsylvania showed that users [have more trust in modifiable algorithms][14] than in those built by experts. People prefer algorithms when they can clearly see how those algorithms work—even if those algorithms are wrong.
|
||||
|
||||
This supports the crucial role that transparency plays in public trust of tech. It also makes [the case for open source projects][15] that aim to solve this problem.
|
||||
|
||||
Algorithm aversion is real, and rightfully so. Earlier this month, Amazon was the latest tech giant to [have its machine bias exposed][16]. If such companies can't defend how these machines reach conclusions, their end users will suffer.
|
||||
|
||||
I gave a full talk on machine bias—including steps to solve this problem—[at Google Dev Fest DC][17] as part of DC Startup Week in September. On October 23, I'll give a [lightning talk][18] on this same subject at All Things Open in Raleigh, N.C.
|
||||
|
||||
Lauren Maffeo will present [Erase unconscious bias from your AI datasets][19] at [All Things Open][20], October 21-23 in Raleigh, N.C.
|
||||
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://opensource.com/article/18/10/open-source-classifiers-ai-algorithms
|
||||
|
||||
作者:[Lauren Maffeo][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://opensource.com/users/lmaffeo
|
||||
[b]: https://github.com/lujun9972
|
||||
[1]: http://Drive.ai
|
||||
[2]: https://www.reuters.com/article/us-driveai-autonomous-idUSKBN19I2ZD
|
||||
[3]: https://www.forbes.com/sites/mariyayao/2017/05/18/meet-20-incredible-women-advancing-a-i-research/#1876954026f9
|
||||
[4]: https://techcrunch.com/2016/11/16/when-bias-in-product-design-means-life-or-death/
|
||||
[5]: https://www.gartner.com/doc/3889586/control-bias-eliminate-blind-spots
|
||||
[6]: https://www.youtube.com/watch?v=JtQzdTDv-P4
|
||||
[7]: https://en.wikipedia.org/wiki/COMPAS_(software)
|
||||
[8]: https://www.propublica.org/article/how-we-analyzed-the-compas-recidivism-algorithm
|
||||
[9]: https://www.nytimes.com/2017/10/26/opinion/algorithm-compas-sentencing-bias.html
|
||||
[10]: https://www.technologyreview.com/s/609338/new-research-aims-to-solve-the-problem-of-ai-bias-in-black-box-algorithms/
|
||||
[11]: https://www.thenetworkmediagroup.com/blog/ai-the-facts-and-myths-lauren-maffeo-getapp
|
||||
[12]: https://homes.cs.washington.edu/~marcotcr/blog/lime/
|
||||
[13]: https://towardsdatascience.com/decision-trees-in-machine-learning-641b9c4e8052
|
||||
[14]: https://papers.ssrn.com/sol3/papers.cfm?abstract_id=2616787
|
||||
[15]: https://github.com/mbilalzafar/fair-classification
|
||||
[16]: https://www.reuters.com/article/us-amazon-com-jobs-automation-insight/amazon-scraps-secret-ai-recruiting-tool-that-showed-bias-against-women-idUSKCN1MK08G
|
||||
[17]: https://www.facebook.com/DCstartupweek/videos/1919103555059439/?fref=mentions&__xts__%5B0%5D=68.ARD1fVGSdYCHajf8qSryp5g2MoKg4522wZ0KJGIIPJTtw3xulDIkl9A6Vg4BrnbB6BfSX-yl9D5sNMZ4rtZb8rIbBU9ueWA9xXnt6SDv_hPlo_cxIRVS2RUI_O0hYahfNvHvYi8AsCPsDRqiHO4Jt1Ex9VS67uoJ46MXynR1XQB4f5jdGp1UDQ&__tn__=K-R
|
||||
[18]: https://opensource.com/article/18/10/lightning-talks-all-things-open
|
||||
[19]: https://opensource.com/article/18/10/lightning-talks-all-things-open#4
|
||||
[20]: https://allthingsopen.org/
|
133
sources/tech/20181018 Understanding Linux Links- Part 1.md
Normal file
133
sources/tech/20181018 Understanding Linux Links- Part 1.md
Normal file
@ -0,0 +1,133 @@
|
||||
Understanding Linux Links: Part 1
|
||||
======
|
||||
|
||||
![](https://www.linux.com/sites/lcom/files/styles/rendered_file/public/linux-link-498708.jpg?itok=DyVEcEsc)
|
||||
|
||||
Along with `cp` and `mv`, both of which we talked about at length in [the previous installment of this series][1], links are another way of putting files and directories where you want them to be. The advantage is that links let you have one file or directory show up in several places at the same time.
|
||||
|
||||
As noted previously, at the physical disk level, things like files and directories don't really exist. A filesystem conjures them up for our human convenience. But at the disk level, there is something called a _partition table_ , which lives at the beginning of every partition, and then the data scattered over the rest of the disk.
|
||||
|
||||
Although there are different types of partition tables, the ones at the beginning of a partition containing your data will map where each directory and file starts and ends. The partition table acts like an index: When you load a file from your disk, your operating system looks up the entry on the table and the table says where the file starts on the disk and where it finishes. The disk head moves to the start point, reads the data until it reaches the end point and, hey presto: here's your file.
|
||||
|
||||
### Hard Links
|
||||
|
||||
A hard link is simply an entry in the partition table that points to an area on a disk that **has already been assigned to a file**. In other words, a hard link points to data that has already been indexed by another entry. Let's see how this works.
|
||||
|
||||
Open a terminal, create a directory for tests and move into it:
|
||||
|
||||
```
|
||||
mkdir test_dir
|
||||
cd test_dir
|
||||
```
|
||||
|
||||
Create a file by [touching][1] it:
|
||||
|
||||
```
|
||||
touch test.txt
|
||||
```
|
||||
|
||||
For extra excitement (?), open _test.txt_ in a text editor and add a few words to it.
|
||||
|
||||
Now make a hard link by executing:
|
||||
|
||||
```
|
||||
ln test.txt hardlink_test.txt
|
||||
```
|
||||
|
||||
Run `ls`, and you'll see your directory now contains two files... Or so it would seem. As you read before, what you are really seeing is two names for the exact same file: _hardlink_test.txt_ contains the same content, takes up no extra space on the disk (try with a large file to test this), and shares the same inode as _test.txt_ :
|
||||
|
||||
```
|
||||
$ ls -li *test*
|
||||
16515846 -rw-r--r-- 2 paul paul 14 oct 12 09:50 hardlink_test.txt
|
||||
16515846 -rw-r--r-- 2 paul paul 14 oct 12 09:50 test.txt
|
||||
```
|
||||
|
||||
_ls_ 's `-i` option shows the _inode number_ of a file. The _inode_ is the chunk of information in the partition table that contains the location of the file or directory on the disk, the last time it was modified, and other data. If two files share the same inode, they are, to all practical effects, the same file, regardless of where they are located in the directory tree.
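You can also query this information directly with `stat`. A small sketch using the two files created above, where `%i` prints the inode number and `%h` the hard link count:

```
$ stat -c '%i  %h  %n' test.txt hardlink_test.txt
16515846  2  test.txt
16515846  2  hardlink_test.txt
```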
|
||||
|
||||
### Fluffy Links
|
||||
|
||||
Soft links, also known as _symlinks_ , are different: a soft link is really an independent file, it has its own inode and its own little slot on the disk. But it only contains a snippet of data that points the operating system to another file or directory.
|
||||
|
||||
You can create a soft link using `ln` with the `-s` option:
|
||||
|
||||
```
|
||||
ln -s test.txt softlink_test.txt
|
||||
```
|
||||
|
||||
This will create the soft link _softlink_test.txt_ to _test.txt_ in the current directory.
|
||||
|
||||
By running `ls -li` again, you can see the difference between the two different kinds of links:
|
||||
|
||||
```
|
||||
$ ls -li
|
||||
total 8
|
||||
16515846 -rw-r--r-- 2 paul paul 14 oct 12 09:50 hardlink_test.txt
|
||||
16515855 lrwxrwxrwx 1 paul paul 8 oct 12 09:50 softlink_test.txt -> test.txt
|
||||
16515846 -rw-r--r-- 2 paul paul 14 oct 12 09:50 test.txt
|
||||
```
|
||||
|
||||
_hardlink_test.txt_ and _test.txt_ contain some text and take up the same space *literally*. They also share the same inode number. Meanwhile, _softlink_test.txt_ occupies much less and has a different inode number, marking it as a different file altogether. Using the _ls_ 's `-l` option also shows the file or directory your soft link points to.
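Another way to inspect a soft link is `readlink`, which prints the target stored inside the link (the absolute path below assumes the test directory from earlier lives under _/home/paul_ ):

```
$ readlink softlink_test.txt       # the raw target stored in the link
test.txt
$ readlink -f softlink_test.txt    # the fully resolved absolute path
/home/paul/test_dir/test.txt
```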
|
||||
|
||||
### Why Use Links?
|
||||
|
||||
They are good for **applications that come with their own environment**. It often happens that your Linux distro does not come with the latest version of an application you need. Take the case of the fabulous [Blender 3D][2] design software. Blender allows you to create 3D still images as well as animated films, and who wouldn't want to have that on their machine? The problem is that the current version of Blender is always at least one version ahead of that found in any distribution.
|
||||
|
||||
Fortunately, [Blender provides downloads][3] that run out of the box. These packages come with, apart from the program itself, a complex framework of libraries and dependencies that Blender needs to work. All these bits and pieces come within their own hierarchy of directories.
|
||||
|
||||
Every time you want to run Blender, you could `cd` into the folder you downloaded it to and run:
|
||||
|
||||
```
|
||||
./blender
|
||||
```
|
||||
|
||||
But that is inconvenient. It would be better if you could run the `blender` command from anywhere in your file system, as well as from your desktop command launchers.
|
||||
|
||||
The way to do that is to link the _blender_ executable into a _bin/_ directory. On many systems, you can make the `blender` command available from anywhere in the file system by linking to it like this:
|
||||
|
||||
```
|
||||
ln -s /path/to/blender_directory/blender /home/<username>/bin
|
||||
```
|
||||
|
||||
Another case in which you will need links is for **software that needs outdated libraries**. If you list your _/usr/lib_ directory with `ls -l`, you will see a lot of soft-linked files fly by. Take a closer look, and you will see that the links usually have similar names to the original files they are linking to. You may see _libblah_ linking to _libblah.so.2_ , and then, you may even notice that _libblah.so.2_ links in turn to _libblah.so.2.1.0_ , the original file.
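Listed on disk, such a chain looks something like this ( _libblah_ is the article's made-up library name, and the output is illustrative):

```
$ ls -l /usr/lib/libblah*
lrwxrwxrwx 1 root root      12 oct 12 09:50 libblah.so -> libblah.so.2
lrwxrwxrwx 1 root root      16 oct 12 09:50 libblah.so.2 -> libblah.so.2.1.0
-rw-r--r-- 1 root root 1234567 oct 12 09:50 libblah.so.2.1.0
```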
|
||||
|
||||
This is because applications often require older versions of a library than what is installed. The problem is that, even if the more modern versions are still compatible with the older versions (and usually they are), the program will bork if it doesn't find the version it is looking for. To solve this problem, distributions often create links so that the picky application believes it has found the older version, when, in reality, it has only found a link and ends up using the more up-to-date version of the library.
|
||||
|
||||
Somewhat related is what happens with **programs you compile yourself from the source code**. Programs you compile yourself often end up installed under _/usr/local_ : the program itself ends up in _/usr/local/bin_ and it looks for the libraries it needs in the _/usr/local/lib_ directory. But say that your new program needs _libblah_ , but _libblah_ lives in _/usr/lib_ and that's where all your other programs look for it. You can link it to _/usr/local/lib_ by doing:
|
||||
|
||||
```
|
||||
ln -s /usr/lib/libblah /usr/local/lib
|
||||
```
|
||||
|
||||
Or, if you prefer, by `cd`ing into _/usr/local/lib_...
|
||||
|
||||
```
|
||||
cd /usr/local/lib
|
||||
```
|
||||
|
||||
... and then linking with:
|
||||
|
||||
```
|
||||
ln -s ../../lib/libblah    # ../../lib resolves to /usr/lib from /usr/local/lib
|
||||
```
|
||||
|
||||
There are dozens more cases in which linking proves useful, and you will undoubtedly discover them as you become more proficient in using Linux, but these are the most common. Next time, we’ll look at some linking quirks you need to be aware of.
|
||||
|
||||
Learn more about Linux through the free ["Introduction to Linux"][4] course from The Linux Foundation and edX.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://www.linux.com/blog/intro-to-linux/2018/10/linux-links-part-1
|
||||
|
||||
作者:[Paul Brown][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://www.linux.com/users/bro66
|
||||
[b]: https://github.com/lujun9972
|
||||
[1]: https://www.linux.com/blog/2018/8/linux-beginners-moving-things-around
|
||||
[2]: https://www.blender.org/
|
||||
[3]: https://www.blender.org/download/
|
||||
[4]: https://training.linuxfoundation.org/linux-courses/system-administration-training/introduction-to-linux
|
@ -0,0 +1,55 @@
|
||||
translating----geekpi
|
||||
|
||||
Edit your videos with Pitivi on Fedora
|
||||
======
|
||||
|
||||
![](https://fedoramagazine.org/wp-content/uploads/2018/10/pitivi-816x346.png)
|
||||
Looking to produce a video of your adventures this weekend? There are many different options for editing videos out there. However, if you are looking for a video editor that is simple to pick up, and also available in the official Fedora Repositories, give [Pitivi][1] a go.
|
||||
|
||||
Pitivi is an open source, non-linear video editor that uses the GStreamer framework. Out of the box on Fedora, Pitivi supports OGG Video, WebM, and a range of other formats. Additionally, support for more video formats is available via GStreamer plugins. Pitivi is also tightly integrated with the GNOME Desktop, so the UI will feel at home among the other newer applications on Fedora Workstation.
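For example, some of the extra plugin sets can be installed from the Fedora repositories; the exact package names below may vary between Fedora releases:

```
sudo dnf install gstreamer1-plugins-good gstreamer1-plugins-bad-free
```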
|
||||
|
||||
### Installing Pitivi on Fedora
|
||||
|
||||
Pitivi is available in the Fedora Repositories. On Fedora Workstation, simply search and install Pitivi from the Software application.
|
||||
|
||||
![][2]
|
||||
|
||||
Alternatively, install Pitivi using the following command in the Terminal:
|
||||
|
||||
```
|
||||
sudo dnf install pitivi
|
||||
```
|
||||
|
||||
### Basic Editing
|
||||
|
||||
Pitivi has a wide range of tools built in to allow quick and effective editing of your clips. Simply import videos, audio, and images into the Pitivi media library, then drag them onto the timeline. Additionally, Pitivi allows you to easily split, trim, and group parts of clips, as well as add simple fade transitions on the timeline.
|
||||
|
||||
![][3]
|
||||
|
||||
### Transitions and Effects
|
||||
|
||||
In addition to a basic fade between two clips, Pitivi also features a range of different transitions and wipes. Additionally, there are over a hundred effects that can be applied to either videos or audio to change how the media elements are played or displayed in your final presentation.
|
||||
|
||||
![][4]
|
||||
|
||||
Pitivi also features a range of other great features, so be sure to check out the [tour][5] on their website for a full description of the features of the awesome Pitivi.
|
||||
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://fedoramagazine.org/edit-your-videos-with-pitivi-on-fedora/
|
||||
|
||||
作者:[Ryan Lerch][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://fedoramagazine.org/introducing-flatpak/
|
||||
[b]: https://github.com/lujun9972
|
||||
[1]: http://www.pitivi.org/
|
||||
[2]: https://fedoramagazine.org/wp-content/uploads/2018/10/Screenshot-from-2018-10-19-14-46-12.png
|
||||
[3]: https://fedoramagazine.org/wp-content/uploads/2018/10/Screenshot-from-2018-10-19-15-37-29.png
|
||||
[4]: http://www.pitivi.org/i/screenshots/archive/0.94.jpg
|
||||
[5]: http://www.pitivi.org/?go=tour
|
@ -0,0 +1,341 @@
|
||||
How to use Pandoc to produce a research paper
|
||||
======
|
||||
Learn how to manage section references, figures, tables, and more in Markdown.
|
||||
![](https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/life_paperclips.png?itok=j48op49T)
|
||||
|
||||
This article takes a deep dive into how to produce a research paper using (mostly) [Markdown][1] syntax. We'll cover how to create and reference sections, figures (in Markdown and [LaTeX][2]) and bibliographies. We'll also discuss troublesome cases and why writing them in LaTeX is the right approach.
|
||||
|
||||
### Research
|
||||
|
||||
Research papers usually contain references to sections, figures, tables, and a bibliography. [Pandoc][3] by itself cannot easily cross-reference these, but it can leverage the [pandoc-crossref][4] filter to do the automatic numbering and cross-referencing of sections, figures, and tables.
|
||||
|
||||
Let’s start by taking [an example of an educational research paper][5] originally written in LaTeX and rewriting it in Markdown (and some LaTeX) with Pandoc and pandoc-crossref.
|
||||
|
||||
#### Adding and referencing sections
|
||||
|
||||
Sections are automatically numbered and must be written using the Markdown heading H1. Subsections are written with subheadings H2-H4 (it is uncommon to need more than that). For example, to write a section titled “Implementation”, write `# Implementation {#sec:implementation}`, and Pandoc produces `3. Implementation` (or the corresponding numbered section). The title “Implementation” uses heading H1 and declares a label `{#sec:implementation}` that authors can use to refer to that section. To reference a section, type the `@` symbol followed by the label of the section and enclose it in square brackets: `[@sec:implementation]`.
|
||||
|
||||
[In this paper][5], we find the following example:
|
||||
|
||||
```
|
||||
we lack experience (consistency between TAs, [@sec:implementation]).
|
||||
```
|
||||
|
||||
Pandoc produces:
|
||||
|
||||
```
|
||||
we lack experience (consistency between TAs, Section 4).
|
||||
```
|
||||
|
||||
Sections are numbered automatically (this is covered in the `Makefile` at the end of the article). To create unnumbered sections, type the title of the section, followed by `{-}`. For example, `### Designing a game for maintainability {-}` creates an unnumbered subsection with the title “Designing a game for maintainability”.
|
||||
|
||||
#### Adding and referencing figures
|
||||
|
||||
Adding and referencing a figure is similar to referencing a section and adding a Markdown image:
|
||||
|
||||
```
|
||||
![Scatterplot matrix](data/scatterplots/RScatterplotMatrix2.png){#fig:scatter-matrix}
|
||||
```
|
||||
|
||||
The line above tells Pandoc that there is a figure with the caption Scatterplot matrix and the path to the image is `data/scatterplots/RScatterplotMatrix2.png`. `{#fig:scatter-matrix}` declares the name that should be used to reference the figure.
|
||||
|
||||
Here is an example of a figure reference from the example paper:
|
||||
|
||||
```
|
||||
The boxes "Enjoy", "Grade" and "Motivation" ([@fig:scatter-matrix]) ...
|
||||
```
|
||||
|
||||
Pandoc produces the following output:
|
||||
|
||||
```
|
||||
The boxes "Enjoy", "Grade" and "Motivation" (Fig. 1) ...
|
||||
```
|
||||
|
||||
#### Adding and referencing a bibliography
|
||||
|
||||
Most research papers keep references in a BibTeX database file. In this example, this file is named [biblio.bib][6] and it contains all the references of the paper. Here is what this file looks like:
|
||||
|
||||
```
|
||||
@inproceedings{wrigstad2017mastery,
|
||||
Author = {Wrigstad, Tobias and Castegren, Elias},
|
||||
Booktitle = {SPLASH-E},
|
||||
Title = {Mastery Learning-Like Teaching with Achievements},
|
||||
Year = 2017
|
||||
}
|
||||
|
||||
@inproceedings{review-gamification-framework,
|
||||
Author = {A. Mora and D. Riera and C. Gonzalez and J. Arnedo-Moreno},
|
||||
Publisher = {IEEE},
|
||||
Booktitle = {2015 7th International Conference on Games and Virtual Worlds
|
||||
for Serious Applications (VS-Games)},
|
||||
Doi = {10.1109/VS-GAMES.2015.7295760},
|
||||
Keywords = {formal specification;serious games (computing);design
|
||||
framework;formal design process;game components;game design
|
||||
elements;gamification design frameworks;gamification-based
|
||||
solutions;Bibliographies;Context;Design
|
||||
methodology;Ethics;Games;Proposals},
|
||||
Month = {Sept},
|
||||
Pages = {1-8},
|
||||
Title = {A Literature Review of Gamification Design Frameworks},
|
||||
Year = 2015,
|
||||
Bdsk-Url-1 = {http://dx.doi.org/10.1109/VS-GAMES.2015.7295760}
|
||||
}
|
||||
|
||||
...
|
||||
```
|
||||
|
||||
The first line, `@inproceedings{wrigstad2017mastery,`, declares the type of publication (`inproceedings`) and the label used to refer to that paper (`wrigstad2017mastery`).
|
||||
|
||||
To cite the paper with its title, Mastery Learning-Like Teaching with Achievements, type:
|
||||
|
||||
```
|
||||
the achievement-driven learning methodology [@wrigstad2017mastery]
|
||||
```
|
||||
|
||||
Pandoc will output:
|
||||
|
||||
```
|
||||
the achievement-driven learning methodology [30]
|
||||
```
|
||||
|
||||
The paper we will produce includes a bibliography section with numbered references like these:
|
||||
|
||||
![](https://opensource.com/sites/default/files/uploads/bibliography-example_0.png)
|
||||
|
||||
Citing a collection of articles is easy: Simply cite each article, separating the labeled references using a semi-colon: `;`. If there are two labeled references—i.e., `SEABORN201514` and `gamification-leaderboard-benefits`—cite them together, like this:
|
||||
|
||||
```
|
||||
Thus, the most important benefit is its potential to increase students' motivation
|
||||
|
||||
and engagement [@SEABORN201514;@gamification-leaderboard-benefits].
|
||||
```
|
||||
|
||||
Pandoc will produce:
|
||||
|
||||
```
|
||||
Thus, the most important benefit is its potential to increase students’ motivation
|
||||
|
||||
and engagement [26, 28]
|
||||
```
|
||||
|
||||
### Problematic cases
|
||||
|
||||
A common problem involves objects that do not fit in the page. They then float to wherever they fit best, even if that position is not where the reader expects to see it. Since papers are easier to read when figures or tables appear close to where they are mentioned, we need to have some control over where these elements are placed. For this reason, I recommend the use of the `figure` LaTeX environment, which enables users to control the positioning of figures.
|
||||
|
||||
Let’s take the figure example shown above:
|
||||
|
||||
```
|
||||
![Scatterplot matrix](data/scatterplots/RScatterplotMatrix2.png){#fig:scatter-matrix}
|
||||
```
|
||||
|
||||
And rewrite it in LaTeX:
|
||||
|
||||
```
|
||||
\begin{figure}[t]
|
||||
\includegraphics{data/scatterplots/RScatterplotMatrix2.png}
|
||||
\caption{\label{fig:matrix}Scatterplot matrix}
|
||||
\end{figure}
|
||||
```
|
||||
|
||||
In LaTeX, the `[t]` option in the `figure` environment declares that the image should be placed at the top of the page. For more options, refer to the Wikibooks article [LaTex/Floats, Figures, and Captions][7].
|
||||
|
||||
### Producing the paper
|
||||
|
||||
So far, we've covered how to add and reference (sub-)sections and figures and cite the bibliography—now let's review how to produce the research paper in PDF format. To generate the PDF, we will use Pandoc to generate a LaTeX file that can be compiled to the final PDF. We will also discuss how to generate the research paper in LaTeX using a customized template and a meta-information file, and how to compile the LaTeX document into its final PDF form.
|
||||
|
||||
Most conferences provide a **.cls** file or a template that specifies how papers should look; for example, whether they should use a two-column format and other design treatments. In our example, the conference provided a file named **acmart.cls**.
|
||||
|
||||
Authors are generally expected to include in their papers the institution to which they belong. However, this option is not included in Pandoc’s default LaTeX template (note that the template can be inspected by typing `pandoc -D latex`). To include the affiliation, take Pandoc’s default LaTeX template and add a new field. The Pandoc template was copied into a file named `mytemplate.tex` as follows:
|
||||
|
||||
```
|
||||
pandoc -D latex > mytemplate.tex
|
||||
```
|
||||
|
||||
The default template contains the following code:
|
||||
|
||||
```
|
||||
$if(author)$
|
||||
\author{$for(author)$$author$$sep$ \and $endfor$}
|
||||
$endif$
|
||||
$if(institute)$
|
||||
\providecommand{\institute}[1]{}
|
||||
\institute{$for(institute)$$institute$$sep$ \and $endfor$}
|
||||
$endif$
|
||||
```
|
||||
|
||||
Because the template should include the author’s affiliation and email address, among other things, we updated it to include these fields (we made other changes as well but did not include them here due to the file length):
|
||||
|
||||
```
|
||||
|
||||
$for(author)$
|
||||
$if(author.name)$
|
||||
\author{$author.name$}
|
||||
$if(author.affiliation)$
|
||||
\affiliation{\institution{$author.affiliation$}}
|
||||
$endif$
|
||||
$if(author.email)$
|
||||
\email{$author.email$}
|
||||
$endif$
|
||||
$else$
|
||||
$author$
|
||||
$endif$
|
||||
$endfor$
|
||||
```
|
||||
|
||||
With these changes in place, we should have the following files:
|
||||
|
||||
* `main.md` contains the research paper
|
||||
* `biblio.bib` contains the bibliographic database
|
||||
* `acmart.cls` is the class of the document that we should use
|
||||
* `mytemplate.tex` is the template file to use (instead of the default)
|
||||
|
||||
|
||||
|
||||
Let’s add the meta-information of the paper in a `meta.yaml` file:
|
||||
|
||||
```
|
||||
---
|
||||
template: 'mytemplate.tex'
|
||||
documentclass: acmart
|
||||
classoption: sigconf
|
||||
title: The impact of opt-in gamification on `\\`{=latex} students' grades in a software design course
|
||||
author:
|
||||
- name: Kiko Fernandez-Reyes
|
||||
affiliation: Uppsala University
|
||||
email: kiko.fernandez@it.uu.se
|
||||
- name: Dave Clarke
|
||||
affiliation: Uppsala University
|
||||
email: dave.clarke@it.uu.se
|
||||
- name: Janina Hornbach
|
||||
affiliation: Uppsala University
|
||||
email: janina.hornbach@fek.uu.se
|
||||
bibliography: biblio.bib
|
||||
abstract: |
|
||||
An achievement-driven methodology strives to give students more control over their learning with enough flexibility to engage them in deeper learning. (more stuff continues)
|
||||
|
||||
include-before: |
|
||||
\```{=latex}
|
||||
\copyrightyear{2018}
|
||||
\acmYear{2018}
|
||||
\setcopyright{acmlicensed}
|
||||
\acmConference[MODELS '18 Companion]{ACM/IEEE 21th International Conference on Model Driven Engineering Languages and Systems}{October 14--19, 2018}{Copenhagen, Denmark}
|
||||
\acmBooktitle{ACM/IEEE 21th International Conference on Model Driven Engineering Languages and Systems (MODELS '18 Companion), October 14--19, 2018, Copenhagen, Denmark}
|
||||
\acmPrice{XX.XX}
|
||||
\acmDOI{10.1145/3270112.3270118}
|
||||
\acmISBN{978-1-4503-5965-8/18/10}
|
||||
|
||||
\begin{CCSXML}
|
||||
<ccs2012>
|
||||
<concept>
|
||||
<concept_id>10010405.10010489</concept_id>
|
||||
<concept_desc>Applied computing~Education</concept_desc>
|
||||
<concept_significance>500</concept_significance>
|
||||
</concept>
|
||||
</ccs2012>
|
||||
\end{CCSXML}
|
||||
|
||||
\ccsdesc[500]{Applied computing~Education}
|
||||
|
||||
\keywords{gamification, education, software design, UML}
|
||||
\```
|
||||
figPrefix:
|
||||
- "Fig."
|
||||
- "Figs."
|
||||
secPrefix:
|
||||
- "Section"
|
||||
- "Sections"
|
||||
...
|
||||
```
|
||||
|
||||
This meta-information file sets the following variables in LaTeX:
|
||||
|
||||
* `template` refers to the template to use (‘mytemplate.tex’)
|
||||
* `documentclass` refers to the LaTeX document class to use (`acmart`)
|
||||
* `classoption` refers to the options of the class, in this case `sigconf`
|
||||
* `title` specifies the title of the paper
|
||||
* `author` is an object that contains other fields, such as `name`, `affiliation`, and `email`.
|
||||
* `bibliography` refers to the file that contains the bibliography (biblio.bib)
|
||||
* `abstract` contains the abstract of the paper
|
||||
* `include-before` is information that should be included before the actual content of the paper; this is known as the [preamble][8] in LaTeX. I have included it here to show how to generate a computer science paper, but you may choose to skip it
|
||||
* `figPrefix` specifies how to refer to figures in the document, i.e., what should be displayed when one refers to the figure `[@fig:scatter-matrix]`. For example, the current `figPrefix` produces in the example `The boxes "Enjoy", "Grade" and "Motivation" ([@fig:scatter-matrix])` this output: `The boxes "Enjoy", "Grade" and "Motivation" (Fig. 3)`. If there are multiple figures, the current setup declares that it should instead display `Figs.` next to the figure numbers.
|
||||
* `secPrefix` specifies how to refer to sections mentioned elsewhere in the document (similar to figures, described above)
|
||||
|
||||
|
||||
|
||||
Now that the meta-information is set, let’s create a `Makefile` that produces the desired output. This `Makefile` uses Pandoc to produce the LaTeX file, `pandoc-crossref` to produce the cross-references, `pdflatex` to compile the LaTeX to PDF, and `bibtex` to process the references.
|
||||
|
||||
The `Makefile` is shown below:
|
||||
|
||||
```
|
||||
all: paper
|
||||
|
||||
paper:
|
||||
@pandoc -s -F pandoc-crossref --natbib meta.yaml --template=mytemplate.tex -N \
|
||||
-f markdown -t latex+raw_tex+tex_math_dollars+citations -o main.tex main.md
|
||||
@pdflatex main.tex &> /dev/null
|
||||
@bibtex main &> /dev/null
|
||||
@pdflatex main.tex &> /dev/null
|
||||
@pdflatex main.tex &> /dev/null
|
||||
|
||||
clean:
|
||||
rm main.aux main.tex main.log main.bbl main.blg main.out
|
||||
|
||||
.PHONY: all clean paper
|
||||
```
|
||||
|
||||
Pandoc uses the following flags (a combined standalone invocation is sketched after the list):
|
||||
|
||||
* `-s` to create a standalone LaTeX document
|
||||
* `-F pandoc-crossref` to make use of the filter `pandoc-crossref`
|
||||
* `--natbib` to render the bibliography with `natbib` (you can also choose `--biblatex`)
|
||||
* `--template` sets the template file to use
|
||||
* `-N` to number the section headings
|
||||
* `-f` and `-t` specify the conversion from and to which format. `-t` usually contains the format and is followed by the Pandoc extensions used. In the example, we declared `raw_tex+tex_math_dollars+citations` to allow use of `raw_tex` LaTeX in the middle of the Markdown file. `tex_math_dollars` enables us to type math formulas as in LaTeX, and `citations` enables us to use [this extension][9].
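Assembled into a single command outside of make, the same conversion step (using the file names assumed above) looks like this:

```
pandoc -s -F pandoc-crossref --natbib meta.yaml --template=mytemplate.tex -N \
  -f markdown -t latex+raw_tex+tex_math_dollars+citations -o main.tex main.md
```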
|
||||
|
||||
|
||||
|
||||
To generate a PDF from LaTeX, follow the guidelines [from bibtex][10] to process the bibliography:
|
||||
|
||||
```
|
||||
@pdflatex main.tex &> /dev/null
|
||||
@bibtex main &> /dev/null
|
||||
@pdflatex main.tex &> /dev/null
|
||||
@pdflatex main.tex &> /dev/null
|
||||
```
|
||||
|
||||
Each command in the recipe starts with `@`, which tells make not to echo the command itself, and we redirect standard output and standard error to `/dev/null` so that we don’t see the output generated from the execution of these commands.
|
||||
|
||||
The final result is shown below. The repository for the article can be found [on GitHub][11]:
|
||||
|
||||
![](https://opensource.com/sites/default/files/uploads/abstract-image.png)
|
||||
|
||||
### Conclusion
|
||||
|
||||
In my opinion, research is all about collaboration, dissemination of ideas, and improving the state of the art in whatever field one happens to be in. Most computer scientists and engineers write papers using the LaTeX document system, which provides excellent support for math. Researchers from the social sciences seem to stick to DOCX documents.
|
||||
|
||||
When researchers from different communities write papers together, they should first discuss which format they will use. While DOCX may not be convenient for engineers if there is math involved, LaTeX may be troublesome for researchers who lack a programming background. As this article shows, Markdown is an easy-to-use language that can be used by both engineers and social scientists.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://opensource.com/article/18/9/pandoc-research-paper
|
||||
|
||||
作者:[Kiko Fernandez-Reyes][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://opensource.com/users/kikofernandez
|
||||
[b]: https://github.com/lujun9972
|
||||
[1]: https://en.wikipedia.org/wiki/Markdown
|
||||
[2]: https://www.latex-project.org/
|
||||
[3]: https://pandoc.org/
|
||||
[4]: http://lierdakil.github.io/pandoc-crossref/
|
||||
[5]: https://dl.acm.org/citation.cfm?id=3270118
|
||||
[6]: https://github.com/kikofernandez/pandoc-examples/blob/master/research-paper/biblio.bib
|
||||
[7]: https://en.wikibooks.org/wiki/LaTeX/Floats,_Figures_and_Captions#Figures
|
||||
[8]: https://www.sharelatex.com/learn/latex/Creating_a_document_in_LaTeX#The_preamble_of_a_document
|
||||
[9]: http://pandoc.org/MANUAL.html#citations
|
||||
[10]: http://www.bibtex.org/Using/
|
||||
[11]: https://github.com/kikofernandez/pandoc-examples/tree/master/research-paper
|
@ -0,0 +1,151 @@
|
||||
To BeOS or not to BeOS, that is the Haiku
|
||||
======
|
||||
|
||||
![](https://www.linux.com/sites/lcom/files/styles/rendered_file/public/autumn-haiku-100.jpg?itok=RTSPZu9U)
|
||||
|
||||
Back in 2001, a new operating system arrived that promised to change the way users worked with their computers. That platform was BeOS, and I remember it well. What I remember most about it was the desktop, and how much it looked and felt like my favorite window manager at the time, AfterStep. I also remember how awkward and overly complicated BeOS was to install and use. In fact, upon installation, it was never all too clear how to make the platform function well enough to use on a daily basis. That was fine, however, because BeOS seemed to live in a perpetual state of “alpha release.”
|
||||
|
||||
That was then. This is very much now.
|
||||
|
||||
Now we have haiku
|
||||
|
||||
Bringing BeOS to life
|
||||
|
||||
An AfterStep joy.
|
||||
|
||||
No, Haiku has nothing to do with AfterStep, but it fit perfectly with the haiku meter, so work with me.
|
||||
|
||||
The [Haiku][1] project released its R1 Alpha 4 six years ago. Back in September of 2018, it finally released its R1 Beta 1, and although it took them eons (in computer time), seeing Haiku installed (on a virtual machine) was worth the wait … even if only for the nostalgia aspect. The big difference between R1 Beta 1 and R1 Alpha 4 (and BeOS, for that matter) is that Haiku now works like a real operating system. It’s lightning fast (and I do mean fast), it finally enjoys a modicum of stability, and it has a handful of useful apps. Before you get too excited, you’re not going to install Haiku and immediately become productive. In fact, the list of available apps is quite limited (more on this later). Even so, Haiku is definitely worth installing, even if only to see how far the project has come.
|
||||
|
||||
Speaking of which, let’s do just that.
|
||||
|
||||
### Installing Haiku
|
||||
|
||||
The installation isn’t quite as point-and-click as that of the standard Linux distribution. That doesn’t mean it’s a challenge. It’s not; in fact, the installation is handled completely through a GUI, so you won’t have to even touch the command line.
|
||||
|
||||
To install Haiku, you must first [download an image][2]. Download this file into your ~/Downloads directory. This image will be in a compressed format, so once it’s downloaded you’ll need to decompress it. Open a terminal window and issue the command unzip ~/Downloads/haiku*.zip. A new directory will be created, called haiku-r1beta1XXX-anyboot (where XXX is the architecture of your hardware). Inside that directory you’ll find the ISO image to be used for installation.
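For reference, the whole sequence might look like this (the exact file name depends on the release and architecture you downloaded):

```
cd ~/Downloads
unzip haiku*.zip
cd haiku-r1beta1*-anyboot
ls *.iso
```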
|
||||
|
||||
For my purposes, I installed Haiku as a VirtualBox virtual machine. I highly recommend going the same route, as you don’t want to have to worry about hardware detection. Creating Haiku as a virtual machine doesn’t require any special setup (beyond the standard). Once the live image has booted, you’ll be asked if you want to run the installer or boot directly to the desktop (Figure 1). Click Run Installer to begin the process.
|
||||
|
||||
|
||||
![Haiku installer][4]
|
||||
|
||||
Figure 1: Selecting to run the Haiku installer.
|
||||
|
||||
[Used with permission][5]
|
||||
|
||||
The next window is nothing more than a warning that Haiku is beta software and informing you that the installer will make the Haiku partition bootable, but doesn’t integrate with your existing boot menu (in other words, it will not set up dual booting). In this window, click the Continue button.
|
||||
|
||||
You will then be warned that no partitions have been found. Click the OK button, so you can create a partition table. In the remaining window (Figure 2), click the Set up partitions button.
|
||||
|
||||
![Haiku][7]
|
||||
|
||||
Figure 2: The Haiku Installer in action.
|
||||
|
||||
[Used with permission][5]
|
||||
|
||||
In the resulting window (Figure 3), select the partition to be used and then click Disk > Initialize > GUID Partition Map. You will be prompted to click Continue and then Write Changes.
|
||||
|
||||
![target partition][9]
|
||||
|
||||
Figure 3: Our target partition ready to be initialized.
|
||||
|
||||
[Used with permission][5]
|
||||
|
||||
Select the newly initialized partition and then click Partition > Format > Be File System. When prompted, click Continue. In the resulting window, leave everything default and click Initialize and then click Write changes.
|
||||
|
||||
Close the DriveSetup window (click the square in the titlebar) to return to the Haiku Installer. You should now be able to select the newly formatted partition in the Onto drop-down (Figure 4).
|
||||
|
||||
![partition][11]
|
||||
|
||||
Figure 4: Selecting our partition for installation.
|
||||
|
||||
[Used with permission][5]
|
||||
|
||||
After selecting the partition, click Begin and the installation will start. Don’t blink, as the entire installation takes less than 30 seconds. You read that correctly—the installation of Haiku takes less than 30 seconds. When it finishes, click Restart to boot your newly installed Haiku OS.
|
||||
|
||||
### Usage
|
||||
|
||||
When Haiku boots, it’ll go directly to the desktop. There is no login screen (or even the means to log in). You’ll be greeted with a very simple desktop that includes a few clickable icons and what is called the Tracker (Figure 5).
|
||||
|
||||
![](https://www.linux.com/sites/lcom/files/styles/rendered_file/public/haiku_5.jpg?itok=eNmbsFGV)
|
||||
|
||||
The Tracker includes any minimized application and a desktop menu that gives you access to all of the installed applications. Left click on the leaf icon in the Tracker to reveal the desktop menu (Figure 6).
|
||||
|
||||
![menu][13]
|
||||
|
||||
Figure 6: The Haiku desktop menu.
|
||||
|
||||
[Used with permission][5]
|
||||
|
||||
From within the menu, click Applications and you’ll see all the available tools. In that menu you’ll find the likes of:
|
||||
|
||||
* ActivityMonitor (Track system resources)
|
||||
|
||||
* BePDF (PDF reader)
|
||||
|
||||
* CodyCam (allows you to take pictures from a webcam)
|
||||
|
||||
* DeskCalc (calculator)
|
||||
|
||||
* Expander (unpack common archives)
|
||||
|
||||
* HaikuDepot (app store)
|
||||
|
||||
* Mail (email client)
|
||||
|
||||
* MediaPlay (play audio files)
|
||||
|
||||
* People (contact database)
|
||||
|
||||
* PoorMan (simple web server)
|
||||
|
||||
* SoftwareUpdater (update Haiku software)
|
||||
|
||||
* StyledEdit (text editor)
|
||||
|
||||
* Terminal (terminal emulator)
|
||||
|
||||
* WebPositive (web browser)
|
||||
|
||||
|
||||
|
||||
|
||||
You will find, in the HaikuDepot, a limited number of available applications. What you won’t find are many productivity tools. Missing are office suites, image editors, and more. What we have with this beta version of Haiku is not a replacement for your desktop, but a view into the work the developers have put into giving the now-defunct BeOS new life. Chances are you won’t spend too much time with Haiku, beyond kicking the tires. However, this blast from the past is certainly worth checking out.
|
||||
|
||||
### A positive step forward
|
||||
|
||||
Based on my experience with BeOS and the alpha of Haiku (all those years ago), the developers have taken a big, positive step forward. Hopefully, the next beta release won’t take as long and we might even see a final release in the coming years. Although Haiku won’t challenge the likes of Ubuntu, Mint, Arch, or Elementary OS, it could develop its own niche following. No matter its future, it’s good to see something new from the developers. Bravo to Haiku.
|
||||
|
||||
Your OS is prime
|
||||
|
||||
For a beta 2 release
|
||||
|
||||
Make it so, my friends.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://www.linux.com/learn/2018/10/beos-or-not-beos-haiku
|
||||
|
||||
作者:[Jack Wallen][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://www.linux.com/users/jlwallen
|
||||
[b]: https://github.com/lujun9972
|
||||
[1]: https://www.haiku-os.org/
|
||||
[2]: https://www.haiku-os.org/get-haiku
|
||||
[3]: /files/images/haiku1jpg
|
||||
[4]: https://www.linux.com/sites/lcom/files/styles/rendered_file/public/haiku_1.jpg?itok=PTTBoLCf (Haiku installer)
|
||||
[5]: /licenses/category/used-permission
|
||||
[6]: /files/images/haiku2jpg
|
||||
[7]: https://www.linux.com/sites/lcom/files/styles/rendered_file/public/haiku_2.jpg?itok=NV1yavv_ (Haiku)
|
||||
[8]: /files/images/haiku3jpg
|
||||
[9]: https://www.linux.com/sites/lcom/files/styles/rendered_file/public/haiku_3.jpg?itok=XWBz6kVT (target partition)
|
||||
[10]: /files/images/haiku4jpg
|
||||
[11]: https://www.linux.com/sites/lcom/files/styles/rendered_file/public/haiku_4.jpg?itok=6RbuCbAx (partition)
|
||||
[12]: /files/images/haiku6jpg
|
||||
[13]: https://www.linux.com/sites/lcom/files/styles/rendered_file/public/haiku_6.jpg?itok=-mmzNBxa (menu)
|
@ -0,0 +1,79 @@
|
||||
5 tips for choosing the right open source database
|
||||
======
|
||||
When selecting a mission-critical application, you can't afford to make mistakes.
|
||||
|
||||
![](https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/server_data_system_admin.png?itok=q6HCfNQ8)
|
||||
|
||||
So, your company has a directive to adopt more open source database technologies, and they've recruited you to select the right direction. Whether you are an open source technology veteran or a newcomer, this is a daunting and overwhelming task.
|
||||
|
||||
Over the past several years, open source technology adoption has steadily increased in the enterprise space. With its popularity comes a crowded marketplace with open source software companies promising that their solution will solve every problem and fit every workload. Be wary of these promises. Choosing the right open source technology—especially a database—is an important and difficult decision you can't make lightly.
|
||||
|
||||
In my experience as an IT professional at [Percona][1] and other companies, I've been fortunate to work hands-on in adopting open source technologies and guiding others in making the right decisions. There are many important factors to consider; hopefully, this article will shine a light on a few.
|
||||
|
||||
### 1. Have a goal.
|
||||
|
||||
This may seem simple, but based on my many conversations with people exploring MySQL, MongoDB, or PostgreSQL, it is top of the list in importance.
|
||||
|
||||
To avoid getting overwhelmed by the unlimited combinations of open source database software in the market, have a specific goal in mind. Maybe your goal is to provide your internal developers with a standardized, open source database backend that is managed by your internal database team. Perhaps your goal is to rip and replace the entire functionality of a legacy application and database backend with new open source technology.
|
||||
|
||||
Once you have defined a goal, you can focus your efforts. This will lead to better conversations internally as well as externally with open source database software vendors and advocates.
|
||||
|
||||
### 2. Understand your workload.
|
||||
|
||||
Despite the increasing ability of database technologies to wear many hats, each specializes in certain areas, e.g., MongoDB is now transactional, MySQL now has JSON storage. A growing trend in open source databases involves providing check boxes claiming certain features are available. One of the biggest mistakes is not using the right tool for the right job. Something leads a company down the wrong path—perhaps an overzealous developer or a manager with tunnel vision. The unfortunate thing is that the wrong tool can work fine for smaller volumes of transactions and data, but later there will be bottlenecks that can be solved only by using a different tool.
|
||||
|
||||
If you want a data analytics warehouse, an open source relational database is probably not the right choice. If you want a transaction-processing app with rigid data integrity and consistency, NoSQL options may not be the right choice either.
|
||||
|
||||
### 3. Don't reinvent the wheel.
|
||||
|
||||
Open source database technologies have rapidly grown, expanded, and hardened over the past several decades. We've seen a transformation from new, questionably production-ready databases to proven, enterprise-grade database backends. It's no longer necessary to be a bleeding-edge early adopter to choose open source database technologies. Organizations have grown around these communities to provide production support and tooling in the open source database space for a growing number of startups, midsized businesses, and Fortune 500 companies.
|
||||
|
||||
Battery Ventures, a tech-focused investment firm, recently introduced its [BOSS Index][2] for tracking the most popular open source projects. It's not perfect, but it provides great insight into some of the most widely adopted and active open source projects. Not surprisingly, database technologies dominate the list, comprising five of the top 10 technologies. This is a great starting point for someone new to the open source database space. A lot of times, vendors have already produced suitable architectures for solving specific problems.
|
||||
|
||||
My point is that someone has probably already done what you are trying to do. Learn from their successes and failures. Even if it is not a perfect fit, a solution can likely be modified to suit your needs. For example, Amazon provides a [CloudFormation script][3] for deploying MongoDB in its EC2 environment.
|
||||
|
||||
If you are a bleeding-edge early adopter, that doesn't mean you can't explore. If you have a unique challenge or workload that seems to fit a new open source database technology, go for it. Keep in mind that there are inherent risks (and rewards!) to being an early adopter.
|
||||
|
||||
### 4. Start simple.
|
||||
|
||||
|
||||
How many [nines][4] does your database truly need? "Achieving high availability" is often a nebulous goal for many companies. Of course, the most common answer is "it's mission-critical, and we cannot afford any downtime."
|
||||
|
||||
The more complicated your database environment, the more difficult and costly it is to manage. You can theoretically achieve higher uptime, but the tradeoffs will be the feasibility of management and performance. When in doubt, start simple. There are always options to scale out when the need arises.
|
||||
|
||||
For example, Booking.com is a widely known travel reservation site. It might be less widely known that it uses MySQL as a database backend. Nicolai Plum, a Booking.com senior systems architect, gave [a talk][5] outlining the evolution of the company's MySQL database. One of the takeaways was that the database started simple. It had to evolve over time, but in the beginning, simple master–replica architecture sufficed. As the workload and dataset increased, it introduced load balancers, multiple read replicas, archiving to Hadoop for analytics, etc. However, the early architecture was extremely simple.
|
||||
|
||||
![](https://opensource.com/sites/default/files/uploads/internet_app_barrett_chambers.png)
|
||||
|
||||
### 5. When in doubt, ask an expert.
|
||||
|
||||
If you're unsure whether a database would be a good fit, reach out on forums, websites, or to vendors and strike up a conversation. This can be exciting as you research which database technologies meet your requirements and which do not. Often there are suitable alternatives that you haven't considered. The open source community is all about sharing knowledge.
|
||||
|
||||
There is one important thing to be aware of when reaching out to open source software and services vendors. Many have open-core business models that incentivize adopting their database software. Take their advice or guidance with a grain of salt and use your own ability to research, create proofs of concept, and explore alternatives.
|
||||
|
||||
### Conclusion
|
||||
|
||||
Choosing the right open source database is an important decision. Start by asking the right questions. All too often, people put the cart before the horse, making decisions before really understanding their needs.
|
||||
|
||||
Barrett Chambers will present [Choosing the Right Open Source Database][6] at [All Things Open][7], October 21-23 in Raleigh, N.C.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://opensource.com/article/18/10/tips-choosing-right-open-source-database
|
||||
|
||||
作者:[Barrett Chambers][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://opensource.com/users/barrettc
|
||||
[b]: https://github.com/lujun9972
|
||||
[1]: https://www.percona.com/
|
||||
[2]: https://techcrunch.com/2017/04/07/tracking-the-explosive-growth-of-open-source-software/
|
||||
[3]: https://docs.aws.amazon.com/quickstart/latest/mongodb/welcome.html
|
||||
[4]: https://en.wikipedia.org/wiki/Five_nines
|
||||
[5]: https://www.percona.com/live/mysql-conference-2015/sessions/bookingcom-evolution-mysql-system-design
|
||||
[6]: https://allthingsopen.org/talk/choosing-the-right-open-source-database/
|
||||
[7]: https://allthingsopen.org/
|
@ -0,0 +1,282 @@
|
||||
translating by dianbanjiu
|
||||
How to set up WordPress on a Raspberry Pi
|
||||
======
|
||||
|
||||
Run your WordPress website on your Raspberry Pi with this simple tutorial.
|
||||
|
||||
![](https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/edu_raspberry-pi-classroom_lead.png?itok=KIyhmR8W)
|
||||
|
||||
WordPress is a popular open source blogging platform and content management system (CMS). It's easy to set up and has a thriving community of developers building websites and creating themes and plugins for others to use.
|
||||
|
||||
Although getting hosting packages with a "one-click WordPress setup" is easy, it's also simple to set up your own on a Linux server with only command-line access, and the [Raspberry Pi][1] is a perfect way to try it out and learn something along the way.
|
||||
|
||||
The four components of a commonly used web stack are Linux, Apache, MySQL, and PHP. Here's what you need to know about each.
|
||||
|
||||
### Linux
|
||||
|
||||
The Raspberry Pi runs Raspbian, which is a Linux distribution based on Debian and optimized to run well on Raspberry Pi hardware. It comes with two options to start: Desktop or Lite. The Desktop version boots to a familiar-looking desktop and comes with lots of educational software and programming tools, as well as the LibreOffice suite, Minecraft, and a web browser. The Lite version has no desktop environment, so it's command-line only and comes with only the essential software.
|
||||
|
||||
This tutorial will work with either version, but if you use the Lite version you'll have to use another computer to access your website.
|
||||
|
||||
### Apache
|
||||
|
||||
Apache is a popular web server application you can install on the Raspberry Pi to serve web pages. On its own, Apache can serve static HTML files over HTTP. With additional modules, it can serve dynamic web pages using scripting languages such as PHP.
|
||||
|
||||
Installing Apache is very simple. Open a terminal window and type the following command:
|
||||
|
||||
```
|
||||
sudo apt install apache2 -y
|
||||
```
|
||||
|
||||
By default, Apache puts a test HTML file in a web folder you can view from your Pi or another computer on your network. Just open the web browser and enter the address **<http://localhost>**. Alternatively (particularly if you're using Raspbian Lite), enter the Pi's IP address instead of **localhost**. You should see this in your browser window:
|
||||
|
||||
![](https://opensource.com/sites/default/files/uploads/apache-it-works.png)
|
||||
|
||||
This means you have Apache working!
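By the way, if you don't know your Pi's IP address, one quick way to find it from a terminal on the Pi is:

```
hostname -I
```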
|
||||
|
||||
This default webpage is just an HTML file on the filesystem. It is located at **/var/www/html/index.html**. You can try replacing this file with some HTML of your own using the [Leafpad][2] text editor:
|
||||
|
||||
```
|
||||
cd /var/www/html/
|
||||
sudo leafpad index.html
|
||||
```
|
||||
|
||||
Save and close Leafpad, then refresh the browser to see your changes.
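If you're running Raspbian Lite and have no graphical editor, a minimal sketch of the same edit from the command line would be:

```
echo '<html><body><h1>Hello from my Raspberry Pi</h1></body></html>' | sudo tee /var/www/html/index.html
```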
|
||||
|
||||
### MySQL
|
||||
|
||||
MySQL (pronounced "my S-Q-L" or "my sequel") is a popular database engine. Like PHP, it's widely used on web servers, which is why projects like WordPress use it and why those projects are so popular.
|
||||
|
||||
Install MySQL Server by entering the following command into the terminal window:
|
||||
|
||||
```
|
||||
sudo apt-get install mysql-server -y
|
||||
```
|
||||
|
||||
WordPress uses MySQL to store posts, pages, user data, and lots of other content.
|
||||
|
||||
### PHP
|
||||
|
||||
PHP is a preprocessor: it's code that runs when the server receives a request for a web page via a web browser. It works out what needs to be shown on the page, then sends that page to the browser. Unlike static HTML, PHP can show different content under different circumstances. PHP is a very popular language on the web; huge projects like Facebook and Wikipedia are written in PHP.
|
||||
|
||||
Install PHP and the MySQL extension:
|
||||
|
||||
```
|
||||
sudo apt-get install php php-mysql -y
|
||||
```
|
||||
|
||||
Delete the **index.html** file and create **index.php** :
|
||||
|
||||
```
|
||||
sudo rm index.html
|
||||
sudo leafpad index.php
|
||||
```
|
||||
|
||||
Add the following line:
|
||||
|
||||
```
|
||||
<?php phpinfo(); ?>
|
||||
```
|
||||
|
||||
Save, exit, and refresh your browser. You'll see the PHP status page:
|
||||
|
||||
![](https://opensource.com/sites/default/files/uploads/phpinfo.png)
|
||||
|
||||
### WordPress
|
||||
|
||||
You can download WordPress from [wordpress.org][3] using the **wget** command. Helpfully, the latest version of WordPress is always available at [wordpress.org/latest.tar.gz][4], so you can grab it without having to look it up on the website. As I'm writing, this is version 4.9.8.
|
||||
|
||||
Make sure you're in **/var/www/html** and delete everything in it:
|
||||
|
||||
```
|
||||
cd /var/www/html/
|
||||
sudo rm *
|
||||
```
|
||||
|
||||
Download WordPress using **wget** , then extract the contents and move the WordPress files to the **html** directory:
|
||||
|
||||
```
|
||||
sudo wget http://wordpress.org/latest.tar.gz
|
||||
sudo tar xzf latest.tar.gz
|
||||
sudo mv wordpress/* .
|
||||
```
|
||||
|
||||
Tidy up by removing the tarball and the now-empty **wordpress** directory:
|
||||
|
||||
```
|
||||
sudo rm -rf wordpress latest.tar.gz
|
||||
```
|
||||
|
||||
Running the **ls** or **tree -L 1** command will show the contents of a WordPress project:
|
||||
|
||||
```
|
||||
.
|
||||
├── index.php
|
||||
├── license.txt
|
||||
├── readme.html
|
||||
├── wp-activate.php
|
||||
├── wp-admin
|
||||
├── wp-blog-header.php
|
||||
├── wp-comments-post.php
|
||||
├── wp-config-sample.php
|
||||
├── wp-content
|
||||
├── wp-cron.php
|
||||
├── wp-includes
|
||||
├── wp-links-opml.php
|
||||
├── wp-load.php
|
||||
├── wp-login.php
|
||||
├── wp-mail.php
|
||||
├── wp-settings.php
|
||||
├── wp-signup.php
|
||||
├── wp-trackback.php
|
||||
└── xmlrpc.php
|
||||
|
||||
3 directories, 16 files
|
||||
```
|
||||
|
||||
This is the source of a default WordPress installation. The files you edit to customize your installation belong in the **wp-content** folder.
|
||||
|
||||
You should now change the ownership of all these files to the Apache user:
|
||||
|
||||
```
|
||||
sudo chown -R www-data: .
|
||||
```
|
||||
|
||||
### WordPress database
|
||||
|
||||
To get your WordPress site set up, you need a database. This is where MySQL comes in!
|
||||
|
||||
Run the MySQL secure installation command in the terminal window:
|
||||
|
||||
```
|
||||
sudo mysql_secure_installation
|
||||
```
|
||||
|
||||
You will be asked a series of questions. There's no password set up initially, but you should set one in the second step. Make sure you enter a password you will remember, as you'll need it to connect to WordPress. Press Enter to say Yes to each question that follows.
|
||||
|
||||
When it's complete, you will see the messages "All done!" and "Thanks for using MariaDB!"
|
||||
|
||||
Run **mysql** in the terminal window:
|
||||
|
||||
```
|
||||
sudo mysql -uroot -p
|
||||
```
|
||||
|
||||
Enter the root password you created. You will be greeted by the message "Welcome to the MariaDB monitor." Create the database for your WordPress installation at the **MariaDB [(none)] >** prompt using:
|
||||
|
||||
```
|
||||
create database wordpress;
|
||||
```
|
||||
|
||||
Note the semicolon at the end of the statement. If the command is successful, you should see this:
|
||||
|
||||
```
|
||||
Query OK, 1 row affected (0.00 sec)
|
||||
```
|
||||
|
||||
Grant database privileges to the root user, entering your password at the end of the statement:
|
||||
|
||||
```
|
||||
GRANT ALL PRIVILEGES ON wordpress.* TO 'root'@'localhost' IDENTIFIED BY 'YOURPASSWORD';
|
||||
```
|
||||
|
||||
For the changes to take effect, you will need to flush the database privileges:
|
||||
|
||||
```
|
||||
FLUSH PRIVILEGES;
|
||||
```
|
||||
|
||||
Exit the MariaDB prompt with **Ctrl+D** to return to the Bash shell.
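If you want to confirm the database exists before moving on, you can query MariaDB non-interactively from the shell (you'll be asked for the root password you set earlier):

```
sudo mysql -uroot -p -e "SHOW DATABASES LIKE 'wordpress';"
```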
|
||||
|
||||
### WordPress configuration
|
||||
|
||||
Open the web browser on your Raspberry Pi and open **<http://localhost>**. You should see a WordPress page asking you to pick your language. Select your language and click **Continue**. You will be presented with the WordPress welcome screen. Click the **Let's go!** button.
|
||||
|
||||
Fill out the basic site information as follows:
|
||||
|
||||
```
|
||||
Database Name: wordpress
|
||||
User Name: root
|
||||
Password: <YOUR PASSWORD>
|
||||
Database Host: localhost
|
||||
Table Prefix: wp_
|
||||
```
|
||||
|
||||
Click **Submit** to proceed, then click **Run the install**.
|
||||
|
||||
![](https://opensource.com/sites/default/files/uploads/wp-info.png)
|
||||
|
||||
Fill in the form: Give your site a title, create a username and password, and enter your email address. Hit the **Install WordPress** button, then log in using the account you just created. Now that you're logged in and your site is set up, you can see your website by visiting **<http://localhost/wp-admin>**.
|
||||
|
||||
### Permalinks
|
||||
|
||||
It's a good idea to change your permalink settings to make your URLs more friendly.
|
||||
|
||||
To do this, log into WordPress and go to the dashboard. Go to **Settings** , then **Permalinks**. Select the **Post name** option and click **Save Changes**. You'll need to enable Apache's **rewrite** module:
|
||||
|
||||
```
|
||||
sudo a2enmod rewrite
|
||||
```
|
||||
|
||||
You'll also need to tell the virtual host serving the site to allow its configuration to be overridden, so the rewrite rules can take effect. Edit the Apache configuration file for your virtual host:
|
||||
|
||||
```
|
||||
sudo leafpad /etc/apache2/sites-available/000-default.conf
|
||||
```
|
||||
|
||||
Add the following lines after line 1:
|
||||
|
||||
```
|
||||
<Directory "/var/www/html">
|
||||
AllowOverride All
|
||||
</Directory>
|
||||
```
|
||||
|
||||
Ensure it's within the **<VirtualHost *:80>** block, like so:
|
||||
|
||||
```
|
||||
<VirtualHost *:80>
|
||||
<Directory "/var/www/html">
|
||||
AllowOverride All
|
||||
</Directory>
|
||||
...
|
||||
```
|
||||
|
||||
Save the file and exit, then restart Apache:
|
||||
|
||||
```
|
||||
sudo systemctl restart apache2
|
||||
```
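If the restart fails, a quick way to check your edit for syntax errors is:

```
sudo apachectl configtest
```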
|
||||
|
||||
### What's next?
|
||||
|
||||
WordPress is very customizable. By clicking your site name in the WordPress banner at the top of the page (when you're logged in), you'll be taken to the Dashboard. From there, you can change the theme, add pages and posts, edit the menu, add plugins, and do lots more.
|
||||
|
||||
Here are some interesting things you can try on the Raspberry Pi's web server.
|
||||
|
||||
* Add pages and posts to your website
|
||||
* Install different themes from the Appearance menu
|
||||
* Customize your website's theme or create your own
|
||||
* Use your web server to display useful information for people on your network
|
||||
|
||||
|
||||
|
||||
Don't forget, the Raspberry Pi is a Linux computer. You can also follow these instructions to install WordPress on a server running Debian or Ubuntu.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://opensource.com/article/18/10/setting-wordpress-raspberry-pi
|
||||
|
||||
作者:[Ben Nuttall][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://opensource.com/users/bennuttall
|
||||
[b]: https://github.com/lujun9972
|
||||
[1]: https://opensource.com/sitewide-search?search_api_views_fulltext=raspberry%20pi
|
||||
[2]: https://en.wikipedia.org/wiki/Leafpad
|
||||
[3]: http://wordpress.org/
|
||||
[4]: https://wordpress.org/latest.tar.gz
|
@ -0,0 +1,183 @@
|
||||
Improve login security with challenge-response authentication
|
||||
======
|
||||
|
||||
![](https://fedoramagazine.org/wp-content/uploads/2018/10/challenge-response-816x345.png)
|
||||
|
||||
### Introduction
|
||||
|
||||
Today, Fedora offers multiple ways to improve the secure authentication of our user accounts. Of course, it has the familiar user name and password login. It also offers additional authentication options such as biometric, fingerprint, smart card, one-time password, and even challenge-response authentication.
|
||||
|
||||
Each authentication method has clear pros and cons. That, in itself, could be a topic for a rather lengthy article. Fedora Magazine has covered a few of these options previously:
|
||||
|
||||
|
||||
+ [Using the YubiKey4 with Fedora][1]
|
||||
+ [Fedora 28: Better smart card support in OpenSSH][2]
|
||||
|
||||
|
||||
One of the most secure methods in modern Fedora releases is offline hardware challenge-response. It’s also one of the easiest to deploy. Here’s how.
|
||||
|
||||
### Challenge-response authentication
|
||||
|
||||
Technically, when you provide a password, you’re responding to a user name challenge. The offline challenge response covered here requires your user name first. Next, Fedora challenges you to provide an encrypted physical hardware token. The token responds to the challenge with another encrypted key it stores via the Pluggable Authentication Modules (PAM) framework. Finally, Fedora prompts you for the password. This prevents someone from just using a found hardware token, or just using a user name and password without the correct encrypted key.
|
||||
|
||||
This means that in addition to your user name and password, you must have previously registered one or more encrypted hardware tokens with the OS. And you have to provide that physical hardware token to be able to authenticate with your user name.
|
||||
|
||||
Some challenge-response methods, like one-time passwords (OTP), take an encrypted code key on the hardware token, and pass that key across the network to a remote authentication server. The server then tells Fedora’s PAM framework whether it’s a valid token for that user name. This is great if the authentication server(s) are on the local network. The downside is that if the network connection is down, or you’re working remotely without a network connection, you can’t use this remote authentication method. You could be locked out of the system until you can connect through the network to the server.
|
||||
|
||||
Sometimes a workplace requires the use of a Yubikey One Time Password (OTP) configuration. However, on home or personal systems you may prefer a local challenge-response configuration. Everything is local, and the method requires no remote network calls. The following process works on Fedora 27, 28, and 29.
|
||||
|
||||
### Preparation
|
||||
|
||||
#### Hardware token keys
|
||||
|
||||
First you need a secure hardware token key. Specifically, this process requires a Yubikey 4, Yubikey NEO, or a recently released Yubikey 5 series device, which also supports FIDO2. You should purchase two of them to provide a backup in case one becomes lost or damaged. You can use these keys on numerous workstations. The simpler FIDO or FIDO U2F-only versions don’t work for this process, but they are great for online services that use FIDO.
|
||||
|
||||
#### Backup, backup, and backup
|
||||
|
||||
Next, make a backup of all your important data. You may want to test the configuration in a Fedora 27/28/29 cloned VM to make sure you understand the process before setting up your personal workstation.
|
||||
|
||||
#### Updating and installing
|
||||
|
||||
Now make sure Fedora is up to date. Then install the required Fedora Yubikey packages via these dnf commands:
|
||||
|
||||
```
|
||||
$ sudo dnf upgrade
|
||||
$ sudo dnf install ykclient* ykpers* pam_yubico*
|
||||
$ cd
|
||||
```
|
||||
|
||||
If you’re in a VM environment, such as Virtual Box, make sure the Yubikey device is inserted in a USB port, and enable USB access to the Yubikey in the VM control.
|
||||
|
||||
### Configuring Yubikey
|
||||
|
||||
Verify that your user account has access to the USB Yubikey:
|
||||
|
||||
```
|
||||
$ ykinfo -v
|
||||
version: 3.5.0
|
||||
```
|
||||
|
||||
If the YubiKey is not detected, the following error message appears:
|
||||
|
||||
```
|
||||
Yubikey core error: no yubikey present
|
||||
```
|
||||
|
||||
Next, initialize each of your new Yubikeys with the following ykpersonalize command. This sets up Yubikey configuration slot 2 for challenge-response using the HMAC-SHA1 algorithm, even for challenges shorter than 64 bytes. If you have already set up your Yubikeys for challenge-response, you don’t need to run ykpersonalize again.
|
||||
|
||||
```
|
||||
$ ykpersonalize -2 -ochal-resp -ochal-hmac -ohmac-lt64 -oserial-api-visible
|
||||
```
|
||||
|
||||
Some users leave the YubiKey in their workstation while using it, and even use challenge-response for virtual machines. However, for more security you may prefer to manually trigger the Yubikey to respond to the challenge.
|
||||
|
||||
To add that manual challenge button trigger, add the -ochal-btn-trig flag. This flag causes the Yubikey to flash its LED on a request, then wait up to 15 seconds for you to press the button on the hardware key to produce the response key.
|
||||
|
||||
```
|
||||
$ ykpersonalize -2 -ochal-resp -ochal-hmac -ohmac-lt64 -ochal-btn-trig -oserial-api-visible
|
||||
```
|
||||
|
||||
Do this for each of your new hardware keys, only once per key. Once you have programmed your keys, store the Yubikey configuration to ~/.yubico with the following command:
|
||||
|
||||
```
|
||||
$ ykpamcfg -2 -v
|
||||
debug: util.c:222 (check_firmware_version): YubiKey Firmware version: 4.3.4
|
||||
|
||||
Sending 63 bytes HMAC challenge to slot 2
|
||||
Sending 63 bytes HMAC challenge to slot 2
|
||||
Stored initial challenge and expected response in '/home/chuckfinley/.yubico/challenge-9992567'.
|
||||
```
|
||||
|
||||
If you are setting up multiple keys for backup purposes, configure all the keys the same, and store each key’s challenge-response using the ykpamcfg utility. If you run the command ykpersonalize on an existing registered key, you must store the configuration again.
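For illustration, each stored challenge file is named after its key’s serial number, so the challenges for several backup keys coexist side by side (the serial numbers below are examples only):

```
$ ls ~/.yubico
challenge-9992567  challenge-9992568
```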
|
||||
|
||||
### Configuring /etc/pam.d/sudo
|
||||
|
||||
Now to verify this configuration worked, **in the same terminal window** you’ll set up sudo to require the use of the Yubikey challenge-response. Insert the following line into the /etc/pam.d/sudo file:
|
||||
|
||||
```
|
||||
auth required pam_yubico.so mode=challenge-response
|
||||
```
|
||||
|
||||
Insert the above auth line into the file above the auth include system-auth line. Then save the file and exit the editor. In a default Fedora 29 setup, /etc/pam.d/sudo should now look like this:
|
||||
|
||||
```
|
||||
#%PAM-1.0
|
||||
auth required pam_yubico.so mode=challenge-response
|
||||
auth include system-auth
|
||||
account include system-auth
|
||||
password include system-auth
|
||||
session optional pam_keyinit.so revoke
|
||||
session required pam_limits.so
|
||||
session include system-auth
|
||||
```
|
||||
|
||||
**Keep this original terminal window open** , and test by opening another new terminal window. In the new terminal window type:
|
||||
|
||||
```
|
||||
$ sudo echo testing
|
||||
```
|
||||
|
||||
You should notice the LED blinking on the key. Tap the Yubikey button and you should see a prompt for your sudo password. After you enter your password, you should see “testing” echoed in the terminal screen.
|
||||
|
||||
Now test to ensure a correct failure. Start another terminal window and remove the Yubikey from the USB port. Verify that sudo no longer works without the Yubikey with this command:
|
||||
|
||||
```
|
||||
$ sudo echo testing fail
|
||||
```
|
||||
|
||||
You should immediately be prompted for the sudo password. Even if you enter the password, it should fail.
|
||||
|
||||
### Configuring Gnome Desktop Manager
|
||||
|
||||
Once your testing is complete, now you can add challenge-response support for the graphical login. Re-insert your Yubikey into the USB port. Next you’ll add the following line to the /etc/pam.d/gdm-password file:
|
||||
|
||||
```
|
||||
auth required pam_yubico.so mode=challenge-response
|
||||
```
|
||||
|
||||
Open a terminal window, and issue the following command. You can use another editor if desired:
|
||||
|
||||
```
|
||||
$ sudo vi /etc/pam.d/gdm-password
|
||||
```
|
||||
|
||||
You should see the Yubikey LED blinking. Press the Yubikey button, then enter the password at the prompt.
|
||||
|
||||
Modify the /etc/pam.d/gdm-password file to add the new auth line above the existing line auth substack password-auth. The top of the file should now look like this:
|
||||
|
||||
```
|
||||
auth [success=done ignore=ignore default=bad] pam_selinux_permit.so
|
||||
auth required pam_yubico.so mode=challenge-response
|
||||
auth substack password-auth
|
||||
auth optional pam_gnome_keyring.so
|
||||
auth include postlogin
|
||||
|
||||
account required pam_nologin.so
|
||||
```
|
||||
|
||||
Save the changes and exit the editor. If you use vi, the key sequence is to hit the **Esc** key, then type :wq! and press **Enter** to save and exit.
|
||||
|
||||
### Conclusion
|
||||
|
||||
Now log out of GNOME. With the Yubikey inserted into the USB port, click on your user name in the graphical login. The Yubikey LED begins to flash. Touch the button, and you will be prompted for your password.
|
||||
|
||||
If you lose the Yubikey, you can still use the secondary backup Yubikey in addition to your set password. You can also add additional Yubikey configurations to your user account.
|
||||
|
||||
If someone gains access to your password, they still can’t login without your physical hardware Yubikey. Congratulations! You’ve now dramatically increased the security of your workstation login.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://fedoramagazine.org/login-challenge-response-authentication/
|
||||
|
||||
作者:[nabooengineer][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://fedoramagazine.org/author/nabooengineer/
|
||||
[b]: https://github.com/lujun9972
|
||||
[1]: https://fedoramagazine.org/using-the-yubikey4-with-fedora/
|
||||
[2]: https://fedoramagazine.org/fedora-28-better-smart-card-support-openssh/
|
||||
|
@ -1,4 +1,4 @@
|
||||
9 个方法,提升开发者与设计师之间的协作
|
||||
9 个提升开发者与设计师协作的方法
|
||||
======
|
||||
|
||||
![](https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/BUS_consensuscollab1.png?itok=ULQdGjlV)
|
||||
@ -9,45 +9,45 @@
|
||||
|
||||
两边都有自己的成见。工程师经常认为设计师们古怪不理性,而设计师也认为工程师们死板要求高。在一天的工作快要结束时,情况会变得更加微妙。设计师和开发者们的命运永远交织在一起。
|
||||
|
||||
做到以下九件事,便可以增强他们之间的合作
|
||||
做到以下九件事,便可以改进他们之间的合作。
|
||||
|
||||
### 1\. 首先,说实在的,打破壁垒。
|
||||
### 1. 首先,说实在的,打破壁垒
|
||||
|
||||
几乎每一个行业都有“<ruby>迷惑之墙<rt>wall of confusion</rt></ruby>”的模子。无论你干什么工作,拆除这堵墙的第一步就是要双方都认同它需要拆除。一旦所有的人都认为现有的流程效率低下,你就可以从其他想法中获得灵感,然后解决问题。
|
||||
几乎每一个行业都有“<ruby>迷墙<rt>wall of confusion</rt></ruby>”的因子。无论你干什么工作,拆除这堵墙的第一步就是要双方都认同它需要拆除。一旦所有的人都认为现有的流程效率低下,你就可以从其它想法中获得灵感,然后解决问题。
|
||||
|
||||
### 2\. 学会共情
|
||||
### 2. 学会共情
|
||||
|
||||
在撸起袖子开始干之前,休息一下。这是团队建设的重要的交汇点。一个时机去认识到:我们都是成人,我们都有自己的优点与缺点,更重要的是,我们是一个团队。围绕工作流程与工作效率的讨论会经常发生,因此在开始之前,建立一个信任与协作的基础至关重要。
|
||||
在撸起袖子开始干之前,先等一下。这是团队建设的重要的交汇点,也是建立共同认知的时机:我们都是成人,我们都有自己的优点与缺点,更重要的是,我们是一个团队。围绕工作流程与工作效率的讨论会经常发生,因此在开始之前,建立一个信任与协作的基础至关重要。
|
||||
|
||||
### 3\. 认识差异
|
||||
### 3. 认识差异
|
||||
|
||||
设计师和开发者从不同的角度攻克问题。对于相同的问题,设计师会追求更好的效果,而开发者会寻求更高的效率。这两种观点不必互相排斥。谈判和妥协的余地很大,并且在二者之间必然存在一个用户满意度最佳的中点。
|
||||
|
||||
### 4\. 拥抱共性
|
||||
### 4. 拥抱共性
|
||||
|
||||
这一切都是与工作流程相关的。<ruby>持续集成<rt>Continuous Integration</rt></ruby>/<ruby>持续交付<rt>Continuous Delivery</rt></ruby>,scrum,agille 等等,都基本上说了一件事:构思,迭代,考察,重复。迭代和重复是两种工作的相同点。因此,不再让开发周期紧跟设计周期,而是同时并行地运行它们,这样会更有意义。<ruby>同步周期<rt>Syncing cycles</rt></ruby>允许团队在每一步上交流、协作、互相影响。
|
||||
这一切都是与工作流程相关的。<ruby>持续集成<rt>Continuous Integration</rt></ruby>/<ruby>持续交付<rt>Continuous Delivery</rt></ruby>,scrum,agile 等等,都基本上说了一件事:构思,迭代,考察,重复。迭代和重复是两种工作的相同点。因此,不再让开发周期紧跟设计周期,而是同时并行地运行它们,这样会更有意义。<ruby>同步周期<rt>Syncing cycles</rt></ruby>允许团队在每个环节交流、协作、互相影响。
|
||||
|
||||
### 5\. 管理期望
|
||||
### 5. 管理期望
|
||||
|
||||
一切冲突的起因一言以蔽之:期望不符。因此,防止系统性分裂的简单办法就是通过确保团队成员在说之前先想、在做之前先说来管理期望。设定的期望往往会通过日常对话不断演变。强迫团队通过开会以达到其效果可能会适得其反。
|
||||
|
||||
### 6\. 按需开会
|
||||
### 6. 按需开会
|
||||
|
||||
只在工作开始和工作结束开一次会远远不够。但也不意味着每天或每周都要开会。定期开会也可能会适得其反。试着按需开会吧。即兴会议可能会发生很棒的事情,即使是在开水房。如果你的团队是分散式的或者甚至有一名远程员工,视频会议,文本聊天或者打电话都是开会的好方法。团队中的每人都有多种方式互相沟通,这一点非常重要。
|
||||
只在工作开始和工作结束开一次会远远不够。但也不意味着每天或每周都要开会。定期开会也可能会适得其反。试着按需开会吧。即兴会议,即使是员工闲聊,也可能会发生很棒的事情。如果你的团队是分散式的或者甚至有一名远程员工,视频会议,文本聊天或者打电话都是开会的好方法。团队中的每人都有多种方式互相沟通,这一点非常重要。
|
||||
|
||||
### 7\. 建立词库
|
||||
### 7. 建立词库
|
||||
|
||||
设计师和开发者有时候对相似的想法有着不同的术语,就像把猫叫了个咪。毕竟,所有人都用的惯比起术语的准确度和适应度更重要。
|
||||
设计师和开发者有时候对相似的想法有着不同的术语,就像把猫叫成喵。毕竟,比起术语的准确度和合适度来,大家统一说法才更重要。
|
||||
|
||||
### 8\. 学会沟通
|
||||
### 8. 学会沟通
|
||||
|
||||
无论什么时候,团队中的每个人都有责任去维持一个有效的沟通。每个人都应该努力做到一字一板。
|
||||
|
||||
### 9\. 不断改善
|
||||
### 9. 不断改善
|
||||
|
||||
仅一名团队成员就能破坏整个进度。全力以赴。如果每个人都不关心产品或目标,继续项目或者做出改变的动机就会出现问题。
|
||||
|
||||
本文参考 [Designers and developers: Finding common ground for effective collaboration][2],演讲的作者将会出席在旧金山五月 8-10 号举办的[Red Hat Summit 2018][3]。[五月 7 号][3]注册将节省 500 美元。支付时使用优惠码 **OPEN18** 以获得更多折扣。
|
||||
本文参考[开发者与设计师: 找出有效合作的共同点][2],演讲的作者将会出席 5 月 8-10 号在旧金山举办的[红帽峰会 2018][3]。[5 月 7 号][3]注册将节省 500 美元。支付时使用优惠码 **OPEN18** 以获得更多折扣。
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
@ -56,11 +56,11 @@ via: https://opensource.com/article/18/5/9-ways-improve-collaboration-developers
|
||||
作者:[Jason Brock][a]
|
||||
选题:[lujun9972](https://github.com/lujun9972)
|
||||
译者:[LuuMing](https://github.com/LuuMing)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
校对:[pityonline](https://github.com/pityonline)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://opensource.com/users/jkbrock
|
||||
[1]:https://opensource.com/users/lightguardjp
|
||||
[2]:https://agenda.summit.redhat.com/SessionDetail.aspx?id=154267
|
||||
[3]:https://www.redhat.com/en/summit/2018
|
||||
[a]: https://opensource.com/users/jkbrock
|
||||
[1]: https://opensource.com/users/lightguardjp
|
||||
[2]: https://agenda.summit.redhat.com/SessionDetail.aspx?id=154267
|
||||
[3]: https://www.redhat.com/en/summit/2018
|
||||
|
@ -0,0 +1,74 @@
|
||||
CPU 电源管理工具 - Linux 系统中 CPU 主频的控制和管理
|
||||
======
|
||||
|
||||
![](https://www.ostechnix.com/wp-content/uploads/2018/09/Manage-CPU-Frequency-720x340.jpeg)
|
||||
|
||||
如果你使用笔记本,可能知道 Linux 系统的电源管理做得不太好。虽然有 **TLP**、[**Laptop Mode Tools** 和 **powertop**][1] 这些工具来辅助减少电量消耗,但跟 Windows 和 Mac OS 系统比较起来,电池的整个使用周期还是不尽如人意。此外,还有一种降低功耗的办法就是限制 CPU 的频率。这是可行的,然而却需要编写很复杂的终端命令来设置(可参考下面的示例),所以使用起来不太方便。幸好,有一款名为 **CPU Power Manager** 的 GNOME 扩展插件,可以很容易地设置和管理你的 CPU 主频。GNOME 桌面系统中,CPU Power Manager 使用名为 **intel_pstate** 的功率驱动程序(几乎所有的 Intel CPU 都支持)来控制和管理 CPU 主频。
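下面给出一个手动设置的简单示意,假设系统使用 intel_pstate 驱动(sysfs 路径和取值仅作示例,不同内核版本可能有所差异):

```
# 将 CPU 最大频率限制为硬件上限的 50%(需要 root 权限)
$ echo 50 | sudo tee /sys/devices/system/cpu/intel_pstate/max_perf_pct

# 将 1 写入 no_turbo 即可关闭 Turbo Boost
$ echo 1 | sudo tee /sys/devices/system/cpu/intel_pstate/no_turbo
```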
|
||||
|
||||
使用这个扩展插件的另一个原因是可以减少系统的发热量,因为很多系统在正常使用中的发热量总让人不舒服,限制 CPU 的主频就可以减低发热量。它还可以减少 CPU 和其他组件的磨损。
|
||||
|
||||
### 安装 CPU Power Manager
|
||||
|
||||
首先,进入[**扩展插件主页面**][2],安装此扩展插件。
|
||||
|
||||
安装好插件后,在 GNOME 顶部栏的右侧会出现一个 CPU 图标。点击图标,会出现安装此扩展一个选项提示,如下示:
|
||||
|
||||
![](https://www.ostechnix.com/wp-content/uploads/2018/09/CPU-Power-Manager-icon.png)
|
||||
|
||||
点击**“尝试安装”**按钮,会弹出输入密码确认框。插件需要 root 权限来添加 policykit 规则,进而控制 CPU 主频。下面是弹出的提示框样子:
|
||||
|
||||
![](https://www.ostechnix.com/wp-content/uploads/2018/09/CPU-Power-Manager-1.png)
|
||||
|
||||
输入密码,点击**“认证”**按钮,完成安装。最后在 **/usr/share/polkit-1/actions** 目录下添加了一个名为 **mko.cpupower.setcpufreq.policy** 的 policykit 文件。
|
||||
|
||||
都安装完成后,点击右上角的 CPU 图标,会出现如下所示:
|
||||
|
||||
![](https://www.ostechnix.com/wp-content/uploads/2018/09/CPU-Power-Manager.png)
|
||||
|
||||
### 功能特性
|
||||
|
||||
* **查看 CPU 主频:** 显然,你可以通过这个提示窗口看到 CPU 的当前运行频率。
|
||||
* **设置最大最小主频:** 使用此扩展,你可以根据列出的最大、最小频率百分比进度条来分别设置其频率限制。一旦设置,CPU 将会严格按照此设置范围运行。
|
||||
  * **开/关 Turbo Boost:** 这是我最喜欢的功能特性。大多数 Intel CPU 都有 “Turbo Boost” 特性,其中一个核心会自动超频以获得额外性能。此功能虽然可以使系统获得更高的性能,但也会大大增加功耗。所以,如果不做 CPU 密集型任务的话,为节约电能,最好关闭 Turbo Boost 功能。事实上,在我的电脑上,我大部分时间是把 Turbo Boost 关闭的。
|
||||
* **生成配置文件:** 可以生成最大和最小频率的配置文件,就可以很轻松打开/关闭,而不是每次手工调整设置。
|
||||
|
||||
|
||||
|
||||
### 偏好设置
|
||||
|
||||
你也可以通过偏好设置窗口来自定义扩展插件显示形式:
|
||||
|
||||
![](https://www.ostechnix.com/wp-content/uploads/2018/09/CPU-Power-Manager-preferences.png)
|
||||
|
||||
如你所见,你可以设置是否显示 CPU 主频,也可以设置是否以 **Ghz** 来代替 **Mhz** 显示。
|
||||
|
||||
你也可以编辑和创建/删除配置:
|
||||
|
||||
![](https://www.ostechnix.com/wp-content/uploads/2018/09/CPU-Power-Manager-preferences-1.png)
|
||||
|
||||
可以为每个配置分别设置最大、最小主频及开/关 Turbo boost。
|
||||
|
||||
### 结论
|
||||
|
||||
正如我在开始时所说的,Linux 系统的电源管理并不是最好的,许多人总是希望他们的 Linux 笔记本电脑电池能多用几分钟。如果你也是其中一员,就试试此扩展插件吧。为了省电,虽然这是非常规的做法,但有效果。我确实喜欢这个插件,到现在已经使用了好几个月了。
|
||||
|
||||
你对此插件有何看法呢?请把你的观点留在下面的评论区吧。
|
||||
|
||||
祝贺!
|
||||
|
||||
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://www.ostechnix.com/cpu-power-manager-control-and-manage-cpu-frequency-in-linux/
|
||||
|
||||
作者:[EDITOR][a]
|
||||
选题:[lujun9972](https://github.com/lujun9972)
|
||||
译者:[runningwater](https://github.com/runningwater)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://www.ostechnix.com/author/editor/
|
||||
[1]: https://www.ostechnix.com/improve-laptop-battery-performance-linux/
|
||||
[2]: https://extensions.gnome.org/extension/945/cpu-power-manager/
|
@ -0,0 +1,637 @@
|
||||
# 用 350 行代码从零开始,将 Lisp 编译成 JavaScript
|
||||
|
||||
我们将会在本篇文章中看到从零开始实现的编译器,将简单的类 LISP 计算语言编译成 JavaScript。完整的源代码在 [这里][7].
|
||||
|
||||
我们将会:
|
||||
|
||||
1. 自定义语言,并用它编写一个简单的程序
|
||||
|
||||
2. 实现一个简单的解析器组合器
|
||||
|
||||
3. 为该语言实现一个解析器
|
||||
|
||||
4. 为该语言实现一个美观的打印器
|
||||
|
||||
5. 为我们的需求定义 JavaScript 的一个子集
|
||||
|
||||
6. 实现代码转译器,将代码转译成我们定义的 JavaScript 子集
|
||||
|
||||
7. 把所有东西整合在一起
|
||||
|
||||
开始吧!
|
||||
|
||||
### 1. 定义语言
|
||||
|
||||
Lisp 家族语言最迷人的地方在于,它们的语法本身就是树状表示,这就是这类语言很容易解析的原因。我们很快就能体会到这一点。但首先让我们把自己的语言定义好。我们语言的语法用 BNF(巴科斯-瑙尔范式)描述如下:
|
||||
|
||||
```
|
||||
program ::= expr
|
||||
expr ::= <integer> | <name> | ([<expr>])
|
||||
```
|
||||
|
||||
基本上,我们可以在该语言的最顶层定义表达式并对其进行运算。表达式由一个整数(比如 `5`)、一个变量(比如 `x`)或者一个表达式列表(比如 `(add x 1)`)组成。
|
||||
|
||||
整数对应它本身的值,变量对应它在当前环境中绑定的值,表达式列表对应一个函数调用,该列表的第一个参数是相应的函数,剩下的表达式是传递给这个函数的参数。
|
||||
|
||||
该语言中,我们保留一些内建的特殊形式,这样我们就能做一些更有意思的事情:
|
||||
|
||||
* let 表达式使我们可以在它的 body 环境中引入新的变量。语法如下:
|
||||
|
||||
```
|
||||
let ::= (let ([<letargs>]) <body>)
|
||||
letargs ::= (<name> <expr>)
|
||||
body ::= <expr>
|
||||
```
|
||||
|
||||
* lambda 表达式:也就是匿名函数定义。语法如下:
|
||||
|
||||
```
|
||||
lambda ::= (lambda ([<name>]) <body>)
|
||||
```
|
||||
|
||||
还有一些内建函数: `add`、`mul`、`sub`、`div` 和 `print`。
|
||||
|
||||
让我们看看用我们这门语言编写的入门示例程序:
|
||||
|
||||
```
|
||||
(let
|
||||
((compose
|
||||
(lambda (f g)
|
||||
(lambda (x) (f (g x)))))
|
||||
(square
|
||||
(lambda (x) (mul x x)))
|
||||
(add1
|
||||
(lambda (x) (add x 1))))
|
||||
(print ((compose square add1) 5)))
|
||||
```
|
||||
|
||||
这个程序定义了 3 个函数:`compose`、`square` 和 `add1`。然后将计算结果的值 `((compose square add1) 5)` 输出出来。
|
||||
|
||||
我相信了解这门语言,这些信息就足够了。开始实现它吧。
|
||||
|
||||
在 Haskell 中,我们可以这样定义语言:
|
||||
|
||||
```
|
||||
type Name = String
|
||||
|
||||
data Expr
|
||||
= ATOM Atom
|
||||
| LIST [Expr]
|
||||
deriving (Eq, Read, Show)
|
||||
|
||||
data Atom
|
||||
= Int Int
|
||||
| Symbol Name
|
||||
deriving (Eq, Read, Show)
|
||||
```
|
||||
|
||||
我们可以把用该语言编写的程序解析成 `Expr`。此外,我们为这些数据类型派生了 `Eq`、`Read` 和 `Show` 实例,用于测试和调试。你能够在 REPL 中使用这些数据类型,验证它们确实有用。
|
||||
|
||||
我们不在语法中定义 `lambda`、`let` 或其它的内建函数,原因在于,当前情况下我们没必要用到这些东西。这些函数仅仅是 `LIST` (表达式列表)的更加特殊的用例。所以我决定将它放到后面的部分。
|
||||
|
||||
一般来说,你会想在抽象语法中定义这些特殊用例——以便改进错误信息、启用静态分析和优化等等,但在这里我们不会这样做,对我们来说现在这些已经足够了。
|
||||
|
||||
另一件你可能想做的事情是在语法中添加一些注解信息。比如定位信息:`Expr` 来自哪个文件,具体在这个文件的哪一行哪一列。这样在后面的阶段里,即使错误并非发生在解析阶段,你也能打印出出错位置。
|
||||
|
||||
* _练习 1_:添加一个 `Program` 数据类型,可以按顺序包含多个 `Expr`
|
||||
|
||||
* _练习 2_:向语法树中添加一个定位注解。
|
||||
|
||||
### 2. 实现一个简单的解析器组合库
|
||||
|
||||
我们要做的第一件事情是定义一个嵌入式领域专用语言(Embedded Domain Specific Language 或者 EDSL),我们会用它来定义我们的语言解析器。这常常被称为解析器组合库。我们做这件事完全是出于学习的目的,Haskell 里有很好的解析库,在实际构建软件或者进行实验时,你应该使用它们。[megaparsec][8] 就是这样的一个库。
|
||||
|
||||
首先我们来谈谈解析库的实现的思路。本质上,我们的解析器就是一个函数,接受一些输入,可能会读取输入的一些或全部内容,然后返回解析出来的值和无法解析的输入部分,或者在解析失败时抛出异常。我们把它写出来。
|
||||
|
||||
```
|
||||
newtype Parser a
|
||||
= Parser (ParseString -> Either ParseError (a, ParseString))
|
||||
|
||||
data ParseString
|
||||
= ParseString Name (Int, Int) String
|
||||
|
||||
data ParseError
|
||||
= ParseError ParseString Error
|
||||
|
||||
type Error = String
|
||||
|
||||
```
|
||||
|
||||
这里我们定义了三个主要的新类型。
|
||||
|
||||
第一个,`Parser a` 是之前讨论的解析函数。
|
||||
|
||||
第二个,`ParseString` 是我们的输入或携带的状态。它有三个重要的部分:
|
||||
|
||||
* `Name`: 这是源的名字
|
||||
|
||||
* `(Int, Int)`: 这是源的当前位置
|
||||
|
||||
* `String`: 这是等待解析的字符串
|
||||
|
||||
第三个,`ParseError` 包含了解析器的当前状态和一个错误信息。
|
||||
|
||||
现在我们想让这个解析器更灵活,我们将会定义一些常用类型的实例。这些实例让我们能够将小巧的解析器和复杂的解析器结合在一起(因此它的名字叫做 “解析器组合器”)。
|
||||
|
||||
第一个是 `Functor` 实例。我们需要 `Functor` 实例,因为我们要能够对解析出的值应用函数,从而构造出不同的解析器。当我们定义自己语言的解析器时,我们将会看到相关的示例。
|
||||
|
||||
```
|
||||
instance Functor Parser where
|
||||
fmap f (Parser parser) =
|
||||
Parser (\str -> first f <$> parser str)
|
||||
```
|
||||
|
||||
第二个是 `Applicative` 实例。该实例的常见用例是把一个纯函数应用到多个解析器的结果上。
|
||||
|
||||
```
|
||||
instance Applicative Parser where
|
||||
pure x = Parser (\str -> Right (x, str))
|
||||
(Parser p1) <*> (Parser p2) =
|
||||
Parser $
|
||||
\str -> do
|
||||
(f, rest) <- p1 str
|
||||
(x, rest') <- p2 rest
|
||||
pure (f x, rest')
|
||||
|
||||
```
|
||||
|
||||
(注意:_我们还会实现一个 Monad 实例,这样我们才能使用 do 语法_)
|
||||
|
||||
第三个是 `Alternative` 实例。万一前面的解析器解析失败了,我们要能够提供一个备用的解析器。
|
||||
|
||||
```
|
||||
instance Alternative Parser where
|
||||
empty = Parser (`throwErr` "Failed consuming input")
|
||||
(Parser p1) <|> (Parser p2) =
|
||||
Parser $
|
||||
\pstr -> case p1 pstr of
|
||||
Right result -> Right result
|
||||
Left _ -> p2 pstr
|
||||
```
|
||||
|
||||
第四个是 `Monad` 实例。这样我们就能链接解析器。
|
||||
|
||||
```
|
||||
instance Monad Parser where
|
||||
(Parser p1) >>= f =
|
||||
Parser $
|
||||
\str -> case p1 str of
|
||||
Left err -> Left err
|
||||
Right (rs, rest) ->
|
||||
case f rs of
|
||||
Parser parser -> parser rest
|
||||
|
||||
```
|
||||
|
||||
接下来,让我们定义一种运行解析器的方式,以及一个用于报告解析失败的辅助函数:
|
||||
|
||||
```
|
||||
|
||||
runParser :: String -> String -> Parser a -> Either ParseError (a, ParseString)
|
||||
runParser name str (Parser parser) = parser $ ParseString name (0,0) str
|
||||
|
||||
throwErr :: ParseString -> String -> Either ParseError a
|
||||
throwErr ps@(ParseString name (row,col) _) errMsg =
|
||||
Left $ ParseError ps $ unlines
|
||||
[ "*** " ++ name ++ ": " ++ errMsg
|
||||
, "* On row " ++ show row ++ ", column " ++ show col ++ "."
|
||||
]
|
||||
|
||||
```
|
||||
|
||||
现在我们将会开始实现组合器,这是 EDSL 的 API,也是它的核心。
|
||||
|
||||
首先,我们会定义 `oneOf`。如果输入流中的下一个字符在给定的字符列表中,`oneOf` 就会成功,否则就会失败。
|
||||
|
||||
```
|
||||
oneOf :: [Char] -> Parser Char
|
||||
oneOf chars =
|
||||
Parser $ \case
|
||||
ps@(ParseString name (row, col) str) ->
|
||||
case str of
|
||||
[] -> throwErr ps "Cannot read character of empty string"
|
||||
(c:cs) ->
|
||||
if c `elem` chars
|
||||
then Right (c, ParseString name (row, col+1) cs)
|
||||
else throwErr ps $ unlines ["Unexpected character " ++ [c], "Expecting one of: " ++ show chars]
|
||||
```
|
||||
|
||||
`optional` 会防止解析器因抛出错误而中止:解析失败时它仅仅会返回 `Nothing`。
|
||||
|
||||
```
|
||||
optional :: Parser a -> Parser (Maybe a)
|
||||
optional (Parser parser) =
|
||||
Parser $
|
||||
\pstr -> case parser pstr of
|
||||
Left _ -> Right (Nothing, pstr)
|
||||
Right (x, rest) -> Right (Just x, rest)
|
||||
```
|
||||
|
||||
`many` 会反复运行解析器直到失败,完成时返回成功解析出的结果列表。`many1` 做的事情类似,但它要求至少成功解析一次,否则整体失败。
|
||||
|
||||
```
|
||||
many :: Parser a -> Parser [a]
|
||||
many parser = go []
|
||||
where go cs = (parser >>= \c -> go (c:cs)) <|> pure (reverse cs)
|
||||
|
||||
many1 :: Parser a -> Parser [a]
|
||||
many1 parser =
|
||||
(:) <$> parser <*> many parser
|
||||
|
||||
```
|
||||
|
||||
下面的这些解析器通过我们定义的组合器来实现一些特殊的解析器:
|
||||
|
||||
```
|
||||
char :: Char -> Parser Char
|
||||
char c = oneOf [c]
|
||||
|
||||
string :: String -> Parser String
|
||||
string = traverse char
|
||||
|
||||
space :: Parser Char
|
||||
space = oneOf " \n"
|
||||
|
||||
spaces :: Parser String
|
||||
spaces = many space
|
||||
|
||||
spaces1 :: Parser String
|
||||
spaces1 = many1 space
|
||||
|
||||
withSpaces :: Parser a -> Parser a
|
||||
withSpaces parser =
|
||||
spaces *> parser <* spaces
|
||||
|
||||
parens :: Parser a -> Parser a
|
||||
parens parser =
|
||||
(withSpaces $ char '(')
|
||||
*> withSpaces parser
|
||||
<* (spaces *> char ')')
|
||||
|
||||
sepBy :: Parser a -> Parser b -> Parser [b]
|
||||
sepBy sep parser = do
|
||||
frst <- optional parser
|
||||
rest <- many (sep *> parser)
|
||||
pure $ maybe rest (:rest) frst
|
||||
|
||||
```
|
||||
|
||||
现在为该门语言定义解析器所需要的所有东西都有了。
|
||||
|
||||
* _练习_ :实现一个 EOF(end of file/input,即文件或输入终止符)解析器组合器。
|
||||
|
||||
### 3. 为我们的语言实现解析器
|
||||
|
||||
我们会用自顶向下的方法定义解析器。
|
||||
|
||||
```
|
||||
parseExpr :: Parser Expr
|
||||
parseExpr = fmap ATOM parseAtom <|> fmap LIST parseList
|
||||
|
||||
parseList :: Parser [Expr]
|
||||
parseList = parens $ sepBy spaces1 parseExpr
|
||||
|
||||
parseAtom :: Parser Atom
|
||||
parseAtom = parseSymbol <|> parseInt
|
||||
|
||||
parseSymbol :: Parser Atom
|
||||
parseSymbol = fmap Symbol parseName
|
||||
|
||||
```
|
||||
|
||||
注意,这四个函数是对我们这门语言的高层描述,这也体现了为什么 Haskell 非常适合编写解析器。定义完高层部分后,我们还需要定义低层的 `parseName` 和 `parseInt`。
|
||||
|
||||
我们能在这门语言中用什么字符作为名字呢?用小写的字母、数字和下划线吧,而且名字的第一个字符必须是字母。
|
||||
|
||||
```
|
||||
parseName :: Parser Name
|
||||
parseName = do
|
||||
c <- oneOf ['a'..'z']
|
||||
cs <- many $ oneOf $ ['a'..'z'] ++ "0123456789" ++ "_"
|
||||
pure (c:cs)
|
||||
```
|
||||
|
||||
整数是一系列数字,数字前面可能有负号 ‘-’:
|
||||
|
||||
```
|
||||
parseInt :: Parser Atom
|
||||
parseInt = do
|
||||
sign <- optional $ char '-'
|
||||
num <- many1 $ oneOf "0123456789"
|
||||
let result = read $ maybe num (:num) sign
|
||||
pure $ Int result
|
||||
```
|
||||
|
||||
最后,我们会定义用来运行解析器的函数,返回值可能是一个 `Expr` 或者是一条错误信息。
|
||||
|
||||
```
|
||||
runExprParser :: Name -> String -> Either String Expr
|
||||
runExprParser name str =
|
||||
case runParser name str (withSpaces parseExpr) of
|
||||
Left (ParseError _ errMsg) -> Left errMsg
|
||||
Right (result, _) -> Right result
|
||||
```
|
||||
|
||||
* _练习 1_ :为第一节中定义的 `Program` 类型编写一个解析器
|
||||
|
||||
* _练习 2_ :用 Applicative 的形式重写 `parseName`
|
||||
|
||||
* _练习 3_ :`parseInt` 可能出现溢出情况,找到处理它的方法,不要用 `read`。
|
||||
|
||||
### 4. 为这门语言实现一个更好看的输出器
|
||||
|
||||
我们还想做一件事,将我们的程序以源代码的形式打印出来。这对完善错误信息很有用。
|
||||
|
||||
```
|
||||
printExpr :: Expr -> String
|
||||
printExpr = printExpr' False 0
|
||||
|
||||
printAtom :: Atom -> String
|
||||
printAtom = \case
|
||||
Symbol s -> s
|
||||
Int i -> show i
|
||||
|
||||
printExpr' :: Bool -> Int -> Expr -> String
|
||||
printExpr' doindent level = \case
|
||||
ATOM a -> indent (bool 0 level doindent) (printAtom a)
|
||||
LIST (e:es) ->
|
||||
indent (bool 0 level doindent) $
|
||||
concat
|
||||
[ "("
|
||||
, printExpr' False (level + 1) e
|
||||
, bool "\n" "" (null es)
|
||||
, intercalate "\n" $ map (printExpr' True (level + 1)) es
|
||||
, ")"
|
||||
]
|
||||
|
||||
indent :: Int -> String -> String
|
||||
indent tabs e = concat (replicate tabs " ") ++ e
|
||||
```
|
||||
|
||||
* _练习_ :为第一节中定义的 `Program` 类型编写一个美观的输出器
|
||||
|
||||
好,目前为止我们写了近 200 行代码,这些代码一般叫做编译器的前端。我们还要写大概 150 行代码,用来执行三个额外的任务:我们需要根据需求定义一个 JS 的子集,定义一个将我们的语言转译成这个子集的转译器,最后把所有东西整合在一起。开始吧。
|
||||
|
||||
### 5. 根据需求定义 JavaScript 的子集
|
||||
|
||||
首先,我们要定义将要使用的 JavaScript 的子集:
|
||||
|
||||
```
|
||||
data JSExpr
|
||||
= JSInt Int
|
||||
| JSSymbol Name
|
||||
| JSBinOp JSBinOp JSExpr JSExpr
|
||||
| JSLambda [Name] JSExpr
|
||||
| JSFunCall JSExpr [JSExpr]
|
||||
| JSReturn JSExpr
|
||||
deriving (Eq, Show, Read)
|
||||
|
||||
type JSBinOp = String
|
||||
```
|
||||
|
||||
这个数据类型表示 JavaScript 表达式。我们有两个原子类型 `JSInt` 和 `JSSymbol`,它们是由我们语言中的 `Atom` 转译而来;我们用 `JSBinOp` 来表示二元操作,比如 `+` 或 `*`;用 `JSLambda` 表示匿名函数,对应我们语言中的 lambda 表达式;用 `JSFunCall` 来调用函数,`let` 引入新名字的功能也将借助它来实现;用 `JSReturn` 从函数中返回值,因为 JavaScript 需要显式的 return。
|
||||
|
||||
`JSExpr` 类型是对 JavaScript 表达式的 **抽象表示**。我们会把自己语言中表达式的抽象表示 `Expr` 转译成 JavaScript 表达式的抽象表示 `JSExpr`。但为了实现这个功能,我们需要实现 `JSExpr` ,并从这个抽象表示中生成 JavaScript 代码。我们将通过递归匹配 `JSExpr` 实现,将 JS 代码当作 `String` 来输出。这和我们在 `printExpr` 中做的基本上是一样的。我们还会追踪元素的作用域,这样我们才可以用合适的方式缩进生成的代码。
|
||||
|
||||
```
|
||||
printJSOp :: JSBinOp -> String
|
||||
printJSOp op = op
|
||||
|
||||
printJSExpr :: Bool -> Int -> JSExpr -> String
|
||||
printJSExpr doindent tabs = \case
|
||||
JSInt i -> show i
|
||||
JSSymbol name -> name
|
||||
JSLambda vars expr -> (if doindent then indent tabs else id) $ unlines
|
||||
["function(" ++ intercalate ", " vars ++ ") {"
|
||||
,indent (tabs+1) $ printJSExpr False (tabs+1) expr
|
||||
] ++ indent tabs "}"
|
||||
JSBinOp op e1 e2 -> "(" ++ printJSExpr False tabs e1 ++ " " ++ printJSOp op ++ " " ++ printJSExpr False tabs e2 ++ ")"
|
||||
JSFunCall f exprs -> "(" ++ printJSExpr False tabs f ++ ")(" ++ intercalate ", " (fmap (printJSExpr False tabs) exprs) ++ ")"
|
||||
JSReturn expr -> (if doindent then indent tabs else id) $ "return " ++ printJSExpr False tabs expr ++ ";"
|
||||
```
|
||||
|
||||
* _练习 1_ :添加 `JSProgram` 类型,它可以包含多个 `JSExpr` ,然后创建一个叫做 `printJSExprProgram` 的函数来生成代码。
|
||||
|
||||
* _练习 2_ :添加 `JSExpr` 的新类型:`JSIf`,并为其生成代码。
|
||||
|
||||
### 6. 实现到我们定义的 JavaScript 子集的代码转译器
|
||||
|
||||
我们快做完了。这一节将会创建函数,将 `Expr` 转译成 `JSExpr`。
|
||||
|
||||
基本思想很简单,我们会将 `ATOM` 转译成 `JSSymbol` 或者 `JSInt`,然后会将 `LIST` 转译成一个函数调用或者转译的特例。
|
||||
|
||||
```
|
||||
type TransError = String
|
||||
|
||||
translateToJS :: Expr -> Either TransError JSExpr
|
||||
translateToJS = \case
|
||||
ATOM (Symbol s) -> pure $ JSSymbol s
|
||||
ATOM (Int i) -> pure $ JSInt i
|
||||
LIST xs -> translateList xs
|
||||
|
||||
translateList :: [Expr] -> Either TransError JSExpr
|
||||
translateList = \case
|
||||
[] -> Left "translating empty list"
|
||||
ATOM (Symbol s):xs
|
||||
| Just f <- lookup s builtins ->
|
||||
f xs
|
||||
f:xs ->
|
||||
JSFunCall <$> translateToJS f <*> traverse translateToJS xs
|
||||
|
||||
```
|
||||
|
||||
`builtins` 是一系列要转译的特例,比如 `lambda` 和 `let`。每个特例都会接收一个参数列表,验证其是否符合语法规范,然后将其转译成等效的 `JSExpr`。
|
||||
|
||||
```
|
||||
type Builtin = [Expr] -> Either TransError JSExpr
|
||||
type Builtins = [(Name, Builtin)]
|
||||
|
||||
builtins :: Builtins
|
||||
builtins =
|
||||
[("lambda", transLambda)
|
||||
,("let", transLet)
|
||||
,("add", transBinOp "add" "+")
|
||||
,("mul", transBinOp "mul" "*")
|
||||
,("sub", transBinOp "sub" "-")
|
||||
,("div", transBinOp "div" "/")
|
||||
,("print", transPrint)
|
||||
]
|
||||
|
||||
```
|
||||
|
||||
在我们的实现中,内建的特殊形式被当作特殊的、非第一类的对象来对待,因此无法把它们当作第一类函数使用。
|
||||
|
||||
我们会把 Lambda 表达式转译成一个匿名函数:
|
||||
|
||||
```
|
||||
transLambda :: [Expr] -> Either TransError JSExpr
|
||||
transLambda = \case
|
||||
[LIST vars, body] -> do
|
||||
vars' <- traverse fromSymbol vars
|
||||
JSLambda vars' <$> (JSReturn <$> translateToJS body)
|
||||
|
||||
vars ->
|
||||
Left $ unlines
|
||||
["Syntax error: unexpected arguments for lambda."
|
||||
,"expecting 2 arguments, the first is the list of vars and the second is the body of the lambda."
|
||||
,"In expression: " ++ show (LIST $ ATOM (Symbol "lambda") : vars)
|
||||
]
|
||||
|
||||
fromSymbol :: Expr -> Either String Name
|
||||
fromSymbol (ATOM (Symbol s)) = Right s
|
||||
fromSymbol e = Left $ "cannot bind value to non symbol type: " ++ show e
|
||||
|
||||
```
|
||||
|
||||
我们会将 let 转译成带有相关名字参数的函数定义,然后带上参数调用函数,因此会在这一作用域中引入变量:
|
||||
|
||||
```
|
||||
transLet :: [Expr] -> Either TransError JSExpr
|
||||
transLet = \case
|
||||
[LIST binds, body] -> do
|
||||
(vars, vals) <- letParams binds
|
||||
vars' <- traverse fromSymbol vars
|
||||
JSFunCall . JSLambda vars' <$> (JSReturn <$> translateToJS body) <*> traverse translateToJS vals
|
||||
where
|
||||
letParams :: [Expr] -> Either Error ([Expr],[Expr])
|
||||
letParams = \case
|
||||
[] -> pure ([],[])
|
||||
LIST [x,y] : rest -> ((x:) *** (y:)) <$> letParams rest
|
||||
x : _ -> Left ("Unexpected argument in let list in expression:\n" ++ printExpr x)
|
||||
|
||||
vars ->
|
||||
Left $ unlines
|
||||
["Syntax error: unexpected arguments for let."
|
||||
,"expecting 2 arguments, the first is the list of var/val pairs and the second is the let body."
|
||||
,"In expression:\n" ++ printExpr (LIST $ ATOM (Symbol "let") : vars)
|
||||
]
|
||||
```
|
||||
|
||||
我们会将可以在多个参数之间执行的操作符转译成一系列二元操作符。比如:`(add 1 2 3)` 将会变成 `1 + (2 + 3)`。
|
||||
|
||||
```
|
||||
transBinOp :: Name -> Name -> [Expr] -> Either TransError JSExpr
|
||||
transBinOp f _ [] = Left $ "Syntax error: '" ++ f ++ "' expected at least 1 argument, got: 0"
|
||||
transBinOp _ _ [x] = translateToJS x
|
||||
transBinOp _ f list = foldl1 (JSBinOp f) <$> traverse translateToJS list
|
||||
```
|
||||
|
||||
然后我们会将 `print` 转换成对 `console.log` 的调用。
|
||||
|
||||
```
|
||||
transPrint :: [Expr] -> Either TransError JSExpr
|
||||
transPrint [expr] = JSFunCall (JSSymbol "console.log") . (:[]) <$> translateToJS expr
|
||||
transPrint xs = Left $ "Syntax error. print expected 1 arguments, got: " ++ show (length xs)
|
||||
|
||||
```
|
||||
|
||||
注意,如果我们将这些代码当作 `Expr` 的特例进行解析,那我们就可能会跳过语法验证。
|
||||
|
||||
* _练习 1_ :将 `Program` 转译成 `JSProgram`
|
||||
|
||||
* _练习 2_ :为 `if Expr Expr Expr` 添加一个特例,并将它转译成你在上一次练习中实现的 `JSIf` 条件语句。
|
||||
|
||||
### 7. 把所有东西整合到一起
|
||||
|
||||
最终,我们将会把所有东西整合到一起。我们会:
|
||||
|
||||
1. 读取文件
|
||||
|
||||
2. 将文件解析成 `Expr`
|
||||
|
||||
3. 将文件转译成 `JSExpr`
|
||||
|
||||
4. 将 JavaScript 代码发送到标准输出流
|
||||
|
||||
我们还会启用一些用于测试的标志位:
|
||||
|
||||
* `--e` 将进行解析并打印出表达式的抽象表示(`Expr`)
|
||||
|
||||
* `--pp` 将进行解析,美化输出
|
||||
|
||||
* `--jse` 将进行解析、转译、并打印出生成的 JS 表达式(`JSExpr`)的抽象表示
|
||||
|
||||
* `--ppc` 将进行解析,美化输出并进行编译
|
||||
|
||||
```
|
||||
main :: IO ()
|
||||
main = getArgs >>= \case
|
||||
[file] ->
|
||||
printCompile =<< readFile file
|
||||
["--e",file] ->
|
||||
either putStrLn print . runExprParser "--e" =<< readFile file
|
||||
["--pp",file] ->
|
||||
either putStrLn (putStrLn . printExpr) . runExprParser "--pp" =<< readFile file
|
||||
["--jse",file] ->
|
||||
either print (either putStrLn print . translateToJS) . runExprParser "--jse" =<< readFile file
|
||||
["--ppc",file] ->
|
||||
either putStrLn (either putStrLn putStrLn) . fmap (compile . printExpr) . runExprParser "--ppc" =<< readFile file
|
||||
_ ->
|
||||
putStrLn $ unlines
|
||||
["Usage: runghc Main.hs [ --e, --pp, --jse, --ppc ] <filename>"
|
||||
,"--e print the Expr"
|
||||
,"--pp pretty print Expr"
|
||||
,"--jse print the JSExpr"
|
||||
,"--ppc pretty print Expr and then compile"
|
||||
]
|
||||
|
||||
printCompile :: String -> IO ()
|
||||
printCompile = either putStrLn putStrLn . compile
|
||||
|
||||
compile :: String -> Either Error String
|
||||
compile str = printJSExpr False 0 <$> (translateToJS =<< runExprParser "compile" str)
|
||||
|
||||
```
|
||||
|
||||
大功告成。将自己的语言编译到 JS 子集的编译器已经完成了。再说一次,你可以在 [这里][9] 看到完整的源文件。
|
||||
|
||||
用我们的编译器运行第一节的示例,产生的 JavaScript 代码如下:
|
||||
|
||||
```
|
||||
$ runhaskell Lisp.hs example.lsp
|
||||
(function(compose, square, add1) {
|
||||
return (console.log)(((compose)(square, add1))(5));
|
||||
})(function(f, g) {
|
||||
return function(x) {
|
||||
return (f)((g)(x));
|
||||
};
|
||||
}, function(x) {
|
||||
return (x * x);
|
||||
}, function(x) {
|
||||
return (x + 1);
|
||||
})
|
||||
```
|
||||
|
||||
如果你在自己电脑上安装了 node.js,你可以用以下命令运行这段代码:
|
||||
|
||||
```
|
||||
$ runhaskell Lisp.hs example.lsp | node -p
|
||||
36
|
||||
undefined
|
||||
```
|
||||
|
||||
* _最终练习_ : 编译有多个表达式的程序而非仅编译一个表达式。
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://gilmi.me/blog/post/2016/10/14/lisp-to-js
|
||||
|
||||
作者:[ Gil Mizrahi ][a]
|
||||
选题:[oska874][b]
|
||||
译者:[BriFuture](https://github.com/BriFuture)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://gilmi.me/home
|
||||
[b]:https://github.com/oska874
|
||||
[1]:https://gilmi.me/blog/authors/Gil
|
||||
[2]:https://gilmi.me/blog/tags/compilers
|
||||
[3]:https://gilmi.me/blog/tags/fp
|
||||
[4]:https://gilmi.me/blog/tags/haskell
|
||||
[5]:https://gilmi.me/blog/tags/lisp
|
||||
[6]:https://gilmi.me/blog/tags/parsing
|
||||
[7]:https://gist.github.com/soupi/d4ff0727ccb739045fad6cdf533ca7dd
|
||||
[8]:https://mrkkrp.github.io/megaparsec/
|
||||
[9]:https://gist.github.com/soupi/d4ff0727ccb739045fad6cdf533ca7dd
|
||||
[10]:https://gilmi.me/blog/post/2016/10/14/lisp-to-js
|
@ -1,24 +1,25 @@
|
||||
translating by Flowsnow
|
||||
|
||||
Peeking into your Linux packages
|
||||
探秘你的Linux软件包
|
||||
======
|
||||
Do you ever wonder how many _thousands_ of packages are installed on your Linux system? And, yes, I said "thousands." Even a fairly modest Linux system is likely to have well over a thousand packages installed. And there are many ways to get details on what they are.
|
||||
你有没有想过你的 Linux 系统上安装了多少千个软件包?是的,我说的是“千”。即使是相当普通的 Linux 系统,也可能安装了超过一千个软件包。有很多方法可以得到这些软件包的详细信息。
|
||||
|
||||
首先,要在基于 Debian 的发行版(如 Ubuntu)上快速得到已安装的软件包数量,请使用 **apt list --installed**, 如下:
|
||||
|
||||
First, to get a quick count of your installed packages on a Debian-based distribution such as Ubuntu, use the command **apt list --installed** like this:
|
||||
```
|
||||
$ apt list --installed | wc -l
|
||||
2067
|
||||
|
||||
```
|
||||
|
||||
This number is actually one too high because the output contains "Listing..." as its first line. This command would be more accurate:
|
||||
这个数字实际上多了一个,因为输出中包含了 “Listing ...” 作为它的第一行。 这个命令会更准确:
|
||||
|
||||
```
|
||||
$ apt list --installed | grep -v "^Listing" | wc -l
|
||||
2066
|
||||
|
||||
```
|
||||
|
||||
To get some details on what all these packages are, browse the list like this:
|
||||
要获得所有这些包的详细信息,请按以下方式浏览列表:
|
||||
|
||||
```
|
||||
$ apt list --installed | more
|
||||
Listing...
|
||||
@ -32,9 +33,9 @@ account-plugin-salut/xenial,now 3.12.11-0ubuntu3 amd64 [installed]
|
||||
|
||||
```
|
||||
|
||||
That's a lot of detail to absorb -- especially if you let your eyes wander through all 2,000+ files rolling by. It contains the package names, versions, and more but isn't the easiest information display for us humans to parse. The dpkg-query makes the descriptions quite a bit easier to understand, but they will wrap around your command window unless it's _very_ wide. So, the data display below has been split into the left and right hand sides to make this post easier to read.
|
||||
这里有大量细节需要消化——尤其是当你的目光扫过滚动而过的 2000 多个软件包时。它包含软件包名称、版本等信息,但对我们人类来说并不是最容易解析的显示形式。dpkg-query 可以让描述更容易理解,但除非你的命令窗口*非常*宽,否则它们会折行。因此,为了让此篇文章更容易阅读,下面的数据显示已经分成了左右两侧。
|
||||
|
||||
Left side:
|
||||
左侧:
|
||||
```
|
||||
$ dpkg-query -l | more
|
||||
Desired=Unknown/Install/Remove/Purge/Hold
|
||||
@ -54,7 +55,7 @@ rc account-plugin-windows-live 0.11+14.04.20140409.1-0ubuntu2
|
||||
|
||||
```
|
||||
|
||||
Right side:
|
||||
右侧:
|
||||
```
|
||||
Architecture Description
|
||||
============-=====================================================================
|
||||
@ -70,7 +71,8 @@ all GNOME Control Center account plugin for single signon - windows live
|
||||
|
||||
```
|
||||
|
||||
The "ii" and "rc" designations at the beginning of each line (see "Left side" above) are package state indicators. The first letter represents the desirable package state:
|
||||
每行开头的 “ii” 和 “rc” 标记(见上文“左侧”)是包状态指示符。第一个字母表示包的期望状态:
|
||||
|
||||
```
|
||||
u -- unknown
|
||||
i -- install
|
||||
@ -80,7 +82,8 @@ h -- hold
|
||||
|
||||
```
|
||||
|
||||
The second represents the current package state:
|
||||
第二个代表包的当前状态:
|
||||
|
||||
```
|
||||
n -- not-installed
|
||||
i -- installed
|
||||
@ -93,9 +96,10 @@ t -- triggers-pending (the package has been triggered)
|
||||
|
||||
```
|
||||
|
||||
An added "R" at the end of the normally two-character field would indicate that reinstallation is required. You may never run into these.
|
||||
在通常的双字符字段末尾添加的 “R” 表示需要重新安装。 你可能永远不会碰到这些。
|
||||
|
||||
快速查看整体包状态的一种简单方法是计算在不同状态中包含的包的数量:
|
||||
|
||||
One easy way to take a quick look at your overall package status is to count how many packages are in which of the different states:
|
||||
```
|
||||
$ dpkg-query -l | tail -n +6 | awk '{print $1}' | sort | uniq -c
|
||||
2066 ii
|
||||
@ -103,25 +107,24 @@ $ dpkg-query -l | tail -n +6 | awk '{print $1}' | sort | uniq -c
|
||||
|
||||
```
|
||||
|
||||
I excluded the top five lines from the dpkg-query output above because these are the header lines that would have confused the output.
|
||||
我从上面的 dpkg-query 输出中排除了前五行,因为这些是标题行,会混淆输出。
|
||||
|
||||
这两行基本上告诉我们,在这个系统上,有 2066 个软件包应该安装并且确实已经安装了,而另外 134 个软件包已被删除,但留下了配置文件。你始终可以使用以下命令删除软件包残留的配置文件:
|
||||
|
||||
The two lines basically tell us that on this system, 2,066 packages should be and are installed, while 134 other packages have been removed but have left configuration files behind. You can always remove a package's remaining configuration files with a command like this:
|
||||
```
|
||||
$ sudo dpkg --purge xfont-mathml
|
||||
|
||||
```
|
||||
|
||||
Note that the command above would have removed the package binaries along with the configuration files if both were still installed.
|
||||
|
||||
请注意,如果程序包二进制文件和配置文件都已经安装了,则上面的命令将两者都删除。
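作为补充,下面的组合命令可以一次性清除所有处于 “rc” 状态的软件包残留的配置文件(仅为示意写法,执行第二条之前请先用第一条确认列表):

```
$ dpkg -l | awk '/^rc/ {print $2}'
$ dpkg -l | awk '/^rc/ {print $2}' | xargs sudo dpkg --purge
```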
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://www.networkworld.com/article/3242808/linux/peeking-into-your-linux-packages.html
|
||||
|
||||
作者:[Sandra Henry-Stocker][a]
|
||||
译者:[runningwater](https://github.com/runningwater)
|
||||
译者:[Flowsnow](https://github.com/Flowsnow)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://www.networkworld.com/author/Sandra-Henry_Stocker/
|
||||
[a]: https://www.networkworld.com/author/Sandra-Henry_Stocker/
|
@ -1,134 +0,0 @@
|
||||
# 2018 年最好的 Linux 发行版
|
||||
|
||||
![Linux distros 2018](https://www.linux.com/sites/lcom/files/styles/rendered_file/public/linux-distros-2018.jpg?itok=Z8sdx4Zu "Linux distros 2018")
|
||||
Jack Wallen 分享他挑选的 2018 年最好的 Linux 发行版。
|
||||
|
||||
新的一年,Linux 依旧充满无限可能。许多发行版在 2017 年经历了重大变化,我相信 2018 年它们会在服务器和桌面上带来更稳定的系统,以及不断增长的市场份额。
|
||||
|
||||
对于那些期待在即将到来的一年里迁移到开源平台(或者想换一个发行版)的人来说,什么才是最好的选择?如果你去 [Distrowatch][14] 看一下,可能会因为众多的发行版而感到眼花缭乱,其中一些的排名在上升,而另一些则恰恰相反。
|
||||
|
||||
因此,哪个 Linux 发行版将在 2018 年得到偏爱?我有我的看法。事实上,我现在就要和你们分享它。
|
||||
|
||||
跟我 [去年的清单][15] 类似,我会把清单按类别拆分,让挑选更轻松。对普通的 Linux 用户来说,至少包含以下几个类别:系统管理员、轻量级发行版、桌面、物联网和服务器。
|
||||
|
||||
根据这些,让我们开始 2018 年最好的 Linux 发行版清单吧。
|
||||
|
||||
### 对系统管理员最好的发行版
|
||||
|
||||
[Debian][16] 不常出现在“最好的”列表中。但他应该出现,为什么呢?如果了解到 Ubuntu 是基于 Debian 构建的(其实有很多的发行版都基于 Debian),你就很容易理解为什么这个发行版应该在许多“最好”清单中。但为什么是对管理员最好的呢?我想这是由于两个非常重要的原因:
|
||||
|
||||
* 容易使用
|
||||
* 非常稳定
|
||||
|
||||
因为 Debian 使用 dpkg 和 apt 包管理,它的使用环境非常简单。而且因为 Debian 提供了最稳定的 Linux 平台之一,它为许多工作提供了理想的环境:桌面、服务器、测试、开发。虽然 Debian 可能不像去年的获奖者那样自带大量应用程序,但添加完成任务所需的任何必要应用程序都非常容易。而且因为 Debian 可以根据你的选择安装桌面(Cinnamon、GNOME、KDE、LXDE、Mate 或者 Xfce),你一定能找到满足你需要的桌面。
|
||||
|
||||
![debian](https://www.linux.com/sites/lcom/files/styles/rendered_file/public/debian.jpg?itok=XkHHG692 "debian")
|
||||
图1:在 Debian 9.3 上运行的 GNOME 桌面。[使用][1]
|
||||
|
||||
同时,Debian 在 Distrowatch 上名列第二。下载、安装,然后让它为你的工作而服务吧。Debian 尽管不那么华丽,但是对于管理员的工作来说十分有用。
|
||||
|
||||
### 最轻量级的发行版
|
||||
|
||||
轻量级的发行版对一些老旧或性能低下的机器支持很好。但这不意味着这些发行版仅仅为老旧硬件而生:如果你想要的是运行速度,不妨看看这类发行版在现代机器上能跑多快。
|
||||
|
||||
在 2018 年上榜的最轻量级的发行版是 [Lubuntu][18]。尽管在这个类别里还有很多选择,而且尽管 Lubuntu 的大小与 Puppy Linux 相接近,但得益于它是 Ubuntu 家庭的一员,这弥补了它在易用性上的一些不足。但是不要担心,Lubuntu 对于硬件的要求并不高:
|
||||
|
||||
+ CPU:奔腾 4 或者 奔腾 M 或者 AMD K8 以上
|
||||
+ 对于本地应用,512 MB 的内存就可以了,对于网络使用(Youtube,Google+,Google Drive, Facebook),建议 1 GB 以上。
|
||||
|
||||
Lubuntu 使用的是 LXDE 桌面,这意味着用户在初次使用这个 Linux 发行版时不会有任何问题。这份短清单中包含的应用(例如:Abiword, Gnumeric, 和 Firefox)都是非常轻量,且对用户友好的。
|
||||
|
||||
### [lubuntu,jpg][8]
|
||||
![Lubuntu](https://www.linux.com/sites/lcom/files/styles/rendered_file/public/lubuntu_2.jpg?itok=BkTnh7hU "Lubuntu")
|
||||
图2:LXDE桌面。[使用][2]
|
||||
|
||||
Lubuntu 能让十年以上的电脑如获新生。
|
||||
|
||||
### 最好的桌面发行版
|
||||
|
||||
[Elementary OS][19] 连续两年都是我清单中最好的桌面发行版。对于许多人,[Linux Mint][20] 才是桌面发行版的领导者。但是,对我来说,它在易用性和稳定性上很难打败 Elementary OS。例如,我曾确信 [Ubuntu][21] 17.10 的发布会让我迁移回 Canonical 的发行版。但在迁移到使用 GNOME 桌面的新版 Ubuntu 之后不久,我发现自己怀念 Elementary OS 的外观、可用性和使用感受。在使用 Ubuntu 两周以后,我又换回了 Elementary OS。
|
||||
|
||||
### [elementaros.jpg][9]
|
||||
|
||||
![Elementary OS](https://www.linux.com/sites/lcom/files/styles/rendered_file/public/elementaros.jpg?itok=SRZC2vkg "Elementary OS")
|
||||
图3:Pantheon 桌面是一件像艺术品一样的桌面。[使用][3]
|
||||
|
||||
任何用过 Elementary OS 的人都会感觉很好。Pantheon 桌面在默认设置和用户友好之间做到了完美结合。每次更新,它都会变得更好。
|
||||
|
||||
尽管 Elementary OS 在 Distrowatch 中排名第六,但我预计到 2018 年底,它将至少上升至第三名。Elementary 开发人员非常关注用户的需求。他们倾听并且改进,它目前的状态已经非常好,而且似乎只会越来越好。如果您需要一个具有出色可靠性和易用性的桌面,Elementary OS 就是你的发行版。
|
||||
|
||||
### 能够证明自己的最好的发行版
|
||||
|
||||
很长一段时间内,[Gentoo][22] 都稳坐“展现你技能”的发行版的首座。但是,我认为现在 Gentoo 是时候让出“证明自己”的宝座给 [Linux From Scratch][23] 了。你可能认为这不公平,因为 LFS 实际上不是一个发行版,而是一个帮助用户创建自己的 Linux 发行版的项目。但是,有什么能比自己创建一个发行版更能证明你所学的 Linux 知识呢?在 LFS 项目中,你可以从头开始构建自定义的 Linux 系统。所以,如果你真的有需要证明的东西,请下载 [Linux From Scratch Book][24] 并开始构建吧。
|
||||
|
||||
### 对于物联网最好的发行版
|
||||
|
||||
[Ubuntu Core][25] 已经是第二年赢得该项的冠军了。Ubuntu Core 是 Ubuntu 的一个小型版本,专为嵌入式和物联网设备而构建。Ubuntu Core 之所以非常适合物联网,是因为它将重点放在 snap 包上——这种通用软件包可以安装到平台上,而不会干扰基本系统。snap 包包含它们运行所需的所有内容(包括依赖项),因此不必担心安装会破坏操作系统(或任何其他已安装的软件)。此外,snap 非常容易升级,并且运行在隔离的沙箱中,这使它们成为物联网的理想解决方案。
|
||||
|
||||
Ubuntu Core 内置的另一个安全特性是登录机制。Ubuntu Core 使用 Ubuntu One 的 ssh 密钥,登录系统的唯一方法是把 ssh 密钥上传到 [Ubuntu One 帐户][26]。这为你的物联网设备提供了更高的安全性。
|
||||
|
||||
### [ubuntucore.jpg][10]
|
||||
![ Ubuntu Core](https://www.linux.com/sites/lcom/files/styles/rendered_file/public/ubuntucore.jpg?itok=Ydfq8NKH " Ubuntu Core")
|
||||
图4:Ubuntu Core屏幕指示通过Ubuntu One用户启用远程访问。[使用][3]
|
||||
|
||||
### 最好的服务器发行版
|
||||
|
||||
这个类别有点难以定论,主要原因在于支持。如果你需要商业支持,乍一看,你最好的选择可能是 [Red Hat Enterprise Linux][27]。红帽年复一年地证明了自己不仅是全球最强大的企业服务器平台之一,而且是最赚钱的开源企业(年收入超过 20 亿美元)。
|
||||
|
||||
但是,Red Hat 并不是唯一的服务器发行版。 实际上,Red Hat 甚至不支持企业服务器计算的各个方面。如果你关注亚马逊 Elastic Compute Cloud 上的云统计数据,Ubuntu 就会打败红帽企业Linux。根据[云市场][28],EC2 统计数据显示 RHEL 的部署率低于 10 万,而 Ubuntu 的部署量超过 20 万。
|
||||
|
||||
最终的结果是,Ubuntu 几乎已经成为云计算的领导者。如果你将它与 Ubuntu 易于使用和管理容器结合起来,就会发现 Ubuntu Server 是服务器类别的明显赢家。而且,如果你需要商业支持,Canonical 将为你提供 [Ubuntu Advantage][29]。
|
||||
|
||||
对使用 Ubuntu Server 的一个提醒是,它默认为纯文本界面。如果需要,你可以安装 GUI(见下面的示例),但使用 Ubuntu Server 的命令行非常简单(每个 Linux 管理员都应该掌握)。
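例如,在 Ubuntu Server 上添加 GUI 的一种可能做法如下(软件包名因版本而异,仅作示意):

```
$ sudo apt install ubuntu-desktop
```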
|
||||
|
||||
### [ubuntuserver.jpg][11]
|
||||
|
||||
![Ubuntu server](https://www.linux.com/sites/lcom/files/styles/rendered_file/public/ubuntuserver_1.jpg?itok=qtFSUlee "Ubuntu server")
|
||||
图5:Ubuntu 服务器登录,通知更新。[使用][3]
|
||||
|
||||
### 你最好的选择
|
||||
|
||||
正如我之前所说,这些选择都非常主观,但如果你正在寻找一个好的开始,那就试试这些发行版。每一个都可以用于非常特定的目的,并且比大多数做得更好。虽然你可能不同意我的特定选择,但你可能会同意 Linux 在每个方面都提供了惊人的可能性。并且,请继续关注下周更多“最佳发行版”选秀。
|
||||
|
||||
通过 Linux 基金会和 edX 的免费[“Linux 简介”][13]课程了解有关Linux的更多信息。
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://www.linux.com/blog/learn/intro-to-linux/2018/1/best-linux-distributions-2018
|
||||
|
||||
作者:[JACK WALLEN ][a]
|
||||
译者:[dianbanjiu](https://github.com/dianbanjiu)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://www.linux.com/users/jlwallen
|
||||
[1]:https://www.linux.com/licenses/category/used-permission
|
||||
[2]:https://www.linux.com/licenses/category/used-permission
|
||||
[3]:https://www.linux.com/licenses/category/used-permission
|
||||
[4]:https://www.linux.com/licenses/category/used-permission
|
||||
[5]:https://www.linux.com/licenses/category/used-permission
|
||||
[6]:https://www.linux.com/licenses/category/creative-commons-zero
|
||||
[7]:https://www.linux.com/files/images/debianjpg
|
||||
[8]:https://www.linux.com/files/images/lubuntujpg-2
|
||||
[9]:https://www.linux.com/files/images/elementarosjpg
|
||||
[10]:https://www.linux.com/files/images/ubuntucorejpg
|
||||
[11]:https://www.linux.com/files/images/ubuntuserverjpg-1
|
||||
[12]:https://www.linux.com/files/images/linux-distros-2018jpg
|
||||
[13]:https://training.linuxfoundation.org/linux-courses/system-administration-training/introduction-to-linux
|
||||
[14]:https://distrowatch.com/
|
||||
[15]:https://www.linux.com/news/learn/sysadmin/best-linux-distributions-2017
|
||||
[16]:https://www.debian.org/
|
||||
[17]:https://www.parrotsec.org/
|
||||
[18]:http://lubuntu.me/
|
||||
[19]:https://elementary.io/
|
||||
[20]:https://linuxmint.com/
|
||||
[21]:https://www.ubuntu.com/
|
||||
[22]:https://www.gentoo.org/
|
||||
[23]:http://www.linuxfromscratch.org/
|
||||
[24]:http://www.linuxfromscratch.org/lfs/download.html
|
||||
[25]:https://www.ubuntu.com/core
|
||||
[26]:https://login.ubuntu.com/
|
||||
[27]:https://www.redhat.com/en/technologies/linux-platforms/enterprise-linux
|
||||
[28]:http://thecloudmarket.com/stats#/by_platform_definition
|
||||
[29]:https://buy.ubuntu.com/?_ga=2.177313893.113132429.1514825043-1939188204.1510782993
|
@ -0,0 +1,102 @@
|
||||
对于开发者来说 5 个最好的 Linux 发行版
|
||||
============================================================
|
||||
![Linux distros for devs](https://www.linux.com/sites/lcom/files/styles/rendered_file/public/king-penguins_1920.jpg?itok=qmy8htw6 "Linux distros for devs")
|
||||
Jack Wallen 介绍了一些非常适合用来做开发工作的 Linux 发行版。[Creative Commons Zero][6]
|
||||
在考虑使用 Linux 时,有很多事情需要考量。你希望使用什么包管理器?你更喜欢现代还是传统的桌面界面?易用性是你的首选吗?你希望发行版具有怎样的灵活性?这个发行版要服务于什么任务?
|
||||
这些是你在开始使用 Linux 之前必须考虑的问题。这个发行版是作为桌面还是服务器运行?你会做网络或者系统审计吗?或者你会做开发?如果你花了很多时间研究 Linux,就会知道每类任务都有非常合适的 Linux 发行版。这当然也适用于开发人员。尽管 Linux 在设计上对开发人员来说就是一个理想的平台,但某些发行版仍比其他发行版更胜一筹,可以作为开发人员最好的操作系统。
|
||||
我想分享一下我自己认为你在做开发工作时最好用的 Linux 发行版。虽然这五个发行版中的每一个都可以用于通用开发(可能有一个例外),但它们各有特定的用途。你可能会、也可能不会对这些选择感到惊讶。
|
||||
话虽如此,让我们开始吧。
|
||||
### Debian
|
||||
[Debian][14] 在许多“最佳 Linux 发行版”清单中都名列前茅,这是有充分理由的:Debian 是许多其他发行版的基础,也是许多人所信赖的发行版,这正是更多开发人员选择 Debian 的理由。当你在 Debian 上开发一个软件包时,它很有可能也适用于 [Ubuntu][15]、[Linux Mint][16]、[Elementary OS][17] 以及大量其他基于 Debian 的发行版。
|
||||
除了这个非常明显的答案之外,Debian还通过默认存储库提供了大量可用的应用程序(图1)。
|
||||
![Debian apps](https://www.linux.com/sites/lcom/files/styles/rendered_file/public/devel_1.jpg?itok=3mpkS3Kp "Debian apps")
|
||||
图 1: 标准的Debian存储库里面可用的应用程序。[Used with permission][1]
|
||||
为了对程序员友好,这些应用程序(以及它们的依赖项)都易于安装。例如 build-essential 软件包(可以安装在 Debian 的任何衍生发行版上),它包括 dpkg-dev、g++、gcc、hurd-dev、libc-dev 和 make 等开发过程所需的所有工具。可以使用命令 sudo apt install build-essential 安装 build-essential 软件包,如下面的示例所示。
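下面是一个简单的示意(第二条命令仅用于验证编译器已就绪,并非必需):

```
$ sudo apt install build-essential
$ gcc --version
```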
|
||||
标准存储库当中提供了数百种的特定于开发人员的应用程序,例如:
|
||||
* Autoconf—配置构建脚本的软件
|
||||
* Autoproject—为新程序创建源码包
|
||||
* Bison—通用的解析生成器
|
||||
* Bluefish—面向程序员的强大GUI编辑器
|
||||
* Geany—轻量化的IDE
|
||||
* Kate—强大的文本编辑器
|
||||
* Eclipse—便于构建能与其他工具集成的软件的 IDE
|
||||
这样的清单还可以继续列下去。
|
||||
Debian 也是你能找到的最坚如磐石的发行版之一,因此很少有人担心因为桌面崩溃而失去宝贵的工作。作为奖励,Debian 的所有应用程序都必须符合 [Debian 自由软件指南][18],该指南遵守以下“社会契约”:
|
||||
* Debian 保持完全免费.
|
||||
* 我们将无偿回馈自由软件社区.
|
||||
* 我们不会隐藏问题.
|
||||
* 我们的首要任务是我们的用户和自由软件
|
||||
* 不符合我们自由软件标准的作品将放在 non-free(非自由)仓库中。
|
||||
此外,如果你还不熟悉在 Linux 上进行开发,Debian 的 [用户手册][19] 中有一个很方便的编程章节。
|
||||
### openSUSE Tumbleweed (滚动更新版)
|
||||
如果你想在最前沿的滚动发行版上进行开发,[openSUSE][20] 的 [Tumbleweed][21] 是最好的选择之一。你还可以借助 openSUSE 当中令人惊叹的管理员工具(其中包括 YaST)来提高效率。如果你不熟悉 YaST(Yet another Setup Tool,又一个设置工具),它是一个非常强大的软件,允许你在一个方便的地方管理整个平台。在 YaST 中,你还可以按 RPM 组安装软件。打开 YaST,单击 RPM Groups(按用途分组的软件),然后向下滚动到 Development 部分,即可查看可安装的大量软件组(图2)。
|
||||
![openSUSE](https://www.linux.com/sites/lcom/files/styles/rendered_file/public/devel_2.jpg?itok=EeCjn1cx "openSUSE")
|
||||
图 2: 在openSUSE Tumbleweed中安装软件包组.[Creative Commons Zero][2]
|
||||
openSUSE 还允许你通过单击 [rpmdevtools 安装页面][22] 上的链接,再单击 Tumbleweed 对应的链接,来快速安装所有必需的开发工具。这将自动添加必要的仓库并安装 rpmdevtools(命令行方式见下面的示例)。
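如果你更喜欢命令行,下面是一种假设的等效做法(具体仓库和包名以 openSUSE 实际提供的为准):

```
$ sudo zypper install rpmdevtools
```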
|
||||
对于开发者来说,使用滚动发行版意味着你安装的软件始终是最新版本。
|
||||
### CentOS
|
||||
不得不承认,[红帽企业版 Linux][23](RHEL)是企业级业务的事实标准。如果你需要针对特定平台进行开发,又有点承担不起 RHEL 的许可证费用,那么 [CentOS][24] 就是你不错的选择——实际上,它是 RHEL 的社区版本。你会发现 CentOS 上的许多软件包与 RHEL 中的软件包相同,所以一旦熟悉了其中一个,你就能使用另一个。
|
||||
如果你认真考虑在企业级平台上进行开发,CentOS 是不错的选择。而且由于 CentOS 是面向服务器的发行版,你可以更轻松地面向以 Web 为中心的平台进行开发。你可以直接把 CentOS 作为开发和测试的主机,而不必先开发好再迁移到托管在另一台计算机上的服务器。
|
||||
寻找满足你开发需求的软件? 你只需要打开CentOS软件中心, 其中包含了集成开发环境(IDE - 图3)的专用子部分
|
||||
![CentOS](https://www.linux.com/sites/lcom/files/styles/rendered_file/public/devel_3.jpg?itok=0oe4zj9j "CentOS")
|
||||
图 3: 在CentOS中安装功能强大的IDE很简单。.[Used with permission][3]
|
||||
CentOS 还包括安全增强型 Linux(SELinux),它使你可以更轻松地测试你的软件与 RHEL 中同一套安全机制的集成情况。SELinux 经常让设计不佳的软件头疼,因此提前适应它,真的有利于确保你的应用程序能在 RHEL 之类的系统上运行(一个常用的排查手段见下面的示例)。如果你不确定如何在 CentOS 上进行开发,可以阅读 [RHEL 7 开发人员指南][25]。
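下面的示意展示了排查时的常见做法:先查看 SELinux 当前模式,必要时临时切换为宽容(Permissive)模式(setenforce 的改动在重启后会恢复):

```
$ getenforce
Enforcing
$ sudo setenforce 0
$ getenforce
Permissive
```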
|
||||
### Raspbian
|
||||
不得不承认,嵌入式操作系统正风靡一时。使用这种操作系统最简单的方法就是通过 Raspberry Pi——一种微型单板计算机。事实上,Raspberry Pi 已经成为全球 DIY 爱好者喜爱的硬件。为这些设备提供动力的是 [Raspbian][26] 操作系统。Raspbian 包含 [BlueJ][27]、[Geany][28]、[Greenfoot][29]、[Sense HAT Emulator][30]、[Sonic Pi][31]、[Thonny Python IDE][32]、[Python][33] 和 [Scratch][34] 等工具,因此你不会缺少开发工具。Raspbian 还包括一个用户友好的桌面 UI(图4),使事情变得更加容易。
|
||||
![Raspbian](https://www.linux.com/sites/lcom/files/styles/rendered_file/public/devel_4.jpg?itok=VLoYak6L "Raspbian")
|
||||
图 4: Raspbian主菜单,显示预安装的开发人员软件.[Used with permission][4]
|
||||
对于任何想要针对 Raspberry Pi 平台进行开发的人来说,Raspbian 都是必备的。如果你想在不使用 Raspberry Pi 硬件的情况下体验 Raspbian,可以下载 [此处][35] 的 ISO 镜像,将其安装到 VirtualBox 虚拟机中。
|
||||
### Pop!_OS
|
||||
不要被这个名字迷惑了,[System76][36] 的 [Pop!_OS][37] 是一个非常正经的操作系统。虽然 System76 对这个 Ubuntu 衍生版所做的修改乍看并不明显,但正是这些修改让它与众不同。
|
||||
System76 的目标是创建一个专为开发人员、创客和计算机科学专业人员打造的操作系统。借助重新设计的 GNOME 主题,Pop!_OS 非常漂亮(图5),并且功能强大,足以满足硬件制造商和桌面设计人员的需要。
|
||||
### [devel_5.jpg][11]
|
||||
![Pop!_OS](https://www.linux.com/sites/lcom/files/styles/rendered_file/public/devel_5.jpg?itok=n4K7k7Gd "Pop!_OS")
|
||||
图 5: Pop!_OS 桌面.[Used with permission][5]
|
||||
但是,Pop!_OS 的特殊之处在于它是由一家致力于 Linux 硬件的公司开发的。这意味着,当你购买 System76 的笔记本电脑、台式机或服务器时,你就会知道操作系统将与硬件无缝协作——这是其他公司无法提供的。我预测,Pop!_OS 将使 System76 成为 Linux 界的 Apple。
|
||||
### 开始工作吧
|
||||
这些发行版各有所长:你有一个稳定的桌面(Debian)、一个前沿的桌面(openSUSE Tumbleweed)、一个服务器(CentOS)、一个嵌入式平台(Raspbian),以及一个与硬件无缝融合的发行版(Pop!_OS)。除了 Raspbian 之外,这些发行版中的任何一个都能成为出色的开发平台。安装一个,满怀信心地开始你的下一个项目吧。
|
||||
可以通过 Linux Foundation 和 edX 免费提供的 [“Linux 简介”][13] 课程了解更多有关 Linux 的信息。
|
||||
--------------------------------------------------------------------------------
|
||||
via: https://www.linux.com/blog/learn/intro-to-linux/2018/1/5-best-linux-distributions-development
|
||||
作者:[JACK WALLEN ][a]
|
||||
译者:[geekmar](https://github.com/geekmar)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
[a]:https://www.linux.com/users/jlwallen
|
||||
[1]:https://www.linux.com/licenses/category/used-permission
|
||||
[2]:https://www.linux.com/licenses/category/creative-commons-zero
|
||||
[3]:https://www.linux.com/licenses/category/used-permission
|
||||
[4]:https://www.linux.com/licenses/category/used-permission
|
||||
[5]:https://www.linux.com/licenses/category/used-permission
|
||||
[6]:https://www.linux.com/licenses/category/creative-commons-zero
|
||||
[7]:https://www.linux.com/files/images/devel1jpg
|
||||
[8]:https://www.linux.com/files/images/devel2jpg
|
||||
[9]:https://www.linux.com/files/images/devel3jpg
|
||||
[10]:https://www.linux.com/files/images/devel4jpg
|
||||
[11]:https://www.linux.com/files/images/devel5jpg
|
||||
[12]:https://www.linux.com/files/images/king-penguins1920jpg
|
||||
[13]:https://training.linuxfoundation.org/linux-courses/system-administration-training/introduction-to-linux
|
||||
[14]:https://www.debian.org/
|
||||
[15]:https://www.ubuntu.com/
|
||||
[16]:https://linuxmint.com/
|
||||
[17]:https://elementary.io/
|
||||
[18]:https://www.debian.org/social_contract
|
||||
[19]:https://www.debian.org/doc/manuals/debian-reference/ch12.en.html
|
||||
[20]:https://www.opensuse.org/
|
||||
[21]:https://en.opensuse.org/Portal:Tumbleweed
|
||||
[22]:https://software.opensuse.org/download.html?project=devel%3Atools&package=rpmdevtools
|
||||
[23]:https://www.redhat.com/en/technologies/linux-platforms/enterprise-linux
|
||||
[24]:https://www.centos.org/
|
||||
[25]:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/pdf/developer_guide/Red_Hat_Enterprise_Linux-7-Developer_Guide-en-US.pdf
|
||||
[26]:https://www.raspberrypi.org/downloads/raspbian/
|
||||
[27]:https://www.bluej.org/
|
||||
[28]:https://www.geany.org/
|
||||
[29]:https://www.greenfoot.org/
|
||||
[30]:https://www.raspberrypi.org/blog/sense-hat-emulator/
|
||||
[31]:http://sonic-pi.net/
|
||||
[32]:http://thonny.org/
|
||||
[33]:https://www.python.org/
|
||||
[34]:https://scratch.mit.edu/
|
||||
[35]:http://rpf.io/x86iso
|
||||
[36]:https://system76.com/
|
||||
[37]:https://system76.com/pop
|
99
translated/tech/20180601 Download an OS with GNOME Boxes.md
Normal file
99
translated/tech/20180601 Download an OS with GNOME Boxes.md
Normal file
@ -0,0 +1,99 @@
|
||||
用 GNOME Boxes 下载一个镜像
|
||||
======
|
||||
|
||||
![](https://fedoramagazine.org/wp-content/uploads/2018/06/boxes-install-os-816x345.jpg)
|
||||
|
||||
Boxes 是 GNOME 上的虚拟机应用。最近 Boxes 添加了一个新的特性,使得运行不同的 Linux 发行版更加容易:你现在可以让 Boxes 自动安装列表中的这些发行版,该列表甚至包括红帽企业版 Linux。红帽开发者计划提供 [免费的红帽企业版 Linux 订阅][1]。借助 [红帽开发者][2] 帐户,Boxes 可以使用 Developer Suite 订阅自动设置一个 RHEL 虚拟机。下面是它的工作原理。
|
||||
|
||||
### 红帽企业版 Linux
|
||||
|
||||
要创建一个红帽企业版 Linux 的虚拟机,启动 Boxes,点击新建。从源选择列表中选择下载一个镜像。在顶部,点击红帽企业版 Linux。这将会打开网址为 [developers.redhat.com][2] 的一个网络表单。使用已有的红帽开发者账号登录,或是新建一个。
|
||||
|
||||
![][3]
|
||||
|
||||
如果这是一个新帐号,Boxes 在继续之前需要一些额外的信息。这一步需要在账户中开启开发者订阅。还要确保 [接受条款和条件][4],这样可以在之后的注册中节省一步。
|
||||
|
||||
![][5]
|
||||
|
||||
点击提交,然后就会开始下载安装磁盘镜像。下载需要的时间取决于你的网络状况。在这期间你可以去喝杯茶或者咖啡歇息一下。
|
||||
|
||||
![][6]
|
||||
|
||||
等安装介质下载完成(一般位于 ~/Downloads),Boxes 会显示一个快速安装界面。填入账号和密码然后点击继续,当你确认了虚拟机的信息之后点击创建。快速安装会自动完成接下来的整个安装!(现在你可以去享受你的第二杯茶或者咖啡了)
|
||||
|
||||
![][7]
|
||||
|
||||
![][8]
|
||||
|
||||
![][9]
|
||||
|
||||
等到安装结束,虚拟机会直接重启并登录到桌面。在虚拟机里,在应用菜单的系统工具一栏启动红帽订阅管理。这一步需要输入管理员密码。
|
||||
|
||||
![][10]
|
||||
|
||||
单击“注册”按钮,然后按照注册助手中的步骤操作。 出现提示时,使用你的红帽开发者帐户登录。
|
||||
|
||||
![][11]
|
||||
|
||||
![][12]
|
||||
|
||||
现在你可以通过任何一种更新方法(比如 yum 或 GNOME Software)进行下载和更新了。
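例如,在虚拟机的终端里运行(命令仅作示意):

```
$ sudo yum update
```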
|
||||
|
||||
![][13]
|
||||
|
||||
### FreeDOS 或是其他
|
||||
|
||||
Boxes 可以安装很多的 Linux 发行版,而不仅仅只是红帽企业版。 作为 KVM 和 qemu 的前端,Boxes 支持各种操作系统。 使用 [libosinfo][14],Boxes 可以自动下载(在某些情况下安装)相当多不同操作系统。
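作为参考,libosinfo 自带的命令行工具可以列出它识别的操作系统(假设系统已安装 libosinfo 的相关工具包,包名因发行版而异):

```
$ osinfo-query os | head
```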
|
||||
|
||||
![][15]
|
||||
|
||||
要从列表中安装一个操作系统,只需选择并完成创建一个新的虚拟机。一些操作系统,比如 FreeDOS,并不支持快速安装。这些操作系统需要虚拟机从安装介质中引导。之后你可以手动安装。
|
||||
|
||||
![][16]
|
||||
|
||||
![][17]
|
||||
|
||||
### 在 Boxes 上受欢迎的操作系统
|
||||
|
||||
这里仅仅是一些目前在它上面比较受欢迎的选择。
|
||||
|
||||
![][18]![][19]![][20]![][21]![][22]![][23]
|
||||
|
||||
Fedora 会定期更新它的操作系统信息数据库。确保你会经常检查是否有新的操作系统选项。
|
||||
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://fedoramagazine.org/download-os-gnome-boxes/
|
||||
|
||||
作者:[Link Dupont][a]
|
||||
选题:[lujun9972](https://github.com/lujun9972)
|
||||
译者:[dianbanjiu](https://github.com/dianbanjiu)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://fedoramagazine.org/author/linkdupont/
|
||||
[1]:https://developers.redhat.com/blog/2016/03/31/no-cost-rhel-developer-subscription-now-available/
|
||||
[2]:http://developers.redhat.com
|
||||
[3]:https://fedoramagazine.org/wp-content/uploads/2018/05/Screenshot-from-2018-05-25-14-33-13.png
|
||||
[4]:https://www.redhat.com/wapps/tnc/termsack?event%5B%5D=signIn
|
||||
[5]:https://fedoramagazine.org/wp-content/uploads/2018/05/Screenshot-from-2018-05-25-14-34-37.png
|
||||
[6]:https://fedoramagazine.org/wp-content/uploads/2018/05/Screenshot-from-2018-05-25-14-37-27.png
|
||||
[7]:https://fedoramagazine.org/wp-content/uploads/2018/05/Screenshot-from-2018-05-25-15-09-11.png
|
||||
[8]:https://fedoramagazine.org/wp-content/uploads/2018/05/Screenshot-from-2018-05-25-15-15-19-1024x815.png
|
||||
[9]:https://fedoramagazine.org/wp-content/uploads/2018/05/Screenshot-from-2018-05-25-15-21-53-1024x815.png
|
||||
[10]:https://fedoramagazine.org/wp-content/uploads/2018/05/Screenshot-from-2018-05-25-15-26-29-1024x815.png
|
||||
[11]:https://fedoramagazine.org/wp-content/uploads/2018/05/Screenshot-from-2018-05-25-15-30-48-1024x815.png
|
||||
[12]:https://fedoramagazine.org/wp-content/uploads/2018/05/Screenshot-from-2018-05-25-15-31-17-1024x815.png
|
||||
[13]:https://fedoramagazine.org/wp-content/uploads/2018/05/Screenshot-from-2018-05-25-15-32-29-1024x815.png
|
||||
[14]:https://libosinfo.org
|
||||
[15]:https://fedoramagazine.org/wp-content/uploads/2018/05/Screenshot-from-2018-05-25-20-02-56.png
|
||||
[16]:https://fedoramagazine.org/wp-content/uploads/2018/05/Screenshot-from-2018-05-25-15-40-25.png
|
||||
[17]:https://fedoramagazine.org/wp-content/uploads/2018/05/Screenshot-from-2018-05-25-15-43-02-1024x815.png
|
||||
[18]:https://fedoramagazine.org/wp-content/uploads/2018/05/Screenshot-from-2018-05-25-16-55-20-1024x815.png
|
||||
[19]:https://fedoramagazine.org/wp-content/uploads/2018/05/Screenshot-from-2018-05-25-16-28-28-1024x815.png
|
||||
[20]:https://fedoramagazine.org/wp-content/uploads/2018/05/Screenshot-from-2018-05-25-16-11-43-1024x815.png
|
||||
[21]:https://fedoramagazine.org/wp-content/uploads/2018/05/Screenshot-from-2018-05-25-16-58-09-1024x815.png
|
||||
[22]:https://fedoramagazine.org/wp-content/uploads/2018/05/Screenshot-from-2018-05-25-17-46-38-1024x815.png
|
||||
[23]:https://fedoramagazine.org/wp-content/uploads/2018/05/Screenshot-from-2018-05-25-18-34-11-1024x815.png
|
@ -0,0 +1,182 @@
|
||||
如何在 Linux 中一次重命名多个文件
|
||||
======
|
||||
|
||||
![](https://www.ostechnix.com/wp-content/uploads/2018/06/Rename-Multiple-Files-720x340.png)
|
||||
|
||||
你可能已经知道,我们使用 mv 命令在类 Unix 操作系统中重命名或者移动文件和目录。 但是,mv 命令不支持一次重命名多个文件。 不用担心。 在本教程中,我们将学习使用 Linux 中的 “mmv” 命令一次重命名多个文件。 此命令用于在类 Unix 操作系统中使用标准通配符批量移动,复制,追加和重命名文件。
|
||||
|
||||
### 在 Linux 中一次重命名多个文件
|
||||
|
||||
mmv 程序可以在基于 Debian 的系统的默认仓库中找到。要想在 Debian、Ubuntu、Linux Mint 上安装它,请运行以下命令:
|
||||
|
||||
```
|
||||
$ sudo apt-get install mmv
|
||||
```
|
||||
|
||||
我们假设你在当前目录中有以下文件。
|
||||
|
||||
```
|
||||
$ ls
|
||||
a1.txt a2.txt a3.txt
|
||||
```
|
||||
|
||||
现在,你想要将所有以字母 “a” 开头的文件重命名为以 “b” 开头的。当然,你可以在几秒钟内手动执行此操作。但是如果你有数百个文件想要重命名呢?这是一个非常耗时的过程。这时候 **mmv** 命令就很有帮助了。
|
||||
|
||||
要将所有以字母 “a” 开头的文件重命名为以字母 “b” 开头的,只需要运行:
|
||||
|
||||
```
|
||||
$ mmv a\* b\#1
|
||||
```
|
||||
|
||||
让我们检查一下文件是否都已经重命名了。
|
||||
|
||||
```
|
||||
$ ls
|
||||
b1.txt b2.txt b3.txt
|
||||
|
||||
```
|
||||
|
||||
如你所见,所有以字母 “a” 开头的文件(即 a1.txt、a2.txt、a3.txt)都被重命名为 b1.txt、b2.txt、b3.txt。
|
||||
|
||||
**解释**
|
||||
|
||||
在上面的例子中,第一个参数(`a\*`)是 “from” 模式,第二个参数(`b\#1`)是 “to” 模式。根据上面的例子,mmv 将查找任何以字母 “a” 开头的文件名,并根据第二个参数,即 “to” 模式,重命名匹配的文件。我们可以使用通配符,例如 `*`、`?` 和 `[]`,来匹配一个或多个任意字符。请注意,你必须转义通配符,否则它们会被 shell 扩展,mmv 将无法理解。
|
||||
|
||||
“to” 模式中的 `#1` 是通配符索引,它匹配 “from” 模式中的第一个通配符;`#2` 匹配第二个通配符,依此类推。在我们的例子中只有一个通配符(星号),所以写了一个 `#1`。同样,井号(`#`)也应该被转义。此外,你也可以用引号把模式括起来。
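
作为补充,下面这个使用两个通配符的例子(文件名纯属虚构)可以说明索引的用法,它交换了文件名中被下划线分隔的两个部分:

```
$ ls
jan_report.txt feb_report.txt
$ mmv '*_*.txt' '#2_#1.txt'
$ ls
report_jan.txt report_feb.txt
```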
|
||||
|
||||
你甚至可以将具有特定扩展名的所有文件重命名为其他扩展名。例如,要将当前目录中的所有 **.txt** 文件重命名为 **.doc** 文件格式,只需运行:
|
||||
|
||||
```
|
||||
$ mmv \*.txt \#1.doc
|
||||
|
||||
```
|
||||
|
||||
这是另一个例子。 我们假设你有以下文件。
|
||||
|
||||
```
|
||||
$ ls
|
||||
abcd1.txt abcd2.txt abcd3.txt
|
||||
|
||||
```
|
||||
|
||||
你希望在当前目录下的所有文件中将第一次出现的 **abc** 替换为 **xyz**。 你会怎么做呢?
|
||||
|
||||
很简单。
|
||||
|
||||
```
|
||||
$ mmv '*abc*' '#1xyz#2'
|
||||
|
||||
```
|
||||
|
||||
请注意,在上面的示例中,模式被单引号括起来了。
|
||||
|
||||
让我们检查下 “abc” 是否实际上被替换为 “xyz”。
|
||||
|
||||
```
|
||||
$ ls
|
||||
xyzd1.txt xyzd2.txt xyzd3.txt
|
||||
|
||||
```
|
||||
|
||||
看到没?文件 **abcd1.txt**、**abcd2.txt** 和 **abcd3.txt** 已经被重命名为 **xyzd1.txt**、**xyzd2.txt** 和 **xyzd3.txt**。
|
||||
|
||||
mmv 命令的另一个值得注意的功能是你可以使用 **-n** 选项打印输出而不是重命名文件,如下所示。
|
||||
|
||||
```
|
||||
$ mmv -n a\* b\#1
|
||||
a1.txt -> b1.txt
|
||||
a2.txt -> b2.txt
|
||||
a3.txt -> b3.txt
|
||||
|
||||
```
|
||||
|
||||
这样,你可以在重命名文件之前简单地验证 mmv 命令实际执行的操作。
|
||||
|
||||
有关更多详细信息,请参阅 man 页面。
|
||||
|
||||
```
|
||||
$ man mmv
|
||||
|
||||
```
|
||||
|
||||
**更新:**
|
||||
|
||||
**Thunar 文件管理器**默认具有内置的**批量重命名**选项。如果你正在使用 Thunar,那么重命名文件要比使用 mmv 命令容易得多。
|
||||
|
||||
Thunar 在大多数 Linux 发行版的默认仓库中都可用。
|
||||
|
||||
要在基于 Arch 的系统上安装它,请运行:
|
||||
|
||||
```
|
||||
$ sudo pacman -S thunar
|
||||
```
|
||||
|
||||
在 RHEL、CentOS 上:
|
||||
```
|
||||
$ sudo yum install thunar
|
||||
```
|
||||
|
||||
在 Fedora 上:
|
||||
```
|
||||
$ sudo dnf install thunar
|
||||
|
||||
```
|
||||
|
||||
在 openSUSE 上:
|
||||
```
|
||||
$ sudo zypper install thunar
|
||||
|
||||
```
|
||||
|
||||
在 Debian、Ubuntu、Linux Mint 上:
|
||||
```
|
||||
$ sudo apt-get install thunar
|
||||
|
||||
```
|
||||
|
||||
安装后,你可以从菜单或应用程序启动器中启动批量重命名程序。 要从终端启动它,请使用以下命令:
|
||||
|
||||
```
|
||||
$ thunar -B
|
||||
|
||||
```
|
||||
|
||||
批量重命名器的界面如下图所示。
|
||||
|
||||
![][1]
|
||||
|
||||
单击加号,然后选择要重命名的文件列表。批量重命名可以重命名文件的名称、文件的后缀,或者同时重命名文件的名称和后缀。Thunar 目前支持以下批量重命名方式:
|
||||
|
||||
- 插入日期或时间
|
||||
- 插入或覆盖
|
||||
- 编号
|
||||
- 删除字符
|
||||
- 搜索和替换
|
||||
- 大写或小写
|
||||
|
||||
当你从选项列表中选择其中一个条件时,你将在“新名称”列中看到更改的预览,如下面的屏幕截图所示。
|
||||
|
||||
![][2]
|
||||
|
||||
选择条件后,单击**重命名文件**选项来重命名文件。
|
||||
|
||||
你还可以在 Thunar 中选择两个或更多文件来打开批量重命名器。选择文件后,按 F2 或右键单击并选择**重命名**。
|
||||
|
||||
嗯,这就是本次的所有内容了。希望有所帮助。更多干货即将到来。敬请关注!
|
||||
|
||||
祝快乐!
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://www.ostechnix.com/how-to-rename-multiple-files-at-once-in-linux/
|
||||
|
||||
作者:[SK][a]
|
||||
选题:[lujun9972](https://github.com/lujun9972)
|
||||
译者:[Flowsnow](https://github.com/Flowsnow)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://www.ostechnix.com/author/sk/
|
||||
[1]: http://www.ostechnix.com/wp-content/uploads/2018/06/bulk-rename.png
|
||||
[2]: http://www.ostechnix.com/wp-content/uploads/2018/06/bulk-rename-1.png
|
@ -0,0 +1,59 @@
|
||||
6 个托管你 git 仓库的地方
|
||||
======
|
||||
|
||||
![](https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/house_home_colors_live_building.jpg?itok=HLpsIfIL)
|
||||
|
||||
也许你是少数几个没有注意到的人之一:就在几周前,[微软收购了 GitHub][1],两家公司已经达成了协议。微软在近些年已经变成了开源的有力支持者,而 GitHub 从成立起,就已经成为了许多开源项目的实际代码库。
|
||||
|
||||
然而,这次收购可能会让你感到一些不安。毕竟,公司的收购让你意识到了你的开源代码放在了一个商业平台上。可能你现在还没准备好迁移到其他的平台上去,但是至少这可以给你提供一些可选项。让我们看看网上现在都有哪些可用的平台。
|
||||
|
||||
### 选择之一: GitHub
|
||||
|
||||
严格来说,这是一个合格的选项。[GitHub][2] 历史上没有什么糟糕的失败,而且微软最近也确实发展了不少开源项目。把你的项目继续放在 GitHub 上,继续保持观望没有什么不可以。它现在依然是最大的软件开发的网络社区,同时还有许多对于问题追踪、代码复查、持续集成、通用的代码管理很有用的工具。而且它还是基于 Git 的,Git 是每个人都喜欢的开源版本控制系统。你的代码还是你的代码。
|
||||
|
||||
### 选择之二: GitLab
|
||||
|
||||
[GitLab][3] 是代码库平台主要的竞争者。它是完全开源的。你可以像在 GitHub 一样把你的代码托管在 GitLab,但你也可以选择在你自己的服务器上自行托管你自己的 GitLab 实例,并完全控制谁可以访问那里的所有内容以及如何访问、管理。GitLab 与 GitHub 功能几乎相同,有些人甚至可能会说它的持续集成和测试工具更优越。尽管 GitLab 上的开发者社区肯定比 GitHub 上的要小,但它仍然没有什么可以被指责的。你可能会在那里的人群中找到更多志同道合的开发者。
|
||||
|
||||
### 选择之三: Bitbucket
|
||||
|
||||
[Bitbucket][4] 已经存在很多年了。在某些方面,它可以作为 GitHub 未来的一面镜子。Bitbucket 八年前被一家大公司(Atlassian)收购,并且已经经历了一些转变过程。它仍然是一个像 GitHub 这样的商业平台,但它远不是一个创业公司,而且从组织上说它的基础相当稳定。Bitbucket 具有 GitHub 和 GitLab 的大部分功能,以及它自己的一些新功能,如对 [Mercurial][5] 存储库的原生支持。
|
||||
|
||||
### 选择之四: SourceForge
|
||||
|
||||
[SourceForge][6] 是开源代码库的鼻祖。如果你曾经有一个开源项目,SourceForge 就是一个托管你的代码和向他人分享你的发行版的地方。它花了一段时间才迁移到 Git 进行版本控制,它也经历过自己的商业收购和重组事件,还为一些开源项目做过一些不幸的捆绑决策。也就是说,SourceForge 从那时起似乎已经恢复,该网站仍然是一个有着不少开源项目的地方。然而,很多人仍然感到有点受伤,而且有些人并不喜欢它将平台货币化的各种尝试,所以使用时一定要睁大眼睛。
|
||||
|
||||
### 选择之五: 自己管理
|
||||
|
||||
如果你想自己掌握自己项目的命运(除了你自己,没人可以责备你),那么一切都由自己来做对你来说可能是最佳的选择,无论是大项目还是小项目。Git 是开源的,所以自己托管也很容易。如果你需要问题追踪和代码审查,你可以运行一个 GitLab 或者 [Phabricator][7] 的实例。对于持续集成,你可以设置自己的 [Jenkins][8] 自动化服务的实例。是的,你需要对自己的基础架构开销和相关的安全要求负责。但是,这个设置过程并不是很困难。所以如果你不想自己的代码被托管在其他人的平台上,这就是一种很好的方法。
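
举个简单的例子,自行托管最基本的形式只需要一台能通过 SSH 访问的服务器和一个“裸”仓库(下面的服务器地址和仓库名只是假设):

```
$ ssh git@git.example.com 'git init --bare ~/myproject.git'
$ git remote add origin git@git.example.com:myproject.git
$ git push -u origin master
```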
|
||||
|
||||
### 选择之六:以上全部
|
||||
|
||||
以上所有选择有一个美妙之处:尽管这些平台上有一些专有的部分,但它们仍然建立在坚实的开源技术之上。而且不仅仅是开源,这些技术还被明确设计为可以分布在大型网络(如互联网)的多个节点上。你不需要只使用一个,你可以使用其中几个……或者全部。可以把用 GitLab 搭建的自有平台作为你有保证的基础,同时在 GitHub 和 Bitbucket 上放置克隆仓库,用于问题跟踪和持续集成。将你的主代码库保留在 GitHub 上,但为了让自己安心,也可以在 GitLab 上放一个“备份”克隆。
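
在 Git 中同时使用多个平台非常直接,比如下面这个假设的例子(用户名与仓库名仅作举例):

```
$ git remote add github git@github.com:user/project.git
$ git remote add gitlab git@gitlab.com:user/project.git
$ git push github master
$ git push gitlab master
```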
|
||||
|
||||
关键在于你的选择是什么。我们能有这么多选择,都是得益于那些非常有用的项目上的开源协议。未来一片光明。
|
||||
|
||||
当然,在这个列表中我肯定忽略了一些开源平台。你是否使用了很多的平台?哪个是你最喜欢的?你都可以在这里说出来!
|
||||
|
||||
:)
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://opensource.com/article/18/8/github-alternatives
|
||||
|
||||
作者:[Jason van Gumster][a]
|
||||
选题:[lujun9972](https://github.com/lujun9972)
|
||||
译者:[dianbanjiu](https://github.com/dianbanjiu)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://opensource.com/users/mairin
|
||||
[1]: https://www.theverge.com/2018/6/4/17422788/microsoft-github-acquisition-official-deal
|
||||
[2]: https://github.com/
|
||||
[3]: https://gitlab.com
|
||||
[4]: https://bitbucket.org
|
||||
[5]: https://www.mercurial-scm.org/wiki/Repository
|
||||
[6]: https://sourceforge.net
|
||||
[7]: https://phacility.com/phabricator/
|
||||
[8]: https://jenkins.io
|
@ -0,0 +1,153 @@
|
||||
Flameshot – 一个简洁但功能丰富的截图工具
|
||||
======
|
||||
|
||||
![](https://www.ostechnix.com/wp-content/uploads/2018/09/Flameshot-720x340.png)
|
||||
|
||||
截图是我工作的一部分。我先前使用深度截图工具来截图,深度截图是一个简单、轻量级且非常简洁的截图工具,它自带许多功能,例如窗口识别、快捷键支持、图片编辑、延时截图、社交分享、智能存储以及图片清晰度调整等。今天我碰巧发现了另一个具备多种功能的截图工具,它就是 **Flameshot**,一个简单但功能丰富的、针对类 Unix 系统的截图工具。它简单易用,可定制,并且支持将截图上传到在线图片分享网站 **imgur** 上。同时 Flameshot 有一个 CLI 版本,所以你也可以从命令行来进行截图。Flameshot 是一个完全免费且开源的工具。在本教程中,我们将看到如何安装 Flameshot 以及如何使用它来截图。
|
||||
|
||||
### 安装 Flameshot
|
||||
|
||||
**在 Arch Linux 上:**
|
||||
|
||||
Flameshot 可以从 Arch Linux 的 [community] 仓库中获取。确保你已经启用了 community 仓库,然后就可以像下面展示的那样使用 pacman 来安装 Flameshot:
|
||||
```
|
||||
$ sudo pacman -S flameshot
|
||||
```
|
||||
|
||||
它也可以从 [**AUR**][1] 中获取,所以你还可以使用任意一个 AUR 帮助程序(例如 [**Yay**][2])来在基于 Arch 的系统中安装它:
|
||||
```
|
||||
$ yay -S flameshot-git
|
||||
```
|
||||
|
||||
**在 Fedora 中:**
|
||||
|
||||
```
|
||||
$ sudo dnf install flameshot
|
||||
```
|
||||
|
||||
在 **Debian 10+** 和 **Ubuntu 18.04+** 中,可以使用 APT 包管理器来安装它:
|
||||
```
|
||||
$ sudo apt install flameshot
|
||||
```
|
||||
|
||||
**在 openSUSE 上:**
|
||||
|
||||
```
|
||||
$ sudo zypper install flameshot
|
||||
```
|
||||
在其他的 Linux 发行版中,可以从源代码编译并安装它。编译过程中需要 **Qt version 5.3** 以及 **GCC 4.9.2** 或者它们的更高版本。
|
||||
|
||||
### 使用
|
||||
|
||||
可以从菜单或者应用启动器中启动 Flameshot。在 MATE 桌面环境,它通常可以在 **Applications -> Graphics** 下找到。
|
||||
|
||||
一旦打开了它,你就可以在系统面板中看到 Flameshot 的托盘图标。
|
||||
|
||||
**注意:**
|
||||
|
||||
假如你使用 GNOME 桌面环境,为了能够看到系统托盘图标,你需要安装 [TopIcons][3] 扩展。
|
||||
|
||||
在 Flameshot 托盘图标上右击,你便会看到几个菜单项,例如打开配置窗口、信息窗口以及退出该应用。
|
||||
|
||||
要进行截图,只需要点击托盘图标就可以了。接着你将看到一个关于如何使用 Flameshot 的帮助窗口。选择一个截图区域,然后敲 **ENTER** 键便可以截屏了;点击右键便可以看到颜色拾取器;再敲空格键便可以查看屏幕侧边的面板。你可以使用鼠标的滚轮来增加或者减少指针的宽度。
|
||||
|
||||
Flameshot 自带一系列非常好的功能,例如:
|
||||
|
||||
* 可以进行手写
|
||||
* 可以划直线
|
||||
* 可以画长方形或者圆形框
|
||||
* 可以进行长方形区域选择
|
||||
* 可以画箭头
|
||||
* 可以对要点进行标注
|
||||
* 可以添加文本
|
||||
* 可以对图片或者文字进行模糊处理
|
||||
* 可以展示图片的尺寸大小
|
||||
* 在编辑图片时可以进行撤销和重做操作
|
||||
* 可以将选择的东西复制到剪贴板
|
||||
* 可以保存选择
|
||||
* 可以退出截屏
|
||||
* 可以选择另一个 app 来打开图片
|
||||
* 可以上传图片到 imgur 网站
|
||||
* 可以将图片固定到桌面上
|
||||
|
||||
下面是一个示例的视频:
|
||||
|
||||
<http://www.ostechnix.com/wp-content/uploads/2018/09/Flameshot-demo.mp4>
|
||||
|
||||
**快捷键**
|
||||
|
||||
Flameshot 也支持快捷键。在 Flameshot 的托盘图标上右击并点击 **Information** 窗口,便可以看到在 GUI 模式下所有可用的快捷键。下面是在 GUI 模式下可用的快捷键清单:
|
||||
|
||||
| 快捷键 | 描述 |
|
||||
|------------------------|------------------------------|
|
||||
| ←, ↓, ↑, → | 移动选择区域 1px |
|
||||
| Shift + ←, ↓, ↑, → | 将选择区域大小更改 1px |
|
||||
| Esc | 退出截图 |
|
||||
| Ctrl + C | 复制到粘贴板 |
|
||||
| Ctrl + S | 将选择区域保存为文件 |
|
||||
| Ctrl + Z | 撤销最近的一次操作 |
|
||||
| Right Click | 展示颜色拾取器 |
|
||||
| Mouse Wheel | 改变工具的宽度 |
|
||||
|
||||
按住 Shift 键并拖动选择区域的其中一个控制点,其相对方向的控制点也会随之做镜像移动。
|
||||
|
||||
**命令行选项**
|
||||
|
||||
Flameshot 也支持一系列的命令行选项来延时截图和保存图片到自定义的路径。
|
||||
|
||||
要使用 Flameshot GUI 模式,运行:
|
||||
```
|
||||
$ flameshot gui
|
||||
```
|
||||
|
||||
要使用 GUI 模式截屏并将你选取的区域保存到一个自定义的路径,运行:
|
||||
```
|
||||
$ flameshot gui -p ~/myStuff/captures
|
||||
```
|
||||
|
||||
要延时 2 秒后打开 GUI 模式可以使用:
|
||||
```
|
||||
$ flameshot gui -d 2000
|
||||
```
|
||||
|
||||
要延时 2 秒并将截图保存到一个自定义的路径(无 GUI)可以使用:
|
||||
```
|
||||
$ flameshot full -p ~/myStuff/captures -d 2000
|
||||
```
|
||||
|
||||
要截图全屏并保存到自定义的路径和粘贴板中使用:
|
||||
```
|
||||
$ flameshot full -c -p ~/myStuff/captures
|
||||
```
|
||||
|
||||
要截取鼠标所在的屏幕,并以 **PNG** 格式输出原始图像数据,可以使用:
|
||||
```
|
||||
$ flameshot screen -r
|
||||
```
|
||||
|
||||
要对屏幕 1 进行截屏并将截屏复制到粘贴板中可以运行:
|
||||
```
|
||||
$ flameshot screen -n 1 -c
|
||||
```
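
如果你经常使用同一组参数,也可以把常用组合包装成一个 shell 别名,下面只是一个假设的示例(路径与延时可按需调整):

```
$ alias shot='flameshot full -p ~/Pictures -d 2000'
$ shot
```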
|
||||
|
||||
你还需要什么功能呢?Flameshot 几乎拥有截屏所需的所有功能:添加注释、编辑图片、模糊处理或者对要点做高亮等等。在找到它的更佳替代品之前,我将一直使用 Flameshot 作为我目前的截图工具。请尝试一下,你不会失望的。
|
||||
|
||||
好了,这就是今天的全部内容了。后续将有更多精彩内容,请保持关注!
|
||||
|
||||
Cheers!
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://www.ostechnix.com/flameshot-a-simple-yet-powerful-feature-rich-screenshot-tool/
|
||||
|
||||
作者:[SK][a]
|
||||
选题:[lujun9972](https://github.com/lujun9972)
|
||||
译者:[FSSlc](https://github.com/FSSlc)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://www.ostechnix.com/author/sk/
|
||||
[1]: https://aur.archlinux.org/packages/flameshot-git
|
||||
[2]: https://www.ostechnix.com/yay-found-yet-another-reliable-aur-helper/
|
||||
[3]: https://extensions.gnome.org/extension/1031/topicons/
|
177
translated/tech/20180924 Why Linux users should try Rust.md
Normal file
177
translated/tech/20180924 Why Linux users should try Rust.md
Normal file
@ -0,0 +1,177 @@
|
||||
为什么 Linux 用户应该尝试 Rust
|
||||
======
|
||||
|
||||
![](https://images.idgesg.net/images/article/2018/09/rust-rusted-metal-100773678-large.jpg)
|
||||
|
||||
Rust 是一种相当年轻的现代编程语言,它具有许多功能,因此非常灵活而且非常安全。数据显示它正在变得非常受欢迎,连续三年在 Stack Overflow 开发者调查中获得“最受喜爱的编程语言”第一名:[2016][1]、[2017][2] 和 [2018][3]。
|
||||
|
||||
Rust 也是一种开源语言,它具有一系列独特的功能,使得它可以适应许多不同的编程项目。它最初源于 2006 年 Mozilla 一位员工的个人项目,几年后(2009 年)被 Mozilla 作为特别项目接手,然后在 2010 年宣布供公众使用。
|
||||
|
||||
Rust 程序运行速度极快,可防止段错误,并保证线程安全。这些属性使该语言极大地吸引了专注于应用程序安全性的开发人员。Rust 也是一种非常易读的语言,可用于从简单程序到非常大而复杂的项目。
|
||||
|
||||
Rust 优点:
|
||||
|
||||
* 内存安全 - Rust 不会受到悬空指针、缓冲区溢出或其他与内存相关的错误的影响。它不需要垃圾回收即可提供内存安全。
|
||||
* 通用 - Rust 是适用于任何类型编程的语言。
|
||||
* 快速 - Rust 在性能上与 C/C++ 相当,但具有更好的安全功能。
|
||||
* 高效 - Rust 是为了便于并发编程而构建的。
|
||||
* 面向项目 - Rust 具有内置的依赖关系和构建管理系统 Cargo。
|
||||
* 支持良好 - Rust 有一个令人印象深刻的[支持社区][4]。
|
||||
|
||||
|
||||
|
||||
Rust 还强制执行 RAII(资源获取即初始化)。这意味着当一个对象超出作用域时,将调用其析构函数并释放其资源,从而提供防止资源泄漏的保障。它提供了函数式抽象和一个出色的[类型系统][5],以及速度和数学上的健全性。
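
下面是一个演示 RAII 的最小 Rust 示例(并非原文内容,仅作说明):`r` 一离开作用域,它的析构函数就会被自动调用。

```
struct Resource { name: String }

impl Drop for Resource {
    // 对象超出作用域时,Drop(析构函数)会被自动调用
    fn drop(&mut self) {
        println!("releasing {}", self.name);
    }
}

fn main() {
    {
        let r = Resource { name: String::from("file-handle") };
        println!("using {}", r.name);
    } // r 在此离开作用域,资源被自动释放
    println!("done");
}
```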
|
||||
|
||||
简而言之,Rust 是一种令人印象深刻的系统编程语言,具有其他大多数语言所缺乏的功能,使其成为 C、C++ 和 Objective-C 等多年来一直被使用的语言的有力竞争者。
|
||||
|
||||
### 安装 Rust
|
||||
|
||||
安装 Rust 是一个相当简单的过程。
|
||||
|
||||
```
|
||||
$ curl https://sh.rustup.rs -sSf | sh
|
||||
```
|
||||
|
||||
安装 Rust 后,可以使用 `rustc --version` 或 `which rustc` 命令显示版本信息。
|
||||
|
||||
```
|
||||
$ which rustc
|
||||
rustc 1.27.2 (58cc626de 2018-07-18)
|
||||
$ rustc --version
|
||||
rustc 1.27.2 (58cc626de 2018-07-18)
|
||||
```
|
||||
|
||||
### Rust入门
|
||||
|
||||
即使是最简单的 Rust 代码,写起来也与你之前使用过的语言完全不同。
|
||||
|
||||
```
|
||||
$ cat hello.rs
|
||||
fn main() {
|
||||
// Print a greeting
|
||||
println!("Hello, world!");
|
||||
}
|
||||
```
|
||||
|
||||
在这几行中,我们定义了一个函数(`main`),添加了一条描述该函数的注释,并使用 `println!` 语句来产生输出。你可以使用下面显示的命令编译并运行这样的程序。
|
||||
|
||||
```
|
||||
$ rustc hello.rs
|
||||
$ ./hello
|
||||
Hello, world!
|
||||
```
|
||||
|
||||
你可以创建一个“项目”(通常仅用于比这个更复杂的程序!)来保持代码的有序性。
|
||||
|
||||
```
|
||||
$ mkdir ~/projects
|
||||
$ cd ~/projects
|
||||
$ mkdir hello_world
|
||||
$ cd hello_world
|
||||
```
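
顺带一提,Rust 内置的构建工具 Cargo 也可以替你创建并运行项目。下面是一个简单的示意(输出因版本而异,已省略编译信息):

```
$ cargo new hello_world
$ cd hello_world
$ cargo run
Hello, world!
```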
|
||||
|
||||
请注意,即使是简单的程序,一旦编译,就会变成相当大的可执行文件。
|
||||
|
||||
```
|
||||
$ ./hello
|
||||
Hello, world!
|
||||
$ ls -l hello*
|
||||
-rwxrwxr-x 1 shs shs 5486784 Sep 23 19:02 hello <== executable
|
||||
-rw-rw-r-- 1 shs shs 68 Sep 23 15:25 hello.rs
|
||||
```
|
||||
|
||||
当然,这只是一个起步,一个传统的 “Hello, world!” 程序。Rust 语言具有一系列功能,可帮助你快速进阶到高级的编程技能。
|
||||
|
||||
### 学习 Rust
|
||||
|
||||
![rust programming language book cover][6]
|
||||
No Starch Press
|
||||
|
||||
Steve Klabnik 和 Carol Nichols 所著的《Rust Programming Language》(2018)一书提供了学习 Rust 的最佳方法之一。这本书由核心开发团队的两名成员撰写,可从 [No Starch Press][7] 出版社获得纸质书,或者从 [rust-lang.org][8] 获得电子书。它已经成为 Rust 开发者社区中的参考书。
|
||||
|
||||
在所涉及的众多主题中,你将了解这些高级主题:
|
||||
|
||||
* 所有权和借用
|
||||
|
||||
* 安全保障
|
||||
|
||||
* 测试和错误处理
|
||||
|
||||
* 智能指针和多线程
|
||||
|
||||
* 高级模式匹配
|
||||
|
||||
* 使用 Cargo(内置包管理器)
|
||||
|
||||
* 使用 Rust 的高级编译器
|
||||
|
||||
|
||||
|
||||
#### 目录
|
||||
|
||||
|
||||
```
|
||||
前言(Nicholas Matsakis和Aaron Turon编写)
|
||||
致谢
|
||||
介绍
|
||||
第1章:新手入门
|
||||
第2章:猜谜游戏
|
||||
第3章:通用编程概念
|
||||
第4章:了解所有权
|
||||
第5章:结构体
|
||||
第6章:枚举和模式匹配
|
||||
第7章:模块
|
||||
第8章:常见集合
|
||||
第9章:错误处理
|
||||
第10章:泛型、特征和生命周期
|
||||
第11章:测试
|
||||
第12章:输入/输出项目
|
||||
第13章:迭代器和闭包
|
||||
第14章:关于 Cargo 和 Crates.io 的更多信息
|
||||
第15章:智能指针
|
||||
第16章:并发
|
||||
第17章:Rust 是面向对象的吗?
|
||||
第18章:模式
|
||||
第19章:关于生命周期的更多信息
|
||||
第20章:高级类型系统功能
|
||||
附录A:关键字
|
||||
附录B:运算符和符号
|
||||
附录C:可衍生的特征
|
||||
附录D:宏
|
||||
索引
|
||||
|
||||
```
|
||||
|
||||
《[Rust 编程语言][7]》将带你从基本安装和语言语法走向复杂的主题,例如错误处理、crate(与其他语言中的“库”或“包”同义)、模块(允许你在 crate 内部划分代码)、生命周期等。
|
||||
|
||||
可能最重要的是,本书可以让你从基本的编程技巧,进阶到构建和编译复杂、安全且非常有用的程序。
|
||||
|
||||
### 结束
|
||||
|
||||
如果你已经准备好用一种非常值得花时间和精力学习、并且越来越受欢迎的语言进行一些严肃的编程,那么 Rust 是一个不错的选择!
|
||||
|
||||
加入 [Facebook][9] 和 [LinkedIn][10] 上的 Network World 社区,评论最重要的话题。
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
|
||||
via: https://www.networkworld.com/article/3308162/linux/why-you-should-try-rust.html
|
||||
|
||||
作者:[Sandra Henry-Stocker][a]
|
||||
选题:[lujun9972](https://github.com/lujun9972)
|
||||
译者:[way-ww](https://github.com/way-ww)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://www.networkworld.com/author/Sandra-Henry_Stocker/
|
||||
[1]: https://insights.stackoverflow.com/survey/2016#technology-most-loved-dreaded-and-wanted
|
||||
[2]: https://insights.stackoverflow.com/survey/2017#technology-most-loved-dreaded-and-wanted-languages
|
||||
[3]: https://insights.stackoverflow.com/survey/2018#technology-most-loved-dreaded-and-wanted-languages
|
||||
[4]: https://www.rust-lang.org/en-US/community.html
|
||||
[5]: https://doc.rust-lang.org/reference/type-system.html
|
||||
[6]: https://images.idgesg.net/images/article/2018/09/rust-programming-language_book-cover-100773679-small.jpg
|
||||
[7]: https://nostarch.com/Rust
|
||||
[8]: https://doc.rust-lang.org/book/2018-edition/index.html
|
||||
[9]: https://www.facebook.com/NetworkWorld/
|
||||
[10]: https://www.linkedin.com/company/network-world
|
@ -1,38 +1,39 @@
|
||||
PyTorch 1.0 预览版发布: Facebook 最新 AI 开源框架
|
||||
PyTorch 1.0 预览版发布:Facebook 最新 AI 开源框架
|
||||
======
|
||||
|
||||
Facebook 在人工智能项目中广泛使用自己的开源 AI 框架 PyTorch,最近,他们已经发布了 PyTorch 1.0 的预览版本。
|
||||
|
||||
对于那些不熟悉的人, [PyTorch][1] 是一个基于 Python 的科学计算库。
|
||||
如果你尚不了解,[PyTorch][1] 是一个基于 Python 的科学计算库。
|
||||
|
||||
PyTorch 利用 [GPUs 超强的运算能力 ][2] 来实现复杂的 [张量][3] 计算 和 [深度神经网络][4]。 因此, 它被世界各地的研究人员和开发人员广泛使用。
|
||||
PyTorch 利用 [GPU 超强的运算能力][2] 来实现复杂的 [张量][3] 计算 和 [深度神经网络][4]。 因此, 它被世界各地的研究人员和开发人员广泛使用。
|
||||
|
||||
这一新的能够使用的 [预览版][5] 已在2018年10月2日周二旧金山举办的 [PyTorch 开发人员大会][6] 的[中途][7]宣布。
|
||||
这一新的可以投入使用的 [预览版][5] 已于 2018 年 10 月 2 日周二在旧金山 [The Midway][7] 举办的 [PyTorch 开发人员大会][6] 宣布。
|
||||
|
||||
### PyTorch 1.0 候选版本的亮点
|
||||
|
||||
![PyTorhc is Python based open source AI framework from Facebook][8]
|
||||
|
||||
候选版本中的一些主要新功能包括:
|
||||
候选版本中的一些主要新功能包括:
|
||||
|
||||
#### 1\. JIT
|
||||
#### 1、 JIT
|
||||
|
||||
JIT 是一个编译工具集,使研究和生产更加接近。 它包含一个基于 Python 语言的叫做 Torch Script 的脚本语言,也有能使现有代码与它自己兼容的方法。
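
作为补充,下面是一个极简的 Torch Script 示意(并非原文内容,基于 1.0 预览版公开的 `torch.jit.script` 接口):

```
import torch

@torch.jit.script
def add_relu(x, y):
    # 该函数会被编译为 Torch Script,而不是作为普通 Python 代码执行
    return torch.relu(x + y)

print(add_relu(torch.ones(3), torch.ones(3)))
```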
|
||||
|
||||
#### 2\. 全新的 torch.distributed 库: “C10D”
|
||||
#### 2、 全新的 torch.distributed 库: “C10D”
|
||||
|
||||
“C10D” 能够在不同的后端上启用异步操作, 并在较慢的网络上提高性能。
|
||||
|
||||
#### 3\. C++ 前端 (实验性功能)
|
||||
#### 3、 C++ 前端 (实验性功能)
|
||||
|
||||
虽然它被特别提到是一个不稳定的 API (预计在预发行版中), 这是一个 PyTorch 后端的纯 c++ 接口, 遵循 API 和建立的 Python 前端的体系结构,以实现高性能、 低延迟的研究和开发直接安装在硬件上的 c++ 应用程序。
|
||||
虽然它被特别提到是一个不稳定的 API (估计是在预发行版中), 这是一个 PyTorch 后端的纯 C++ 接口, 遵循 API 和建立的 Python 前端的体系结构,以实现高性能、低延迟的研究和开发直接安装在硬件上的 C++ 应用程序。
|
||||
|
||||
想要了解更多,可以在 GitHub 上查看完整的 [更新说明][9]。
|
||||
|
||||
第一个PyTorch 1.0 的稳定版本将在夏季发布。
|
||||
第一个 PyTorch 1.0 的稳定版本将在夏季发布。(LCTT 译注:此信息可能有误)
|
||||
|
||||
### 在 Linux 上安装 PyTorch
|
||||
|
||||
为了安装 PyTorch v1.0rc0, 开发人员建议使用 [conda][10], 同时也可以按照[本地安装][11]所示,使用其他方法可以安装,所有必要的细节详见文档。
|
||||
为了安装 PyTorch v1.0rc0, 开发人员建议使用 [conda][10], 同时也可以按照[本地安装页面][11]所示,使用其他方法可以安装,所有必要的细节详见文档。
|
||||
|
||||
#### 前提
|
||||
|
||||
@ -41,18 +42,16 @@ JIT 是一个编译工具集,使研究和生产更加接近。 它包含一个
|
||||
* Python
|
||||
* [CUDA][12] (对于使用 Nvidia GPU 的用户)
|
||||
|
||||
|
||||
|
||||
我们已经知道[如何安装和使用 Pip][13],那就让我们来了解如何使用 Pip 安装 PyTorch。
|
||||
|
||||
请注意,PyTorch 具有 GPU 和仅限 CPU 的不同安装包。你应该安装一个适合你硬件的安装包。
|
||||
|
||||
#### 安装 PyTorch 的旧版本和稳定版
|
||||
|
||||
如果你想在 GPU 机器上安装稳定版(0.4 版本),使用:
|
||||
|
||||
```
|
||||
pip install torch torchvision
|
||||
|
||||
```
|
||||
|
||||
使用以下两个命令,来安装仅用于 CPU 的稳定版:
|
||||
@ -60,7 +59,6 @@ pip install torch torchvision
|
||||
```
|
||||
pip install http://download.pytorch.org/whl/cpu/torch-0.4.1-cp27-cp27mu-linux_x86_64.whl
|
||||
pip install torchvision
|
||||
|
||||
```
|
||||
|
||||
#### 安装 PyTorch 1.0 候选版本
|
||||
@ -69,21 +67,19 @@ pip install torchvision
|
||||
|
||||
```
|
||||
pip install torch_nightly -f https://download.pytorch.org/whl/nightly/cu92/torch_nightly.html
|
||||
|
||||
```
|
||||
如果没有GPU,并且更喜欢使用 仅限CPU 版本,使用如下命令:
|
||||
如果没有 GPU,并且更喜欢使用仅限 CPU 的版本,使用如下命令:
|
||||
|
||||
```
|
||||
pip install torch_nightly -f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html
|
||||
|
||||
```
|
||||
|
||||
#### 验证 PyTorch 安装
|
||||
|
||||
使用如下简单的命令,启动终端上的 python 控制台:
|
||||
|
||||
```
|
||||
python
|
||||
|
||||
```
|
||||
|
||||
现在,按行输入下面的示例代码以验证您的安装:
|
||||
@ -93,7 +89,6 @@ from __future__ import print_function
|
||||
import torch
|
||||
x = torch.rand(5, 3)
|
||||
print(x)
|
||||
|
||||
```
|
||||
|
||||
你应该得到如下输出:
|
||||
@ -104,7 +99,6 @@ tensor([[0.3380, 0.3845, 0.3217],
|
||||
[0.2979, 0.7141, 0.9069],
|
||||
[0.1449, 0.1132, 0.1375],
|
||||
[0.4675, 0.3947, 0.1426]])
|
||||
|
||||
```
|
||||
|
||||
若要检查是否可以使用 PyTorch 的 GPU 功能, 可以使用以下示例代码:
|
||||
@ -112,18 +106,18 @@ tensor([[0.3380, 0.3845, 0.3217],
|
||||
```
|
||||
import torch
|
||||
torch.cuda.is_available()
|
||||
|
||||
```
|
||||
|
||||
输出结果应该是:
|
||||
|
||||
```
|
||||
True
|
||||
|
||||
```
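
如果输出为 `True`,就可以把张量放到 GPU 上计算,例如(仅作示意):

```
import torch

if torch.cuda.is_available():
    x = torch.rand(5, 3).cuda()  # 将张量移动到 GPU
    print(x.device)
```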
|
||||
|
||||
对 AMD GPU 的 PyTorch 支持仍在开发中,因此,如[报告][14]所述,尚未提供完整的测试覆盖。如果你有 AMD GPU,请在[这里][15]提出建议。
|
||||
|
||||
现在让我们来看看一些广泛使用 PyTorch 的研究项目:
|
||||
|
||||
### 基于 PyTorch 的持续研究项目
|
||||
|
||||
* [Detectron][16]:Facebook AI 研究院的软件系统,可以智能地进行对象检测和分类。它之前是基于 Caffe2 的。今年早些时候,Caffe2 和 PyTorch [合力][17]创建了一个兼顾研究与生产的 PyTorch 1.0。
|
||||
@ -144,7 +138,7 @@ via: https://itsfoss.com/pytorch-open-source-ai-framework/
|
||||
作者:[Avimanyu Bandyopadhyay][a]
|
||||
选题:[lujun9972](https://github.com/lujun9972)
|
||||
译者:[distant1219](https://github.com/distant1219)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
校对:[wxy](https://github.com/wxy)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
|
@ -1,63 +1,62 @@
|
||||
translating by dianbanjiu
|
||||
How To List The Enabled/Active Repositories In Linux
|
||||
如何列出 Linux 上已开启/激活的仓库
|
||||
======
|
||||
There are many ways to list enabled repositories in Linux.
|
||||
有很多方法可以列出 Linux 上已开启的仓库。
|
||||
|
||||
Here we are going to show you the easy methods to list active repositories.
|
||||
下面我们将为你展示列出已激活仓库的简便方法。
|
||||
|
||||
It will helps you to know what are the repositories enabled on your system.
|
||||
这有助于你知晓你的系统上都开启了哪些仓库。
|
||||
|
||||
Once you have this information in handy then you can add any repositories that you want if it’s not already enabled.
|
||||
一旦你掌握了这些信息,如果某个你想要的仓库尚未开启,你就可以添加它了。
|
||||
|
||||
Say for example, if you would like to enable `epel repository` then you need to check whether the epel repository is enabled or not. In this case this tutorial would help you.
|
||||
举个例子,如果你想开启 epel 仓库,你需要先检查 epel 仓库是否已经开启了。这篇教程将会帮助你做这件事情。
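
以 CentOS 为例,一个典型的流程大致如下(仅作示意):先安装 epel-release 包来添加该仓库,再确认它出现在已开启的仓库列表中:

```
# yum install epel-release
# yum repolist enabled | grep epel
```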
|
||||
|
||||
### What Is Repository?
|
||||
### 什么是仓库?
|
||||
|
||||
A software repository is a central place which stores the software packages for the particular application.
|
||||
软件仓库是存储特定应用程序软件包的中心位置。
|
||||
|
||||
All the Linux distributions are maintaining their own repositories and they allow users to retrieve and install packages on their machine.
|
||||
所有的 Linux 发行版都维护着他们自己的仓库,而且允许用户获取软件包并安装到他们的机器上。
|
||||
|
||||
Each vendor offered a unique package management tool to manage their repositories such as search, install, update, upgrade, remove, etc.
|
||||
每个供应商都提供了一套包管理工具,用以管理他们的仓库,比如搜索、安装、更新、升级、移除等等。
|
||||
|
||||
Most of the Linux distributions comes as freeware except RHEL and SUSE. To access their repositories you need to buy a subscriptions.
|
||||
除了 RHEL 和 SUSE 以外,大多数 Linux 发行版都是免费的。要访问它们(RHEL 和 SUSE)的仓库,你需要先购买订阅。
|
||||
|
||||
**Suggested Read :**
|
||||
**(#)** [How To Add, Enable And Disable A Repository By Using The DNF/YUM Config Manager Command On Linux][1]
|
||||
**(#)** [How To List Installed Packages By Size (Largest) On Linux][2]
|
||||
**(#)** [How To View/List The Available Packages Updates In Linux][3]
|
||||
**(#)** [How To View A Particular Package Installed/Updated/Upgraded/Removed/Erased Date On Linux][4]
|
||||
**(#)** [How To View Detailed Information About A Package In Linux][5]
|
||||
**(#)** [How To Search If A Package Is Available On Your Linux Distribution Or Not][6]
|
||||
**(#)** [How To List An Available Package Groups In Linux][7]
|
||||
**(#)** [Newbies corner – A Graphical frontend tool for Linux Package Manager][8]
|
||||
**(#)** [Linux Expert should knows, list of Command line Package Manager & Usage][9]
|
||||
**建议阅读:**
|
||||
**(#)** [在 Linux 上,如何通过 DNF/YUM 设置管理命令添加、开启、关闭一个仓库][1]
|
||||
**(#)** [在 Linux 上如何按大小(最大)列出已安装的包][2]
|
||||
**(#)** [在 Linux 上如何查看/列出可用的软件包更新][3]
|
||||
**(#)** [在 Linux 上如何查看一个特定包的安装/更新/升级/移除/清除日期][4]
|
||||
**(#)** [在 Linux 上如何查看一个包的详细信息][5]
|
||||
**(#)** [在你的 Linux 发行版上如何查看一个包是否可用][6]
|
||||
**(#)** [在 Linux 如何列出可用的软件包组][7]
|
||||
**(#)** [Newbies corner - 一个图形化的 Linux 包管理的前端工具][8]
|
||||
**(#)** [Linux 专家须知,命令行包管理 & 使用列表][9]
|
||||
|
||||
### How To List The Enabled Repositories on RHEL/CentOS
|
||||
### 在 RHEL/CentOS 上列出已开启的仓库
|
||||
|
||||
RHEL & CentOS systems are using RPM packages hence we can use the `Yum Package Manager` to get this information.
|
||||
RHEL 和 CentOS 系统使用的是 RPM 包,所以我们可以使用 Yum 包管理器查看这些信息。
|
||||
|
||||
YUM stands for Yellowdog Updater, Modified is an open-source command-line front-end package-management utility for RPM based systems such as Red Hat Enterprise Linux (RHEL) and CentOS.
|
||||
YUM 代表 “Yellowdog Updater, Modified”,它是一个用于基于 RPM 的系统(例如 RHEL 和 CentOS)的开源命令行软件包管理前端。
|
||||
|
||||
Yum is the primary tool for getting, installing, deleting, querying, and managing RPM packages from distribution repositories, as well as other third-party repositories.
|
||||
YUM 是获取、安装、删除、查询和管理来自发行版仓库以及其他第三方仓库的 RPM 包的主要工具。
|
||||
|
||||
**Suggested Read :** [YUM Command To Manage Packages on RHEL/CentOS Systems][10]
|
||||
**建议阅读:** [在 RHEL/CentOS 系统上用 YUM 命令管理包][10]
|
||||
|
||||
RHEL based systems are mainly offering the below three major repositories. These repository will be enabled by default.
|
||||
基于 RHEL 的系统主要提供以下三个主要的仓库。这些仓库是默认开启的。
|
||||
|
||||
* **`base:`** It’s containing all the core packages and base packages.
|
||||
* **`extras:`** It provides additional functionality to CentOS without breaking upstream compatibility or updating base components. It is an upstream repository, as well as additional CentOS packages.
|
||||
* **`updates:`** It’s offering bug fixed packages, Security packages and Enhancement packages.
|
||||
* **`base:`** 它包含了所有的核心包和基础包。
|
||||
* **`extras:`** 它向 CentOS 提供不破坏上游兼容性或更新基本组件的额外功能。这是一个上游仓库,还有额外的 CentOS 包。
|
||||
* **`updates:`** 它提供了 bug 修复包、安全性包和增强包。
|
||||
|
||||
|
||||
|
||||
```
|
||||
# yum repolist
|
||||
or
|
||||
或者
|
||||
# yum repolist enabled
|
||||
|
||||
Loaded plugins: fastestmirror
|
||||
Determining fastest mirrors
|
||||
 * epel: ewr.edge.kernel.org
|
||||
repo id repo name status
|
||||
!base/7/x86_64 CentOS-7 - Base 9,911
|
||||
!epel/x86_64 Extra Packages for Enterprise Linux 7 - x86_64 12,687
|
||||
@ -67,26 +66,26 @@ repolist: 24,349
|
||||
|
||||
```
|
||||
|
||||
### How To List The Enabled Repositories on Fedora
|
||||
### 如何列出 Fedora 上已开启的仓库
|
||||
|
||||
DNF stands for Dandified yum. We can tell DNF, the next generation of yum package manager (Fork of Yum) using hawkey/libsolv library for backend. Aleš Kozumplík started working on DNF since Fedora 18 and its implemented/launched in Fedora 22 finally.
|
||||
DNF 代表 Dandified yum。我们可以说 DNF 是下一代的 yum 包管理器(Yum 的一个分支),它使用 hawkey/libsolv 库作为后端。Aleš Kozumplík 从 Fedora 18 开始开发 DNF,最终它在 Fedora 22 中实现并发布。
|
||||
|
||||
Dnf command is used to install, update, search & remove packages on Fedora 22 and later system. It automatically resolve dependencies and make it smooth package installation without any trouble.
|
||||
Fedora 22 及之后的系统都使用 DNF 来安装、升级、搜索和移除包。它可以自动解决依赖问题,使包的安装过程顺畅,没有任何麻烦。
|
||||
|
||||
Yum replaced by DNF due to several long-term problems in Yum which was not solved. Asked why ? he did not patches the Yum issues. Aleš Kozumplík explains that patching was technically hard and YUM team wont accept the changes immediately and other major critical, YUM is 56K lines but DNF is 29K lies. So, there is no option for further development, except to fork.
|
||||
由于 Yum 中许多长期未解决的问题,Yum 已经被 DNF 所替代。你问为什么不给 Yum 打补丁?Aleš Kozumplík 解释说,打补丁在技术上太困难了,而 YUM 团队也不会立即接受这些变更;还有一个重要问题是,YUM 的代码有 5.6 万行,而 DNF 只有 2.9 万行。因此,除了 fork 之外,别无选择。
|
||||
|
||||
**Suggested Read :** [DNF (Fork of YUM) Command To Manage Packages on Fedora System][11]
|
||||
**建议阅读:** [在 Fedora 上使用 DNF(Fork 自 YUM)管理软件][11]
|
||||
|
||||
Fedora system is mainly offering the below two major repositories. These repository will be enabled by default.
|
||||
Fedora 主要提供下面两个主仓库。这些库将被默认开启。
|
||||
|
||||
* **`fedora:`** It’s containing all the core packages and base packages.
|
||||
* **`updates:`** It’s offering bug fixed packages, Security packages and Enhancement packages from the stable release branch.
|
||||
* **`fedora:`** 它包括所有的核心包和基础包。
|
||||
* **`updates:`** 它提供了来自稳定发行版的 bug 修复包、安全性包和增强包。
|
||||
|
||||
|
||||
|
||||
```
|
||||
# dnf repolist
|
||||
or
|
||||
或者
|
||||
# dnf repolist enabled
|
||||
|
||||
Last metadata expiration check: 0:02:56 ago on Wed 10 Oct 2018 06:12:22 PM IST.
|
||||
@ -106,13 +105,13 @@ rabiny-albert Copr repo for albert owned by rabiny 3
|
||||
|
||||
```
|
||||
|
||||
### How To List The Enabled Repositories on Debian/Ubuntu
|
||||
### 如何列出 Debian/Ubuntu 上已开启的仓库
|
||||
|
||||
Debian based systems are using APT/APT-GET package manager hence we can use the `APT/APT-GET Package Manager` to get this information.
|
||||
基于 Debian 的系统使用的是 APT/APT-GET 包管理,因此我们可以使用 APT/APT-GET 包管理器去获取这些信息。
|
||||
|
||||
APT stands for Advanced Packaging Tool (APT) which is replacement for apt-get, like how DNF came to picture instead of YUM. It’s feature rich command-line tools with included all the futures in one command (APT) such as apt-cache, apt-search, dpkg, apt-cdrom, apt-config, apt-key, etc..,. and several other unique features. For example we can easily install .dpkg packages through APT but we can’t do through Apt-Get similar more features are included into APT command. APT-GET replaced by APT Due to lock of futures missing in apt-get which was not solved.
|
||||
APT 代表 Advanced Packaging Tool,它取代了 apt-get,就像 DNF 取代 Yum 一样。它是一个功能丰富的命令行工具,将 apt-cache、apt-search、dpkg、apt-cdrom、apt-config、apt-key 等工具的功能都包含在一个命令(APT)中,还有其他几个独特的功能。例如,我们可以通过 APT 轻松安装 .dpkg 软件包,而 apt-get 做不到这一点。由于 apt-get 中一些一直未能解决的问题,APT 取代了 apt-get。
|
||||
|
||||
Apt-Get stands for Advanced Packaging Tool (APT). apg-get is a powerful command-line tool which is used to automatically download and install new software packages, upgrade existing software packages, update the package list index, and to upgrade the entire Debian based systems.
|
||||
apt-get 代表 Advanced Packaging Tool(APT)。apt-get 是一个强大的命令行工具,用以自动下载和安装新的软件包、升级已有的软件包、更新软件包列表索引,以及升级整个基于 Debian 的系统。
|
||||
|
||||
```
|
||||
# apt-cache policy
|
||||
@ -156,13 +155,13 @@ Pinned packages:
|
||||
|
||||
```
|
||||
|
||||
### How To List The Enabled Repositories on openSUSE
|
||||
### 如何在 openSUSE 上列出已开启的仓库
|
||||
|
||||
openSUSE system uses zypper package manager hence we can use the zypper Package Manager to get this information.
|
||||
openSUSE 使用 zypper 包管理器,因此我们可以使用 zypper 包管理器获取这些信息。
|
||||
|
||||
Zypper is a command line package manager for suse & openSUSE distributions. It’s used to install, update, search & remove packages & manage repositories, perform various queries, and more. Zypper command-line interface to ZYpp system management library (libzypp).
|
||||
Zypper 是 SUSE 和 openSUSE 发行版的命令行包管理器。它用于安装、更新、搜索、移除包以及管理仓库、执行各种查询等。Zypper 是 ZYpp 系统管理库(libzypp)的命令行接口。
|
||||
|
||||
**Suggested Read :** [Zypper Command To Manage Packages On openSUSE & suse Systems][12]
|
||||
**建议阅读:** [在 openSUSE 和 suse 系统上使用 Zypper 命令管理包][12]
|
||||
|
||||
```
|
||||
# zypper repos
|
||||
@ -179,7 +178,7 @@ Zypper is a command line package manager for suse & openSUSE distributions. It
|
||||
|
||||
```
|
||||
|
||||
List Repositories with URI.
|
||||
以 URI 列出仓库。
|
||||
|
||||
```
|
||||
# zypper lr -u
|
||||
@ -196,7 +195,7 @@ List Repositories with URI.
|
||||
|
||||
```
|
||||
|
||||
List Repositories by priority.
|
||||
通过优先级列出仓库。
|
||||
|
||||
```
|
||||
# zypper lr -p
|
||||
@ -213,13 +212,13 @@ List Repositories by priority.
|
||||
|
||||
```
|
||||
|
||||
### How To List The Enabled Repositories on ArchLinux
|
||||
### 如何列出 Arch Linux 上已开启的仓库
|
||||
|
||||
Arch Linux based systems are using pacman package manager hence we can use the pacman Package Manager to get this information.
|
||||
基于 Arch Linux 的系统使用 pacman 包管理器,因此我们可以使用 pacman 包管理器获取这些信息。
|
||||
|
||||
pacman stands for package manager utility (pacman). pacman is a command-line utility to install, build, remove and manage Arch Linux packages. pacman uses libalpm (Arch Linux Package Management (ALPM) library) as a back-end to perform all the actions.
|
||||
pacman 代表包管理器实用程序(package manager utility)。pacman 是一个命令行实用程序,用以安装、构建、移除和管理 Arch Linux 包。pacman 使用 libalpm(Arch Linux 包管理库,ALPM)作为后端来执行所有这些操作。
|
||||
|
||||
**Suggested Read :** [Pacman Command To Manage Packages On Arch Linux Based Systems][13]
|
||||
**建议阅读:** [在基于 Arch Linux 的系统上使用 Pacman 命令管理包][13]
|
||||
|
||||
```
|
||||
# pacman -Syy
|
||||
@ -231,15 +230,15 @@ pacman stands for package manager utility (pacman). pacman is a command-line uti
|
||||
|
||||
```
|
||||
|
||||
### How To List The Enabled Repositories on Linux using INXI Utility
|
||||
### 如何使用 inxi 实用程序列出 Linux 上已开启的仓库
|
||||
|
||||
inxi is a nifty tool to check hardware information on Linux and offers wide range of option to get all the hardware information on Linux system that i never found in any other utility which are available in Linux. It was forked from the ancient and mindbendingly perverse yet ingenius infobash, by locsmif.
|
||||
inxi 是一个在 Linux 上检查硬件信息非常有用的工具,它提供了很多选项,可以获取 Linux 系统上的所有硬件信息,这是我在 Linux 上的其他实用程序中从未见过的功能。它是由 locsmif 从古老而奇特但又富有独创性的 infobash fork 而来的。
|
||||
|
||||
inxi is a script that quickly shows system hardware, CPU, drivers, Xorg, Desktop, Kernel, GCC version(s), Processes, RAM usage, and a wide variety of other useful information, also used for forum technical support & debugging tool.
|
||||
inxi 是一个可以快速显示系统硬件、CPU、驱动、Xorg、桌面、内核、GCC 版本、进程、内存使用情况和很多其他有用信息的脚本,也可用于论坛技术支持和调试。
|
||||
|
||||
Additionally this utility will display all the distribution repository data information such as RHEL, CentOS, Fedora, Debain, Ubuntu, LinuxMint, ArchLinux, openSUSE, Manjaro, etc.,
|
||||
这个实用程序可以显示所有发行版的仓库数据信息,例如 RHEL、CentOS、Fedora、Debian、Ubuntu、LinuxMint、ArchLinux、openSUSE、Manjaro 等。
|
||||
|
||||
**Suggested Read :** [inxi – A Great Tool to Check Hardware Information on Linux][14]
|
||||
**建议阅读:** [inxi – 一个在 Linux 上检查硬件信息的好工具][14]
|
||||
|
||||
```
|
||||
# inxi -r
|
||||
@ -267,7 +266,7 @@ via: https://www.2daygeek.com/how-to-list-the-enabled-active-repositories-in-lin
|
||||
|
||||
作者:[Prakash Subramanian][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
译者:[dianbanjiu](https://github.com/dianbanjiu)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
@ -0,0 +1,82 @@
|
||||
使用 Podman 以非 root 用户身份运行 Linux 容器
|
||||
======
|
||||
|
||||
![](https://fedoramagazine.org/wp-content/uploads/2018/10/podman-816x345.jpg)
|
||||
|
||||
Linux 容器是由 Linux 内核提供的具有特定隔离功能的进程,包括文件系统、进程和网络的隔离。容器有助于实现可移植性:应用可以在容器镜像中与其依赖项一起分发,并可在几乎任何有容器运行时的 Linux 系统上运行。
|
||||
|
||||
虽然容器技术已经存在了很长时间,但 Linux 容器是因 Docker 而广为人知的。“Docker” 这个词可以指几个不同的东西,包括容器技术和工具、围绕它的社区,或者 Docker Inc. 公司。但是,在本文中,我用它来指管理 Linux 容器的技术和工具。
|
||||
|
||||
### 什么是 Docker
|
||||
|
||||
[Docker][1] 是一个以 root 身份在你的系统上运行的守护程序,它利用 Linux 内核的功能来管理正在运行的容器。除了运行容器之外,它还可以轻松管理容器镜像:与容器托管交互、存储镜像、管理容器版本等。它基本上支持运行单个容器所需的所有操作。
|
||||
|
||||
但即使 Docker 是管理 Linux 容器的一个非常方便的工具,它也有两个缺点:它是一个需要在你的系统上运行的守护进程,并且需要以 root 权限运行,这可能有一定的安全隐患。而 Podman 正在解决这两个问题。
|
||||
|
||||
### Podman 介绍
|
||||
|
||||
[Podman][2] 是一个容器运行时,提供与 Docker 非常相似的功能。正如已经提示的那样,它不需要在你的系统上运行任何守护进程,并且它也可以在没有 root 权限的情况下运行。让我们看看使用 Podman 运行 Linux 容器的一些示例。
|
||||
|
||||
#### 使用 Podman 运行容器
|
||||
|
||||
其中一个最简单的例子可能是运行 Fedora 容器,在命令行中打印 “Hello world!”:
|
||||
|
||||
```
|
||||
$ podman run --rm -it fedora:28 echo "Hello world!"
|
||||
```
|
||||
|
||||
使用通用 Dockerfile 构建镜像的方式与 Docker 相同:
|
||||
|
||||
```
|
||||
$ cat Dockerfile
|
||||
FROM fedora:28
|
||||
RUN dnf -y install cowsay
|
||||
|
||||
$ podman build . -t hello-world
|
||||
... output omitted ...
|
||||
|
||||
$ podman run --rm -it hello-world cowsay "Hello!"
|
||||
```
|
||||
|
||||
为了构建容器,Podman 在后台调用另一个名为 Buildah 的工具。你可以阅读最近一篇[关于使用 Buildah 构建容器镜像的文章][3] - 它不仅仅是使用典型的 Dockerfile。
|
||||
|
||||
除了构建和运行容器外,Podman 还可以与容器托管进行交互。要登录容器托管,例如广泛使用的 Docker Hub,请运行:
|
||||
|
||||
```
|
||||
$ podman login docker.io
|
||||
```
|
||||
|
||||
为了推送我刚刚构建的镜像,我只需打上标记来代表特定的容器托管,然后直接推送它。
|
||||
|
||||
```
|
||||
$ podman tag hello-world docker.io/asamalik/hello-world
|
||||
$ podman push docker.io/asamalik/hello-world
|
||||
```
|
||||
|
||||
顺便说一下,你是否注意到我如何以非 root 用户身份运行所有内容?此外,我的系统上没有运行大的守护进程!
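
可以用下面两条命令简单验证这一点(输出仅作示意):

```
$ id -u        # 非 0 说明不是 root
1000
$ podman ps    # 无需 sudo 即可列出当前用户的容器
```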
|
||||
|
||||
#### 安装 Podman
|
||||
|
||||
Podman 在 [Silverblue][4] 上默认提供,Silverblue 是一个面向基于容器的工作流的新一代 Linux 工作站。要在任何 Fedora 版本上安装它,只需运行:
|
||||
|
||||
```
|
||||
$ sudo dnf install podman
|
||||
```
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://fedoramagazine.org/running-containers-with-podman/
|
||||
|
||||
作者:[Adam Šamalík][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[geekpi](https://github.com/geekpi)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://fedoramagazine.org/author/asamalik/
|
||||
[b]: https://github.com/lujun9972
|
||||
[1]: https://docs.docker.com/
|
||||
[2]: https://podman.io/
|
||||
[3]: https://fedoramagazine.org/daemon-less-container-management-buildah/
|
||||
[4]: https://silverblue.fedoraproject.org/
|
@ -0,0 +1,78 @@
|
||||
使用 Lakka Linux 将你的旧 PC 变成复古游戏主机
|
||||
======
|
||||
**如果你有一台吃灰的旧计算机,你可以用 Lakka Linux 将它变成像 PlayStation 那样的复古游戏主机。**
|
||||
|
||||
你可能已经了解[专门用于复活旧计算机的 Linux 发行版][1]。但是你知道有个 Linux 发行版专门是为了将旧电脑变成复古游戏主机创建的么?
|
||||
|
||||
![Lakka is a Linux distribution specially for retrogaming][2]
|
||||
|
||||
认识一下 [Lakka][3],它是一个轻量级的 Linux 发行版,可以将旧的或低端的计算机(如 Raspberry Pi)变成一个完整的复古游戏主机。
|
||||
|
||||
当我说复古游戏“主机”时,我是认真的。如果你使用过 Xbox 和 PlayStation,你就会知道典型的主机界面是什么样的。
|
||||
|
||||
Lakka 提供类似的界面和类似的体验。我稍后会谈到“体验”。先看一下界面。
|
||||
|
||||
<https://itsfoss.com/wp-content/uploads/2018/10/lakka-linux-gaming-console.webm>
|
||||
Lakka 复古游戏界面
|
||||
|
||||
### Lakka:为复古游戏而生的 Linux 发行版
|
||||
|
||||
Lakka 是 [RetroArch][4] 和 [Libretro][5] 生态系统的官方 Linux 发行版。
|
||||
|
||||
RetroArch 是复古游戏模拟器和游戏引擎的前端。你在上面的视频中看到的界面只是 RetroArch。如果你只是想玩复古游戏,只需在当前的 Linux 发行版中安装 RetroArch 即可。
|
||||
|
||||
Lakka 提供了带有 Libretro 核心的 RetroArch。因此,你会获得一个预先配置好的操作系统,你可以安装它或者通过 live USB 使用,然后开始玩游戏。
|
||||
|
||||
Lakka 是轻量级的,你可以将它安装在大多数老系统或单板计算机上,如 Raspberry Pi 上。
|
||||
|
||||
它支持大量的模拟器。你只需要在系统上下载 ROM,Lakka 将从这些 ROM 运行游戏。你可以在[这里][6]找到支持的模拟器和硬件列表。
|
||||
|
||||
它通过其顺滑的图形界面,让你能够在许多计算机和主机上运行经典游戏。设置也是统一的,因此可以一劳永逸地完成配置。
|
||||
|
||||
让我总结一下 Lakka 的主要特点:
|
||||
|
||||
* RetroArch 中与 PlayStation 类似的界面
|
||||
* 支持许多复古游戏模拟器
|
||||
* 支持最多 5 名玩家在同一系统上玩游戏
|
||||
* 存档允许你随时保存游戏中的进度
|
||||
* 你可以使用各种图形过滤器改善旧游戏的外表
|
||||
* 你可以通过网络加入多人游戏
|
||||
* 开箱即用支持 XBOX360、Dualshock 3 和 8bitdo 等多种游戏手柄
|
||||
* 连接到 [RetroAchievements][7] 获取奖杯和徽章
|
||||
|
||||
|
||||
|
||||
### 获取 Lakka
|
||||
|
||||
在你继续安装 Lakka 之前,你应该了解它仍在开发中,因此会有一些 bug。
|
||||
|
||||
请记住,Lakka 仅支持 MBR 分区。因此,如果在安装时没有读到你的硬盘,这可能是一个原因。
|
||||
|
||||
[项目的 FAQ 部分][8]回答了常见的疑问,所以如有任何其他的问题,请参考它。
|
||||
|
||||
[获取 Lakka][9]
|
||||
|
||||
你喜欢复古游戏吗?你使用什么模拟器?你以前用过 Lakka 吗?在评论区与我们分享你的观点。
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://itsfoss.com/lakka-retrogaming-linux/
|
||||
|
||||
作者:[Abhishek Prakash][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[geekpi](https://github.com/geekpi)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://itsfoss.com/author/abhishek/
|
||||
[b]: https://github.com/lujun9972
|
||||
[1]: https://itsfoss.com/lightweight-linux-beginners/
|
||||
[2]: https://4bds6hergc-flywheel.netdna-ssl.com/wp-content/uploads/2018/10/lakka-retrogaming-linux.jpeg
|
||||
[3]: http://www.lakka.tv/
|
||||
[4]: https://www.retroarch.com/
|
||||
[5]: https://www.libretro.com/
|
||||
[6]: http://www.lakka.tv/powerful/
|
||||
[7]: https://retroachievements.org/
|
||||
[8]: http://www.lakka.tv/doc/FAQ/
|
||||
[9]: http://www.lakka.tv/disclaimer/
|