diff --git a/.travis.yml b/.travis.yml index ff9b70cbc2..0b25cff718 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,3 +1,18 @@ language: c script: - - make -s check + - sh ./scripts/check.sh + - ./scripts/badge.sh +branches: + only: + - master + except: + - gh-pages +git: + submodules: false +deploy: + provider: pages + skip_cleanup: true + github_token: $GITHUB_TOKEN + local_dir: build + on: + branch: master diff --git a/Makefile b/Makefile deleted file mode 100644 index f33b3e3f7d..0000000000 --- a/Makefile +++ /dev/null @@ -1,58 +0,0 @@ -DIR_PATTERN := (news|talk|tech) -NAME_PATTERN := [0-9]{8} [a-zA-Z0-9_.,() -]*\.md - -RULES := rule-source-added \ - rule-translation-requested \ - rule-translation-completed \ - rule-translation-revised \ - rule-translation-published -.PHONY: check match $(RULES) - -CHANGE_FILE := /tmp/changes - -check: $(CHANGE_FILE) - echo 'PR #$(TRAVIS_PULL_REQUEST) Changes:' - cat $(CHANGE_FILE) - echo - echo 'Check for rules...' - make -k $(RULES) 2>/dev/null | grep '^Rule Matched: ' - -$(CHANGE_FILE): - git --no-pager diff $(TRAVIS_BRANCH) origin/master --no-renames --name-status > $@ - -rule-source-added: - echo 'Unmatched Files:' - egrep -v '^A\s*"?sources/$(DIR_PATTERN)/$(NAME_PATTERN)"?' $(CHANGE_FILE) || true - echo '[End of Unmatched Files]' - [ $(shell egrep '^A\s*"?sources/$(DIR_PATTERN)/$(NAME_PATTERN)"?' $(CHANGE_FILE) | wc -l) -ge 1 ] - [ $(shell egrep -v '^A\s*"?sources/$(DIR_PATTERN)/$(NAME_PATTERN)"?' $(CHANGE_FILE) | wc -l) = 0 ] - echo 'Rule Matched: $(@)' - -rule-translation-requested: - [ $(shell egrep '^M\s*"?sources/$(DIR_PATTERN)/$(NAME_PATTERN)"?' $(CHANGE_FILE) | wc -l) = 1 ] - [ $(shell cat $(CHANGE_FILE) | wc -l) = 1 ] - echo 'Rule Matched: $(@)' - -rule-translation-completed: - [ $(shell egrep '^D\s*"?sources/$(DIR_PATTERN)/$(NAME_PATTERN)"?' $(CHANGE_FILE) | wc -l) = 1 ] - [ $(shell egrep '^A\s*"?translated/$(DIR_PATTERN)/$(NAME_PATTERN)"?' $(CHANGE_FILE) | wc -l) = 1 ] - [ $(shell cat $(CHANGE_FILE) | wc -l) = 2 ] - echo 'Rule Matched: $(@)' - -rule-translation-revised: - [ $(shell egrep '^M\s*"?translated/$(DIR_PATTERN)/$(NAME_PATTERN)"?' $(CHANGE_FILE) | wc -l) = 1 ] - [ $(shell cat $(CHANGE_FILE) | wc -l) = 1 ] - echo 'Rule Matched: $(@)' - -rule-translation-published: - [ $(shell egrep '^D\s*"?translated/$(DIR_PATTERN)/$(NAME_PATTERN)"?' 
$(CHANGE_FILE) | wc -l) = 1 ]
-	[ $(shell egrep '^A\s*"?published/$(NAME_PATTERN)' $(CHANGE_FILE) | wc -l) = 1 ]
-	[ $(shell cat $(CHANGE_FILE) | wc -l) = 2 ]
-	echo 'Rule Matched: $(@)'
-
-badge:
-	mkdir -p build/badge
-	./lctt-scripts/show_status.sh -s published >build/badge/published.svg
-	./lctt-scripts/show_status.sh -s translated >build/badge/translated.svg
-	./lctt-scripts/show_status.sh -s translating >build/badge/translating.svg
-	./lctt-scripts/show_status.sh -s sources >build/badge/sources.svg
diff --git a/published/20180105 The Best Linux Distributions for 2018.md b/published/20180105 The Best Linux Distributions for 2018.md
new file mode 100644
index 0000000000..6717f6233a
--- /dev/null
+++ b/published/20180105 The Best Linux Distributions for 2018.md
@@ -0,0 +1,135 @@
+2018 年最好的 Linux 发行版
+======
+
+![Linux distros 2018](https://www.linux.com/sites/lcom/files/styles/rendered_file/public/linux-distros-2018.jpg?itok=Z8sdx4Zu "Linux distros 2018")
+
+> Jack Wallen 分享他挑选的 2018 年最好的 Linux 发行版。
+
+这是新的一年,Linux 仍有无限可能。而且许多 Linux 发行版在 2017 年都带来了许多重大的改变,我相信在 2018 年它在服务器和桌面上将会带来更加稳定的系统和市场份额的增长。
+
+对于那些期待迁移到开源平台的人(或是那些想要更换发行版的人)来说,在即将到来的一年,什么是最好的选择?如果你去 [Distrowatch][14] 找一下,你可能会因为众多的发行版而感到头晕,其中一些的排名在上升,而还有一些则恰恰相反。
+
+因此,哪个 Linux 发行版将在 2018 年得到偏爱?我有我的看法。事实上,我现在就要和你们分享它。
+
+跟我做的[去年清单][15]相似,我将会把这张清单按类别拆分,使任务更加轻松。普通 Linux 用户的需求至少包含以下几个类别:系统管理员、轻量级发行版、桌面、为物联网和服务器发行的版本。
+
+根据这些,让我们开始 2018 年最好的 Linux 发行版清单吧。
+
+### 对系统管理员最好的发行版
+
+[Debian][16] 不常出现在“最好的”列表中。但它应该出现,为什么呢?如果了解到 Ubuntu 是基于 Debian 构建的(其实有很多的发行版都基于 Debian),你就很容易理解为什么这个发行版应该在许多“最好”清单中。但为什么是对管理员最好的呢?我想这是由于两个非常重要的原因:
+
+* 容易使用
+* 非常稳定
+
+因为 Debian 使用 dpkg 和 apt 包管理,它使得该环境用起来非常简单。而且因为 Debian 提供了最稳定的 Linux 平台之一,它为许多事物提供了理想的环境:桌面、服务器、测试、开发。虽然 Debian 可能不包括去年本分类的优胜者 [Parrot Linux][17] 所带有的大量应用程序,但添加完成任务所需的任何或全部必要的应用程序都非常容易。而且因为 Debian 可以根据你的选择安装不同的桌面(Cinnamon、GNOME、KDE、LXDE、Mate 或者 Xfce),肯定可以满足你对桌面的需求。
+
+![debian](https://www.linux.com/sites/lcom/files/styles/rendered_file/public/debian.jpg?itok=XkHHG692 "debian")
+
+*图 1:在 Debian 9.3 上运行的 GNOME 桌面。*
+
+同时,Debian 在 Distrowatch 上名列第二。下载、安装,然后让它为你的工作而服务吧。Debian 尽管不那么华丽,但是对于管理员的工作来说十分有用。
+
+### 最轻量级的发行版
+
+轻量级的发行版有其特殊的用途:给予一些老旧或是性能低下的机器以新生。但是这不意味着这些特别的发行版仅仅只为了老旧的硬件机器而生。如果你想要的是运行速度,你可能会想知道在你的现代机器上这类发行版的运行速度能有多快。
+
+在 2018 年上榜的最轻量级的发行版是 [Lubuntu][18]。尽管在这个类别里还有很多选择,而且尽管 Lubuntu 的资源占用与 Puppy Linux 一样小,但得益于它是 Ubuntu 家庭的一员,其易用性为它加了分。但是不要担心,Lubuntu 对于硬件的要求并不高:
+
++ CPU:奔腾 4 或者奔腾 M 或者 AMD K8 以上
++ 对于本地应用,512 MB 的内存就可以了,对于网络使用(Youtube、Google+、Google Drive、Facebook),建议 1 GB 以上。
+
+Lubuntu 使用的是 LXDE 桌面(图 2),这意味着新接触 Linux 的用户在使用这个发行版时不会有任何问题。这份简短清单中包含的应用(例如:Abiword、Gnumeric 和 Firefox)都是非常轻量且对用户友好的。
+
+![Lubuntu](https://www.linux.com/sites/lcom/files/styles/rendered_file/public/lubuntu_2.jpg?itok=BkTnh7hU "Lubuntu")
+
+*图 2:LXDE 桌面。*
+
+Lubuntu 能让十年以上的电脑如获新生。
+
+### 最好的桌面发行版
+
+[Elementary OS][19] 连续两年都是我清单中最好的桌面发行版。对于许多人,[Linux Mint][20](也是一个非常棒的分支)都是桌面发行版的领袖。但是,于我来说,它在易用性和稳定性上很难打败 Elementary OS。例如,我确信是 [Ubuntu][21] 17.10 的发布让我迁移回了 Canonical 的发行版。迁移到新的使用 GNOME 桌面的 Ubuntu 不久之后,我发现我怀念 Elementary OS 的外观、可用性和感觉(图 3)。在使用 Ubuntu 两周以后,我又换回了 Elementary OS。
+
+![Elementary OS](https://www.linux.com/sites/lcom/files/styles/rendered_file/public/elementaros.jpg?itok=SRZC2vkg "Elementary OS")
+
+*图 3:Pantheon 桌面是一件像艺术品一样的桌面。*
+
+使用 Elementary OS 的任何一个人都会觉得宾至如归。Pantheon 桌面是将操作顺滑和用户友好结合的最完美的桌面。每次更新,它都会变得更好。
+
+尽管 Elementary OS 在 Distrowatch 页面访问量中排名第六,但我预计到 2018 年末,它将至少上升至第三名。Elementary 开发人员非常关注用户的需求。他们倾听并且改进,这个发行版目前的状态已经如此之好,似乎他们还能把一切做得更好。如果您需要一个具有出色可靠性和易用性的桌面,Elementary OS 就是你的发行版。
+
+### 能够证明自己的最好的发行版
+
+很长一段时间内,[Gentoo][22] 都稳坐“展现你技能”的发行版的首座。但是,我认为现在 Gentoo 是时候让出“证明自己”的宝座给 [Linux From Scratch(LFS)][23]。你可能认为这不公平,因为 LFS 实际上不是一个发行版,而是一个帮助用户创建自己的 Linux 发行版的项目。但是,有什么能比你自己创建一个自己的发行版更能证明自己所学的 Linux 知识的呢?在 LFS 项目中,你可以从头开始构建自定义的 Linux 系统,而且是从源代码开始。所以,如果你真的想证明些什么,请下载 [Linux From Scratch Book][24] 并开始构建。
+
+### 对于物联网最好的发行版
+
+[Ubuntu Core][25] 已经是第二年赢得了该项的冠军。Ubuntu Core 是 Ubuntu 的一个小型的、事务型版本,专为嵌入式和物联网设备而构建。使 Ubuntu Core 如此完美支持物联网的原因在于它将重点放在 snap 包上 —— 这种通用包可以安装到一个平台上而不会干扰其基本系统。这些 snap 包包含它们运行所需的所有内容(包括依赖项),因此不必担心安装它会破坏操作系统(或任何其他已安装的软件)。此外,snap 包非常容易升级,并运行在隔离的沙箱中,这使它们成为物联网的理想解决方案。
+
+Ubuntu Core 内置的另一个安全机制是登录方式。Ubuntu Core 使用 Ubuntu One 的 SSH 密钥,这样登录系统的唯一方法,是把 SSH 密钥上传到你的 [Ubuntu One 帐户][26](图 4)。这为你的物联网设备提供了更高的安全性。
+
+![ Ubuntu Core](https://www.linux.com/sites/lcom/files/styles/rendered_file/public/ubuntucore.jpg?itok=Ydfq8NKH " Ubuntu Core")
+
+*图 4:Ubuntu Core 屏幕上指示通过 Ubuntu One 用户启用了远程访问。*
+
+### 最好的服务器发行版
+
+这里有点意见不统一,主要的因素是支持。如果你需要商业支持,乍一看,你最好的选择可能是 [Red Hat Enterprise Linux][27]。红帽年复一年地证明了自己不仅是全球最强大的企业服务器平台之一,而且是单一最赚钱的开源业务(年收入超过 20 亿美元)。
+
+但是,Red Hat 并不是唯一的服务器发行版。实际上,Red Hat 甚至并不能垄断企业服务器计算的各个方面。如果你关注亚马逊 Elastic Compute Cloud 上的云统计数据,Ubuntu 就会打败红帽企业 Linux。根据[云市场][28]的报告,EC2 统计数据显示 RHEL 的部署量低于 10 万,而 Ubuntu 的部署量超过 20 万。
+
+最终的结果是,Ubuntu 几乎已经成为云计算的领导者。如果你将它与 Ubuntu 对容器的易用性和可管理性结合起来,就会发现 Ubuntu Server 是服务器类别的明显赢家。而且,如果你需要商业支持,Canonical 将为你提供 [Ubuntu Advantage][29]。
+
+对使用 Ubuntu Server 的一个警告是它默认为纯文本界面(图 5)。如果需要,你可以安装 GUI,但使用 Ubuntu Server 命令行非常简单(每个 Linux 管理员都应该知道)。
+
+![Ubuntu server](https://www.linux.com/sites/lcom/files/styles/rendered_file/public/ubuntuserver_1.jpg?itok=qtFSUlee "Ubuntu server")
+
+*图 5:Ubuntu 服务器登录,通知更新。*
+
+### 你怎么看
+
+正如我之前所说,这些选择都非常主观,但如果你正在寻找一个好的开始,那就试试这些发行版。每一个都可以用于非常特定的目的,并且比大多数做得更好。虽然你可能不同意我的个别选择,但你可能会同意 Linux 在每个方面都提供了惊人的可能性。并且,请继续关注下周更多“最佳发行版”选秀。
+
+通过 Linux 基金会和 edX 的免费[“Linux 简介”][13]课程了解有关 Linux 的更多信息。
+
+--------------------------------------------------------------------------------
+
+via: https://www.linux.com/blog/learn/intro-to-linux/2018/1/best-linux-distributions-2018
+
+作者:[JACK WALLEN][a]
+译者:[dianbanjiu](https://github.com/dianbanjiu)
+校对:[wxy](https://github.com/wxy)
+
+本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
+
+[a]:https://www.linux.com/users/jlwallen
+[1]:https://www.linux.com/licenses/category/used-permission
+[2]:https://www.linux.com/licenses/category/used-permission
+[3]:https://www.linux.com/licenses/category/used-permission
+[4]:https://www.linux.com/licenses/category/used-permission
+[5]:https://www.linux.com/licenses/category/used-permission
+[6]:https://www.linux.com/licenses/category/creative-commons-zero
+[7]:https://www.linux.com/files/images/debianjpg
+[8]:https://www.linux.com/files/images/lubuntujpg-2
+[9]:https://www.linux.com/files/images/elementarosjpg
+[10]:https://www.linux.com/files/images/ubuntucorejpg
+[11]:https://www.linux.com/files/images/ubuntuserverjpg-1
+[12]:https://www.linux.com/files/images/linux-distros-2018jpg
+[13]:https://training.linuxfoundation.org/linux-courses/system-administration-training/introduction-to-linux
+[14]:https://distrowatch.com/
+[15]:https://www.linux.com/news/learn/sysadmin/best-linux-distributions-2017
+[16]:https://www.debian.org/
+[17]:https://www.parrotsec.org/
+[18]:http://lubuntu.me/
+[19]:https://elementary.io/
+[20]:https://linuxmint.com/
+[21]:https://www.ubuntu.com/
+[22]:https://www.gentoo.org/
+[23]:http://www.linuxfromscratch.org/
+[24]:http://www.linuxfromscratch.org/lfs/download.html
+[25]:https://www.ubuntu.com/core
+[26]:https://login.ubuntu.com/ +[27]:https://www.redhat.com/en/technologies/linux-platforms/enterprise-linux +[28]:http://thecloudmarket.com/stats#/by_platform_definition +[29]:https://buy.ubuntu.com/?_ga=2.177313893.113132429.1514825043-1939188204.1510782993 diff --git a/translated/talk/20180919 How Writing Can Expand Your Skills and Grow Your Career.md b/published/20180919 How Writing Can Expand Your Skills and Grow Your Career.md similarity index 79% rename from translated/talk/20180919 How Writing Can Expand Your Skills and Grow Your Career.md rename to published/20180919 How Writing Can Expand Your Skills and Grow Your Career.md index f75c55b892..23d730cca0 100644 --- a/translated/talk/20180919 How Writing Can Expand Your Skills and Grow Your Career.md +++ b/published/20180919 How Writing Can Expand Your Skills and Grow Your Career.md @@ -1,17 +1,21 @@ 写作是如何帮助技能拓展和事业成长的 ====== +> 了解为什么写作可以帮助学习新技能和事业成长 + ![](https://www.linux.com/sites/lcom/files/styles/rendered_file/public/graffiti-1281310_1920.jpg?itok=RCayfGKv) +Creative Commons Zero Pixabay + 在最近的[温哥华开源峰会][1]上,我参加了一个小组讨论,叫做“写作是如何改变你的职业生涯的(即使你不是个作家)”。主持人是 Opensource.com 的社区经理兼编辑 Rikki Endsley,成员有开源策略顾问 VM (Vicky) Brasseur,The New Stack 的创始人兼主编 Alex Williams,还有 The Scale Factory 的顾问 Dawn Foster。 -Rikki 在她的[这篇文章][3]中总结了一些能愉悦你,并且能以意想不到的方式改善你职业生涯的写作方法,我在峰会上的发言是受她这篇文章的启发。透露一下,我认识 Rikki 很久了,我们在同一家公司共事了很多年,一起带过孩子,到现在还是很亲密的朋友。 +Rikki 在她的[这篇文章][3]中总结了一些令人愉快的,并且能以意想不到的方式改善你职业生涯的写作方法,我在峰会上的发言是受她这篇文章的启发。透露一下,我认识 Rikki 很久了,我们在同一家公司共事了很多年,一起带过孩子,到现在还是很亲密的朋友。 ### 写作和学习 正如 Rikki 对这个小组讨论的描述,“即使你自认为不是一个‘作家’,你也应该考虑写一下对开源的贡献,还有你的项目或者社区”。写作是一种很好的方式,来分享自己的知识并让别人参与到你的工作中来,当然它对个人也有好处。写作能帮助你结识新人,学习新技能,还能改善你的沟通。 -我发现写作能让我搞清楚自己对某个主题有哪些不懂的地方。写作的过程会让知识体系的空白很突出,这激励了我通过进一步的研究、阅读和提问来填补空白。 +我发现写作能让我搞清楚自己对某个主题有哪些不懂的地方。写作的过程会让知识体系的空白很突出,这激励了我通过进一步的研究、阅读和提问来填补这些空白。 Rikki 说:“写那些你不知道的东西会更加困难也更加耗时,但是也更有成就感,更有益于你的事业。我发现写我不知道的东西有助于自己学习,因为得研究透彻才能给读者解释清楚。” @@ -19,12 +23,11 @@ Rikki 说:“写那些你不知道的东西会更加困难也更加耗时, ### 更明确的沟通 - -写作有助于练习思考和准确讲话,尤其是面向国际受众写作(或演讲)时。例如,在[这篇文章中][5],Isabel Drost-Fromm 为那些母语不是英语的演讲者提供了几个技巧来消除歧义。不管是在会议上还是在自己团队内发言,写作还能帮你在演示之前理清思路。 +写作有助于思维训练和准确表达,尤其是面向国际受众写作(或演讲)时。例如,在[这篇文章中][5],Isabel Drost-Fromm 为那些母语不是英语的演讲者提供了几个技巧来消除歧义。不管是在会议上还是在自己团队内发言,写作还能帮你在演示幻灯片之前理清思路。 Rikki 说:“写文章的过程有助于我组织整理自己的发言和演示稿,也是一个给参会者提供笔记的好方式,还可以分享给没有参加活动的更多国际观众。” -如果你有兴趣,我鼓励你去写作。我强烈建议你参考这里提到的文章,开始思考你要写的内容。 不幸的是,我们在开源峰会上的讨论没有记录,但我希望将来能再做一次讨论,分享更多的想法。 +如果你有兴趣,我鼓励你去写作。我强烈建议你参考这里提到的文章,开始思考你要写的内容。不幸的是,我们在开源峰会上的讨论没有记录下来,但我希望将来能再做一次讨论,分享更多的想法。 -------------------------------------------------------------------------------- @@ -33,7 +36,7 @@ via: https://www.linux.com/blog/2018/9/how-writing-can-help-you-learn-new-skills 作者:[Amber Ankerholz][a] 选题:[lujun9972](https://github.com/lujun9972) 译者:[belitex](https://github.com/belitex) -校对:[校对者ID](https://github.com/校对者ID) +校对:[pityonline](https://github.com/pityonline) 本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 diff --git a/translated/talk/20180926 CPU Power Manager - Control And Manage CPU Frequency In Linux.md b/published/20180926 CPU Power Manager - Control And Manage CPU Frequency In Linux.md similarity index 59% rename from translated/talk/20180926 CPU Power Manager - Control And Manage CPU Frequency In Linux.md rename to published/20180926 CPU Power Manager - Control And Manage CPU Frequency In Linux.md index ad76c2d42b..6e8852ed4c 100644 --- a/translated/talk/20180926 CPU Power Manager - Control And Manage CPU Frequency In Linux.md +++ b/published/20180926 CPU Power Manager - Control And Manage CPU 
Frequency In Linux.md
@@ -1,25 +1,25 @@
-CPU 电源管理工具 - Linux 系统中 CPU 主频的控制和管理
+CPU 电源管理器:Linux 系统中 CPU 主频的控制和管理
 ======
 
 ![](https://www.ostechnix.com/wp-content/uploads/2018/09/Manage-CPU-Frequency-720x340.jpeg)
 
-你使用笔记本的话,可能知道 Linux 系统的电源管理做的很不好。虽然有 **TLP**、[**Laptop Mode Tools** 和 **powertop**][1] 这些工具来辅助减少电量消耗,但跟 Windows 和 Mac OS 系统比较起来,电池的整个使用周期还是不尽如意。此外,还有一种降低功耗的办法就是限制 CPU 的频率。这是可行的,然而却需要编写很复杂的终端命令来设置,所以使用起来不太方便。幸好,有一款名为 **CPU Power Manager** 的 GNOME 扩展插件,可以很容易的就设置和管理你的 CPU 主频。GNOME 桌面系统中,CPU Power Manager 使用名为 **intel_pstate** 的功率驱动程序(几乎所有的 Intel CPU 都支持)来控制和管理 CPU 主频。
+你使用笔记本的话,可能知道 Linux 系统的电源管理做得很不好。虽然有 **TLP**、[**Laptop Mode Tools** 和 **powertop**][1] 这些工具来辅助减少电量消耗,但跟 Windows 和 Mac OS 系统比较起来,电池的整个使用周期还是不尽如人意。此外,还有一种降低功耗的办法就是限制 CPU 的频率。这是可行的,然而却需要编写很复杂的终端命令来设置,所以使用起来不太方便。幸好,有一款名为 **CPU Power Manager** 的 GNOME 扩展插件,可以很容易地设置和管理你的 CPU 主频。GNOME 桌面系统中,CPU Power Manager 使用名为 **intel_pstate** 的频率调整驱动程序(几乎所有的 Intel CPU 都支持)来控制和管理 CPU 主频。
 
 使用这个扩展插件的另一个原因是可以减少系统的发热量,因为很多系统在正常使用中的发热量总让人不舒服,限制 CPU 的主频就可以减低发热量。它还可以减少 CPU 和其他组件的磨损。
 
 ### 安装 CPU Power Manager
 
-首先,进入[**扩展插件主页面**][2],安装此扩展插件。
+首先,进入[扩展插件主页面][2],安装此扩展插件。
 
 安装好插件后,在 GNOME 顶部栏的右侧会出现一个 CPU 图标。点击图标,会出现安装此扩展的选项提示,如下所示:
 
 ![](https://www.ostechnix.com/wp-content/uploads/2018/09/CPU-Power-Manager-icon.png)
 
-点击**“尝试安装”**按纽,会弹出输入密码确认框。插件需要 root 权限来添加 policykit 规则,进而控制 CPU 主频。下面是弹出的提示框样子:
+点击“尝试安装”按钮,会弹出输入密码确认框。插件需要 root 权限来添加 policykit 规则,进而控制 CPU 主频。下面是弹出的提示框样子:
 
 ![](https://www.ostechnix.com/wp-content/uploads/2018/09/CPU-Power-Manager-1.png)
 
-输入密码,点击**“认证”**按纽,完成安装。最后在 **/usr/share/polkit-1/actions** 目录下添加了一个名为 **mko.cpupower.setcpufreq.policy** 的 policykit 文件。
+输入密码,点击“认证”按钮,完成安装。最后在 `/usr/share/polkit-1/actions` 目录下添加了一个名为 `mko.cpupower.setcpufreq.policy` 的 policykit 文件。
 
 都安装完成后,如果点击右上角的 CPU 图标,会出现如下所示:
 
@@ -27,12 +27,10 @@
 
 ### 功能特性
 
-  * **查看 CPU 主频:** 显然,你可以通过这个提示窗口看到 CPU 的当前运行频率。
-  * **设置最大最小主频:** 使用此扩展,你可以根据列出的最大、最小频率百分比进度条来分别设置其频率限制。一旦设置,CPU 将会严格按照此设置范围运行。
-  * **开/关 Turbo Boost:** 这是我最喜欢的功能特性。大多数 Intel CPU 都有 “Turbo Boost” 特性,为了提高额外性能,其中的一个内核为自动进行超频。此功能虽然可以使系统获得更高的性能,但也大大增加功耗。所以,如果不做 CPU 密集运行的话,为节约电能,最好关闭 Turbo Boost 功能。事实上,在我电脑上,我大部分时间是把 Turbo Boost 关闭的。
-  * **生成配置文件:** 可以生成最大和最小频率的配置文件,就可以很轻松打开/关闭,而不是每次手工调整设置。
-
-
+  * **查看 CPU 主频:** 显然,你可以通过这个提示窗口看到 CPU 的当前运行频率。
+  * **设置最大、最小主频:** 使用此扩展,你可以根据列出的最大、最小频率百分比进度条来分别设置其频率限制。一旦设置,CPU 将会严格按照此设置范围运行。
+  * **开/关 Turbo Boost:** 这是我最喜欢的功能特性。大多数 Intel CPU 都有 “Turbo Boost” 特性,为了提高额外性能,其中的一个核心会自动进行超频。此功能虽然可以使系统获得更高的性能,但也大大增加功耗。所以,如果不做 CPU 密集的工作的话,为节约电能,最好关闭 Turbo Boost 功能。事实上,在我电脑上,我大部分时间是把 Turbo Boost 关闭的。
+  * **生成配置文件:** 可以生成最大和最小频率的配置文件,这样就可以很轻松地打开/关闭,而不用每次手工调整设置。
 
 ### 偏好设置
 
@@ -40,24 +38,23 @@
 
 ![](https://www.ostechnix.com/wp-content/uploads/2018/09/CPU-Power-Manager-preferences.png)
 
-如你所见,你可以设置是否显示 CPU 主频,也可以设置是否以 **Ghz** 来代替 **Mhz** 显示。
+如你所见,你可以设置是否显示 CPU 主频,也可以设置是否以 **GHz** 来代替 **MHz** 显示。
 
-你也可以编辑和创建/删除配置:
+你也可以编辑和创建/删除配置文件:
 
 ![](https://www.ostechnix.com/wp-content/uploads/2018/09/CPU-Power-Manager-preferences-1.png)
 
-可以为每个配置分别设置最大、最小主频及开/关 Turbo boost。
+可以为每个配置文件分别设置最大、最小主频及开/关 Turbo boost。
 
 ### 结论
 
 正如我在开始时所说的,Linux 系统的电源管理并不是最好的,许多人总是希望他们的 Linux 笔记本电脑电池能多用几分钟。如果你也是其中一员,就试试此扩展插件吧。为了省电,虽然这是非常规的做法,但有效果。我确实喜欢这个插件,到现在已经使用了好几个月了。
 
-What do you think about this extension? Put your thoughts in the comments below!你对此插件有何看法呢?请把你的观点留在下面的评论区吧。
+你对此插件有何看法呢?请把你的观点留在下面的评论区吧。
+
+祝贺!
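+### 附注:命令行下的大致原理
+
+作为补充,下面用一个简单的示例大致演示这类工具在底层所做的事情。这只是一个示意性的草图,并非该扩展的实际实现:它假设你的 CPU 使用 intel_pstate 驱动,具体路径和取值在不同内核版本上可能略有差异。
+
+```
+# 查看各核心当前的运行频率
+grep 'MHz' /proc/cpuinfo
+
+# 将最大、最小主频分别限制为 50% 和 20%(需要 root 权限)
+echo 50 | sudo tee /sys/devices/system/cpu/intel_pstate/max_perf_pct
+echo 20 | sudo tee /sys/devices/system/cpu/intel_pstate/min_perf_pct
+
+# 关闭 Turbo Boost(写入 1 为关闭,写入 0 为恢复)
+echo 1 | sudo tee /sys/devices/system/cpu/intel_pstate/no_turbo
+```
+
+这些设置在重启后不会保留,这也正是用一个桌面扩展来统一管理它们的意义所在。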
-
 --------------------------------------------------------------------------------
 
 via: https://www.ostechnix.com/cpu-power-manager-control-and-manage-cpu-frequency-in-linux/
 
 作者:[EDITOR][a]
 选题:[lujun9972](https://github.com/lujun9972)
 译者:[runningwater](https://github.com/runningwater)
-校对:[校对者ID](https://github.com/校对者ID)
+校对:[wxy](https://github.com/wxy)
 
 本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
diff --git a/translated/talk/20180930 Creator of the World Wide Web is Creating a New Decentralized Web.md b/published/20180930 Creator of the World Wide Web is Creating a New Decentralized Web.md
similarity index 50%
rename from translated/talk/20180930 Creator of the World Wide Web is Creating a New Decentralized Web.md
rename to published/20180930 Creator of the World Wide Web is Creating a New Decentralized Web.md
index e55455508d..776c5e5c8e 100644
--- a/translated/talk/20180930 Creator of the World Wide Web is Creating a New Decentralized Web.md
+++ b/published/20180930 Creator of the World Wide Web is Creating a New Decentralized Web.md
@@ -1,27 +1,27 @@
-万维网的创建者正在创建一个新的分布式网络
+万维网的创建者正在创建一个新的去中心化网络
 ======
 
-**万维网的创建者 Tim Berners-Lee 公布了他计划创建一个新的分布式网络,网络中的数据将由用户控制**
+> 万维网(WWW)的创建者 Tim Berners-Lee 公布了他计划创建一个新的去中心化网络,该网络中的数据将由用户控制。
 
-[Tim Berners-Lee] [1]以创建万维网而闻名,万维网就是你现在所知的互联网。二十多年之后,Tim 致力于将互联网从企业巨头的掌控中解放出来,并通过分布式网络将权力交回给人们。
+[Tim Berners-Lee][1] 以创建万维网而闻名,万维网就是你现在所知的互联网。二十多年之后,Tim 致力于将互联网从企业巨头的掌控中解放出来,并通过<ruby>去中心化网络<rt>Decentralized Web</rt></ruby>将权力交回给人们。
 
-Berners-Lee 对互联网“强权”们处理用户数据的方式感到不满。所以他[开始致力于他自己的开源项目][2] Solid “来将在网络上的权力归还给人们”
+Berners-Lee 对互联网“强权”们处理用户数据的方式感到不满。所以他[开始致力于他自己的开源项目][2] Solid,“来将在网络上的权力归还给人们”。
 
-> Solid 改变了当前用户必须将个人数据交给数字巨头以换取可感知价值的模型。正如我们都已发现的那样,这不符合我们的最佳利益。Solid 是我们如何驱动网络进化以恢复平衡——以一种革命性的方式,让我们每个人完全地控制数据,无论数据是否是个人数据。
+> Solid 改变了当前用户必须将个人数据交给数字巨头以换取可感知价值的模型。正如我们都已发现的那样,这不符合我们的最佳利益。Solid 是我们如何驱动网络进化以恢复平衡 —— 以一种革命性的方式,让我们每个人完全地控制数据,无论数据是否是个人数据。
 
 ![Tim Berners-Lee is creating a decentralized web with open source project Solid][3]
 
-基本上,[Solid][4]是一个使用现有网络构建的平台,在这里你可以创建自己的 “pods” (个人数据存储)。你决定这个 “pods” 将被托管在哪里,谁将访问哪些数据元素以及数据将如何通过这个 pod 分享。
+基本上,[Solid][4] 是一个使用现有网络构建的平台,在这里你可以创建自己的 “pod”(个人数据存储)。你决定这个 “pod” 将被托管在哪里,谁将访问哪些数据元素以及数据将如何通过这个 pod 分享。
 
 Berners-Lee 相信 Solid “将以一种全新的方式,授权个人、开发者和企业来构思、构建和寻找创新、可信和有益的应用和服务。”
 
 开发人员需要将 Solid 集成进他们的应用程序和网站中。Solid 仍在早期阶段,所以目前没有相关的应用程序。但是项目网站宣称“第一批 Solid 应用程序正在开发当中”。
 
-Berners-Lee 已经创立一家名为[Inrupt][5] 的初创公司,并已从麻省理工学院休假来全职工作在 Solid,来将其”从少部分人的愿景带到多数人的现实“。
+Berners-Lee 已经创立一家名为 [Inrupt][5] 的初创公司,并已从麻省理工学院休学术假来全职投入 Solid 的工作,来将其“从少部分人的愿景带到多数人的现实”。
 
-如果你对 Solid 感兴趣,[学习如何开发应用程序][6]或者以自己的方式[给项目做贡献][7]。当然,建立和推动 Solid 的广泛采用将需要大量的努力,所以每一点的贡献都将有助于分布式网络的成功。
+如果你对 Solid 感兴趣,可以[学习如何开发应用程序][6]或者以自己的方式[给项目做贡献][7]。当然,建立和推动 Solid 的广泛采用将需要大量的努力,所以每一点的贡献都将有助于去中心化网络的成功。
 
-你认为[分布式网络][8]会成为现实吗?你是如何看待分布式网络,特别是 Solid 项目的?
+你认为[去中心化网络][8]会成为现实吗?你是如何看待去中心化网络,特别是 Solid 项目的?
--------------------------------------------------------------------------------
 
 via: https://itsfoss.com/solid-decentralized-web/
 
 作者:[Abhishek Prakash][a]
 选题:[lujun9972](https://github.com/lujun9972)
 译者:[ypingcn](https://github.com/ypingcn)
-校对:[校对者ID](https://github.com/校对者ID)
+校对:[wxy](https://github.com/wxy)
 
 本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
diff --git a/translated/tech/20181012 Happy birthday, KDE- 11 applications you never knew existed.md b/published/20181012 Happy birthday, KDE- 11 applications you never knew existed.md
similarity index 74%
rename from translated/tech/20181012 Happy birthday, KDE- 11 applications you never knew existed.md
rename to published/20181012 Happy birthday, KDE- 11 applications you never knew existed.md
index 3e357e99aa..6dc68f2983 100644
--- a/translated/tech/20181012 Happy birthday, KDE- 11 applications you never knew existed.md
+++ b/published/20181012 Happy birthday, KDE- 11 applications you never knew existed.md
@@ -1,16 +1,16 @@
-生日快乐,KDE:你从不知道的 11 个应用
+你从不知道的 11 个 KDE 应用
 ======
-你今天需要哪种有趣或奇特的应用?
+
+> 你今天需要哪种有趣或奇特的应用?
+
 ![](https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/BIZ_DebucketizeOrgChart_A.png?itok=RB3WBeQQ)
-Linux 桌面环境 KDE 将于今年 10 月 14 日庆祝诞生 22 周年。KDE 社区用户创建了大量应用,它们很多都提供有趣和奇特的服务。我们仔细看了该列表,并挑选出了你可能想了解的 11 个应用。
-
-没有很多,但[也有不少][1]。
+Linux 桌面环境 KDE 于今年 10 月 14 日庆祝诞生 22 周年。KDE 社区用户创建了海量应用(并没有很多,但[也有不少][1]),它们很多都提供有趣和奇特的服务。我们仔细看了该列表,并挑选出了你可能想了解的 11 个应用。
 
 ### 11 个你从没了解的 KDE 应用
 
-1. [KTeaTime][2] 是一个泡茶计时器。选择你正在饮用的茶的类型 - 绿茶、红茶、凉茶等 - 当可以取出茶包来饮用时,计时器将会响。
-2. [KTux][3] 就是一个屏保程序......是么?Tux 用他的绿色飞船在外太空飞行。
+1. [KTeaTime][2] 是一个泡茶计时器。选择你正在饮用的茶的类型 —— 绿茶、红茶、凉茶等 —— 当可以取出茶包来饮用时,计时器将会响。
+2. [KTux][3] 就是一个屏保程序……是么?Tux 用它的绿色飞船在外太空飞行。
 3. [Blinken][4] 是一款基于 Simon Says 的记忆游戏,这是一个 1978 年发布的电子游戏。玩家们在记住长度增加的序列时会有挑战。
 4. [Tellico][5] 是一个收集管理器,用于组织你最喜欢的爱好。也许你还在收集棒球卡。也许你是红酒俱乐部的一员。也许你是一个严肃的书虫。也许三个都是!
 5. [KRecipes][6] **不是** 简单的食谱管理器。它还有很多其他功能!购物清单、营养素分析、高级搜索、菜谱评级、导入/导出各种格式等。
@@ -19,7 +19,7 @@
 8. [KDiamond][9] 类似于宝石迷阵或其他单人益智游戏,其中游戏的目标是搭建一定数量的相同类型的宝石或物体的行。这里是钻石。
 9. [KolourPaint][10] 是一个非常简单的图像编辑工具,也可以用于创建简单的矢量图形。
 10. [Kiriki][11] 是一款类似于 Yahtzee 的 2-6 名玩家的骰子游戏。
-11. [RSIBreak][12] 没有以 K 开头。什么!?它以“RSI”开头代表“重复性劳损” (Repetitive Strain Injury),这会在日复一日长时间使用鼠标和键盘后发生。这个应用会提醒你休息,并可以个性化,以满足你的需求。
+11. [RSIBreak][12] 居然没有以 K 开头!?它以“RSI”开头代表“<ruby>重复性劳损<rt>Repetitive Strain Injury</rt></ruby>”,这会在日复一日长时间使用鼠标和键盘后发生。这个应用会提醒你休息,并可以个性化定制,以满足你的需求。
 
--------------------------------------------------------------------------------
 
 via: https://opensource.com/article/18/10/kde-applications
 
 作者:[Opensource.com][a]
 选题:[lujun9972][b]
 译者:[geekpi](https://github.com/geekpi)
-校对:[校对者ID](https://github.com/校对者ID)
+校对:[wxy](https://github.com/wxy)
 
 本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
diff --git a/scripts/badge.sh b/scripts/badge.sh
new file mode 100755
index 0000000000..fd3070c7dc
--- /dev/null
+++ b/scripts/badge.sh
@@ -0,0 +1,10 @@
+#!/usr/bin/env bash
+# 重新生成 badge
+set -o errexit
+
+SCRIPTS_DIR=$(cd $(dirname "$0") && pwd)
+BUILD_DIR=$(cd $SCRIPTS_DIR/.. && pwd)/build
+mkdir -p ${BUILD_DIR}/badge
+for catalog in published translated translating sources;do
+    ${SCRIPTS_DIR}/badge/show_status.sh -s ${catalog} > ${BUILD_DIR}/badge/${catalog}.svg
+done
diff --git a/scripts/badge/show_status.sh b/scripts/badge/show_status.sh
new file mode 100755
index 0000000000..aab852b486
--- /dev/null
+++ b/scripts/badge/show_status.sh
@@ -0,0 +1,92 @@
+#!/usr/bin/env bash
+
+set -e
+
+function help()
+{
+    cat <<EOF
+[帮助文本在提取时被截去]
+EOF
+}
+
+# 生成 svg 徽章(heredoc 模板中的 SVG/XML 标记在提取时被截去,仅余变量占位符)
+        cat <<EOF
+    ${comment}
+    ${comment}
+    ${num}
+    ${num}
+EOF
+    else
+        cat <<EOF
[show_status.sh 的其余部分,以及 scripts/check/analyze.sh 的 diff 头部(该脚本读入 /tmp/changes、写出 /tmp/stats)在提取时被截去]
+do_analyze() {
+    OTHER_REGEX='^$'
+    for TYPE in 'SRC' 'TSL' 'PUB'; do
+        for STAT in 'A' 'M' 'D'; do
+            # 统计每个类别的每个操作
+            REGEX="$(get_operation_regex "$STAT" "$TYPE")"
+            OTHER_REGEX="${OTHER_REGEX}|${REGEX}"
+            eval "${TYPE}_${STAT}=\"\$(grep -Ec '$REGEX' /tmp/changes)\"" || true
+            eval echo "${TYPE}_${STAT}=\$${TYPE}_${STAT}"
+        done
+    done
+
+    # 统计其他操作
+    OTHER="$(grep -Evc "$OTHER_REGEX" /tmp/changes)" || true
+    echo "OTHER=$OTHER"
+
+    # 统计变更总数
+    TOTAL="$(wc -l < /tmp/changes )"
+    echo "TOTAL=$TOTAL"
+}
+
+
+echo "[分析] 统计文件变更……"
+do_analyze > /tmp/stats
+echo "[分析] 已写入统计结果:"
+cat /tmp/stats
diff --git a/scripts/check/check.sh b/scripts/check/check.sh
new file mode 100644
index 0000000000..a527c225ab
--- /dev/null
+++ b/scripts/check/check.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+# 检查脚本状态
+set -e
+
+################################################################################
+# 暂时仅供开发使用
+################################################################################
+
+shellcheck -e SC2034 -x mock/stats.sh "$(dirname "$0")"/*.sh \
+    && echo '[检查] ShellCheck 通过'
diff --git a/scripts/check/collect.sh b/scripts/check/collect.sh
new file mode 100644
index 0000000000..dc6293e280
--- /dev/null
+++ b/scripts/check/collect.sh
@@ -0,0 +1,37 @@
+#!/bin/bash
+# PR 文件变更收集
+set -e
+
+################################################################################
+# 读入:(无)
+# 写出:
+# - /tmp/changes # 文件变更列表
+################################################################################
+
+
+echo "[收集] 计算 PR 分支与目标分支的分叉点……"
+
+TARGET_BRANCH="${TRAVIS_BRANCH:-master}"
+echo "[收集] 目标分支设定为:${TARGET_BRANCH}"
+
+MERGE_BASE='HEAD^'
+[ "$TRAVIS_PULL_REQUEST" != 'false' ] \
+    && MERGE_BASE="$(git merge-base "$TARGET_BRANCH" HEAD)"
+echo "[收集] 找到分叉节点:${MERGE_BASE}"
+
+echo "[收集] 变更摘要:"
+git --no-pager show --summary "${MERGE_BASE}..HEAD"
+
+{
+    git --no-pager log --oneline "${MERGE_BASE}..HEAD" | grep -Eq '绕过检查' && {
+        touch /tmp/bypass
+        echo "[收集] 已标记为绕过检查项"
+    }
+} || true
+
+echo "[收集] 写出文件变更列表……"
+
+git diff "$MERGE_BASE" HEAD --no-renames --name-status > /tmp/changes
+echo "[收集] 已写出文件变更列表:"
+cat /tmp/changes
+{ [ -z "$(cat /tmp/changes)" ] && echo "(无变更)"; } || true
diff --git a/scripts/check/common.inc.sh b/scripts/check/common.inc.sh
new file mode 100644
index 0000000000..6012bc2fe5
--- /dev/null
+++ b/scripts/check/common.inc.sh
@@ -0,0 +1,30 @@
+#!/bin/sh
+
+################################################################################
+# 公用常量和函数
+################################################################################
+
+# 定义类别目录
+export SRC_DIR='sources'    # 未翻译
+export TSL_DIR='translated' # 已翻译
+export PUB_DIR='published'  # 已发布
+
+# 定义匹配规则
+export CATE_PATTERN='(news|talk|tech)' # 类别
+export FILE_PATTERN='[0-9]{8} [a-zA-Z0-9_.,() -]*\.md' # 文件名
+
+# 用法:get_operation_regex 状态 类型
+#
+# 状态为:
+# - A:添加
+# - M:修改
+# - D:删除
+# 类型为:
+# - SRC:未翻译
+# - TSL:已翻译
+# - PUB:已发布
+get_operation_regex() {
+    STAT="$1"
+    TYPE="$2"
+    echo "^${STAT}\\s+\"?$(eval echo "\$${TYPE}_DIR")/"
+}
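顺带一提,`get_operation_regex` 通过 `eval` 做了一层变量间接引用,把 `SRC`/`TSL`/`PUB` 映射到对应目录。下面是一个简单的用法示意(假设在仓库根目录下执行;这段示例并非仓库中的文件,仅用于说明):

```bash
#!/bin/bash
# 加载上面 common.inc.sh 中定义的常量和函数
. scripts/check/common.inc.sh

# 生成“添加(A)到 sources/ 目录”的匹配正则
get_operation_regex A SRC    # 输出:^A\s+"?sources/

# 与 analyze.sh 中相同的用法:统计此类变更的条数
grep -Ec "$(get_operation_regex A SRC)" /tmp/changes || true
```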
diff --git a/scripts/check/identify.sh b/scripts/check/identify.sh new file mode 100644 index 0000000000..f8e4c44160 --- /dev/null +++ b/scripts/check/identify.sh @@ -0,0 +1,86 @@ +#!/bin/bash +# 匹配 PR 规则 +set -e + +################################################################################ +# 读入: +# - /tmp/stats +# 写出:(无) +################################################################################ + +# 加载公用常量和函数 +# shellcheck source=common.inc.sh +. "$(dirname "$0")/common.inc.sh" + +echo "[匹配] 加载统计结果……" +# 加载统计结果 +# shellcheck source=mock/stats.sh +. /tmp/stats + +# 定义 PR 规则 + +# 绕过检查:绕过 PR 检查 +rule_bypass_check() { + [ -f /tmp/bypass ] && echo "匹配规则:绕过检查" +} + +# 添加原文:添加至少一篇原文 +rule_source_added() { + [ "$SRC_A" -ge 1 ] \ + && [ "$TOTAL" -eq "$SRC_A" ] && echo "匹配规则:添加原文 ${SRC_A} 篇" +} + +# 申领翻译:只能申领一篇原文 +rule_translation_requested() { + [ "$SRC_M" -eq 1 ] \ + && [ "$TOTAL" -eq 1 ] && echo "匹配规则:申领翻译" +} + +# 提交译文:只能提交一篇译文 +rule_translation_completed() { + [ "$SRC_D" -eq 1 ] && [ "$TSL_A" -eq 1 ] \ + && [ "$TOTAL" -eq 2 ] && echo "匹配规则:提交译文" +} + +# 校对译文:只能校对一篇 +rule_translation_revised() { + [ "$TSL_M" -eq 1 ] \ + && [ "$TOTAL" -eq 1 ] && echo "匹配规则:校对译文" +} + +# 发布译文:发布多篇译文 +rule_translation_published() { + [ "$TSL_D" -ge 1 ] && [ "$PUB_A" -ge 1 ] && [ "$TSL_D" -eq "$PUB_A" ] \ + && [ "$TOTAL" -eq $(($TSL_D + $PUB_A)) ] \ + && echo "匹配规则:发布译文 ${PUB_A} 篇" +} + +# 定义常见错误 + +# 未知错误 +error_undefined() { + echo "未知错误:无匹配规则,请尝试只对一篇文章进行操作" +} + +# 申领多篇 +error_translation_requested_multiple() { + [ "$SRC_M" -gt 1 ] \ + && echo "匹配错误:申领多篇,请一次仅申领一篇" +} + +# 执行检查并输出匹配项目 +do_check() { + rule_bypass_check \ + || rule_source_added \ + || rule_translation_requested \ + || rule_translation_completed \ + || rule_translation_revised \ + || rule_translation_published \ + || { + error_translation_requested_multiple \ + || error_undefined + exit 1 + } +} + +do_check diff --git a/scripts/check/mock/stats.sh b/scripts/check/mock/stats.sh new file mode 100644 index 0000000000..966f1b04fd --- /dev/null +++ b/scripts/check/mock/stats.sh @@ -0,0 +1,13 @@ +#!/bin/sh +# 给 ShellCheck 用的 Mock 统计 +SRC_A=0 +SRC_M=0 +SRC_D=0 +TSL_A=0 +TSL_M=0 +TSL_D=0 +PUB_A=0 +PUB_M=0 +PUB_D=0 +OTHER=0 +TOTAL=0 diff --git a/sources/talk/20170921 The Rise and Rise of JSON.md b/sources/talk/20170921 The Rise and Rise of JSON.md new file mode 100644 index 0000000000..84a594c89a --- /dev/null +++ b/sources/talk/20170921 The Rise and Rise of JSON.md @@ -0,0 +1,93 @@ +The Rise and Rise of JSON +====== +JSON has taken over the world. Today, when any two applications communicate with each other across the internet, odds are they do so using JSON. It has been adopted by all the big players: Of the ten most popular web APIs, a list consisting mostly of APIs offered by major companies like Google, Facebook, and Twitter, only one API exposes data in XML rather than JSON. Twitter, to take an illustrative example from that list, supported XML until 2013, when it released a new version of its API that dropped XML in favor of using JSON exclusively. JSON has also been widely adopted by the programming rank and file: According to Stack Overflow, a question and answer site for programmers, more questions are now asked about JSON than about any other data interchange format. + +![][1] + +XML still survives in many places. It is used across the web for SVGs and for RSS and Atom feeds. When Android developers want to declare that their app requires a permission from the user, they do so in their app’s manifest, which is written in XML. 
XML also isn’t the only alternative to JSON—some people now use technologies like YAML or Google’s Protocol Buffers. But these are nowhere near as popular as JSON. For the time being, JSON appears to be the go-to format for communicating with other programs over the internet.
+
+JSON’s dominance is surprising when you consider that as recently as 2005 the web world was salivating over the potential of “Asynchronous JavaScript and XML” and not “Asynchronous JavaScript and JSON.” It is of course possible that this had nothing to do with the relative popularity of the two formats at the time and reflects only that “AJAX” must have seemed a more appealing acronym than “AJAJ.” But even if some people were already using JSON instead of XML in 2005 (and in fact not many people were yet), one still wonders how XML’s fortunes could have declined so precipitously that a mere decade or so later “Asynchronous JavaScript and XML” has become an ironic misnomer. What happened in that decade? How did JSON supersede XML in so many applications? And who came up with this data format now depended on by engineers and systems all over the world?
+
+### The Birth of JSON
+
+The first JSON message was sent in April of 2001. Since this was a historically significant moment in computing, the message was sent from a computer in a Bay-Area garage. Douglas Crockford and Chip Morningstar, co-founders of a technology consulting company called State Software, had gathered in Morningstar’s garage to test out an idea.
+
+Crockford and Morningstar were trying to build AJAX applications well before the term “AJAX” had been coined. Browser support for what they were attempting was not good. They wanted to pass data to their application after the initial page load, but they had not found a way to do this that would work across all the browsers they were targeting.
+
+Though it’s hard to believe today, Internet Explorer represented the bleeding edge of web browsing in 2001. As early as 1999, Internet Explorer 5 supported a primordial form of XMLHttpRequest, which programmers could access using a framework called ActiveX. Crockford and Morningstar could have used this technology to fetch data for their application, but they could not have used the same solution in Netscape 4, another browser that they sought to support. So Crockford and Morningstar had to use a different system that worked in both browsers.
+
+The first JSON message looked like this:
+
+```
+<html><head><script>
+  document.domain = 'fudco';
+  parent.session.receive(
+    { to: "session", do: "test",
+      text: "Hello world!" }
+  )
+</script></head><body></body></html>
+```
+
+Only a small part of the message resembles JSON as we know it today. The message itself is actually an HTML document containing some JavaScript. The part that resembles JSON is just a JavaScript object literal being passed to a function called `receive()`.
+
+Crockford and Morningstar had decided that they could abuse an HTML frame to send themselves data. They could point a frame at a URL that would return an HTML document like the one above. When the HTML was received, the JavaScript would be run, passing the object literal back to the application. This worked as long as you were careful to sidestep browser protections preventing a sub-window from accessing its parent; you can see that Crockford and Morningstar did that by explicitly setting the document domain. (This frame-based technique, sometimes called the hidden frame technique, was commonly used in the late 90s before the widespread implementation of XMLHttpRequest.)
+
+The amazing thing about the first JSON message is that it’s not obviously the first usage of a new kind of data format at all.
It’s just JavaScript! In fact the idea of using JavaScript this way is so straightforward that Crockford himself has said that he wasn’t the first person to do it—he claims that somebody at Netscape was using JavaScript array literals to communicate information as early as 1996. Since the message is just JavaScript, it doesn’t require any kind of special parsing. The JavaScript interpreter can do it all. + +The first ever JSON message actually ran afoul of the JavaScript interpreter. JavaScript reserves an enormous number of words—there are 64 reserved words as of ECMAScript 6—and Crockford and Morningstar had unwittingly used one in their message. They had used `do` as a key, but `do` is reserved. Since JavaScript has so many reserved words, Crockford decided that, rather than avoid using all those reserved words, he would just mandate that all JSON keys be quoted. A quoted key would be treated as a string by the JavaScript interpreter, meaning that reserved words could be used safely. This is why JSON keys are quoted to this day. + +Crockford and Morningstar realized they had something that could be used in all sorts of applications. They wanted to name their format “JSML”, for JavaScript Markup Language, but found that the acronym was already being used for something called Java Speech Markup Language. So they decided to go with “JavaScript Object Notation”, or JSON. They began pitching it to clients but soon found that clients were unwilling to take a chance on an unknown technology that lacked an official specification. So Crockford decided he would write one. + +In 2002, Crockford bought the domain [JSON.org][2] and put up the JSON grammar and an example implementation of a parser. The website is still up, though it now includes a prominent link to the JSON ECMA standard ratified in 2013. After putting up the website, Crockford did little more to promote JSON, but soon found that lots of people were submitting JSON parser implementations in all sorts of different programming languages. JSON’s lineage clearly tied it to JavaScript, but it became apparent that JSON was well-suited to data interchange between arbitrary pairs of languages. + +### Doing AJAX Wrong + +JSON got a big boost in 2005. That year, a web designer and developer named Jesse James Garrett coined the term “AJAX” in a blog post. He was careful to stress that AJAX wasn’t any one new technology, but rather “several technologies, each flourishing in its own right, coming together in powerful new ways.” AJAX was the name that Garrett was giving to a new approach to web application development that he had noticed gaining favor. His blog post went on to describe how developers could leverage JavaScript and XMLHttpRequest to build new kinds of applications that were more responsive and stateful than the typical web page. He pointed to Gmail and Flickr as examples of websites already relying on AJAX techniques. + +The “X” in “AJAX” stood for XML, of course. But in a follow-up Q&A post, Garrett pointed to JSON as an entirely acceptable alternative to XML. He wrote that “XML is the most fully-developed means of getting data in and out of an AJAX client, but there’s no reason you couldn’t accomplish the same effects using a technology like JavaScript Object Notation or any similar means of structuring data.” + +Developers indeed found that they could easily use JSON to build AJAX applications and many came to prefer it to XML. And so, ironically, the interest in AJAX led to an explosion in JSON’s popularity. 
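+(A quick aside, not from the original post: the ease of moving JSON between arbitrary pairs of languages is simple to see for yourself. The snippet below assumes a shell with `python3` and the `jq` utility installed—here Python produces a JSON message and jq, a completely unrelated tool, consumes it.)
+
+```
+$ python3 -c 'import json; print(json.dumps({"to": "session", "text": "Hello world!"}))' | jq -r '.text'
+Hello world!
+```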
It was around this time that JSON drew the attention of the blogosphere. + +In 2006, Dave Winer, a prolific blogger and the engineer behind a number of XML-based technologies such as RSS and XML-RPC, complained that JSON was reinventing XML for no good reason. Though one might think that a contest between data interchange formats would be unlikely to engender death threats, Winer wrote: + +> No doubt I can write a routine to parse [JSON], but look at how deep they went to re-invent, XML itself wasn’t good enough for them, for some reason (I’d love to hear the reason). Who did this travesty? Let’s find a tree and string them up. Now. + +It’s easy to understand Winer’s frustration. XML has never been widely loved. Even Winer has said that he does not love XML. But XML was designed to be a system that could be used by everyone for almost anything imaginable. To that end, XML is actually a meta-language that allows you to define domain-specific languages for individual applications—RSS, the web feed technology, and SOAP (Simple Object Access Protocol) are examples. Winer felt that it was important to work toward consensus because of all the benefits a common interchange format could bring. He felt that XML’s flexibility should be able to accommodate everybody’s needs. And yet here was JSON, a format offering no benefits over XML except those enabled by throwing out the cruft that made XML so flexible. + +Crockford saw Winer’s blog post and left a comment on it. In response to the charge that JSON was reinventing XML, Crockford wrote, “The good thing about reinventing the wheel is that you can get a round one.” + +### JSON vs XML + +By 2014, JSON had been officially specified by both an ECMA standard and an RFC. It had its own MIME type. JSON had made it to the big leagues. + +Why did JSON become so much more popular than XML? + +On [JSON.org][2], Crockford summarizes some of JSON’s advantages over XML. He writes that JSON is easier for both humans and machines to understand, since its syntax is minimal and its structure is predictable. Other bloggers have focused on XML’s verbosity and “the angle bracket tax.” Each opening tag in XML must be matched with a closing tag, meaning that an XML document contains a lot of redundant information. This can make an XML document much larger than an equivalent JSON document when uncompressed, but, perhaps more importantly, it also makes an XML document harder to read. + +Crockford has also claimed that another enormous advantage for JSON is that JSON was designed as a data interchange format. It was meant to carry structured information between programs from the very beginning. XML, though it has been used for the same purpose, was originally designed as a document markup language. It evolved from SGML (Standard Generalized Markup Language), which in turn evolved from a markup language called Scribe, intended as a word processing system similar to LaTeX. In XML, a tag can contain what is called “mixed content,” or text with inline tags surrounding words or phrases. This recalls the image of an editor marking up a manuscript with a red or blue pen, which is arguably the central metaphor of a markup language. JSON, on the other hand, does not support a clear analogue to mixed content, but that means that its structure can be simpler. A document is best modeled as a tree, but by throwing out the document idea Crockford could limit JSON to dictionaries and arrays, the basic and familiar elements all programmers use to build their programs. 
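+(To make the “angle bracket tax” concrete, here is the same toy record in both formats—an illustrative example, not one drawn from the article:)
+
+```
+<customer>
+  <name>Ada Lovelace</name>
+  <city>London</city>
+</customer>
+
+{ "name": "Ada Lovelace", "city": "London" }
+```
+
+(Every element name in the XML version is written twice, once in the opening tag and once in the closing tag—exactly the redundancy the bloggers above were complaining about.)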
+ +Finally, my own hunch is that people disliked XML because it was confusing, and it was confusing because it seemed to come in so many different flavors. At first blush, it’s not obvious where the line is between XML proper and its sub-languages like RSS, ATOM, SOAP, or SVG. The first lines of a typical XML document establish the XML version and then the particular sub-language the XML document should conform to. That is a lot of variation to account for already, especially when compared to JSON, which is so straightforward that no new version of the JSON specification is ever expected to be written. The designers of XML, in their attempt to make XML the one data interchange format to rule them all, fell victim to that classic programmer’s pitfall: over-engineering. XML was so generalized that it was hard to use for something simple. + +In 2000, a campaign was launched to get HTML to conform to the XML standard. A specification was published for XML-compliant HTML, thereafter known as XHTML. Some browser vendors immediately started supporting the new standard, but it quickly became obvious that the vast HTML-producing public were unwilling to revise their habits. The new standard called for stricter validation of XHTML than had been the norm for HTML, but too many websites depended on HTML’s forgiving rules. By 2009, an attempt to write a second version of the XHTML standard was aborted when it became clear that the future of HTML was going to be HTML5, a standard that did not insist on XML compliance. + +If the XHTML effort had succeeded, then maybe XML would have become the common data format that its designers hoped it would be. Imagine a world in which HTML documents and API responses had the exact same structure. In such a world, JSON might not have become as ubiquitous as it is today. But I read the failure of XHTML as a kind of moral defeat for the XML camp. If XML wasn’t the best tool for HTML, then maybe there were better tools out there for other applications also. In that world, our world, it is easy to see how a format as simple and narrowly tailored as JSON could find great success. + +If you enjoyed this post, more like it come out every two weeks! Follow [@TwoBitHistory][3] on Twitter or subscribe to the [RSS feed][4] to make sure you know when a new post is out. + +-------------------------------------------------------------------------------- + +via: https://twobithistory.org/2017/09/21/the-rise-and-rise-of-json.html + +作者:[Two-Bit History][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/译者ID) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://twobithistory.org +[b]: https://github.com/lujun9972 +[1]: https://twobithistory.org/images/json.svg +[2]: http://JSON.org +[3]: https://twitter.com/TwoBitHistory +[4]: https://twobithistory.org/feed.xml diff --git a/sources/talk/20171007 The Most Important Database You-ve Never Heard of.md b/sources/talk/20171007 The Most Important Database You-ve Never Heard of.md new file mode 100644 index 0000000000..f429aba373 --- /dev/null +++ b/sources/talk/20171007 The Most Important Database You-ve Never Heard of.md @@ -0,0 +1,50 @@ +The Most Important Database You've Never Heard of +====== +In 1962, JFK challenged Americans to send a man to the moon by the end of the decade, inspiring a heroic engineering effort that culminated in Neil Armstrong’s first steps on the lunar surface. 
Many of the fruits of this engineering effort were highly visible and sexy—there were new spacecraft, new spacesuits, and moon buggies. But the Apollo Program was so staggeringly complex that new technologies had to be invented even to do the mundane things. One of these technologies was IBM’s Information Management System (IMS). + +IMS is a database management system. NASA needed one in order to keep track of all the parts that went into building a Saturn V rocket, which—because there were two million of them—was expected to be a challenge. Databases were a new idea in the 1960s and there weren’t any already available for NASA to use, so, in 1965, NASA asked IBM to work with North American Aviation and Caterpillar Tractor to create one. By 1968, IBM had installed a working version of IMS at NASA, though at the time it was called ICS/DL/I for “Informational Control System and Data Language/Interface.” (IBM seems to have gone through a brief, unfortunate infatuation with the slash; see [PL/I][1].) Two years later, IBM rebranded ICS/DL/I as “IMS” and began selling it to other customers. It was one of the first commercially available database management systems. + +The incredible thing about IMS is that it is still in use today. And not just on a small scale: Banks, insurance companies, hospitals, and government agencies still use IMS for all sorts of critical tasks. Over 95% of Fortune 1000 companies use IMS in some capacity, as do all of the top five US banks. Whenever you withdraw cash from an ATM, the odds are exceedingly good that you are interacting with IMS at some point in the course of your transaction. In a world where the relational database is an old workhorse increasingly in competition with trendy new NoSQL databases, IMS is a freaking dinosaur. It is a relic from an era before the relational database was even invented, which didn’t happen until 1970. And yet it seems to be the database system in charge of all the important stuff. + +I think this makes IMS pretty interesting. Depending on how you feel about relational databases, it either offers insight into how the relational model improved on its predecessors or else exemplifies an alternative model better suited to certain problems. + +IMS works according to a hierarchical model, meaning that, instead of thinking about data as tables that can be brought together using JOIN operations, IMS thinks about data as trees. Each kind of record you store can have other kinds of records as children; these child record types represent additional information that you might be interested in given a record of the parent type. + +To take an example, say that you want to store information about bank customers. You might have one type of record to represent customers and another type of record to represent accounts. Like in a relational database, where each table has columns, these records will have different fields; we might want to have a first name field, a last name field, and a city field for each customer. We must then decide whether we are likely to first lookup a customer and then information about that customer’s account, or whether we are likely to first lookup an account and then information about that account’s owner. Assuming we decide that we will access customers first, then we will make our account record type a child of our customer record type. 
Diagrammed, our database model would look something like this: + +![][2] + +And an actual database might look like: + +![][3] + +By modeling our data this way, we are hewing close to the reality of how our data is stored. Each parent record includes pointers to its children, meaning that moving down our tree from the root node is efficient. (Actually, each parent basically stores just one pointer to the first of its children. The children in turn contain pointers to their siblings. This ensures that the size of a record does not vary with the number of children it has.) This efficiency can make data accesses very fast, provided that we are accessing our data in ways that we anticipated when we first structured our database. According to IBM, an IMS instance can process over 100,000 transactions a second, which is probably a large part of why IMS is still used, particularly at banks. But the downside is that we have lost a lot of flexibility. If we want to access our data in ways we did not anticipate, we will have a hard time. + +To illustrate this, consider what might happen if we decide that we would like to access accounts before customers. Perhaps customers are calling in to update their addresses, and we would like them to uniquely identify themselves using their account numbers. So we want to use an account number to find an account, and then from there find the account’s owner. But since all accesses start at the root of our tree, there’s no way for us to get to an account efficiently without first deciding on a customer. To fix this problem, we could introduce a second tree or hierarchy starting with account records; these account records would then have customer records as children. This would let us access accounts and then customers efficiently. But it would involve duplicating information that we already have stored in our database—we would have two trees storing the same information in different orders. Another option would be to establish an index of accounts that could point us to the right account record given an account number. That would work too, but it would entail extra work during insert and update operations in the future. + +It was precisely this inflexibility and the problem of duplicated information that pushed E. F. Codd to propose the relational model. In his 1970 paper, A Relational Model of Data for Large Shared Data Banks, he states at the outset that he intends to present a model for data storage that can protect users from having to know anything about how their data is stored. Looked at one way, the hierarchical model is entirely an artifact of how the designers of IMS chose to store data. It is a bottom-up model, the implication of a physical reality. The relational model, on the other hand, is an abstract model based on relational algebra, and is top-down in that the data storage scheme can be anything provided it accommodates the model. The relational model’s great advantage is that, just because you’ve made decisions that have caused the database to store your data in a particular way, you won’t find yourself effectively unable to make certain queries. + +All that said, the relational model is an abstraction, and we all know abstractions aren’t free. Banks and large institutions have stuck with IMS partly because of the performance benefits, though it’s hard to say if those benefits would be enough to keep them from switching to a modern database if they weren’t also trying to avoid rewriting mission-critical legacy code. 
However, today’s popular NoSQL databases demonstrate that there are people willing to drop the conveniences of the relational model in return for better performance. Something like MongoDB, which encourages its users to store data in a denormalized form, isn’t all that different from IMS. If you choose to store some entity inside of another JSON record, then in effect you have created something like the IMS hierarchy, and you have constrained your ability to query for that data in the future. But perhaps that’s a tradeoff you’re willing to make. So, even if IMS hadn’t predated E. F. Codd’s relational model by several years, there are still reasons why IMS’ creators might not have adopted the relational model wholesale.
+
+Unfortunately, IMS isn’t something that you can download and take for a spin on your own computer. First of all, IMS is not free, so you would have to buy it from IBM. But the bigger problem is that IMS only runs on IBM mainframes like the IBM z13. That’s a shame, because it would be a joy to play around with IMS and get a sense for exactly how it differs from something like MySQL. But even without that opportunity, it’s interesting to think about software systems that work in ways we don’t expect or aren’t used to. And it’s especially interesting when those systems, alien as they are, turn out to undergird your local hospital, the entire financial sector, and even the federal government.
+
+If you enjoyed this post, more like it come out every two weeks! Follow [@TwoBitHistory][4] on Twitter or subscribe to the [RSS feed][5] to make sure you know when a new post is out.
+
+--------------------------------------------------------------------------------
+
+via: https://twobithistory.org/2017/10/07/the-most-important-database.html
+
+作者:[Two-Bit History][a]
+选题:[lujun9972][b]
+译者:[译者ID](https://github.com/译者ID)
+校对:[校对者ID](https://github.com/校对者ID)
+
+本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
+
+[a]: https://twobithistory.org
+[b]: https://github.com/lujun9972
+[1]: https://en.wikipedia.org/wiki/PL/I
+[2]: https://twobithistory.org/images/hierarchical-model.png
+[3]: https://twobithistory.org/images/hierarchical-db.png
+[4]: https://twitter.com/TwoBitHistory
+[5]: https://twobithistory.org/feed.xml
diff --git a/sources/talk/20171119 The Ruby Story.md b/sources/talk/20171119 The Ruby Story.md
new file mode 100644
index 0000000000..90d5f41790
--- /dev/null
+++ b/sources/talk/20171119 The Ruby Story.md
@@ -0,0 +1,84 @@
+The Ruby Story
+======
+Ruby has always been one of my favorite languages, though I’ve sometimes found it hard to express why that is. The best I’ve been able to do is this musical analogy: Whereas Python feels to me like punk rock—it’s simple, predictable, but rigid—Ruby feels like jazz. Ruby gives programmers a radical freedom to express themselves, though that comes at the cost of added complexity and can lead to programmers writing programs that don’t make immediate sense to other people.
+
+I’ve always been aware that freedom of expression is a core value of the Ruby community. But what I didn’t appreciate is how deeply important it was to the development and popularization of Ruby in the first place. One might create a programming language in pursuit of better performance, or perhaps timesaving abstractions—the Ruby story is interesting because instead the goal was, from the very beginning, nothing more or less than the happiness of the programmer.
+### Yukihiro Matsumoto
+
+Yukihiro Matsumoto, also known as “Matz,” graduated from the University of Tsukuba in 1990. Tsukuba is a small town just northeast of Tokyo, known as a center for scientific research and technological development. The University of Tsukuba is particularly well-regarded for its STEM programs. Matsumoto studied Information Science, with a focus on programming languages. For a time he worked in a programming language lab run by Ikuo Nakata.
+
+Matsumoto started working on Ruby in 1993, only a few years after graduating. He began working on Ruby because he was looking for a scripting language with features that no existing scripting language could provide. He was using Perl at the time, but felt that it was too much of a “toy language.” Python also fell short; in his own words:
+
+> I knew Python then. But I didn’t like it, because I didn’t think it was a true object-oriented language—OO features appeared to be an add-on to the language. As a language maniac and OO fan for 15 years, I really wanted a genuine object-oriented, easy-to-use scripting language. I looked for one, but couldn’t find one.
+
+So one way of understanding Matsumoto’s motivations in creating Ruby is that he was trying to create a better, object-oriented version of Perl.
+
+But at other times, Matsumoto has said that his primary motivation in creating Ruby was simply to make himself and others happier. Toward the end of a Google tech talk that Matsumoto gave in 2008, he showed the following slide:
+
+![][1]
+
+He told his audience,
+
+> I hope to see Ruby help every programmer in the world to be productive, and to enjoy programming, and to be happy. That is the primary purpose of the Ruby language.
+
+Matsumoto goes on to joke that he created Ruby for selfish reasons, because he was so underwhelmed by other languages that he just wanted to create something that would make him happy.
+
+The slide epitomizes Matsumoto’s humble style. Matsumoto, it turns out, is a practicing Mormon, and I’ve wondered whether his religious commitments have any bearing on his legendary kindness. In any case, this kindness is so well known that the Ruby community has a principle known as MINASWAN, or “Matz Is Nice And So We Are Nice.” The slide must have struck the audience at Google as an unusual one—I imagine that any random slide drawn from a Google tech talk is dense with code samples and metrics showing how one engineering solution is faster or more efficient than another. Few, I suspect, come close to stating nobler goals more simply.
+
+Ruby was influenced primarily by Perl. Perl was created by Larry Wall in the late 1980s as a means of processing and transforming text-based reports. It became well-known for its text processing and regular expression capabilities. A Perl program contains many syntactic elements that would be familiar to a Ruby programmer—there are `$` signs, `@` signs, and even `elsif`s, which I’d always thought were one of Ruby’s less felicitous idiosyncrasies. On a deeper level, Ruby borrows much of Perl’s regular expression handling and standard library.
+
+But Perl was by no means the only influence on Ruby. Prior to beginning work on Ruby, Matsumoto worked on a mail client written entirely in Emacs Lisp. The experience taught him a lot about the inner workings of Emacs and the Lisp language, which Matsumoto has said influenced the underlying object model of Ruby. On top of that he added a Smalltalk-style message passing system which forms the basis for any behavior relying on Ruby’s `#method_missing`. Matsumoto has also claimed Ada and Eiffel as influences on Ruby.
On top of that he added a Smalltalk-style message passing system which forms the basis for any behavior relying on Ruby’s `#method_missing`. Matsumoto has also claimed Ada and Eiffel as influences on Ruby.
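+
+To make that message passing concrete, here is a minimal sketch in plain Ruby (an illustration of the mechanism, not anything from Matsumoto). Every method call is a message sent to an object, and a message with no matching method is routed to `#method_missing`, which a class can override:
+
+```
+# Any message this object does not understand lands in #method_missing,
+# which we override here to respond dynamically instead of raising NoMethodError.
+class NullLogger
+  def method_missing(name, *args)
+    puts "ignoring message :#{name} with #{args.length} argument(s)"
+  end
+
+  def respond_to_missing?(name, include_private = false)
+    true
+  end
+end
+
+logger = NullLogger.new
+logger.warn("disk almost full")  # ignoring message :warn with 1 argument(s)
+logger.debug("x =", 42)          # ignoring message :debug with 2 argument(s)
+```
+
+Interception of this kind is the hook on which much of Ruby’s later metaprogramming culture hangs.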
+
+When it came time to decide on a name for Ruby, Matsumoto and a colleague, Keiju Ishitsuka, considered several alternatives. They were looking for something that suggested Ruby’s relationship to Perl and also to shell scripting. In an [instant message exchange][2] that is well worth reading, Ishitsuka and Matsumoto probably spend too much time thinking about the relationship between shells, clams, oysters, and pearls and get close to calling the Ruby language “Coral” or “Bisque” instead. Thankfully, they decided to go with “Ruby”, the idea being that it was, like “pearl”, the name of a valuable jewel. It also turns out that the birthstone for June is a pearl while the birthstone for July is a ruby, meaning that the name “Ruby” is another tongue-in-cheek “incremental improvement” name like C++ or C#.
+
+### Ruby Goes West
+
+Ruby grew popular in Japan very quickly. Soon after its initial release in 1995, Matz was hired by a Japanese software consulting group called Netlab (also known as Network Applied Communication Laboratory) to work on Ruby full-time. By 2000, only five years after it was initially released, Ruby was more popular in Japan than Python. But it was only just beginning to make its way to English-speaking countries. There had been a Japanese-language mailing list for Ruby discussion since almost the very beginning of Ruby’s existence, but the English-language mailing list wasn’t started until 1998. Initially, the English-language mailing list was used by Japanese Rubyists writing in English, but this gradually changed as awareness of Ruby grew.
+
+In 2000, Dave Thomas published Programming Ruby, the first English-language book to cover Ruby. The book became known as the “pickaxe” book for the pickaxe it featured on its cover. It introduced Ruby to many programmers in the West for the first time. As it had in Japan, Ruby spread quickly, and by 2002 the English-language Ruby mailing list had more traffic than the original Japanese-language mailing list.
+
+By 2005, Ruby had become more popular, but it was still not a mainstream programming language. That changed with the release of Ruby on Rails. Ruby on Rails was the “killer app” for Ruby, and it did more than any other project to popularize Ruby. After the release of Ruby on Rails, interest in Ruby shot up across the board, as measured by the TIOBE language index:
+
+![][3]
+
+It’s sometimes joked that the only programs anybody writes in Ruby are Ruby-on-Rails web applications. That makes it sound as if Ruby on Rails completely took over the Ruby community, which is only partly true. While Ruby has certainly come to be known as that language people write Rails apps in, Rails owes as much to Ruby as Ruby owes to Rails.
+
+The Ruby philosophy heavily informed the design and implementation of Rails. David Heinemeier Hansson, who created Rails, often talks about how his first contact with Ruby was an almost religious experience. He has said that the encounter was so transformative that it “imbued him with a calling to do missionary work in service of Matz’s creation.” For Hansson, Ruby’s no-shackles approach was a politically courageous rebellion against the top-down impositions made by languages like Python and Java. He appreciated that the language trusted him and empowered him to make his own judgements about how best to express his programs.
+
+Like Matsumoto, Hansson claims that he created Rails out of a frustration with the status quo and a desire to make things better for himself. He, like Matsumoto, prioritized programmer happiness above all else, evaluating additions to Rails by what he calls “The Principle of The Bigger Smile.” Whatever made Hansson smile more was what made it into the Rails codebase. As a result, Rails would come to include unorthodox features like the “Inflector” class (which tries to map singular class names to plural database table names automatically) and Rails’ `Time` extensions (allowing programmers to write cute expressions like `2.days.ago`). To some, these features were truly weird, but the success of Rails is testament to the number of people who found it made their lives much easier.
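+
+Those `Time` extensions rely on nothing more exotic than Ruby’s willingness to let programmers reopen core classes. Here is a simplified sketch of the trick (not ActiveSupport’s actual implementation, which returns proper duration objects and handles calendar arithmetic) showing how an expression like `2.days.ago` can be made to work in plain Ruby:
+
+```
+# Reopen the core Integer class and add two methods. This is ordinary,
+# if famously double-edged, Ruby: core classes are always open for extension.
+class Integer
+  def days
+    self * 24 * 60 * 60  # treat the receiver as a count of days, in seconds
+  end
+
+  def ago
+    Time.now - self      # treat the receiver as a number of seconds in the past
+  end
+end
+
+puts 2.days      # => 172800
+puts 2.days.ago  # => the Time two days before now
+```
+
+The real ActiveSupport version is more careful about calendars and time zones, but the enabling freedom is the same.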
+
+And so, while it might seem that Rails was an incidental application of Ruby that happened to become extremely popular, Rails in fact embodies many of Ruby’s core principles. Furthermore, it’s hard to see how Rails could have been built in any other language, given its dependence on Ruby’s macro-like class method calls to implement things like model associations. Some people might take the fact that so much of Ruby development revolves around Ruby on Rails as a sign of an unhealthy ecosystem, but there are good reasons that Ruby and Ruby on Rails are so intertwined.
+
+### The Future of Ruby
+
+People seem to have an inordinate amount of interest in whether or not Ruby (and Ruby on Rails) are dying. Since as early as 2011, it seems that Stack Overflow and Quora have been full of programmers asking whether or not they should bother learning Ruby if it will no longer be around in the next few years. These concerns are not unjustified; according to the TIOBE index and to Stack Overflow trends, Ruby and Ruby on Rails have been shrinking in popularity. Though Ruby on Rails was once the hot new thing, it has since been eclipsed by hotter and newer frameworks.
+
+One theory for why this has happened is that programmers are abandoning dynamically typed languages for statically typed ones. Analysts at the TIOBE index figure that a rise in quality requirements has made runtime exceptions increasingly unacceptable. They cite TypeScript as an example of this trend—a whole new version of JavaScript was created just to ensure that client-side code could be written with the benefit of compile-time safety guarantees.
+
+A more likely answer, I think, is just that Ruby on Rails now has many more competitors than it once did. When Rails was first introduced in 2005, there weren’t that many ways to create web applications—the main alternative was Java. Today, you can create web applications using great frameworks built for Go, JavaScript, or Python, to name only the most popular options. The web world also seems to be moving toward a more distributed architecture for applications, meaning that, rather than having one codebase responsible for everything from database access to view rendering, responsibilities are split between different components that focus on doing one thing well. Rails feels overbroad and bloated for something as focused as a JSON API that talks to a JavaScript frontend.
+
+All that said, there are reasons to be optimistic about Ruby’s future. Both Rails and Ruby continue to be actively developed. Matsumoto and others are working hard on Ruby’s third major release, which they aim to make three times faster than the existing version of Ruby, possibly alleviating the performance concerns that have always dogged Ruby. And even if the world of web frameworks has become more diverse since 2005, that doesn’t mean that there won’t always be room for Ruby on Rails. It is now a mature tool with an enormous amount of built-in power that will always be a good choice for certain kinds of applications.
+
+But even if Ruby and Rails go the way of the dinosaurs, one thing that seems certain to survive is the Ruby ethos of programmer happiness. Ruby has had a profound influence on the design of many new programming languages, which have adopted many of its best ideas. Other new languages have tried to be “more modern” interpretations of Ruby: Elixir, for example, is a version of Ruby that emphasizes the functional programming paradigm, while Crystal, which is still in development, aims to be a statically typed version of Ruby. Many programmers around the world have fallen in love with Ruby and its syntax, so we can count on its influence persisting for a long while to come.
+
+If you enjoyed this post, more posts like it come out every two weeks! Follow [@TwoBitHistory][4] on Twitter or subscribe to the [RSS feed][5] to make sure you know when a new post is out.
+
+--------------------------------------------------------------------------------
+
+via: https://twobithistory.org/2017/11/19/the-ruby-story.html
+
+作者:[Two-Bit History][a]
+选题:[lujun9972][b]
+译者:[译者ID](https://github.com/译者ID)
+校对:[校对者ID](https://github.com/校对者ID)
+
+本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
+
+[a]: https://twobithistory.org
+[b]: https://github.com/lujun9972
+[1]: https://twobithistory.org/images/matz.png
+[2]: http://blade.nagaokaut.ac.jp/cgi-bin/scat.rb/ruby/ruby-talk/88819
+[3]: https://twobithistory.org/images/tiobe_ruby.png
+[4]: https://twitter.com/TwoBitHistory
+[5]: https://twobithistory.org/feed.xml
diff --git a/sources/talk/20171229 Important Papers- Codd and the Relational Model.md b/sources/talk/20171229 Important Papers- Codd and the Relational Model.md
new file mode 100644
index 0000000000..8fc8b1f701
--- /dev/null
+++ b/sources/talk/20171229 Important Papers- Codd and the Relational Model.md
@@ -0,0 +1,44 @@
+Important Papers: Codd and the Relational Model
+======
+It’s hard to believe today, but the relational database was once the cool new kid on the block. In 2017, the relational model competes with all sorts of cutting-edge NoSQL technologies that make relational database systems seem old-fashioned and boring. Yet, 50 years ago, none of the dominant database systems were relational. Nobody had thought to structure their data that way. When the relational model did come along, it was a radical new idea that revolutionized the database world and spawned a multi-billion dollar industry.
+
+The relational model was introduced in 1970. Edgar F. Codd, a researcher at IBM, published a [paper][1] called “A Relational Model of Data for Large Shared Data Banks.” The paper was a rewrite of a paper he had circulated internally at IBM a year earlier. The paper is unassuming; Codd does not announce in his abstract that he has discovered a brilliant new approach to storing data. He only claims to have employed a novel tool (the mathematical notion of a “relation”) to address some of the inadequacies of the prevailing database models.
+ +In 1970, there were two schools of thought about how to structure a database: the hierarchical model and the network model. The hierarchical model was used by IBM’s Information Management System (IMS), the dominant database system at the time. The network model had been specified by a standards committee called CODASYL (which also—random tidbit—specified COBOL) and implemented by several other database system vendors. The two models were not really that different; both could be called “navigational” models. They persisted tree or graph data structures to disk using pointers to preserve the links between the data. Retrieving a record stored toward the bottom of the tree would involve first navigating through all of its ancestor records. These databases were fast (IMS is still used by many financial institutions partly for this reason, see [this excellent blog post][2]) but inflexible. Woe unto those database administrators who suddenly found themselves needing to query records from the bottom of the tree without having an obvious place to start at the top. + +Codd saw this inflexibility as a symptom of a larger problem. Programs using a hierarchical or network database had to know about how the stored data was structured. Programs had to know this because they were responsible for navigating down this structure to find the information they needed. This was so true that when Charles Bachman, a major pioneer of the network model, received a Turing Award for his work in 1973, he gave a speech titled “[The Programmer as Navigator][3].” Of course, if programs were saddled with this responsibility, then they would immediately break if the structure of the database ever changed. In the introduction to his 1970 paper, Codd motivates the search for a better model by arguing that we need “data independence,” which he defines as “the independence of application programs and terminal activities from growth in data types and changes in data representation.” The relational model, he argues, “appears to be superior in several respects to the graph or network model presently in vogue,” partly because, among other benefits, the relational model “provides a means of describing data with its natural structure only.” By this he meant that programs could safely ignore any artificial structures (like trees) imposed upon the data for storage and retrieval purposes only. + +To further illustrate the problem with the navigational models, Codd devotes the first section of his paper to an example data set involving machine parts and assembly projects. This dataset, he says, could be represented in existing systems in at least five different ways. Any program that is developed assuming one of five structures will fail when run against at least three of the other structures. The program could instead try to figure out ahead of time which of the structures it might be dealing with, but it would be difficult to do so in this specific case and practically impossible in the general case. So, as long as the program needs to know about how the data is structured, we cannot switch to an alternative structure without breaking the program. This is a real bummer because (and this is from the abstract) “changes in data representation will often be needed as a result of changes in query, update, and report traffic and natural growth in the types of stored information.” + +Codd then introduces his relational model. 
This model would be refined and expanded in subsequent papers: In 1971, Codd wrote about ALPHA, a SQL-like query language he created; in another 1971 paper, he introduced the first three normal forms we know and love today; and in 1972, he further developed relational algebra and relational calculus, the mathematically rigorous underpinnings of the relational model. But Codd’s 1970 paper contains the kernel of the relational idea:
+
+> The term relation is used here in its accepted mathematical sense. Given sets *S1*, *S2*, …, *Sn* (not necessarily distinct), *R* is a relation on these *n* sets if it is a set of *n*-tuples each of which has its first element from *S1*, its second element from *S2*, and so on. We shall refer to *Sj* as the *j*th domain of *R*. As defined above, *R* is said to have degree *n*. Relations of degree 1 are often called unary, degree 2 binary, degree 3 ternary, and degree *n* n-ary.
+
+Today, we call a relation a table, and a domain an attribute or a column. The word “table” actually appears nowhere in the paper, though Codd’s visual representations of relations (which he calls “arrays”) do resemble tables. Codd defines several more terms, some of which we continue to use and others we have replaced. He explains primary and foreign keys, as well as what he calls the “active domain,” which is the set of all distinct values that actually appear in a given domain or column. He then spends some time distinguishing between a “simple” and a “nonsimple” domain. A simple domain contains “atomic” or “nondecomposable” values, like integers. A nonsimple domain has relations as elements. The example Codd gives here is that of an employee with a salary history. The salary history is not one salary but a collection of salaries each associated with a date. So a salary history cannot be represented by a single number or string.
+
+It’s not obvious how one could store a nonsimple domain in a multi-dimensional array, AKA a table. The temptation might be to denote the nonsimple relationship using some kind of pointer, but then we would be repeating the mistakes of the navigational models. Instead, Codd introduces normalization, which at least in the 1970 paper involves nothing more than turning nonsimple domains into simple ones. This is done by expanding the child relation so that it includes the primary key of the parent. Each tuple of the child relation references its parent using simple domains, eliminating the need for a nonsimple domain in the parent. Normalization means no pointers, sidestepping all the problems they cause in the navigational models.
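+
+To make the procedure concrete, here is a small sketch of Codd’s salary history example, with invented data and Ruby hashes standing in for relations:
+
+```
+# Before normalization: the employee relation has a nonsimple domain, because
+# each tuple holds a whole child relation of (date, salary) pairs.
+employees_unnormalized = [
+  { man_number: 1, name: "Smith",
+    salary_history: [["1968-01-01", 9000], ["1969-01-01", 9500]] }
+]
+
+# After normalization: the child relation carries the parent's primary key
+# (man_number), so every domain is simple and no pointers are needed.
+employees = [
+  { man_number: 1, name: "Smith" }
+]
+salary_history = [
+  { man_number: 1, date: "1968-01-01", salary: 9000 },
+  { man_number: 1, date: "1969-01-01", salary: 9500 }
+]
+
+# The original nesting can always be recovered with a join-like lookup:
+smith = employees.first
+smith_history = salary_history.select { |row| row[:man_number] == smith[:man_number] }
+puts smith_history.length  # => 2
+```
+
+Modulo the anachronistic notation, this is exactly the transformation Codd describes: the child relation grows a copy of the parent’s key, and the nesting disappears.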
+
+At this point, anyone reading Codd’s paper would have several questions, such as “Okay, how would I actually query such a system?” Codd mentions the possibility of creating a universal sublanguage for querying relational databases from other programs, but declines to define such a language in this particular paper. He does explain, in mathematical terms, many of the fundamental operations such a language would have to support, like joins, “projection” (`SELECT` in SQL), and “restriction” (`WHERE`). The amazing thing about Codd’s 1970 paper is that, really, all the ideas are there—we’ve been writing `SELECT` statements and joins for almost half a century now.
+
+Codd wraps up the paper by discussing ways in which a normalized relational database, on top of its other benefits, can reduce redundancy and improve consistency in data storage. Altogether, the paper is only 11 pages long and not that difficult of a read. I encourage you to look through it yourself. It would be another ten years before Codd’s ideas were properly implemented in a functioning system, but, when they finally were, those systems were so obviously better than previous systems that they took the world by storm.
+
+If you enjoyed this post, more posts like it come out every two weeks! Follow [@TwoBitHistory][4] on Twitter or subscribe to the [RSS feed][5] to make sure you know when a new post is out.
+
+--------------------------------------------------------------------------------
+
+via: https://twobithistory.org/2017/12/29/codd-relational-model.html
+
+作者:[Two-Bit History][a]
+选题:[lujun9972][b]
+译者:[译者ID](https://github.com/译者ID)
+校对:[校对者ID](https://github.com/校对者ID)
+
+本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
+
+[a]: https://twobithistory.org
+[b]: https://github.com/lujun9972
+[1]: https://cs.uwaterloo.ca/~david/cs848s14/codd-relational.pdf
+[2]: https://twobithistory.org/2017/10/07/the-most-important-database.html
+[3]: https://pdfs.semanticscholar.org/f371/d196bf0e7b43df6dcbbc44de461925a21709.pdf
+[4]: https://twitter.com/TwoBitHistory
+[5]: https://twobithistory.org/feed.xml
diff --git a/sources/talk/20180209 How writing can change your career for the better, even if you don-t identify as a writer.md b/sources/talk/20180209 How writing can change your career for the better, even if you don-t identify as a writer.md
index 98d57bcca3..55618326c6 100644
--- a/sources/talk/20180209 How writing can change your career for the better, even if you don-t identify as a writer.md
+++ b/sources/talk/20180209 How writing can change your career for the better, even if you don-t identify as a writer.md
@@ -1,4 +1,4 @@
-How writing can change your career for the better, even if you don't identify as a writer Translating by FelixYFZ
+How writing can change your career for the better, even if you don't identify as a writer
 ======
 Have you read Marie Kondo's book [The Life-Changing Magic of Tidying Up][1]? Or did you, like me, buy it and read a little bit and then add it to the pile of clutter next to your bed?
diff --git a/sources/talk/20180527 Whatever Happened to the Semantic Web.md b/sources/talk/20180527 Whatever Happened to the Semantic Web.md
new file mode 100644
index 0000000000..22d48c150a
--- /dev/null
+++ b/sources/talk/20180527 Whatever Happened to the Semantic Web.md
@@ -0,0 +1,106 @@
+Whatever Happened to the Semantic Web?
+======
+In 2001, Tim Berners-Lee, inventor of the World Wide Web, published an article in Scientific American. Berners-Lee, along with two other researchers, Ora Lassila and James Hendler, wanted to give the world a preview of the revolutionary new changes they saw coming to the web. Since its introduction only a decade before, the web had fast become the world’s best means for sharing documents with other people. Now, the authors promised, the web would evolve to encompass not just documents but every kind of data one could imagine.
+
+They called this new web the Semantic Web. The great promise of the Semantic Web was that it would be readable not just by humans but also by machines. Pages on the web would be meaningful to software programs—they would have semantics—allowing programs to interact with the web the same way that people do. Programs could exchange data across the Semantic Web without having to be explicitly engineered to talk to each other.
According to Berners-Lee, Lassila, and Hendler, a typical day living with the myriad conveniences of the Semantic Web might look something like this:
+
+> The entertainment system was belting out the Beatles’ “We Can Work It Out” when the phone rang. When Pete answered, his phone turned the sound down by sending a message to all the other local devices that had a volume control. His sister, Lucy, was on the line from the doctor’s office: “Mom needs to see a specialist and then has to have a series of physical therapy sessions. Biweekly or something. I’m going to have my agent set up the appointments.” Pete immediately agreed to share the chauffeuring. At the doctor’s office, Lucy instructed her Semantic Web agent through her handheld Web browser. The agent promptly retrieved the information about Mom’s prescribed treatment within a 20-mile radius of her home and with a rating of excellent or very good on trusted rating services. It then began trying to find a match between available appointment times (supplied by the agents of individual providers through their Web sites) and Pete’s and Lucy’s busy schedules.
+
+The vision was that the Semantic Web would become a playground for intelligent “agents.” These agents would automate much of the work that the world had only just learned to do on the web.
+
+![][1]
+
+For a while, this vision enticed a lot of people. After new technologies such as AJAX led to the rise of what Silicon Valley called Web 2.0, Berners-Lee began referring to the Semantic Web as Web 3.0. Many thought that the Semantic Web was indeed the inevitable next step. A New York Times article published in 2006 quotes a speech Berners-Lee gave at a conference in which he said that the extant web would, twenty years in the future, be seen as only the “embryonic” form of something far greater. A venture capitalist, also quoted in the article, claimed that the Semantic Web would be “profound,” and ultimately “as obvious as the web seems obvious to us today.”
+
+Of course, the Semantic Web we were promised has yet to be delivered. In 2018, we have “agents” like Siri that can do certain tasks for us. But Siri can only do what it can because engineers at Apple have manually hooked it up to a medley of web services each capable of answering only a narrow category of questions. An important consequence is that, unless your service is large and important enough for Apple to care about, you cannot advertise it directly to Siri from your own website. Unlike the physical therapists that Berners-Lee and his co-authors imagined would be able to hang out their shingles on the web, today we are stuck with giant, centralized repositories of information. Today’s physical therapists must enter information about their practice into Google or Yelp, because those are the only services that the smartphone agents know how to use and the only ones human beings will bother to check. The key difference between our current reality and the promised Semantic future is best captured by this throwaway aside in the excerpt above: “…appointment times (supplied by the agents of individual providers through **their** Web sites)…”
+
+In fact, over the last decade, the web has not only failed to become the Semantic Web but also threatened to recede as an idea altogether. We now hardly ever talk about “the web” and instead talk about “the internet,” which as of 2016 has become such a common term that newspapers no longer capitalize it. (To be fair, they stopped capitalizing “web” too.)
Some might still protest that the web and the internet are two different things, but the distinction gets less clear all the time. The web we have today is slowly becoming a glorified app store, just the easiest way among many to download software that communicates with distant servers using closed protocols and schemas, making it functionally identical to the software ecosystem that existed before the web. How did we get here? If the effort to build a Semantic Web had succeeded, would the web have looked different today? Or have there been so many forces working against a decentralized web for so long that the Semantic Web was always going to be stillborn?
+
+### Semweb Hucksters and Their Metacrap
+
+To some more practically minded engineers, the Semantic Web was, from the outset, a utopian dream.
+
+The basic idea behind the Semantic Web was that everyone would use a new set of standards to annotate their webpages with little bits of XML. These little bits of XML would have no effect on the presentation of the webpage, but they could be read by software programs to divine meaning that otherwise would only be available to humans.
+
+The bits of XML were a way of expressing metadata about the webpage. We are all familiar with metadata in the context of a file system: When we look at a file on our computers, we can see when it was created, when it was last updated, and who originally created it. Likewise, webpages on the Semantic Web would be able to tell your browser who authored the page and perhaps even where that person went to school, or where that person is currently employed. In theory, this information would allow Semantic Web browsers to answer queries across a large collection of webpages. In their article for Scientific American, Berners-Lee and his co-authors explain that you could, for example, use the Semantic Web to look up a person you met at a conference whose name you only partially remember.
+
+Cory Doctorow, a blogger and digital rights activist, published an influential essay in 2001 that pointed out the many problems with depending on voluntarily supplied metadata. A world of “exhaustive, reliable” metadata would be wonderful, he argued, but such a world was “a pipe-dream, founded on self-delusion, nerd hubris, and hysterically inflated market opportunities.” Doctorow had found himself in a series of debates over the Semantic Web at tech conferences and wanted to catalog the serious issues that the Semantic Web enthusiasts (Doctorow calls them “semweb hucksters”) were overlooking. The essay, titled “Metacrap,” identifies seven problems, among them the obvious fact that most web users were likely to provide either no metadata at all or else lots of misleading metadata meant to draw clicks. Even if users were universally diligent and well-intentioned, in order for the metadata to be robust and reliable, users would all have to agree on a single representation for each important concept. Doctorow argued that in some cases a single representation might not be appropriate, desirable, or fair to all users.
+
+Indeed, the web had already seen people abusing the HTML `<meta>` tag (introduced at least as early as HTML 4) in an attempt to improve the visibility of their webpages in search results.
In a 2004 paper, Ben Munat, then an academic at Evergreen State College, explains how search engines once experimented with using keywords supplied via the `<meta>` tag to index results, but soon discovered that unscrupulous webpage authors were including tags unrelated to the actual content of their webpage. As a result, search engines came to ignore the `<meta>` tag in favor of using complex algorithms to analyze the actual content of a webpage. Munat concludes that a general-purpose Semantic Web is unworkable, and that the focus should be on specific domains within medicine and science.
+
+Others have also seen the Semantic Web project as tragically flawed, though they have located the flaw elsewhere. Aaron Swartz, the famous programmer and another digital rights activist, wrote in an unfinished book about the Semantic Web published after his death that Doctorow was “attacking a strawman.” Nobody expected that metadata on the web would be thoroughly accurate and reliable, but the Semantic Web, or at least a more realistically scoped version of it, remained possible. The problem, in Swartz’s view, was the “formalizing mindset of mathematics and the institutional structure of academics” that the “semantic Webheads” brought to bear on the challenge. In forums like the World Wide Web Consortium (W3C), a huge amount of effort and discussion went into creating standards before there were any applications out there to standardize. And the standards that emerged from these “Talmudic debates” were so abstract that few of them ever saw widespread adoption. The few that did, like XML, were “uniformly scourges on the planet, offenses against hardworking programmers that have pushed out sensible formats (like JSON) in favor of overly-complicated hairballs with no basis in reality.” The Semantic Web might have thrived if, like the original web, its standards were eagerly adopted by everyone. But that never happened because—as [has been discussed][2] on this blog before—the putative benefits of something like XML are not easy to sell to a programmer when the alternatives are both entirely sufficient and much easier to understand.
+
+### Building the Semantic Web
+
+If the Semantic Web was not an outright impossibility, it was always going to require the contributions of lots of clever people working in concert.
+
+The long effort to build the Semantic Web has been said to consist of four phases. The first phase, which lasted from 2001 to 2005, was the golden age of Semantic Web activity. Between 2001 and 2005, the W3C issued a slew of new standards laying out the foundational technologies of the Semantic future.
+
+The most important of these was the Resource Description Framework (RDF). The W3C issued the first version of the RDF standard in 2004, but RDF had been floating around since 1997, when a W3C working group introduced it in a draft specification. RDF was originally conceived of as a tool for modeling metadata and was partly based on earlier attempts by Ramanathan Guha, an Apple engineer, to develop a metadata system for files stored on Apple computers. The Semantic Web working groups at W3C repurposed RDF to represent arbitrary kinds of general knowledge.
+
+RDF would be the grammar in which Semantic webpages expressed information. The grammar is a simple one: Facts about the world are expressed in RDF as triplets of subject, predicate, and object. Tim Bray, who worked with Ramanathan Guha on an early version of RDF, gives the following example, describing TV shows and movies:
+
+```
+@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
+
+@prefix ex: <http://www.example.org/> .
+
+ex:vincent_donofrio ex:starred_in ex:law_and_order_ci .
+
+ex:law_and_order_ci rdf:type ex:tv_show .
+
+ex:the_thirteenth_floor ex:similar_plot_as ex:the_matrix .
+```
+
+The syntax is not important, especially since RDF can be represented in a number of formats, including XML and JSON. This example is in a format called Turtle, which expresses RDF triplets as straightforward sentences terminated by periods. The three essential sentences, which appear above after the `@prefix` preamble, state three facts: Vincent Donofrio starred in Law and Order, Law and Order is a type of TV Show, and the movie The Thirteenth Floor has a similar plot as The Matrix. (If you don’t know who Vincent Donofrio is and have never seen The Thirteenth Floor, I, too, was watching Nickelodeon and sipping Capri Suns in 1999.)
+
+Other specifications finalized and drafted during this first era of Semantic Web development describe all the ways in which RDF can be used. RDF in Attributes (RDFa) defines how RDF can be embedded in HTML so that browsers, search engines, and other programs can glean meaning from a webpage. RDF Schema and another standard called OWL allow RDF authors to demarcate the boundary between valid and invalid RDF statements in their RDF documents. RDF Schema and OWL, in other words, are tools for creating what are known as ontologies, explicit specifications of what can and cannot be said within a specific domain. An ontology might include a rule, for example, expressing that no person can be the mother of another person without also being a parent of that person. The hope was that these ontologies would be widely used not only to check the accuracy of RDF found in the wild but also to make inferences about omitted information.
+
+In 2006, Tim Berners-Lee posted a short article in which he argued that the existing work on Semantic Web standards needed to be supplemented by a concerted effort to make semantic data available on the web. Furthermore, once on the web, it was important that semantic data link to other kinds of semantic data, ensuring the rise of a data-based web as interconnected as the existing web. Berners-Lee used the term “linked data” to describe this ideal scenario. Though “linked data” was in one sense just a recapitulation of the original vision for the Semantic Web, it became a term that people could rally around and thus amounted to a rebranding of the Semantic Web project.
+
+Berners-Lee’s article launched the second phase of the Semantic Web’s development, where the focus shifted from setting standards and building toy examples to creating and popularizing large RDF datasets. Perhaps the most successful of these datasets was [DBpedia][3], a giant repository of RDF triplets extracted from Wikipedia articles. DBpedia, which made heavy use of the Semantic Web standards that had been developed in the first half of the 2000s, was a standout example of what could be accomplished using the W3C’s new formats. Today DBpedia describes 4.58 million entities and is used by organizations like the NY Times, BBC, and IBM, which employed DBpedia as a knowledge source for IBM Watson, the Jeopardy-winning artificial intelligence system.
+
+![][4]
+
+The third phase of the Semantic Web’s development involved adapting the W3C’s standards to fit the actual practices and preferences of web developers. By 2008, JSON had begun its meteoric rise to popularity.
Whereas XML came packaged with a bunch of associated technologies of indeterminate purpose (XSLT, XPath, XQuery, XLink), JSON was just JSON. It was less verbose and more readable. Manu Sporny, an entrepreneur and member of the W3C, had already started using JSON at his company and wanted to find an easy way for RDFa and JSON to work together. The result would be JSON-LD, which in essence was RDF reimagined for a world that had chosen JSON over XML. Sporny, together with his CTO, Dave Longley, issued a draft specification of JSON-LD in 2010. For the next few years, JSON-LD and an updated RDF specification would be the primary focus of Semantic Web work at the W3C. JSON-LD could be used on its own or it could be embedded within a `