diff --git a/.github/workflows/lctt-article-badge.yml b/.github/workflows/lctt-article-badge.yml new file mode 100644 index 0000000000..aef3d9613d --- /dev/null +++ b/.github/workflows/lctt-article-badge.yml @@ -0,0 +1,27 @@ +name: LCTT Article Badge + +on: + push: + branches: [master] +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + with: + fetch-depth: 0 + - name: checkout old pages branch + uses: actions/checkout@v2 + with: + repository: lctt/translateproject + path: build + ref: gh-pages + - name: remove pages .git + run: rm -rf ./build/.git + - name: run badge + run: sh ./scripts/badge.sh; + - uses: crazy-max/ghaction-github-pages@v2.2.0 + with: + build_dir: ./build + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/lctt-article-checker.yml b/.github/workflows/lctt-article-checker.yml new file mode 100644 index 0000000000..32d4354454 --- /dev/null +++ b/.github/workflows/lctt-article-checker.yml @@ -0,0 +1,18 @@ +name: LCTT Article Checker + +on: + pull_request: + branches: [master] +jobs: + build: + runs-on: ubuntu-latest + env: + PULL_REQUEST_ID: ${{ github.event.number }} + steps: + - uses: actions/checkout@v2 + with: + fetch-depth: 0 + - name: "checkout master branch & return to pull request branch" + run: CURRENT=$(echo ${{github.ref}} | sed "s|refs/|refs/remotes/|") && git checkout master && git checkout $CURRENT + - name: run check + run: sh ./scripts/check.sh; diff --git a/.github/workflows/lctt-article-status.yml b/.github/workflows/lctt-article-status.yml new file mode 100644 index 0000000000..69ac7d5be8 --- /dev/null +++ b/.github/workflows/lctt-article-status.yml @@ -0,0 +1,28 @@ +name: LCTT Article Status + +on: + schedule: + - cron: "*/30 * * * *" + workflow_dispatch: +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + with: + fetch-depth: 0 + - name: checkout old pages branch + uses: actions/checkout@v2 + with: + repository: lctt/translateproject + 
path: build + ref: gh-pages + - name: remove pages .git + run: rm -rf ./build/.git + - name: run status + run: sh ./scripts/status.sh; + - uses: crazy-max/ghaction-github-pages@v2.2.0 + with: + build_dir: ./build + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 0b12ca6653..0000000000 --- a/.travis.yml +++ /dev/null @@ -1,27 +0,0 @@ -language: minimal -install: - - sudo apt-get install jq - - git clone --depth=1 -b gh-pages https://github.com/LCTT/TranslateProject/ build && rm -rf build/.git -script: - - 'if [ "$TRAVIS_PULL_REQUEST" != "false" ]; then sh ./scripts/check.sh; fi' - - 'if [ "$TRAVIS_PULL_REQUEST" = "false" ]; then sh ./scripts/badge.sh; fi' - - 'if [ "$TRAVIS_EVENT_TYPE" = "cron" ]; then sh ./scripts/status.sh; fi' - -branches: - only: - - master - # - status - except: - - gh-pages -git: - submodules: false - depth: false -deploy: - provider: pages - skip_cleanup: true - github_token: $GITHUB_TOKEN - local_dir: build - on: - branch: - - master - # - status diff --git a/published/20171025 Typeset your docs with LaTeX and TeXstudio on Fedora.md b/published/20171025 Typeset your docs with LaTeX and TeXstudio on Fedora.md new file mode 100644 index 0000000000..8339eaf7d4 --- /dev/null +++ b/published/20171025 Typeset your docs with LaTeX and TeXstudio on Fedora.md @@ -0,0 +1,161 @@ +[#]: collector: (Chao-zhi) +[#]: translator: (Chao-zhi) +[#]: reviewer: (wxy) +[#]: publisher: (wxy) +[#]: url: (https://linux.cn/article-13136-1.html) +[#]: subject: (Typeset your docs with LaTeX and TeXstudio on Fedora) +[#]: via: (https://fedoramagazine.org/typeset-latex-texstudio-fedora/) +[#]: author: (Julita Inca Chiroque https://fedoramagazine.org/author/yulytas/) + +使用 LaTeX 和 TeXstudio 排版文档 +====== + +![](https://fedoramagazine.org/wp-content/uploads/2017/07/latex-texstudio-945x400.jpg) + +LaTeX 是一个服务于高质量排版的[文档准备系统][1]。通常用于大量的技术和科学文档的排版。不过,你也可以使用 LaTex 
排版各种形式的文档。教师可以编辑他们的考试和教学大纲,学生可以展示他们的论文和报告。这篇文章让你尝试使用 TeXstudio。TeXstudio 是一个便于编辑 LaTeX 文档的软件。 + +### 启动 TeXstudio + +如果你使用的是 Fedora Workstation,请启动软件管理,然后输入 TeXstudio 以搜索该应用程序。然后选择安装并添加 TeXstudio 到你的系统。你可以从软件管理中启动这个程序,或者像以往一样在概览中启动这个软件。 + +或者,如果你使用终端,请输入 `texstudio`。如果未安装该软件包,系统将提示你安装它。键入 `y` 开始安装。 + +``` +$ texstudio +bash: texstudio: command not found... +Install package 'texstudio' to provide command 'texstudio'? [N/y] y +``` + +### 你的第一份文档 + +LaTeX 命令通常以反斜杠 `\` 开头,命令参数用大括号 `{}` 括起来。首先声明 `documentclass` 的类型。这个例子向你展示了该文档的类是一篇文章。 + +然后,在声明 `documentclass` 之后,用 `begin` 和 `end` 标记文档的开始和结束。在这些命令之间,写一段类似以下的内容: + +``` +\documentclass{article} +\begin{document} +The Fedora Project is a project sponsored by Red Hat primarily to co-ordinate the development of the Linux-based Fedora operating system, operating with the vision that the project "creates a world where free culture is welcoming and widespread, collaboration is commonplace, and people control their content and devices". The Fedora Project was founded on 22 September 2003 when Red Hat decided to split Red Hat Linux into Red Hat Enterprise Linux (RHEL) and a community-based operating system, Fedora. +\end{document} +``` + +![](https://fedoramagazine.org/wp-content/uploads/2017/07/Screenshot-from-2017-10-05-20-19-15.png) + +### 使用间距 + +要创建段落分隔符,请在文本之间保留一个或多个换行符。下面是一个包含四个段落的示例: + +![](https://fedoramagazine.org/wp-content/uploads/2017/07/Screenshot-from-2017-10-18-14-24-42.png) + +从该示例中可以看出,多个换行符不会在段落之间创建额外的空格。但是,如果你确实需要留出额外的空间,请使用 `hspace` 和 `vspace` 命令。这两个命令分别添加水平和垂直空间。下面是一些示例代码,显示了段落周围的附加间距: + +``` +\documentclass{article} +\begin{document} + +\hspace{2.5cm} The four essential freedoms + +\vspace{0.6cm} +A program is free software if the program's users have the 4 essential freedoms: + +The freedom to run the program as you wish, for any purpose (freedom 0).\vspace{0.2cm} +The freedom to study how the program works, and change it so it does your computing as you wish (freedom 1). 
Access to the source code is a precondition for this.\vspace{0.2cm} + +The freedom to redistribute copies so you can help your neighbour (freedom 2).\vspace{0.2cm} + +The freedom to distribute copies of your modified versions to others (freedom 3). By doing this you can give the whole community a chance to benefit from your changes. Access to the source code is a precondition for this. + +\end{document} +``` + +如果需要,你也可以使用 `noindent` 命令来避免缩进。这里是上面 LaTeX 源码的结果: + +![](https://fedoramagazine.org/wp-content/uploads/2017/07/Screenshot-from-2017-10-18-17-24-53.png) + +### 使用列表和格式 + +如果把自由软件的四大基本自由列为一个清单,这个例子看起来会更好。通过在列表的开头使用`\begin{itemize}`,在末尾使用 `\end{itemize}` 来设置列表结构。在每个项目前面加上 `\item` 命令。 + +额外的格式也有助于使文本更具可读性。用于格式化的有用命令包括粗体、斜体、下划线、超大、大、小和 textsc 以帮助强调文本: + +``` +\documentclass{article} +\begin{document} + +\hspace{2cm} {\huge The four essential freedoms} + +\vspace{0.6cm} +\noindent {\large A program is free software if the program's users have the 4 essential freedoms}: +\begin{itemize} +\item \vspace{0.2cm} +\noindent \textbf{The freedom to run} the program as you wish, for any purpose \textit{(freedom 0)}. \vspace{0.2cm} +\item \noindent \textbf{The freedom to study} how the program works, and change it so it does your computing as you wish \textit{(freedom 1)}. Access to the source code is a precondition for this.\vspace{0.2cm} + +\item \noindent \textbf{The freedom to redistribute} copies so you can help your neighbour \textit{(freedom 2)}.\vspace{0.2cm} + +\item \noindent \textbf{The freedom to distribute copies of your modified versions} to others \textit{(freedom 3)}. 
\tiny{By doing this you can give the whole community a chance to benefit from your changes.\underline{\textsc{ Access to the source code is a precondition for this.}}} +\end{itemize} +\end{document} +``` + +![](https://fedoramagazine.org/wp-content/uploads/2017/07/Screenshot-from-2017-10-18-17-21-30.png) + +### 添加列、图像和链接 + +列、图像和链接有助于为文本添加更多信息。LaTeX 包含一些高级功能的函数作为宏包。`\usepackage` 命令加载宏包以便你可以使用这些功能。 + +例如,要使用图像,可以使用命令 `\usepackage{graphicx}`。或者,要设置列和链接,请分别使用 `\usepackage{multicol}` 和 `\usepackage{hyperref}`。 + +`\includegraphics` 命令将图像内联放置在文档中。(为简单起见,请将图形文件包含在与 LaTeX 源文件相同的目录中。) + +下面是一个使用所有这些概念的示例。它还使用下载的两个 PNG 图片。试试你自己的图片,看看它们是如何工作的。 + +``` +\documentclass{article} +\usepackage{graphicx} +\usepackage{multicol} +\usepackage{hyperref} +\begin{document} + \textbf{GNU} + \vspace{1cm} + + GNU is a recursive acronym for "GNU's Not Unix!", chosen because GNU's design is Unix-like, but differs from Unix by being free software and containing no Unix code. + + Richard Stallman, the founder of the project, views GNU as a "technical means to a social end". Stallman has written about "the social aspects of software and how Free Software can create community and social justice." in his "Free Society" book. 
+ \vspace{1cm} + + \textbf{Some Projects} + + \begin{multicols}{2} + Fedora + \url{https://getfedora.org} + \includegraphics[width=1cm]{fedora.png} + + GNOME + \url{https://getfedora.org} + \includegraphics[width=1cm]{gnome.png} + \end{multicols} + +\end{document} +``` + +![](https://fedoramagazine.org/wp-content/uploads/2017/07/Screenshot-from-2017-10-18-20-32-32.png) + +这里的功能只触及 LaTeX 功能的表面。你可以在该项目的[帮助和文档站点][3]了解更多关于它们的信息。 + +-------------------------------------------------------------------------------- + +via: https://fedoramagazine.org/typeset-latex-texstudio-fedora/ + +作者:[Julita Inca Chiroque][a] +选题:[Chao-zhi][b] +译者:[Chao-zhi][b] +校对:[wxy](https://github.com/wxy) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://fedoramagazine.org/author/yulytas/ +[b]: https://github.com/Chao-zhi +[1]:http://www.latex-project.org/about/ +[2]:https://fedoramagazine.org/fedora-aarch64-on-the-solidrun-honeycomb-lx2k/ +[3]:https://www.latex-project.org/help/ diff --git a/published/20190312 When the web grew up- A browser story.md b/published/20190312 When the web grew up- A browser story.md new file mode 100644 index 0000000000..a53a5babfb --- /dev/null +++ b/published/20190312 When the web grew up- A browser story.md @@ -0,0 +1,59 @@ +[#]: collector: (lujun9972) +[#]: translator: (XYenChi) +[#]: reviewer: (wxy) +[#]: publisher: (wxy) +[#]: url: (https://linux.cn/article-13130-1.html) +[#]: subject: (When the web grew up: A browser story) +[#]: via: (https://opensource.com/article/19/3/when-web-grew) +[#]: author: (Mike Bursell https://opensource.com/users/mikecamel) + +Web 的成长,就是一篇浏览器的故事 +====== + +> 互联网诞生之时我的个人故事。 + +![](https://img.linux.net.cn/data/attachment/album/202102/18/161753qb8tytkc6bnxbavn.jpg) + +最近,我和大家 [分享了][1] 我在 1994 年获得英国文学和神学学位离开大学后,如何在一个人们还不知道 Web 服务器是什么的世界里成功找到一份运维 Web 服务器的工作。我说的“世界”,并不仅仅指的是我工作的机构,而是泛指所有地方。Web 那时当真是全新的 —— 人们还正尝试理出头绪。 + +那并不是说我工作的地方(一家学术出版社)特别“懂” Web。这是个大部分人还在用 28.8K 
猫(调制解调器,俗称“猫”)访问网页的世界。我还记得我拿到 33.6K 猫时有多激动。至少上下行速率不对称的日子已经过去了,[^1] 以前 1200/300 的带宽描述特别常见。这意味着(在同一家机构的)印刷人员制作的设计复杂、色彩缤纷、纤毫毕现的文档是完全不可能放在 Web 上的。我不能允许在网站的首页出现大于 40k 的 GIF 图片,这对我们的许多访问者来说是很难接受的。大于大约 60k 图片的会作为独立的图片,以缩略图链接过去。 + +如果说市场部只有这一点不喜欢,那是绝对是轻描淡写了。更糟的是布局问题。“浏览器决定如何布局文档,”我一遍又一遍地解释,“你可以使用标题或者段落,但是文档在页面上如何呈现并不取决于文档,而是取决于渲染器!”他们想控制这些,想要不同颜色的背景。后来明白了那些不能实现。我觉得我就像是参加了第一次讨论层叠样式表(CSS)的 W3C 会议,并进行了激烈地争论。关于文档编写者应控制布局的建议真令人厌恶。[^2] CSS 花了一些时间才被人们采用,与此同时,关心这些问题的人搭上了 PDF 这种到处都是安全问题的列车。 + +如何呈现文档不是唯一的问题。作为一个实体书出版社,对于市场部来说,拥有一个网站的全部意义在于,让客户(或者说潜在的客户)不仅知道一本书的内容,而且知道买这本书需要花多少钱。但这有一个问题,你看,互联网,包括快速发展的万维网,是开放的,是所有都免费的自由之地,没有人会在意钱;事实上,在那里谈钱是要回避和避免的。 + +我和主流“网民”的看法一致,认为没必要把价格信息放在线上。我老板,以及机构里相当多的人都持相反的意见。他们觉得消费者应该能够看到书要花多少钱。他们也觉得我的银行经理也会想看到我的账户里每个月进了多少钱,如果我不认同他们的观点的话,那我的收入就可能堪忧。 + +幸运的是,在我被炒鱿鱼之前,我已经自己认清了一些 —— 可能是在我开始迈入 Web 的几星期之后,Web 已经发生变化,有其他人公布他们的产品价格信息。这些新来者通常被那些从早期就开始运行 Web 服务器的老派人士所看不起,[^3] 但很明显,风向是往那边吹的。然而,这并不意味着我们的网站就赢得了战争。作为一个学术出版社,我们和大学共享一个域名(在 “ac.uk” 下)。大学不太相信发布价格信息是合适的,直到出版社的一些资深人士指出,普林斯顿大学出版社正在这样做,如果我们不做……看起来是不是有点傻? + +有趣的事情还没完。在我担任站点管理员(“webmaster@…”)的短短几个月后,我们和其他很多网站一样开始看到了一种令人担忧的趋势。某些访问者可以轻而易举地让我们的 Web 服务器跪了。这些访问者使用了新的网页浏览器:网景浏览器(Netscape)。网景浏览器实在太恶劣了,它居然是多线程的。 + +这为什么是个问题呢?在网景浏览器之前,所有的浏览器都是单线程。它们一次只进行一个连接,所以即使一个页面有五张 GIF 图,[^4] 也会先请求 HTML 基本文件进行解析,然后下载第一张 GIF,完成,接着第二张,完成,如此类推。事实上,GIF 的顺序经常出错,使得页面加载得非常奇怪,但这也是常规思路。而粗暴的网景公司的人决定,它们可以同时打开多个连接到 Web 服务器,比如说,可以同时请求所有的 GIF!为什么这是个问题呢?好吧,问题就在于大多数 Web 服务器都是单线程的。它们不是设计来一次进行多个连接的。确实,我们运行的 HTTP 服务的软件(MacHTTP)是单线程的。尽管我们花钱购买了它(最初是共享软件),但我们用的这版无法同时处理多个请求。 + +互联网上爆发了大量讨论。这些网景公司的人以为他们是谁,能改变世界的运作方式?它应该如何工作?大家分成了不同阵营,就像所有的技术争论一样,双方都用各种技术热词互丢。问题是,网景浏览器不仅是多线程的,它也比其他的浏览器更好。非常多 Web 服务器代码维护者,包括 MacHTTP 作者 Chuck Shotton 在内,开始坐下来认真地在原有代码基础上更新了多线程测试版。几乎所有人立马转向测试版,它们变得稳定了,最终,浏览器要么采用了这种技术,变成多线程,要么就像所有过时产品一样销声匿迹了。[^6] + +对我来说,这才是 Web 真正成长起来的时候。既不是网页展示的价格,也不是设计者能定义你能在网页上看到什么,[^8] 而是浏览器变得更易用,以及成千上万的浏览者向数百万浏览者转变的网络效应,使天平向消费者而不是生产者倾斜。在我的旅程中,还有更多故事,我将留待下次再谈。但从这时起,我的雇主开始看我们的月报,然后是周报、日报,并意识到这将是一件大事,真的需要关注。 + +[^1]: 它们又是怎么回来的? 
+[^2]: 你可能不会惊讶,我还是在命令行里最开心。 +[^3]: 大约六个月前。 +[^4]: 莽撞,没错,但它确实发生了 [^5] +[^5]: 噢,不,是 GIF 或 BMP,JPEG 还是个好主意,但还没有用上。 +[^6]: 没有真正的沉寂:总有一些坚持他们的首选解决方案具有技术上的优势,并哀叹互联网的其他人都是邪恶的死硬爱好者。 [^7] +[^7]: 我不是唯一一个说“我还在用 Lynx”的人。 +[^8]: 我会指出,为那些有各种无障碍需求的人制造严重而持续的问题。 + +-------------------------------------------------------------------------------- + +via: https://opensource.com/article/19/3/when-web-grew + +作者:[Mike Bursell][a] +选题:[lujun9972][b] +译者:[XYenChi](https://github.com/XYenChi) +校对:[wxy](https://github.com/wxy) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensource.com/users/mikecamel +[b]: https://github.com/lujun9972 +[1]: https://opensource.com/article/18/11/how-web-was-won diff --git a/published/20190404 Intel formally launches Optane for data center memory caching.md b/published/20190404 Intel formally launches Optane for data center memory caching.md new file mode 100644 index 0000000000..f07eb56caa --- /dev/null +++ b/published/20190404 Intel formally launches Optane for data center memory caching.md @@ -0,0 +1,69 @@ +[#]: collector: (lujun9972) +[#]: translator: (ShuyRoy) +[#]: reviewer: (wxy) +[#]: publisher: (wxy) +[#]: url: (https://linux.cn/article-13109-1.html) +[#]: subject: (Intel formally launches Optane for data center memory caching) +[#]: via: (https://www.networkworld.com/article/3387117/intel-formally-launches-optane-for-data-center-memory-caching.html#tk.rss_all) +[#]: author: (Andy Patrizio https://www.networkworld.com/author/Andy-Patrizio/) + +英特尔 Optane:用于数据中心内存缓存 +====== + +![](https://img.linux.net.cn/data/attachment/album/202102/12/111720yq1rvxcncjdsjb0g.jpg) + +> 英特尔推出了包含 3D Xpoint 内存技术的 Optane 持久内存产品线。英特尔的这个解决方案介乎于 DRAM 和 NAND 中间,以此来提升性能。 + +![Intel][1] + +英特尔在 2019 年 4 月的[大规模数据中心活动][2]中正式推出 Optane 持久内存产品线。它已经问世了一段时间,但是目前的 Xeon 服务器处理器还不能充分利用它。而新的 Xeon8200 和 9200 系列可以充分利用 Optane 持久内存的优势。 + +由于 Optane 是英特尔的产品(与美光合作开发),所以意味着 AMD 和 ARM 的服务器处理器不能够支持它。 + +正如[我之前所说的][3],OptaneDC 
持久内存采用与美光合作研发的 3D Xpoint 内存技术。3D Xpoint 是一种比 SSD 更快的非易失性内存,速度几乎与 DRAM 相近,而且它具有 NAND 闪存的持久性。 + +第一个 3D Xpoint 产品是被称为英特尔“尺子”的 SSD,因为它们被设计成细长的样子,很像尺子的形状。它们被设计这样是为了适合 1u 的服务器机架。在发布的公告中,英特尔推出了新的利用四芯或者 QLC 3D NAND 内存的英特尔 SSD D5-P4325 [尺子][7] SSD,可以在 1U 的服务器机架上放 1PB 的存储。 + +OptaneDC 持久内存的可用容量最初可以通过使用 128GB 的 DIMM 达到 512GB。英特尔数据中心集团执行副总裁及总经理 Navin Shenoy 说:“OptaneDC 持久内存可达到的容量是 DRAM 的 2 到 4 倍。” + +他说:“我们希望服务器系统的容量可以扩展到每个插槽 4.5TB 或者 8 个插槽 36TB,这是我们第一代 Xeon 可扩展芯片的 3 倍。” + +### 英特尔Optane内存的使用和速度 + +Optane 有两种不同的运行模式:内存模式和应用直连模式。内存模式是将 DRAM 放在 Optane 内存之上,将 DRAM 作为 Optane 内存的缓存。应用直连模式是将 DRAM 和 OptaneDC 持久内存一起作为内存来最大化总容量。并不是每个工作负载都适合这种配置,所以应该在对延迟不敏感的应用程序中使用。正如英特尔推广的那样,Optane 的主要使用情景是内存模式。 + +几年前,当 3D Xpoint 最初发布时,英特尔宣称 Optane 的速度是 NAND 的 1000 倍,耐用是 NAND 的 1000 倍,密度潜力是 DRAM 的 10 倍。这虽然有点夸张,但这些因素确实很令人着迷。 + +在 256B 的连续 4 个缓存行中使用 Optane 内存可以达到 8.3GB/秒的读速度和 3.0GB/秒的写速度。与 SATA SSD 的 500MB/秒左右的读/写速度相比,可以看到性能有很大提升。请记住,Optane 充当内存,所以它会缓存被频繁访问的 SSD 中的内容。 + +这是了解 OptaneDC 的关键。它能将非常大的数据集存储在离内存非常近的位置,因此具有很低延迟的 CPU 可以最小化访问较慢的存储子系统的访问延迟,无论存储是 SSD 还是 HDD。现在,它提供了一种可能性,即把多个 TB 的数据放在非常接近 CPU 的地方,以实现更快的访问。 + +### Optane 内存的一个挑战 + +唯一真正的挑战是 Optane 插进内存所在的 DIMM 插槽。现在有些主板的每个 CPU 有多达 16 个 DIMM 插槽,但是这仍然是客户和设备制造商之间需要平衡的电路板空间:Optane 还是内存。有一些 Optane 驱动采用了 PCIe 接口进行连接,可以减轻主板上内存的拥挤。 + +3D Xpoint 由于它写数据的方式,提供了比传统的 NAND 闪存更高的耐用性。英特尔承诺 Optane 提供 5 年保修期,而很多 SSD 只提供 3 年保修期。 + +-------------------------------------------------------------------------------- + +via: https://www.networkworld.com/article/3387117/intel-formally-launches-optane-for-data-center-memory-caching.html#tk.rss_all + +作者:[Andy Patrizio][a] +选题:[lujun9972][b] +译者:[RiaXu](https://github.com/ShuyRoy) +校对:[wxy](https://github.com/wxy) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://www.networkworld.com/author/Andy-Patrizio/ +[b]: https://github.com/lujun9972 +[1]: https://images.idgesg.net/images/article/2018/06/intel-optane-persistent-memory-100760427-large.jpg +[2]: 
https://www.networkworld.com/article/3386142/intel-unveils-an-epic-response-to-amds-server-push.html +[3]: https://www.networkworld.com/article/3279271/intel-launches-optane-the-go-between-for-memory-and-storage.html +[4]: https://www.networkworld.com/article/3290421/why-nvme-users-weigh-benefits-of-nvme-accelerated-flash-storage.html +[5]: https://www.networkworld.com/article/3242807/data-center/top-10-data-center-predictions-idc.html#nww-fsb +[6]: https://www.networkworld.com/newsletters/signup.html#nww-fsb +[7]: https://www.theregister.co.uk/2018/02/02/ruler_and_miniruler_ssd_formats_look_to_banish_diskstyle_drives/ +[8]: https://pluralsight.pxf.io/c/321564/424552/7490?u=https%3A%2F%2Fwww.pluralsight.com%2Fpaths%2Fapple-certified-technical-trainer-10-11 +[9]: https://www.facebook.com/NetworkWorld/ +[10]: https://www.linkedin.com/company/network-world diff --git a/published/20190610 Tmux Command Examples To Manage Multiple Terminal Sessions.md b/published/20190610 Tmux Command Examples To Manage Multiple Terminal Sessions.md new file mode 100644 index 0000000000..c80af774f7 --- /dev/null +++ b/published/20190610 Tmux Command Examples To Manage Multiple Terminal Sessions.md @@ -0,0 +1,305 @@ +[#]: collector: (lujun9972) +[#]: translator: (chensanle) +[#]: reviewer: (wxy) +[#]: publisher: (wxy) +[#]: url: (https://linux.cn/article-13107-1.html) +[#]: subject: (Tmux Command Examples To Manage Multiple Terminal Sessions) +[#]: via: (https://www.ostechnix.com/tmux-command-examples-to-manage-multiple-terminal-sessions/) +[#]: author: (sk https://www.ostechnix.com/author/sk/) + +基于 Tmux 的多会话终端管理示例 +====== + +![](https://img.linux.net.cn/data/attachment/album/202102/11/101058ffso6wzzw94wm2ng.jpg) + +我们已经了解到如何通过 [GNU Screen][2] 进行多会话管理。今天,我们将要领略另一个著名的管理会话的命令行实用工具 **Tmux**。类似 GNU Screen,Tmux 是一个帮助我们在单一终端窗口中创建多个会话,同一时间内同时运行多个应用程序或进程的终端复用工具。Tmux 自由、开源并且跨平台,支持 Linux、OpenBSD、FreeBSD、NetBSD 以及 Mac OS X。本文将讨论 Tmux 在 Linux 系统下的高频用法。 + +### Linux 下安装 Tmux + +Tmux 可以在绝大多数的 Linux 
官方仓库下获取。 + +在 Arch Linux 或它的变种系统下,执行下列命令来安装: + +``` +$ sudo pacman -S tmux +``` + +Debian、Ubuntu 或 Linux Mint: + +``` +$ sudo apt-get install tmux +``` + +Fedora: +``` +$ sudo dnf install tmux +``` + +RHEL 和 CentOS: + +``` +$ sudo yum install tmux +``` + +SUSE/openSUSE: + +``` +$ sudo zypper install tmux +``` + +以上,我们已经完成 Tmux 的安装。之后我们继续看看一些 Tmux 示例。 + +### Tmux 命令示例: 多会话管理 + +Tmux 默认所有命令的前置命令都是 `Ctrl+b`,使用前牢记这个快捷键即可。 + +> **注意**:**Screen** 的前置命令都是 `Ctrl+a`. + +#### 创建 Tmux 会话 + +在终端中运行如下命令创建 Tmux 会话并附着进入: + +``` +tmux +``` + +抑或, + +``` +tmux new +``` + +一旦进入 Tmux 会话,你将看到一个 **沉在底部的绿色的边栏**,如下图所示。 + +![][3] + +*创建 Tmux 会话* + +这个绿色的边栏能很容易提示你当前是否身处 Tmux 会话当中。 + +#### 退出 Tmux 会话 + +退出当前 Tmux 会话仅需要使用 `Ctrl+b` 和 `d`。无需同时触发这两个快捷键,依次按下 `Ctrl+b` 和 `d` 即可。 + +退出当前会话后,你将能看到如下输出: + +``` +[detached (from session 0)] +``` + +#### 创建有名会话 + +如果使用多个会话,你很可能会混淆运行在多个会话中的应用程序。这种情况下,我们需要会话并赋予名称。譬如需要 web 相关服务的会话,就创建一个名称为 “webserver”(或任意一个其他名称) 的 Tmux 会话。 + +``` +tmux new -s webserver +``` + +这里是新的 Tmux 有名会话: + +![][4] + +*拥有自定义名称的 Tmux 会话* + +如你所见上述截图,这个 Tmux 会话的名称已经被标注为 “webserver”。如此,你可以在多个会话中,轻易的区分应用程序的所在。 + +退出会话,轻按 `Ctrl+b` 和 `d`。 + +#### 查看 Tmux 会话清单 + +查看 Tmux 会话清单,执行: + +``` +tmux ls +``` + +示例输出: + +![][5] + +*列出 Tmux 会话* + +如你所见,我们开启了两个 Tmux 会话。 + +#### 创建非附着会话 + +有时候,你可能想要简单创建会话,但是并不想自动切入该会话。 + +创建一个非附着会话,并赋予名称 “ostechnix”,运行: + +``` +tmux new -s ostechnix -d +``` + +上述命令将会创建一个名为 “ostechnix” 的会话,但是并不会附着进入。 + +你可以通过使用 `tmux ls` 命令验证: + +![][6] + +*创建非附着会话* + +#### 附着进入 Tmux 会话 + +通过如下命令,你可以附着进入最后一个被创建的会话: + +``` +tmux attach +``` + +抑或, + +``` +tmux a +``` + +如果你想附着进入任意一个指定的有名会话,譬如 “ostechnix”,运行: + +``` +tmux attach -t ostechnix +``` + +或者,简写为: + +``` +tmux a -t ostechnix +``` + +#### 关闭 Tmux 会话 + +当你完成或者不再需要 Tmux 会话,你可以通过如下命令关闭: + +``` +tmux kill-session -t ostechnix +``` + +当身处该会话时,使用 `Ctrl+b` 以及 `x`。点击 `y` 来关闭会话。 + +可以通过 `tmux ls` 命令验证。 + +关闭所有 Tmux 服务下的所有会话,运行: + +``` +tmux kill-server +``` + +谨慎!这将终止所有 Tmux 会话,并不会产生任何警告,即便会话存在运行中的任务。 + +如果不存在活跃的 Tmux 会话,将看到如下输出: + +``` +$ 
tmux ls +no server running on /tmp/tmux-1000/default +``` + +#### 切割 Tmux 窗口 + +切割窗口成多个小窗口,在 Tmux 中,这个叫做 “Tmux 窗格”。每个窗格中可以同时运行不同的程序,并同时与所有的窗格进行交互。每个窗格可以在不影响其他窗格的前提下可以调整大小、移动位置和控制关闭。我们可以以水平、垂直或者二者混合的方式切割屏幕。 + +##### 水平切割窗格 + +欲水平切割窗格,使用 `Ctrl+b` 和 `"`(半个双引号)。 + +![][7] + +*水平切割 Tmux 窗格* + +可以使用组合键进一步切割面板。 + +##### 垂直切割窗格 + +垂直切割面板,使用 `Ctrl+b` 和 `%`。 + +![][8] + +*垂直切割 Tmux 窗格* + +##### 水平、垂直混合切割窗格 + +我们也可以同时采用水平和垂直的方案切割窗格。看看如下截图: + +![][9] + +*切割 Tmux 窗格* + +首先,我通过 `Ctrl+b` `"` 水平切割,之后通过 `Ctrl+b` `%` 垂直切割下方的窗格。 + +如你所见,每个窗格下我运行了不同的程序。 + +##### 切换窗格 + +通过 `Ctrl+b` 和方向键(上下左右)切换窗格。 + +##### 发送命令给所有窗格 + +之前的案例中,我们在每个窗格中运行了三个不同命令。其实,也可以发送相同的命令给所有窗格。 + +为此,使用 `Ctrl+b` 然后键入如下命令,之后按下回车: + +``` +:setw synchronize-panes +``` + +现在在任意窗格中键入任何命令。你将看到相同命令影响了所有窗格。 + +##### 交换窗格 + +使用 `Ctrl+b` 和 `o` 交换窗格。 + +##### 展示窗格号 + +使用 `Ctrl+b` 和 `q` 展示窗格号。 + +##### 终止窗格 + +要关闭窗格,直接键入 `exit` 并且按下回车键。或者,按下 `Ctrl+b` 和 `x`。你会看到确认信息。按下 `y` 关闭窗格。 + +![][10] + +*关闭窗格* + +##### 放大和缩小 Tmux 窗格 + +我们可以将 Tmux 窗格放大到当前终端窗口的全尺寸,以获得更好的文本可视性,并查看更多的内容。当你需要更多的空间或专注于某个特定的任务时,这很有用。在完成该任务后,你可以将 Tmux 窗格缩小(取消放大)到其正常位置。更多详情请看以下链接。 + +- [如何缩放 Tmux 窗格以提高文本可见度?](https://ostechnix.com/how-to-zoom-tmux-panes-for-better-text-visibility/) + +#### 自动启动 Tmux 会话 + +当通过 SSH 与远程系统工作时,在 Tmux 会话中运行一个长期运行的进程总是一个好的做法。因为,它可以防止你在网络连接突然中断时失去对运行进程的控制。避免这个问题的一个方法是自动启动 Tmux 会话。更多详情,请参考以下链接。 + +- [通过 SSH 登录远程系统时自动启动 Tmux 会话](https://ostechnix.com/autostart-tmux-session-on-remote-system-when-logging-in-via-ssh/) + +### 总结 + +这个阶段下,你已经获得了基本的 Tmux 技能来进行多会话管理,更多细节,参阅 man 页面。 + +``` +$ man tmux +``` + +GNU Screen 和 Tmux 工具都能透过 SSH 很好的管理远程服务器。学习 Screen 和 Tmux 命令,像个行家一样,彻底通过这些工具管理远程服务器。 + +-------------------------------------------------------------------------------- + +via: https://www.ostechnix.com/tmux-command-examples-to-manage-multiple-terminal-sessions/ + +作者:[sk][a] +选题:[lujun9972][b] +译者:[chensanle](https://github.com/chensanle) +校对:[wxy](https://github.com/wxy) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 
原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://www.ostechnix.com/author/sk/ +[b]: https://github.com/lujun9972 +[1]: https://www.ostechnix.com/wp-content/uploads/2019/06/Tmux-720x340.png +[2]: https://www.ostechnix.com/screen-command-examples-to-manage-multiple-terminal-sessions/ +[3]: https://www.ostechnix.com/wp-content/uploads/2019/06/Tmux-session.png +[4]: https://www.ostechnix.com/wp-content/uploads/2019/06/Named-Tmux-session.png +[5]: https://www.ostechnix.com/wp-content/uploads/2019/06/List-Tmux-sessions.png +[6]: https://www.ostechnix.com/wp-content/uploads/2019/06/Create-detached-sessions.png +[7]: https://www.ostechnix.com/wp-content/uploads/2019/06/Horizontal-split.png +[8]: https://www.ostechnix.com/wp-content/uploads/2019/06/Vertical-split.png +[9]: https://www.ostechnix.com/wp-content/uploads/2019/06/Split-Panes.png +[10]: https://www.ostechnix.com/wp-content/uploads/2019/06/Kill-panes.png diff --git a/published/20190626 Where are all the IoT experts going to come from.md b/published/20190626 Where are all the IoT experts going to come from.md new file mode 100644 index 0000000000..5b10af4a11 --- /dev/null +++ b/published/20190626 Where are all the IoT experts going to come from.md @@ -0,0 +1,72 @@ +[#]: collector: (lujun9972) +[#]: translator: (scvoet) +[#]: reviewer: (wxy) +[#]: publisher: (wxy) +[#]: url: (https://linux.cn/article-13115-1.html) +[#]: subject: (Where are all the IoT experts going to come from?) +[#]: via: (https://www.networkworld.com/article/3404489/where-are-all-the-iot-experts-going-to-come-from.html) +[#]: author: (Fredric Paul https://www.networkworld.com/author/Fredric-Paul/) + +物联网专家都从何而来? 
+====== + +> 物联网(IoT)的快速发展催生了对跨职能专家进行培养的需求,这些专家可以将传统的网络和基础设施专业知识与数据库和报告技能相结合。 + +![Kevin \(CC0\)][1] + +如果物联网(IoT)要实现其宏伟的诺言,它将需要大量聪明、熟练、**训练有素**的工人军团来实现这一切。而现在,这些人将从何而来尚不清楚。 + +这就是我为什么有兴趣同资产优化软件公司 [AspenTech][2] 的产品管理、研发高级总监 Keith Flynn 通邮件的原因,他说,当处理大量属于物联网范畴的新技术时,你需要能够理解如何配置技术和解释数据的人。Flynn 认为,现有的教育机构对物联网特定课程的需求越来越大,这同时也给了以物联网为重点,提供了完善课程的新私立学院机会。 + +Flynn 跟我说,“在未来,物联网项目将与如今普遍的数据管理和自动化项目有着巨大的不同……未来需要更全面的技能和交叉交易能力,这样我们才会说同一种语言。” + +Flynn 补充说,随着物联网每年增长 30%,将不再依赖于几个特定的技能,“从传统的部署技能(如网络和基础设施)到数据库和报告技能,坦白说,甚至是基础数据科学,都将需要一起理解和使用。” + +### 召集所有物联网顾问 + +Flynn 预测,“受过物联网教育的人的第一个大机会将会是在咨询领域,随着咨询公司对行业趋势的适应或淘汰……有受过物联网培训的员工将有助于他们在物联网项目中的定位,并在新的业务线中提出要求——物联网咨询。” + +对初创企业和小型公司而言,这个问题尤为严重。“组织越大,他们越有可能雇佣到不同技术类别的人”Flynn 这样说到,“但对于较小的组织和较小的物联网项目来说,你则需要一个能同时兼顾的人。” + +两者兼而有之?还是**一应俱全?**物联网“需要将所有知识和技能组合在一起”,Flynn 说到,“并不是所有技能都是全新的,只是在此之前从来没有被归纳在一起或放在一起教授过。” + +### 未来的物联网专家 + +Flynn 表示,真正的物联网专业技术是从基础的仪器仪表和电气技能开始的,这能帮助工人发明新的无线发射器或提升技术,以提高电池寿命和功耗。 + +“IT 技能,如网络、IP 寻址、子网掩码、蜂窝和卫星也是物联网的关键需求”,Flynn 说。他还认为物联网需要数据库管理技能和云管理和安全专业知识,“特别是当高级过程控制(APC)将传感器数据直接发送到数据库和数据湖等事情成为常态时。” + +### 物联网专家又从何而来? 
+ +Flynn 说,标准化的正规教育课程将是确保毕业生或证书持有者掌握一套正确技能的最佳途径。他甚至还列出了一个样本课程。“按时间顺序开始,从基础知识开始,比如 [电气仪表] 和测量。然后讲授网络知识,数据库管理和云计算课程都应在此之后开展。这个学位甚至可以循序渐进至现有的工程课程中,这可能需要两年时间……来完成物联网部分的学业。” + +虽然企业培训也能发挥作用,但实际上却是“说起来容易做起来难”,Flynn 这样警告,“这些培训需要针对组织的具体努力而推动。” + +当然,现在市面上已经有了 [大量的在线物联网培训课程和证书课程][5]。但追根到底,这一工作全都依赖于工人自身的推断。 + +“在这个世界上,随着科技不断改变行业,提升技能是非常重要的”,Flynn 说,“如果这种提升技能的推动力并不是来源于你的雇主,那么在线课程和认证将会是提升你自己很好的一个方式。我们只需要创建这些课程……我甚至可以预见组织将与提供这些课程的高等教育机构合作,让他们的员工更好地开始。当然,物联网课程的挑战在于它需要不断发展以跟上科技的发展。” + +-------------------------------------------------------------------------------- + +via: https://www.networkworld.com/article/3404489/where-are-all-the-iot-experts-going-to-come-from.html + +作者:[Fredric Paul][a] +选题:[lujun9972][b] +译者:[Percy (@scvoet)](https://github.com/scvoet) +校对:[wxy](https://github.com/wxy) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://www.networkworld.com/author/Fredric-Paul/ +[b]: https://github.com/lujun9972 +[1]: https://images.idgesg.net/images/article/2018/07/programmer_certification-skills_code_devops_glasses_student_by-kevin-unsplash-100764315-large.jpg +[2]: https://www.aspentech.com/ +[3]: https://www.networkworld.com/article/3276025/careers/20-hot-jobs-ambitious-it-pros-should-shoot-for.html +[4]: https://pluralsight.pxf.io/c/321564/424552/7490?u=https%3A%2F%2Fwww.pluralsight.com%2Fpaths%2Fupgrading-your-technology-career +[5]: https://www.google.com/search?client=firefox-b-1-d&q=iot+training +[6]: https://www.networkworld.com/article/3254185/internet-of-things/tips-for-securing-iot-on-your-network.html#nww-fsb +[7]: https://www.networkworld.com/article/2287045/internet-of-things/wireless-153629-10-most-powerful-internet-of-things-companies.html#nww-fsb +[8]: https://www.networkworld.com/article/3243928/internet-of-things/what-is-the-industrial-iot-and-why-the-stakes-are-so-high.html#nww-fsb +[9]: https://www.networkworld.com/newsletters/signup.html#nww-fsb +[10]: https://www.facebook.com/NetworkWorld/ 
+[11]: https://www.linkedin.com/company/network-world diff --git a/published/20190810 EndeavourOS Aims to Fill the Void Left by Antergos in Arch Linux World.md b/published/20190810 EndeavourOS Aims to Fill the Void Left by Antergos in Arch Linux World.md new file mode 100644 index 0000000000..54c752a913 --- /dev/null +++ b/published/20190810 EndeavourOS Aims to Fill the Void Left by Antergos in Arch Linux World.md @@ -0,0 +1,98 @@ +[#]: collector: (lujun9972) +[#]: translator: (Chao-zhi) +[#]: reviewer: (wxy) +[#]: publisher: (wxy) +[#]: url: (https://linux.cn/article-13096-1.html) +[#]: subject: (EndeavourOS Aims to Fill the Void Left by Antergos in Arch Linux World) +[#]: via: (https://itsfoss.com/endeavouros/) +[#]: author: (John Paul https://itsfoss.com/author/john/) + +EndeavourOS:填补 Antergos 在 ArchLinux 世界留下的空白 +====== + +![](https://img.linux.net.cn/data/attachment/album/202102/07/225558rdb85bmm6uumro71.jpg) + +我相信我们的大多数读者都知道 [Antergos 项目的终结][2]。在这一消息宣布之后,Antergos 社区的成员创建了几个发行版来继承 Antergos。今天,我们将着眼于 Antergos 的“精神继承人”之一:[EndeavourOS][3]。 + +### EndeavourOS 不是 Antergos 的分支 + +在我们开始之前,我想非常明确地指出,EndeavourOS 并不是一个 Antergos 的复刻版本。开发者们以 Antergos 为灵感,创建了一个基于 Arch 的轻量级发行版。 + +![Endeavouros First Boot][4] + +根据 [这个项目网站][5] 的说法,EndeavourOS 的诞生是因为 Antergos 社区的人们想要保持 Antergos 的精神。他们的目标很简单:“让 Arch 拥有一个易于使用的安装程序和一个友好、有帮助的社区,在掌握系统的过程中能够有一个社区可以依靠。” + +与许多基于 Arch 的发行版不同,EndeavourOS 打算像 [原生 Arch][5] 那样使用,“所以没有一键式安装你喜欢的应用程序的解决方案,也没有一堆你最终不需要的预装应用程序。”对于大多数人来说,尤其是那些刚接触 Linux 和 Arch 的人,会有一个学习曲线,但 EndeavourOS 的目标是建立一个大型友好的社区,鼓励人们提出问题并了解他们的系统。 + +![Endeavouros Installing][6] + +### 正在进行的工作 + +EndeavourOS 在 [2019 年 5 月 23 日首次宣布成立][8] 随后 [在 7 月 15 日发布第一个版本][7]。不幸的是,这意味着开发人员无法将他们计划的所有功能全部整合进来。(LCTT 译注:本文原文发表于 2019 年,而现在,EndeavourOS 还在持续活跃着。) + +例如,他们想要一个类似于 Antergos 的在线安装,但却遇到了[当前选项的问题][9]。“Cnchi 运行在 Antergos 生态系统之外会造成严重的问题,需要彻底重写才能发挥作用。RebornOS 的 Fenix 安装程序还没有完全成型,需要更多时间才能正常运行。”于是现在,EndeavourOS 将会和 [Calamares 安装程序 ][10] 一起发布。 + +EndeavourOS 会提供 [比 Antergos 少的东西][9]:它的存储库比 Antergos 
小,尽管他们会附带一些 AUR 包。他们的目标是提供一个接近 Arch 却不是原生 Arch 的系统。 + +![Endeavouros Updating With Kalu][12] + +开发者[进一步声明 ][13]: + +> “Linux,特别是 Arch,核心精神是自由选择,我们提供了一个基本的安装,让你在一个精细的层面上方便地探索各项选择。我们永远不会强行为你作决定,比如为你安装 GUI 应用程序,如 Pamac,甚至采用沙盒解决方案,如 Flatpak 或 Snaps。想安装成什么样子完全取决于你,这是我们与 Antergos 或 Manjaro 的主要区别,但与 Antergos 一样,如果你安装的软件包遇到问题,我们会尽力帮助你。” + +### 体验 EndeavourOS + +我在 [VirtualBox][14] 中安装了 EndeavourOS,并且研究了一番。当我第一次启动时,我看到一个窗口,里面有关于安装的 EndeavourOS 网站的链接。它还有一个安装按钮和一个手动分区工具。Calamares 安装程序的安装过程非常顺利。 + +在我重新启动到新安装的 EndeavourOS 之后,迎接我的是一个彩色主题的 XFCE 桌面。我还收到了一堆通知消息。我使用过的大多数基于 Arch 的发行版都带有一个 GUI 包管理器,比如 [pamac][15] 或 [octopi][16],以进行系统更新。EndeavourOS 配有 [kalu][17](kalu 是 “Keeping Arch Linux Up-to-date” 的缩写)。它可以更新软件包、可以看 Archlinux 新闻、可以更新 AUR 包等等。一旦它检查到有更新,它就会显示通知消息。 + +我浏览了一下菜单,看看默认安装了什么。默认的安装并不多,连办公套件都没有。他们想让 EndeavourOS 成为一块空白画布,让任何人都可以创建他们想要的系统。他们正朝着正确的方向前进。 + +![Endeavouros Desktop][18] + +### 总结思考 + +EndeavourOS 还很年轻。第一个稳定版本都没有发布多久。它缺少一些东西,最重要的是一个在线安装程序。这就是说,我们无法估计他能够走到哪一步。(LCTT 译注:本文发表于 2019 年) + +虽然它不是 Antergos 的精确复刻,但 EndeavourOS 希望复制 Antergos 最重要的部分——热情友好的社区。很多时候,Linux 社区对初学者似乎是不受欢迎甚至是完全敌对的。我看到越来越多的人试图与这种消极情绪作斗争,并将更多的人引入 Linux。随着 EndeavourOS 团队把焦点放在社区建设上,我相信一个伟大的发行版将会诞生。 + +如果你当前正在使用 Antergos,有一种方法可以让你[不用重装系统就切换到 EndeavourOS][20] + +如果你想要一个 Antergos 的精确复刻,我建议你去看看 [RebornOS][21]。他们目前正在开发一个名为 Fenix 的 Cnchi 安装程序的替代品。 + +你试过 EndeavourOS 了吗?你的感受如何? 
+ +-------------------------------------------------------------------------------- + +via: https://itsfoss.com/endeavouros/ + +作者:[John Paul][a] +选题:[lujun9972][b] +译者:[Chao-zhi](https://github.com/Chao-zhi) +校对:[wxy](https://github.com/wxy) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://itsfoss.com/author/john/ +[b]: https://github.com/lujun9972 +[1]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2019/08/endeavouros-logo.png?ssl=1 +[2]: https://itsfoss.com/antergos-linux-discontinued/ +[3]: https://endeavouros.com/ +[4]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2019/08/endeavouros-first-boot.png?resize=800%2C600&ssl=1 +[5]: https://endeavouros.com/info-2/ +[6]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2019/08/endeavouros-installing.png?resize=800%2C600&ssl=1 +[7]: https://endeavouros.com/endeavouros-first-stable-release-has-arrived/ +[8]: https://forum.antergos.com/topic/11780/endeavour-antergos-community-s-next-stage +[9]: https://endeavouros.com/what-to-expect-on-the-first-release/ +[10]: https://calamares.io/ +[11]: https://itsfoss.com/veltos-linux/ +[12]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2019/08/endeavouros-updating-with-kalu.png?resize=800%2C600&ssl=1 +[13]: https://endeavouros.com/second-week-after-the-stable-release/ +[14]: https://itsfoss.com/install-virtualbox-ubuntu/ +[15]: https://aur.archlinux.org/packages/pamac-aur/ +[16]: https://octopiproject.wordpress.com/ +[17]: https://github.com/jjk-jacky/kalu +[18]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2019/08/endeavouros-desktop.png?resize=800%2C600&ssl=1 +[19]: https://itsfoss.com/clear-linux/ +[20]: https://forum.endeavouros.com/t/how-to-switch-from-antergos-to-endevouros/105/2 +[21]: https://rebornos.org/ diff --git a/published/20191227 The importance of consistency in your Python code.md b/published/20191227 The importance of consistency in your Python code.md new file mode 100644 index 
0000000000..5ff2d11caf --- /dev/null +++ b/published/20191227 The importance of consistency in your Python code.md @@ -0,0 +1,72 @@ +[#]: collector: (lujun9972) +[#]: translator: (stevenzdg988) +[#]: reviewer: (wxy) +[#]: publisher: (wxy) +[#]: url: (https://linux.cn/article-13082-1.html) +[#]: subject: (The importance of consistency in your Python code) +[#]: via: (https://opensource.com/article/19/12/zen-python-consistency) +[#]: author: (Moshe Zadka https://opensource.com/users/moshez) + +Python 代码一致性的重要性 +====== + +> 本文是 Python 之禅特殊系列的一部分,重点是第十二、十三和十四原则:模糊性和明确性的作用。 + +![](https://img.linux.net.cn/data/attachment/album/202102/03/231758po1lcicxmxyjxlba.jpg) + +最小惊喜原则是设计用户界面时的一个 [准则][2]。它是说,当用户执行某项操作时,程序执行的事情应该使用户尽量少地感到意外。这和孩子们喜欢一遍又一遍地读同一本书的原因是一样的:没有什么比能够预测并让预测成真更让人欣慰的了。 + +在开发 [ABC 语言][3](Python 的灵感来源)的过程中,一个重要的见解是,编程设计是用户界面,需要使用与 UI 设计者相同的工具来设计。值得庆幸的是,从那以后,越来越多的语言采用了 UI 设计中的可承受性affordance人体工程学ergonomics的概念,即使它们的应用并不严格。 + +这就引出了 [Python 之禅][4] 中的三个原则。 + +### 面对歧义,要拒绝猜测的诱惑In the face of ambiguity, refuse the temptation to guess + +`1 + "1"` 的结果应该是什么? 
`"11"` 和 `2` 都是猜测。这种表达方式是*歧义的*:无论如何做都会让一些人感到惊讶。 + +一些语言选择猜测。在 JavaScript 中,结果为 `"11"`。在 Perl 中,结果为 `2`。在 C 语言中,结果自然是空字符串。面对歧义,JavaScript、Perl 和 C 都在猜测。 + +在 Python 中,这会引发 `TypeError`:这不是能忽略的错误。捕获 `TypeError` 是非典型的:它通常将终止程序或至少终止当前任务(例如,在大多数 Web 框架中,它将终止对当前请求的处理)。 + +Python 拒绝猜测 `1 + "1"` 的含义。程序员必须以明确的意图编写代码:`1 + int("1")`,即 `2`;或者 `str(1) + "1"`,即 `"11"`;或 `"1"[1:]`,这将是一个空字符串。通过拒绝猜测,Python 使程序更具可预测性。 + +### 尽量找一种,最好是唯一一种明显的解决方案There should be one—and preferably only one—obvious way to do it + +预测也会出现偏差。给定一个任务,你能预知要实现该任务的代码吗?当然,不可能完美地预测。毕竟,编程是一项具有创造性的任务。 + +但是,不必有意提供多种冗余方式来实现同一目标。从某种意义上说,某些解决方案或许 “更好” 或 “更 Python 化Pythonic”。 + +对 Python 美学欣赏部分是因为,可以就哪种解决方案更好进行健康的辩论。甚至可以持不同观点而继续编程。甚至为使其达成一致,接受不同意的观点也是可以的。但在这一切之下,必须有一种这样的认识,即正确的解决方案终将会出现。我们必须希望,通过商定实现目标的最佳方法,而最终达成真正的一致。 + +### 虽然这种方式一开始可能并不明显(除非你是荷兰人)Although that way may not be obvious at first (unless you're Dutch) + +这是一个重要的警告:首先,实现任务的最佳方法往往*不*明显。观念在不断发展。Python 也在进化。逐块读取文件的最好方法,可能要等到 Python 3.8 时使用 [walrus 运算符][5](`:=`)。 + +逐块读取文件这样常见的任务,在 Python 存在近 *30年* 的历史中并没有 “唯一的最佳方法”。 + +当我在 1998 年从 Python 1.5.2 开始使用 Python 时,没有一种逐行读取文件的最佳方法。多年来,知道字典中是否有某个键的最佳方法是使用关键字 `.haskey`,直到 `in` 操作符出现才发生改变。 + +只是要意识到找到实现目标的一种(也是唯一一种)方法可能需要 30 年的时间来尝试其它方法,Python 才可以不断寻找这些方法。这种历史观认为,为了做一件事用上 30 年是可以接受的,但对于美国这个存在仅 200 多年的国家来说,人们常常会感到不习惯。 + +从 Python 之禅的这一部分来看,荷兰人,无论是 Python 的创造者 [Guido van Rossum][6] 还是著名的计算机科学家 [Edsger W. 
Dijkstra][7],他们的世界观是不同的。要理解这一部分,某种程度的欧洲人对时间的感受是必不可少的。 + +-------------------------------------------------------------------------------- + +via: https://opensource.com/article/19/12/zen-python-consistency + +作者:[Moshe Zadka][a] +选题:[lujun9972][b] +译者:[stevenzdg988](https://github.com/stevenzdg988) +校对:[wxy](https://github.com/wxy) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensource.com/users/moshez +[b]: https://github.com/lujun9972 +[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/rh_003499_01_other11x_cc.png?itok=I_kCDYj0 (Two animated computers waving one missing an arm) +[2]: https://www.uxpassion.com/blog/the-principle-of-least-surprise/ +[3]: https://en.wikipedia.org/wiki/ABC_(programming_language) +[4]: https://www.python.org/dev/peps/pep-0020/ +[5]: https://www.python.org/dev/peps/pep-0572/#abstract +[6]: https://en.wikipedia.org/wiki/Guido_van_Rossum +[7]: http://en.wikipedia.org/wiki/Edsger_W._Dijkstra diff --git a/published/20191228 The Zen of Python- Why timing is everything.md b/published/20191228 The Zen of Python- Why timing is everything.md new file mode 100644 index 0000000000..6ebab5320a --- /dev/null +++ b/published/20191228 The Zen of Python- Why timing is everything.md @@ -0,0 +1,45 @@ +[#]: collector: (lujun9972) +[#]: translator: (wxy) +[#]: reviewer: (wxy) +[#]: publisher: (wxy) +[#]: url: (https://linux.cn/article-13103-1.html) +[#]: subject: (The Zen of Python: Why timing is everything) +[#]: via: (https://opensource.com/article/19/12/zen-python-timeliness) +[#]: author: (Moshe Zadka https://opensource.com/users/moshez) + +Python 之禅:时机最重要 +====== + +> 这是 Python 之禅特别系列的一部分,重点是第十五和第十六条原则:现在与将来。 + +![](https://img.linux.net.cn/data/attachment/album/202102/09/231557dkuzz22ame4ja2jj.jpg) + +Python 一直在不断发展。Python 社区对特性请求的渴求是无止境的,对现状也总是不满意的。随着 Python 越来越流行,这门语言的变化会影响到更多的人。 + +确定什么时候该进行变化往往很难,但 [Python 之禅][2] 给你提供了指导。 + +### 
现在有总比永远没有好Now is better than never + +总有一种诱惑,就是要等到事情完美才去做,虽然,它们永远没有完美的一天。当它们看起来已经“准备”得足够好了,那就大胆采取行动吧,去做出改变吧。无论如何,变化总是发生在*某个*现在:拖延的唯一作用就是把它移到未来的“现在”。 + +### 虽然将来总比现在好Although never is often better than right now + +然而,这并不意味着应该急于求成。从测试、文档、用户反馈等方面决定发布的标准。在变化就绪之前的“现在”,并不是一个好时机。 + +这不仅对 Python 这样的流行语言是个很好的经验,对你个人的小开源项目也是如此。 + +-------------------------------------------------------------------------------- + +via: https://opensource.com/article/19/12/zen-python-timeliness + +作者:[Moshe Zadka][a] +选题:[lujun9972][b] +译者:[wxy](https://github.com/wxy) +校对:[wxy](https://github.com/wxy) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensource.com/users/moshez +[b]: https://github.com/lujun9972 +[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/desk_clock_job_work.jpg?itok=Nj4fuhl6 (Clock, pen, and notepad on a desk) +[2]: https://www.python.org/dev/peps/pep-0020/ diff --git a/published/20191229 How to tell if implementing your Python code is a good idea.md b/published/20191229 How to tell if implementing your Python code is a good idea.md new file mode 100644 index 0000000000..1a2358a77c --- /dev/null +++ b/published/20191229 How to tell if implementing your Python code is a good idea.md @@ -0,0 +1,47 @@ +[#]: collector: (lujun9972) +[#]: translator: (wxy) +[#]: reviewer: (wxy) +[#]: publisher: (wxy) +[#]: url: (https://linux.cn/article-13116-1.html) +[#]: subject: (How to tell if implementing your Python code is a good idea) +[#]: via: (https://opensource.com/article/19/12/zen-python-implementation) +[#]: author: (Moshe Zadka https://opensource.com/users/moshez) + +如何判断你的 Python 代码实现是否合适? 
+====== + +> 这是 Python 之禅特别系列的一部分,重点介绍第十七和十八条原则:困难和容易。 + +![](https://img.linux.net.cn/data/attachment/album/202102/14/120518rjkwvjs76p9d1911.jpg) + +一门语言并不是抽象存在的。每一个语言功能都必须用代码来实现。承诺一些功能是很容易的,但实现起来就会很麻烦。复杂的实现意味着更多潜在的 bug,甚至更糟糕的是,会带来日复一日的维护负担。 + +对于这个难题,[Python 之禅][2] 中有答案。 + +### 如果一个实现难以解释,那就是个坏思路If the implementation is hard to explain, it's a bad idea + +编程语言最重要的是可预测性。有时我们用抽象的编程模型来解释某个结构的语义,而这些模型与实现并不完全对应。然而,最好的释义就是*解释该实现*。 + +如果该实现很难解释,那就意味着这条路行不通。 + +### 如果一个实现易于解释,那它可能是一个好思路If the implementation is easy to explain, it may be a good idea + +仅仅因为某事容易,并不意味着它值得。然而,一旦解释清楚,判断它是否是一个好思路就容易得多。 + +这也是为什么这个原则的后半部分故意含糊其辞的原因:没有什么可以肯定一定是好的,但总是可以讨论一下。 + +-------------------------------------------------------------------------------- + +via: https://opensource.com/article/19/12/zen-python-implementation + +作者:[Moshe Zadka][a] +选题:[lujun9972][b] +译者:[wxy](https://github.com/wxy) +校对:[wxy](https://github.com/wxy) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensource.com/users/moshez +[b]: https://github.com/lujun9972 +[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/devops_confusion_wall_questions.png?itok=zLS7K2JG (Brick wall between two people, a developer and an operations manager) +[2]: https://www.python.org/dev/peps/pep-0020/ diff --git a/published/20191230 Namespaces are the shamash candle of the Zen of Python.md b/published/20191230 Namespaces are the shamash candle of the Zen of Python.md new file mode 100644 index 0000000000..dc98e7afc0 --- /dev/null +++ b/published/20191230 Namespaces are the shamash candle of the Zen of Python.md @@ -0,0 +1,58 @@ +[#]: collector: (lujun9972) +[#]: translator: (wxy) +[#]: reviewer: (wxy) +[#]: publisher: (wxy) +[#]: url: (https://linux.cn/article-13123-1.html) +[#]: subject: (Namespaces are the shamash candle of the Zen of Python) +[#]: via: (https://opensource.com/article/19/12/zen-python-namespaces) +[#]: author: (Moshe 
Zadka https://opensource.com/users/moshez) + +命名空间是 Python 之禅的精髓 +====== + +> 这是 Python 之禅特别系列的一部分,重点是一个额外的原则:命名空间。 + +![](https://img.linux.net.cn/data/attachment/album/202102/16/105800d64ceaeertt4u4ee.jpg) + +著名的光明节Hanukkah有八个晚上的庆祝活动。然而,光明节的灯台有九根蜡烛:八根普通的蜡烛和总是偏移的第九根蜡烛。它被称为 “shamash” 或 “shamos”,大致可以翻译为“仆人”或“看门人”的意思。 + +shamos 是点燃所有其它蜡烛的蜡烛:它是唯一一支可以用火的蜡烛,而不仅仅是观看。当我们结束 Python 之禅系列时,我看到命名空间提供了类似的作用。 + +### Python 中的命名空间 + +Python 使用命名空间来处理一切。虽然简单,但它们是稀疏的数据结构 —— 这通常是实现目标的最佳方式。 + +> *命名空间* 是一个从名字到对象的映射。 +> +> —— [Python.org][2] + +模块是命名空间。这意味着正确地预测模块语义通常只需要熟悉 Python 命名空间的工作方式。类是命名空间,对象是命名空间。函数可以访问它们的本地命名空间、父命名空间和全局命名空间。 + +这个简单的模型,即用 `.` 操作符访问一个对象,而这个对象又通常(但并不总是)会进行某种字典查找,这使得 Python 很难优化,但很容易解释。 + +事实上,一些第三方模块也采取了这个准则,并以此来运行。例如,[variants][3] 包把函数变成了“相关功能”的命名空间。这是一个很好的例子,说明 [Python 之禅][4] 是如何激发新的抽象的。 + +### 结语 + +感谢你和我一起参加这次以光明节为灵感的 [我最喜欢的语言][5] 的探索。 + +静心参禅,直至悟道。 + +-------------------------------------------------------------------------------- + +via: https://opensource.com/article/19/12/zen-python-namespaces + +作者:[Moshe Zadka][a] +选题:[lujun9972][b] +译者:[wxy](https://github.com/wxy) +校对:[wxy](https://github.com/wxy) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensource.com/users/moshez +[b]: https://github.com/lujun9972 +[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/computer_code_programming_laptop.jpg?itok=ormv35tV (Person programming on a laptop on a building) +[2]: https://docs.python.org/3/tutorial/classes.html +[3]: https://pypi.org/project/variants/ +[4]: https://www.python.org/dev/peps/pep-0020/ +[5]: https://opensource.com/article/19/10/why-love-python diff --git a/published/20200121 Ansible Automation Tool Installation, Configuration and Quick Start Guide.md b/published/20200121 Ansible Automation Tool Installation, Configuration and Quick Start Guide.md new file mode 100644 index 0000000000..600a37c4cd --- /dev/null +++ b/published/20200121 Ansible 
Automation Tool Installation, Configuration and Quick Start Guide.md @@ -0,0 +1,344 @@ +[#]: collector: "lujun9972" +[#]: translator: "MjSeven" +[#]: reviewer: "wxy" +[#]: publisher: "wxy" +[#]: url: "https://linux.cn/article-13142-1.html" +[#]: subject: "Ansible Automation Tool Installation, Configuration and Quick Start Guide" +[#]: via: "https://www.2daygeek.com/install-configure-ansible-automation-tool-linux-quick-start-guide/" +[#]: author: "Magesh Maruthamuthu https://www.2daygeek.com/author/magesh/" + +Ansible 自动化工具安装、配置和快速入门指南 +====== + +![](https://img.linux.net.cn/data/attachment/album/202102/22/124803cgryrgxqezjllfqg.jpg) + +市面上有很多自动化工具。我可以举几个例子,例如 Puppet、Chef、CFEngine、Foreman、Katello、Saltstock、Space Walk,它们被许多组织广泛使用。 + +### 自动化工具可以做什么? + +自动化工具可以自动执行例行任务,无需人工干预,从而使 Linux 管理员的工作变得更加轻松。这些工具允许用户执行配置管理,应用程序部署和资源调配。 + +### 为什么喜欢 Ansible? + +Ansible 是一种无代理的自动化工具,使用 SSH 执行所有任务,但其它工具需要在客户端节点上安装代理。 + +### 什么是 Ansible? + +Ansible 是一个开源、易于使用的功能强大的 IT 自动化工具,通过 SSH 在客户端节点上执行任务。 + +它是用 Python 构建的,这是当今世界上最流行、最强大的编程语言之一。两端都需要使用 Python 才能执行所有模块。 + +它可以配置系统、部署软件和安排高级 IT 任务,例如连续部署或零停机滚动更新。你可以通过 Ansible 轻松执行任何类型的自动化任务,包括简单和复杂的任务。 + +在开始之前,你需要了解一些 Ansible 术语,这些术语可以帮助你更好的创建任务。 + +### Ansible 如何工作? 
+ +Ansible 通过在客户端节点上推送称为 ansible 模块的小程序来工作,这些模块临时存储在客户端节点中,通过 JSON 协议与 Ansible 服务器进行通信。 + +Ansible 通过 SSH 运行这些模块,并在完成后将其删除。 + +模块是用 Python 或 Perl 等编写的一些脚本。 + +![][1] + +*控制节点,用于控制剧本的全部功能,包括客户端节点(主机)。* + + * 控制节点Control node:使用 Ansible 在受控节点上执行任务的主机。你可以有多个控制节点,但不能使用 Windows 系统主机当作控制节点。 + * 受控节点Managed node:控制节点配置的主机列表。 + * 清单Inventory:控制节点管理的一个主机列表,这些节点在 `/etc/ansible/hosts` 文件中配置。它包含每个节点的信息,比如 IP 地址或其主机名,还可以根据需要对这些节点进行分组。 + * 模块Module:每个模块用于执行特定任务,目前有 3387 个模块。 + * 点对点ad-hoc:它允许你一次性运行一个任务,它使用 `/usr/bin/ansible` 二进制文件。 + * 任务Task:每个动作Play都有一个任务列表。任务按顺序执行,在受控节点中一次执行一个任务。 + * 剧本Playbook:你可以使用剧本同时执行多个任务,而使用点对点只能执行一个任务。剧本使用 YAML 编写,易于阅读。将来,我们将会写一篇有关剧本的文章,你可以用它来执行复杂的任务。 + +### 测试环境 + +此环境包含一个控制节点(`server.2g.lab`)和三个受控节点(`node1.2g.lab`、`node2.2g.lab`、`node3.2g.lab`),它们均在虚拟环境中运行,操作系统分别为: + +| System Purpose | Hostname | IP Address | OS | +|----------------------|---------------|-------------|---------------| +| Ansible Control Node | server.2g.lab | 192.168.1.7 | Manjaro 18 | +| Managed Node1 | node1.2g.lab | 192.168.1.6 | CentOS7 | +| Managed Node2 | node2.2g.lab | 192.168.1.5 | CentOS8 | +| Managed Node3 | node3.2g.lab | 192.168.1.9 | Ubuntu 18.04 | +| User: daygeek | + +### 前置条件 + + * 在 Ansible 控制节点和受控节点之间启用无密码身份验证。 + * 控制节点必须是 Python 2(2.7 版本) 或 Python 3(3.5 或更高版本)。 + * 受控节点必须是 Python 2(2.6 或更高版本) 或 Python 3(3.5 或更高版本)。 + * 如果在远程节点上启用了 SELinux,则在 Ansible 中使用任何与复制、文件、模板相关的功能之前,还需要在它们上安装 `libselinux-python`。 + +### 如何在控制节点上安装 Ansible + +对于 Fedora/RHEL 8/CentOS 8 系统,使用 [DNF 命令][2] 来安装 Ansible。 + +注意:你需要在 RHEL/CentOS 系统上启用 [EPEL 仓库][3],因为 Ansible 软件包在发行版官方仓库中不可用。 + +``` +$ sudo dnf install ansible +``` + +对于 Debian/Ubuntu 系统,使用 [APT-GET 命令][4] 或 [APT 命令][5] 来安装 Ansible。 + +配置下面的 PPA 以便在 Ubuntu 上安装最新稳定版本的 Ansible。 + +``` +$ sudo apt update +$ sudo apt install software-properties-common +$ sudo apt-add-repository --yes --update ppa:ansible/ansible +$ sudo apt install ansible +``` + +对于 Debian 系统,配置以下源列表: + +``` +$ echo "deb http://ppa.launchpad.net/ansible/ansible/ubuntu 
trusty main" | sudo tee -a /etc/apt/sources.list.d/ansible.list +$ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 93C4A3FD7BB9C367 +$ sudo apt update +$ sudo apt install ansible +``` + +对于 Arch Linux 系统,使用 [Pacman 命令][6] 来安装 Ansible: + +``` +$ sudo pacman -S ansible +``` + +对于 RHEL/CentOS 系统,使用 [YUM 命令][7] 来安装 Ansible: + +``` +$ sudo yum install ansible +``` + +对于 openSUSE 系统,使用 [Zypper 命令][8] 来安装 Ansible: + +``` +$ sudo zypper install ansible +``` + +或者,你可以使用 [Python PIP 包管理工具][9] 来安装: + +``` +$ curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py +$ sudo python get-pip.py +$ sudo pip install ansible +``` + +在控制节点上检查安装的 Ansible 版本: + +``` +$ ansible --version + +ansible 2.9.2 + config file = /etc/ansible/ansible.cfg + configured module search path = ['/home/daygeek/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] + ansible python module location = /usr/lib/python3.8/site-packages/ansible + executable location = /usr/bin/ansible + python version = 3.8.1 (default, Jan 8 2020, 23:09:20) [GCC 9.2.0] +``` + +### 如何在受控节点上安装 Python? 
+ +使用以下命令在受控节点上安装 python: + +``` +$ sudo yum install -y python +$ sudo dnf install -y python +$ sudo zypper install -y python +$ sudo pacman -S python +$ sudo apt install -y python +``` + +### 如何在 Linux 设置 SSH 密钥身份验证(无密码身份验证) + +使用以下命令创建 ssh 密钥,然后将其复制到远程计算机。 + +``` +$ ssh-keygen +$ ssh-copy-id daygeek@node1.2g.lab +$ ssh-copy-id daygeek@node2.2g.lab +$ ssh-copy-id daygeek@node3.2g.lab +``` + +具体参考这篇文章《[在 Linux 上设置 SSH 密钥身份验证(无密码身份验证)][10]》。 + +### 如何创建 Ansible 主机清单 + +在 `/etc/ansible/hosts` 文件中添加要管理的节点列表。如果没有该文件,则可以创建一个新文件。以下是我的测试环境的主机清单文件: + +``` +$ sudo vi /etc/ansible/hosts + +[web] +node1.2g.lab +node2.2g.lab + +[app] +node3.2g.lab +``` + +让我们看看是否可以使用以下命令查找所有主机。 + +``` +$ ansible all --list-hosts + + hosts (3): + node1.2g.lab + node2.2g.lab + node3.2g.lab +``` + +对单个组运行以下命令: + +``` +$ ansible web --list-hosts + + hosts (2): + node1.2g.lab + node2.2g.lab +``` + +### 如何使用点对点命令执行任务 + +一旦完成主机清单验证检查后,你就可以上路了。干的漂亮! + +**语法:** + +``` +ansible [pattern] -m [module] -a "[module options]" + +Details: +======== +ansible: A command +pattern: Enter the entire inventory or a specific group +-m [module]: Run the given module name +-a [module options]: Specify the module arguments +``` + +使用 Ping 模块对主机清单中的所有节点执行 ping 操作: + +``` +$ ansible all -m ping + +node3.2g.lab | SUCCESS => { + "ansible_facts": { + "discovered_interpreter_python": "/usr/bin/python" + }, + "changed": false, + "ping": "pong" +} +node1.2g.lab | SUCCESS => { + "ansible_facts": { + "discovered_interpreter_python": "/usr/bin/python" + }, + "changed": false, + "ping": "pong" +} +node2.2g.lab | SUCCESS => { + "ansible_facts": { + "discovered_interpreter_python": "/usr/libexec/platform-python" + }, + "changed": false, + "ping": "pong" +} +``` + +所有系统都返回了成功,但什么都没有改变,只返回了 `pong` 代表成功。 + +你可以使用以下命令获取可用模块的列表。 + +``` +$ ansible-doc -l +``` + +当前有 3387 个内置模块,它们会随着 Ansible 版本的递增而增加: + +``` +$ ansible-doc -l | wc -l +3387 +``` + +使用 command 模块对主机清单中的所有节点执行命令: + +``` +$ ansible all -m command -a "uptime" + +node3.2g.lab | 
CHANGED | rc=0 >> + 18:05:07 up 1:21, 3 users, load average: 0.12, 0.06, 0.01 +node1.2g.lab | CHANGED | rc=0 >> + 06:35:06 up 1:21, 4 users, load average: 0.01, 0.03, 0.05 +node2.2g.lab | CHANGED | rc=0 >> + 18:05:07 up 1:25, 3 users, load average: 0.01, 0.01, 0.00 +``` + +对指定组执行 command 模块。 + +检查 app 组主机的内存使用情况: + +``` +$ ansible app -m command -a "free -m" + +node3.2g.lab | CHANGED | rc=0 >> + total used free shared buff/cache available +Mem: 1993 1065 91 6 836 748 +Swap: 1425 0 1424 +``` + +要对 web 组运行 `hostnamectl` 命令,使用以下格式: + +``` +$ ansible web -m command -a "hostnamectl" + +node1.2g.lab | CHANGED | rc=0 >> + Static hostname: CentOS7.2daygeek.com + Icon name: computer-vm + Chassis: vm + Machine ID: 002f47b82af248f5be1d67b67e03514c + Boot ID: dc38f9b8089d4b2d9304e526e00c6a8f + Virtualization: kvm + Operating System: CentOS Linux 7 (Core) + CPE OS Name: cpe:/o:centos:centos:7 + Kernel: Linux 3.10.0-957.el7.x86_64 + Architecture: x86-64 +node2.2g.lab | CHANGED | rc=0 >> + Static hostname: node2.2g.lab + Icon name: computer-vm + Chassis: vm + Machine ID: e39e3a27005d44d8bcbfcab201480b45 + Boot ID: 27b46a09dde546da95ace03420fe12cb + Virtualization: oracle + Operating System: CentOS Linux 8 (Core) + CPE OS Name: cpe:/o:centos:centos:8 + Kernel: Linux 4.18.0-80.el8.x86_64 + Architecture: x86-64 +``` + +参考:[Ansible 文档][11]。 + +-------------------------------------------------------------------------------- + +via: https://www.2daygeek.com/install-configure-ansible-automation-tool-linux-quick-start-guide/ + +作者:[Magesh Maruthamuthu][a] +选题:[lujun9972][b] +译者:[MjSeven](https://github.com/MjSeven) +校对:[wxy](https://github.com/wxy) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://www.2daygeek.com/author/magesh/ +[b]: https://github.com/lujun9972 +[1]: https://www.2daygeek.com/wp-content/uploads/2020/01/ansible-architecture-2daygeek.png +[2]: 
https://www.2daygeek.com/linux-dnf-command-examples-manage-packages-fedora-centos-rhel-systems/ +[3]: https://www.2daygeek.com/install-enable-epel-repository-on-rhel-centos-oracle-linux/ +[4]: https://www.2daygeek.com/apt-get-apt-cache-command-examples-manage-packages-debian-ubuntu-systems/ +[5]: https://www.2daygeek.com/apt-command-examples-manage-packages-debian-ubuntu-systems/ +[6]: https://www.2daygeek.com/pacman-command-examples-manage-packages-arch-linux-system/ +[7]: https://www.2daygeek.com/yum-command-examples-manage-packages-rhel-centos-systems/ +[8]: https://www.2daygeek.com/zypper-command-examples-manage-packages-opensuse-system/ +[9]: https://www.2daygeek.com/install-pip-manage-python-packages-linux/ +[10]: https://www.2daygeek.com/configure-setup-passwordless-ssh-key-based-authentication-linux/ +[11]: https://docs.ansible.com/ansible/latest/user_guide/index.html diff --git a/published/20200419 Getting Started With Pacman Commands in Arch-based Linux Distributions.md b/published/20200419 Getting Started With Pacman Commands in Arch-based Linux Distributions.md new file mode 100644 index 0000000000..8171a81ede --- /dev/null +++ b/published/20200419 Getting Started With Pacman Commands in Arch-based Linux Distributions.md @@ -0,0 +1,244 @@ +[#]: collector: (lujun9972) +[#]: translator: (Chao-zhi) +[#]: reviewer: (wxy) +[#]: publisher: (wxy) +[#]: url: (https://linux.cn/article-13099-1.html) +[#]: subject: (Getting Started With Pacman Commands in Arch-based Linux Distributions) +[#]: via: (https://itsfoss.com/pacman-command/) +[#]: author: (Dimitrios Savvopoulos https://itsfoss.com/author/dimitrios/) + +Arch Linux 的 pacman 命令入门 +====== + +> 这本初学者指南向你展示了在 Linux 中可以使用 pacman 命令做什么,如何使用它们来查找新的软件包,安装和升级新的软件包,以及清理你的系统。 + +[pacman][1] 包管理器是 [Arch Linux][2] 和其他主要发行版如 Red Hat 和 Ubuntu/Debian 之间的主要区别之一。它结合了简单的二进制包格式和易于使用的 [构建系统][3]。`pacman` 的目标是方便地管理软件包,无论它是来自 [官方库][4] 还是用户自己构建的软件库。 + +如果你曾经使用过 Ubuntu 或基于 debian 的发行版,那么你可能使用过 `apt-get` 或 `apt` 命令。`pacman` 在 Arch 
Linux 中是同样的命令。如果你 [刚刚安装了 Arch Linux][5],在安装 Arch Linux 后,首先要做的 [几件事][6] 之一就是学习使用 `pacman` 命令。 + +在这个初学者指南中,我将解释一些基本的 `pacman` 命令的用法,你应该知道如何用这些命令来管理你的基于 Archlinux 的系统。 + +### Arch Linux 用户应该知道的几个重要的 pacman 命令 + +![](https://img.linux.net.cn/data/attachment/album/202102/09/111411uqadijqdd8afgk56.jpg) + +与其他包管理器一样,`pacman` 可以将包列表与软件库同步,它能够自动解决所有所需的依赖项,以使得用户可以通过一个简单的命令下载和安装软件。 + +#### 通过 pacman 安装软件 + +你可以用以下形式的代码来安装一个或者多个软件包: + +``` +pacman -S 软件包名1 软件包名2 ... +``` + +![安装一个包][8] + +`-S` 选项的意思是同步synchronization,它的意思是 `pacman` 在安装之前先与软件库进行同步。 + +`pacman` 数据库根据安装的原因将安装的包分为两组: + + * **显式安装**:由 `pacman -S` 或 `-U` 命令直接安装的包 + * **依赖安装**:由于被其他显式安装的包所 [依赖][9],而被自动安装的包。 + +#### 卸载已安装的软件包 + +卸载一个包,并且删除它的所有依赖。 + +``` +pacman -R 软件包名 +``` + +![移除一个包][10] + +删除一个包,以及其不被其他包所需要的依赖项: + +``` +pacman -Rs 软件包名 +``` + +如果需要这个依赖的包已经被删除了,这条命令可以删除所有不再需要的依赖项: + +``` +pacman -Qdtq | pacman -Rs - +``` + +#### 升级软件包 + +`pacman` 提供了一个简单的办法来 [升级 Arch Linux][11]。你只需要一条命令就可以升级所有已安装的软件包。这可能需要一段时间,这取决于系统的新旧程度。 + +以下命令可以同步存储库数据库,*并且* 更新系统的所有软件包,但不包括不在软件库中的“本地安装的”包: + +``` +pacman -Syu +``` + + * `S` 代表同步 + * `y` 代表更新本地存储库 + * `u` 代表系统更新 + +也就是说,同步到中央软件库(主程序包数据库),刷新主程序包数据库的本地副本,然后执行系统更新(通过更新所有有更新版本可用的程序包)。 + +![系统更新][12] + +> 注意! +> +> 对于 Arch Linux 用户,在系统升级前,建议你访问 [Arch-Linux 主页][2] 查看最新消息,以了解异常更新的情况。如果系统更新需要人工干预,主页上将发布相关的新闻。你也可以订阅 [RSS 源][13] 或 [Arch 的声明邮件][14]。 +> +> 在升级基础软件(如 kernel、xorg、systemd 或 glibc) 之前,请注意查看相应的 [论坛][15],以了解大家报告的各种问题。 +> +> 在 Arch 和 Manjaro 等滚动发行版中不支持**部分升级**。这意味着,当新的库版本被推送到软件库时,软件库中的所有包都需要根据库版本进行升级。例如,如果两个包依赖于同一个库,则仅升级一个包可能会破坏依赖于该库的旧版本的另一个包。 + +#### 用 Pacman 查找包 + +`pacman` 使用 `-Q` 选项查询本地包数据库,使用 `-S` 选项查询同步数据库,使用 `-F` 选项查询文件数据库。 + +`pacman` 可以在数据库中搜索包,包括包的名称和描述: + +``` +pacman -Ss 字符串1 字符串2 ... +``` + +![查找一个包][16] + +查找已经被安装的包: + +``` +pacman -Qs 字符串1 字符串2 ... +``` + +根据文件名在远程软包中查找它所属的包: + +``` +pacman -F 字符串1 字符串2 ... +``` + +查看一个包的依赖树: + +``` +pactree 软件包名 +``` + +#### 清除包缓存 + +`pacman` 将其下载的包存储在 `/var/cache/Pacman/pkg/` 中,并且不会自动删除旧版本或卸载的版本。这有一些优点: + + 1. 
它允许 [降级][17] 一个包,而不需要通过其他来源检索以前的版本。 + 2. 已卸载的软件包可以轻松地直接从缓存文件夹重新安装。 + +但是,有必要定期清理缓存以防止文件夹增大。 + +[pacman contrib][19] 包中提供的 [paccache(8)][18] 脚本默认情况下会删除已安装和未安装包的所有缓存版本,但最近 3 个版本除外: + +``` +paccache -r +``` + +![清除缓存][20] + +要删除当前未安装的所有缓存包和未使用的同步数据库,请执行: + +``` +pacman -Sc +``` + +要从缓存中删除所有文件,请使用清除选项两次,这是最激进的方法,不会在缓存文件夹中留下任何内容: + +``` +pacman -Scc +``` + +#### 安装本地或者第三方的包 + +安装不是来自远程存储库的“本地”包: + +``` +pacman -U 本地软件包路径.pkg.tar.xz +``` + +安装官方存储库中未包含的“远程”软件包: + +``` +pacman -U http://www.example.com/repo/example.pkg.tar.xz +``` + +### 额外内容:用 pacman 排除常见错误 + +下面是使用 `pacman` 管理包时可能遇到的一些常见错误。 + +#### 提交事务失败(文件冲突) + +如果你看到以下报错: + +``` +error: could not prepare transaction +error: failed to commit transaction (conflicting files) +package: /path/to/file exists in filesystem +Errors occurred, no packages were upgraded. +``` + +这是因为 `pacman` 检测到文件冲突,不会为你覆盖文件。 + +解决这个问题的一个安全方法是首先检查另一个包是否拥有这个文件(`pacman -Qo 文件路径`)。如果该文件属于另一个包,请提交错误报告。如果文件不属于另一个包,请重命名“存在于文件系统中”的文件,然后重新发出更新命令。如果一切顺利,文件可能会被删除。 + +你可以显式地运行 `pacman -S --overwrite 要覆盖的文件模式`,强制 `pacman` 覆盖与给定模式匹配的文件,而不是手动重命名并在以后删除属于该包的所有文件。 + +#### 提交事务失败(包无效或损坏) + +在 `/var/cache/pacman/pkg/` 中查找 `.part` 文件(部分下载的包),并将其删除。这通常是由在 `pacman.conf` 文件中使用自定义 `XferCommand` 引起的。 + +#### 初始化事务失败(无法锁定数据库) + +当 `pacman` 要修改包数据库时,例如安装包时,它会在 `/var/lib/pacman/db.lck` 处创建一个锁文件。这可以防止 `pacman` 的另一个实例同时尝试更改包数据库。 + +如果 `pacman` 在更改数据库时被中断,这个过时的锁文件可能仍然保留。如果你确定没有 `pacman` 实例正在运行,那么请删除锁文件。 + +检查进程是否持有锁定文件: + +``` +lsof /var/lib/pacman/db.lck +``` + +如果上述命令未返回任何内容,则可以删除锁文件: + +``` +rm /var/lib/pacman/db.lck +``` + +如果你发现 `lsof` 命令输出了使用锁文件的进程的 PID,请先杀死这个进程,然后删除锁文件。 + +我希望你喜欢我对 `pacman` 基础命令的介绍。 + +-------------------------------------------------------------------------------- + +via: https://itsfoss.com/pacman-command/ + +作者:[Dimitrios Savvopoulos][a] +选题:[lujun9972][b] +译者:[Chao-zhi](https://github.com/Chao-zhi) +校对:[wxy](https://github.com/wxy) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: 
https://itsfoss.com/author/dimitrios/ +[b]: https://github.com/lujun9972 +[1]: https://www.archlinux.org/pacman/ +[2]: https://www.archlinux.org/ +[3]: https://wiki.archlinux.org/index.php/Arch_Build_System +[4]: https://wiki.archlinux.org/index.php/Official_repositories +[5]: https://itsfoss.com/install-arch-linux/ +[6]: https://itsfoss.com/things-to-do-after-installing-arch-linux/ +[7]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2020/04/essential-pacman-commands.jpg?ssl=1 +[8]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2020/04/sudo-pacman-S.png?ssl=1 +[9]: https://wiki.archlinux.org/index.php/Dependency +[10]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2020/04/sudo-pacman-R.png?ssl=1 +[11]: https://itsfoss.com/update-arch-linux/ +[12]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2020/04/sudo-pacman-Syu.png?ssl=1 +[13]: https://www.archlinux.org/feeds/news/ +[14]: https://mailman.archlinux.org/mailman/listinfo/arch-announce/ +[15]: https://bbs.archlinux.org/ +[16]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2020/04/sudo-pacman-Ss.png?ssl=1 +[17]: https://wiki.archlinux.org/index.php/Downgrade +[18]: https://jlk.fjfi.cvut.cz/arch/manpages/man/paccache.8 +[19]: https://www.archlinux.org/packages/?name=pacman-contrib +[20]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2020/04/sudo-paccache-r.png?ssl=1 diff --git a/published/20200529 A new way to build cross-platform UIs for Linux ARM devices.md b/published/20200529 A new way to build cross-platform UIs for Linux ARM devices.md new file mode 100644 index 0000000000..9a742f2eb2 --- /dev/null +++ b/published/20200529 A new way to build cross-platform UIs for Linux ARM devices.md @@ -0,0 +1,170 @@ +[#]: collector: (lujun9972) +[#]: translator: (Chao-zhi) +[#]: reviewer: (wxy) +[#]: publisher: (wxy) +[#]: url: (https://linux.cn/article-13129-1.html) +[#]: subject: (A new way to build cross-platform UIs for Linux ARM devices) +[#]: via: (https://opensource.com/article/20/5/linux-arm-ui) 
+[#]: author: (Bruno Muniz https://opensource.com/users/brunoamuniz) + +一种为 Linux ARM 设备构建跨平台 UI 的新方法 +====== + +> AndroidXML 和 TotalCross 的运用为树莓派和其他设备创建 UI 提供了更简单的方法。 + +![](https://img.linux.net.cn/data/attachment/album/202102/18/123715oomfuuz94ioi41ii.jpg) + +为应用程序创建良好的用户体验(UX)是一项艰巨的任务,尤其是在开发嵌入式应用程序时。今天,有两种图形用户界面(GUI)工具通常用于开发嵌入式软件:它们要么涉及复杂的技术,要么非常昂贵。 + +然而,我们已经创建了一个概念验证(PoC),它提供了一种新的方法来使用现有的、成熟的工具为运行在桌面、移动、嵌入式设备和低功耗 ARM 设备上的应用程序构建用户界面(UI)。我们的方法是使用 Android Studio 绘制 UI;使用 [TotalCross][2] 在设备上呈现 Android XML;采用被称为 [KnowCode][4] 的新 [TotalCross API][3];以及使用 [树莓派 4][5] 来执行应用程序。 + +### 选择 Android Studio + +可以使用 TotalCross API 为应用程序构建一个美观的响应式用户体验,但是在 Android Studio 中创建 UI 缩短了制作原型和实际应用程序之间的时间。 + +有很多工具可以用来为应用程序构建 UI,但是 [Android Studio][6] 是全世界开发者最常使用的工具。除了它被大量采用以外,这个工具的使用也非常直观,而且它对于创建简单和复杂的应用程序都非常强大。在我看来,唯一的缺点是使用该工具所需的计算机性能,它比其他集成开发环境 (IDE) 如 VSCode 或其开源替代方案 [VSCodium][7] 要庞大得多。 + +通过思考这些问题,我们创建了一个概念验证,使用 Android Studio 绘制 UI,并使用 TotalCross 直接在设备上运行 AndroidXML。 + +### 构建 UI + +对于我们的 PoC,我们想创建一个家用电器应用程序来控制温度和其他东西,并在 Linux ARM 设备上运行。 + +![Home appliance application to control thermostat][8] + +我们想为树莓派开发我们的应用程序,所以我们使用 Android 的 [ConstraintLayout][10] 来构建 848x480(树莓派的分辨率)的固定屏幕大小的 UI,不过你可以用其他布局构建响应性 UI。 + +Android XML 为 UI 创建增加了很多灵活性,使得为应用程序构建丰富的用户体验变得容易。在下面的 XML 中,我们使用了两个主要组件:[ImageView][11] 和 [TextView][12]。 + +``` + + +``` + +TextView 元素用于向用户显示一些数据,比如建筑物内的温度。大多数 ImageView 都用作用户与 UI 交互的按钮,但它们也需要实现屏幕上组件提供的事件。 + +### 用 TotalCross 整合 + +这个 PoC 中的第二项技术是 TotalCross。我们不想在设备上使用 Android 的任何东西,因为: + + 1。我们的目标是为 Linux ARM 提供一个出色的 UI。 + 2。我们希望在设备上实现低占用。 + 3。我们希望应用程序在低计算能力的低端硬件设备上运行(例如,没有 GPU、 低 RAM 等)。 + +首先,我们使用 [VSCode 插件][13] 创建了一个空的 TotalCross 项目。接下来,我们保存了 `drawable` 文件夹中的图像副本和 `xml` 文件夹中的 Android XML 文件副本,这两个文件夹都位于 `resources` 文件夹中: + +![Home Appliance file structure][14] + +为了使用 TotalCross 模拟器运行 XML 文件,我们添加了一个名为 KnowCode 的新 TotalCross API 和一个主窗口来加载 XML。下面的代码使用 API 加载和呈现 XML: + +``` +public void initUI() { +    XmlScreenAbstractLayout xmlCont = XmlScreenFactory.create("xml / 
homeApplianceXML.xml"); +    swap(xmlCont); +} +``` + +就这样!只需两个命令,我们就可以使用 TotalCross 运行 Android XML 文件。以下是 XML 如何在 TotalCross 的模拟器上执行: + +![TotalCross simulator running temperature application][15] + +完成这个 PoC 还有两件事要做:添加一些事件来提供用户交互,并在树莓派上运行它。 + +### 添加事件 + +KnowCode API 提供了一种通过 ID(`getControlByID`) 获取 XML 元素并更改其行为的方法,如添加事件、更改可见性等。 + +例如,为了使用户能够改变家中或其他建筑物的温度,我们在 UI 底部放置了加号和减号按钮,并在每次单击按钮时都会出现“单击”事件,使温度升高或降低一度: + +``` +Button plus = (Button) xmlCont.getControlByID("@+id/plus"); +Label insideTempLabel = (Label) xmlCont.getControlByID("@+id/insideTempLabel"); +plus.addPressListener(new PressListener() { + @Override + public void controlPressed(ControlEvent e) { + try { + String tempString = insideTempLabel.getText(); + int temp; + temp = Convert.toInt(tempString); + insideTempLabel.setText(Convert.toString(++temp)); + } catch (InvalidNumberException e1) { + e1.printStackTrace(); + } + } +}); +``` + +### 在树莓派 4 上测试 + +最后一步!我们在一台设备上运行了应用程序并检查了结果。我们只需要打包应用程序并在目标设备上部署和运行它。[VNC][19] 也可用于检查设备上的应用程序。 + +整个应用程序,包括资源(图像等)、Android XML、TotalCross 和 Knowcode API,在 Linux ARM 上大约是 8MB。 + +下面是应用程序的演示: + +![Application demo][20] + +在本例中,该应用程序仅为 Linux ARM 打包,但同一应用程序可以作为 Linux 桌面应用程序运行,在Android 设备 、Windows、windows CE 甚至 iOS 上运行。 + +所有示例源代码和项目都可以在 [HomeApplianceXML GitHub][21] 存储库中找到。 + +### 现有工具的新玩法 + +为嵌入式应用程序创建 GUI 并不需要像现在这样困难。这种概念证明为如何轻松地完成这项任务提供了新的视角,不仅适用于嵌入式系统,而且适用于所有主要的操作系统,所有这些系统都使用相同的代码库。 + +我们的目标不是为设计人员或开发人员创建一个新的工具来构建 UI 应用程序;我们的目标是为使用现有的最佳工具提供新的玩法。 + +你对这种新的应用程序开发方式有何看法?在下面的评论中分享你的想法。 + +-------------------------------------------------------------------------------- + +via: https://opensource.com/article/20/5/linux-arm-ui + +作者:[Bruno Muniz][a] +选题:[lujun9972][b] +译者:[Chao-zhi](https://github.com/Chao-zhi) +校对:[wxy](https://github.com/wxy) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensource.com/users/brunoamuniz +[b]: https://github.com/lujun9972 +[1]: 
https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/computer_desk_home_laptop_browser.png?itok=Y3UVpY0l (Digital images of a computer desktop) +[2]: https://totalcross.com/ +[3]: https://yourapp.totalcross.com/knowcode-app +[4]: https://github.com/TotalCross/KnowCodeXML +[5]: https://www.raspberrypi.org/ +[6]: https://developer.android.com/studio +[7]: https://vscodium.com/ +[8]: https://opensource.com/sites/default/files/uploads/homeapplianceapp.png (Home appliance application to control thermostat) +[9]: https://creativecommons.org/licenses/by-sa/4.0/ +[10]: https://codelabs.developers.google.com/codelabs/constraint-layout/index.html#0 +[11]: https://developer.android.com/reference/android/widget/ImageView +[12]: https://developer.android.com/reference/android/widget/TextView +[13]: https://medium.com/totalcross-community/totalcross-plugin-for-vscode-4f45da146a0a +[14]: https://opensource.com/sites/default/files/uploads/homeappliancexml.png (Home Appliance file structure) +[15]: https://opensource.com/sites/default/files/uploads/totalcross-simulator_0.png (TotalCross simulator running temperature application) +[16]: http://www.google.com/search?hl=en&q=allinurl%3Adocs.oracle.com+javase+docs+api+button +[17]: http://www.google.com/search?hl=en&q=allinurl%3Adocs.oracle.com+javase+docs+api+label +[18]: http://www.google.com/search?hl=en&q=allinurl%3Adocs.oracle.com+javase+docs+api+string +[19]: https://tigervnc.org/ +[20]: https://opensource.com/sites/default/files/uploads/application.gif (Application demo) +[21]: https://github.com/TotalCross/HomeApplianceXML diff --git a/published/20200607 Top Arch-based User Friendly Linux Distributions That are Easier to Install and Use Than Arch Linux Itself.md b/published/20200607 Top Arch-based User Friendly Linux Distributions That are Easier to Install and Use Than Arch Linux Itself.md new file mode 100644 index 0000000000..11c43fd1f5 --- /dev/null +++ b/published/20200607 Top Arch-based 
User Friendly Linux Distributions That are Easier to Install and Use Than Arch Linux Itself.md @@ -0,0 +1,201 @@ +[#]: collector: (lujun9972) +[#]: translator: (Chao-zhi) +[#]: reviewer: (wxy) +[#]: publisher: (wxy) +[#]: url: (https://linux.cn/article-13104-1.html) +[#]: subject: (Top Arch-based User Friendly Linux Distributions That are Easier to Install and Use Than Arch Linux Itself) +[#]: via: (https://itsfoss.com/arch-based-linux-distros/) +[#]: author: (Dimitrios Savvopoulos https://itsfoss.com/author/dimitrios/) + +9 个易用的基于 Arch 的用户友好型 Linux 发行版 +====== + +在 Linux 社区中,[Arch Linux][1] 有一群狂热的追随者。这个轻量级的发行版以 DIY 的态度提供了最前沿的更新。 + +但是,Arch 的目标用户是那些更有经验的用户。因此,它通常被认为是那些技术不够(或耐心不够)的人所无法触及的。 + +事实上,只是最开始的步骤,[安装 Arch Linux 就足以把很多人吓跑][2]。与大多数其他发行版不同,Arch Linux 没有一个易于使用的图形安装程序。安装过程中涉及到的磁盘分区,连接到互联网,挂载驱动器和创建文件系统等只用命令行工具来操作。 + +对于那些不想经历复杂的安装和设置的人来说,有许多用户友好的基于 Arch 的发行版。 + +在本文中,我将向你展示一些 Arch 替代发行版。这些发行版附带了图形安装程序、图形包管理器和其他工具,比它们的命令行版本更容易使用。 + +### 更容易设置和使用的基于 Arch 的 Linux 发行版 + +![](https://img.linux.net.cn/data/attachment/album/202102/10/112812sc42txp4eexco44x.jpg) + +请注意,这不是一个排名列表。这些数字只是为了计数的目的。排第二的发行版不应该被认为比排第七的发行版好。 + +#### 1、Manjaro Linux + +![][4] + +[Manjaro][5] 不需要任何介绍。它是几年来最流行的 Linux 发行版之一,它值得拥有。 + +Manjaro 提供了 Arch Linux 的所有优点,同时注重用户友好性和可访问性。Manjaro 既适合新手,也适合有经验的 Linux 用户。 + +**对于新手**,它提供了一个用户友好的安装程序,系统本身也设计成可以在你[最喜爱的桌面环境 ][6](DE)或窗口管理器中直接“开箱即用”。 + +**对于更有经验的用户**,Manjaro 还提供多种功能,以满足每个个人的口味和喜好。[Manjaro Architect][7] 提供了安装各种 Manjaro 风格的选项,并为那些想要完全自由地塑造系统的人提供了各种桌面环境、文件系统([最近推出的 ZFS][8]) 和引导程序的选择。 + +Manjaro 也是一个滚动发布的前沿发行版。然而,与 Arch 不同的是,Manjaro 首先测试更新,然后将其提供给用户。稳定在这里也很重要。 + +#### 2、ArcoLinux + +![][9] + +[ArcoLinux][10](以前称为 ArchMerge)是一个基于 Arch Linux 的发行版。开发团队提供了三种变体。ArcoLinux、ArcoLinuxD 和 ArcoLinuxB。 + +ArcoLinux 是一个功能齐全的发行版,附带有 [Xfce 桌面][11]、[Openbox][12] 和 [i3 窗口管理器][13]。 + +**ArcoLinuxD** 是一个精简的发行版,它包含了一些脚本,可以让高级用户安装任何桌面和应用程序。 + +**ArcoLinuxB** 是一个让用户能够构建自定义发行版的项目,同时还开发了几个带有预配置桌面的社区版本,如 Awesome、bspwm、Budgie、Cinnamon、Deepin、GNOME、MATE 和 KDE Plasma。 + +ArcoLinux 
还提供了各种视频教程,因为它非常注重学习和获取 Linux 技能。 + +#### 3、Archlabs Linux + +![][14] + +[ArchLabs Linux][15] 是一个轻量级的滚动版 Linux 发行版,基于最精简的 Arch Linux,带有 [Openbox][16] 窗口管理器。[ArchLabs][17] 在观感设计中受到 [BunsenLabs][18] 的影响和启发,主要考虑到中级到高级用户的需求。 + +#### 4、Archman Linux + +![][19] + +[Archman][20] 是一个独立的项目。Arch Linux 发行版对于没有多少 Linux 经验的用户来说通常不是理想的操作系统。要想在最小的挫折感下让事情变得更有意义,必须要有相当的背景知识。Archman Linux 的开发人员正试图改变这种评价。 + +Archman 的开发是基于对开发的理解,包括用户反馈和体验组件。根据团队过去的经验,将用户的反馈和要求融合在一起,确定路线图并完成构建工作。 + +#### 5、EndeavourOS + +![][21] + +当流行的基于 Arch 的发行版 [Antergos 在 2019 结束][22] 时,它留下了一个友好且非常有用的社区。Antergos 项目结束的原因是因为该系统对于开发人员来说太难维护了。 + +在宣布结束后的几天内,一些有经验的用户通过创建一个新的发行版来填补 Antergos 留下的空白,从而维护了以前的社区。这就是 [EndeavourOS][23] 的诞生。 + +[EndeavourOS][24] 是轻量级的,并且附带了最少数量的预装应用程序。一块近乎空白的画布,随时可以个性化。 + +#### 6、RebornOS + +![][25] + +[RebornOS][26] 开发人员的目标是将 Linux 的真正威力带给每个人,一个 ISO 提供了 15 个桌面环境可供选择,并提供无限的定制机会。 + +RebornOS 还声称支持 [Anbox][27],它可以在桌面 Linux 上运行 Android 应用程序。它还提供了一个简单的内核管理器 GUI 工具。 + +再加上 [Pacman][28]、[AUR][29],以及定制版本的 Cnchi 图形安装程序,Arch Linux 终于可以让最没有经验的用户也能够使用了。 + +#### 7、Chakra Linux + +![][30] + +一个社区开发的 GNU/Linux 发行版,它的亮点在 KDE 和 Qt 技术。[Chakra Linux][31] 不在特定日期安排发布,而是使用“半滚动发布”系统。 + +这意味着 Chakra Linux 的核心包被冻结,只在修复安全问题时才会更新。这些软件包是在最新版本经过彻底测试后更新的,然后再转移到永久软件库(大约每六个月更新一次)。 + +除官方软件库外,用户还可以安装 Chakra 社区软件库 (CCR) 的软件包,该库为官方存储库中未包含的软件提供用户制作的 PKGINFOs 和 [PKGBUILD][32] 脚本,其灵感来自于 Arch 用户软件库(AUR)。 + +#### 8、Artix Linux + +![Artix Mate Edition][33] + +[Artix Linux][34] 也是一个基于 Arch Linux 的滚动发行版,它使用 [OpenRC][35]、[runit][36] 或 [s6][37] 作为初始化工具而不是 [systemd][38]。 + +Artix Linux 有自己的软件库,但作为一个基于 `pacman` 的发行版,它可以使用 Arch Linux 软件库或任何其他衍生发行版的软件包,甚至可以使用明确依赖于 systemd 的软件包。也可以使用 [Arch 用户软件库][29](AUR)。 + +#### 9、BlackArch Linux + +![][39] + +BlackArch 是一个基于 Arch Linux 的 [渗透测试发行版][40],它提供了大量的网络安全工具。它是专门为渗透测试人员和安全研究人员创建的。该软件库包含 2400 多个[黑客和渗透测试工具 ][41],可以单独安装,也可以分组安装。BlackArch Linux 兼容现有的 Arch Linux 包。 + +### 想要真正的原版 Arch Linux 吗?可以使用图形化 Arch 安装程序简化安装 + +如果你想使用原版的 Arch Linux,但又被它困难的安装所难倒。幸运的是,你可以下载一个带有图形安装程序的 Arch Linux ISO。 + +Arch 
安装程序基本上是 Arch Linux ISO 的一个相对容易使用的基于文本的安装程序。它比裸奔的 Arch 安装容易得多。 + +#### Anarchy Installer + +![][42] + +[Anarchy installer][43] 打算为新手和有经验的 Linux 用户提供一种简单而无痛苦的方式来安装 ArchLinux。在需要的时候安装,在需要的地方安装,并且以你想要的方式安装。这就是 Anarchy 的哲学。 + +启动安装程序后,将显示一个简单的 [TUI 菜单][44],列出所有可用的安装程序选项。 + +#### Zen Installer + +![][45] + +[Zen Installer][46] 为安装 Arch Linux 提供了一个完整的图形(点击式)环境。它支持安装多个桌面环境 、AUR 以及 Arch Linux 的所有功能和灵活性,并且易于图形化安装。 + +ISO 将引导一个临场环境,然后在你连接到互联网后下载最新稳定版本的安装程序。因此,你将始终获得最新的安装程序和更新的功能。 + +### 总结 + +对于许多用户来说,基于 Arch 的发行版会是一个很好的无忧选择,而像 Anarchy 这样的图形化安装程序至少离原版的 Arch Linux 更近了一步。 + +在我看来,[Arch Linux 的真正魅力在于它的安装过程][2],对于 Linux 爱好者来说,这是一个学习的机会,而不是麻烦。Arch Linux 及其衍生产品有很多东西需要你去折腾,但是在折腾的过程中你就会进入到开源软件的世界,这里是神奇的新世界。下次再见! + +-------------------------------------------------------------------------------- + +via: https://itsfoss.com/arch-based-linux-distros/ + +作者:[Dimitrios Savvopoulos][a] +选题:[lujun9972][b] +译者:[Chao-zhi](https://github.com/Chao-zhi) +校对:[wxy](https://github.com/wxy) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://itsfoss.com/author/dimitrios/ +[b]: https://github.com/lujun9972 +[1]: https://www.archlinux.org/ +[2]: https://itsfoss.com/install-arch-linux/ +[3]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2020/06/arch-based-linux-distributions.png?ssl=1 +[4]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2020/05/manjaro-20.jpg?ssl=1 +[5]: https://manjaro.org/ +[6]: https://itsfoss.com/best-linux-desktop-environments/ +[7]: https://itsfoss.com/manjaro-architect-review/ +[8]: https://itsfoss.com/manjaro-20-release/ +[9]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2020/05/arcolinux.png?ssl=1 +[10]: https://arcolinux.com/ +[11]: https://www.xfce.org/ +[12]: http://openbox.org/wiki/Main_Page +[13]: https://i3wm.org/ +[14]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2020/06/Archlabs.jpg?ssl=1 +[15]: https://itsfoss.com/archlabs-review/ +[16]: https://en.wikipedia.org/wiki/Openbox +[17]: 
https://archlabslinux.com/ +[18]: https://www.bunsenlabs.org/ +[19]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2020/06/Archman.png?ssl=1 +[20]: https://archman.org/en/ +[21]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2020/05/04_endeavouros_slide.jpg?ssl=1 +[22]: https://itsfoss.com/antergos-linux-discontinued/ +[23]: https://itsfoss.com/endeavouros/ +[24]: https://endeavouros.com/ +[25]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2020/06/RebornOS.png?ssl=1 +[26]: https://rebornos.org/ +[27]: https://anbox.io/ +[28]: https://itsfoss.com/pacman-command/ +[29]: https://itsfoss.com/aur-arch-linux/ +[30]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2020/06/Chakra_Goedel_Screenshot.png?ssl=1 +[31]: https://www.chakralinux.org/ +[32]: https://wiki.archlinux.org/index.php/PKGBUILD +[33]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2020/06/Artix_MATE_edition.png?ssl=1 +[34]: https://artixlinux.org/ +[35]: https://en.wikipedia.org/wiki/OpenRC +[36]: https://en.wikipedia.org/wiki/Runit +[37]: https://en.wikipedia.org/wiki/S6_(software) +[38]: https://en.wikipedia.org/wiki/Systemd +[39]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2020/06/BlackArch.png?ssl=1 +[40]: https://itsfoss.com/linux-hacking-penetration-testing/ +[41]: https://itsfoss.com/best-kali-linux-tools/ +[42]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2020/05/anarchy.jpg?ssl=1 +[43]: https://anarchyinstaller.org/ +[44]: https://en.wikipedia.org/wiki/Text-based_user_interface +[45]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2020/05/zen.jpg?ssl=1 +[46]: https://sourceforge.net/projects/revenge-installer/ diff --git a/published/20200615 LaTeX Typesetting - Part 1 (Lists).md b/published/20200615 LaTeX Typesetting - Part 1 (Lists).md new file mode 100644 index 0000000000..b69d418b64 --- /dev/null +++ b/published/20200615 LaTeX Typesetting - Part 1 (Lists).md @@ -0,0 +1,260 @@ +[#]: collector: "lujun9972" +[#]: translator: "rakino" +[#]: reviewer: "wxy" +[#]: publisher: 
"wxy" +[#]: url: "https://linux.cn/article-13112-1.html" +[#]: subject: "LaTeX Typesetting – Part 1 (Lists)" +[#]: via: "https://fedoramagazine.org/latex-typesetting-part-1/" +[#]: author: "Earl Ramirez https://fedoramagazine.org/author/earlramirez/" + +LaTeX 排版(1):列表 +====== + +![][1] + +本系列基于前文《[在 Fedora 上用 LaTex 和 TeXstudio 排版你的文档][2]》和《[LaTeX 基础][3]》,本文即系列的第一部分,是关于 LaTeX 列表的。 + +### 列表类型 + +LaTeX 中的列表是封闭的环境,列表中的每个项目可以取一行文字到一个完整的段落。在 LaTeX 中有三种列表类型: + + * `itemize`:无序列表unordered list/项目符号列表bullet list + * `enumerate`:有序列表ordered list + * `description`:描述列表descriptive list + +### 创建列表 + +要创建一个列表,需要在每个项目前加上控制序列 `\item`,并在项目清单前后分别加上控制序列 `\begin{<类型>}` 和 `\end`{<类型>}`(将其中的 `<类型>` 替换为将要使用的列表类型),如下例: + +#### itemize(无序列表) + +``` +\begin{itemize} + \item Fedora + \item Fedora Spin + \item Fedora Silverblue +\end{itemize} +``` + +![][4] + +#### enumerate(有序列表) + +``` +\begin{enumerate} + \item Fedora CoreOS + \item Fedora Silverblue + \item Fedora Spin +\end{enumerate} +``` + +![][5] + +#### description(描述列表) + +``` +\begin{description} + \item[Fedora 6] Code name Zod + \item[Fedora 8] Code name Werewolf +\end{description} +``` + +![][6] + +### 列表项目间距 + +可以通过在导言区加入 `\usepackage{enumitem}` 来自定义默认的间距,宏包 `enumitem` 启用了选项 `noitemsep` 和控制序列 `\itemsep`,可以在列表中使用它们,如下例所示: + +#### 使用选项 noitemsep + +将选项 `noitemsep` 封闭在方括号内,并同下文所示放在控制序列 `\begin` 之后,该选项将移除默认的间距。 + +``` +\begin{itemize}[noitemsep] + \item Fedora + \item Fedora Spin + \item Fedora Silverblue +\end{itemize} +``` + +![][7] + +#### 使用控制序列 \itemsep + +控制序列 `\itemsep` 必须以一个数字作为后缀,用以表示列表项目之间应该有多少空间。 + +``` +\begin{itemize} \itemsep0.75pt + \item Fedora Silverblue + \item Fedora CoreOS +\end{itemize} +``` + +![][8] + +### 嵌套列表 + +LaTeX 最多最多支持四层嵌套列表,如下例: + +#### 嵌套无序列表 + +``` +\begin{itemize}[noitemsep] + \item Fedora Versions + \begin{itemize} + \item Fedora 8 + \item Fedora 9 + \begin{itemize} + \item Werewolf + \item Sulphur + \begin{itemize} + \item 2007-05-31 + \item 2008-05-13 + \end{itemize} + \end{itemize} + 
\end{itemize} + \item Fedora Spin + \item Fedora Silverblue +\end{itemize} +``` + +![][9] + +#### 嵌套有序列表 + +``` +\begin{enumerate}[noitemsep] + \item Fedora Versions + \begin{enumerate} + \item Fedora 8 + \item Fedora 9 + \begin{enumerate} + \item Werewolf + \item Sulphur + \begin{enumerate} + \item 2007-05-31 + \item 2008-05-13 + \end{enumerate} + \end{enumerate} + \end{enumerate} + \item Fedora Spin + \item Fedora Silverblue +\end{enumerate} +``` + +![][10] + +### 每种列表类型的列表样式名称 + +**enumerate(有序列表)** | **itemize(无序列表)** +---|--- +`\alph*` (小写字母) | `$\bullet$` (●) +`\Alph*` (大写字母) | `$\cdot$` (•) +`\arabic*` (阿拉伯数字) | `$\diamond$` (◇) +`\roman*` (小写罗马数字) | `$\ast$` (✲) +`\Roman*` (大写罗马数字) | `$\circ$` (○) +  | `$-$` (-) + +### 按嵌套深度划分的默认样式 + +**嵌套深度** | **enumerate(有序列表)** | **itemize(无序列表)** +---|---|--- +1 | 阿拉伯数字 | (●) +2 | 小写字母 | (-) +3 | 小写罗马数字 | (✲) +4 | 大写字母 | (•) + +### 设置列表样式 + +下面的例子列举了无序列表的不同样式。 + +``` +% 无序列表样式 +\begin{itemize} + \item[$\ast$] Asterisk + \item[$\diamond$] Diamond + \item[$\circ$] Circle + \item[$\cdot$] Period + \item[$\bullet$] Bullet (default) + \item[--] Dash + \item[$-$] Another dash +\end{itemize} +``` + +![][11] + +有三种设置列表样式的方式,下面将按照优先级从高到低的顺序分别举例。 + +#### 方式一:为各项目单独设置 + +将需要的样式名称封闭在方括号内,并放在控制序列 `\item` 之后,如下例: + +``` +% 方式一 +\begin{itemize} + \item[$\ast$] Asterisk + \item[$\diamond$] Diamond + \item[$\circ$] Circle + \item[$\cdot$] period + \item[$\bullet$] Bullet (default) + \item[--] Dash + \item[$-$] Another dash +\end{itemize} +``` + +#### 方式二:为整个列表设置 + +将需要的样式名称以 `label=` 前缀并封闭在方括号内,放在控制序列 `\begin` 之后,如下例: + +``` +% 方式二 +\begin{enumerate}[label=\Alph*.] 
+ \item Fedora 32 + \item Fedora 31 + \item Fedora 30 +\end{enumerate} +``` + +#### 方式三:为整个文档设置 + +该方式将改变整个文档的默认样式。使用 `\renewcommand` 来设置项目标签的值,下例分别为四个嵌套深度的项目标签设置了不同的样式。 + +``` +% 方式三 +\renewcommand{\labelitemi}{$\ast$} +\renewcommand{\labelitemii}{$\diamond$} +\renewcommand{\labelitemiii}{$\bullet$} +\renewcommand{\labelitemiv}{$-$} +``` + +### 总结 + +LaTeX 支持三种列表,而每种列表的风格和间距都是可以自定义的。在以后的文章中,我们将解释更多的 LaTeX 元素。 + +关于 LaTeX 列表的延伸阅读可以在这里找到:[LaTeX List Structures][12] + +-------------------------------------------------------------------------------- + +via: https://fedoramagazine.org/latex-typesetting-part-1/ + +作者:[Earl Ramirez][a] +选题:[lujun9972][b] +译者:[rakino](https://github.com/rakino) +校对:[wxy](https://github.com/wxy) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://fedoramagazine.org/author/earlramirez/ +[b]: https://github.com/lujun9972 +[1]: https://fedoramagazine.org/wp-content/uploads/2020/06/latex-series-816x345.png +[2]: https://fedoramagazine.org/typeset-latex-texstudio-fedora +[3]: https://fedoramagazine.org/fedora-classroom-latex-101-beginners +[4]: https://fedoramagazine.org/wp-content/uploads/2020/06/image-1.png +[5]: https://fedoramagazine.org/wp-content/uploads/2020/06/image-2.png +[6]: https://fedoramagazine.org/wp-content/uploads/2020/06/image-3.png +[7]: https://fedoramagazine.org/wp-content/uploads/2020/06/image-4.png +[8]: https://fedoramagazine.org/wp-content/uploads/2020/06/image-5.png +[9]: https://fedoramagazine.org/wp-content/uploads/2020/06/image-7.png +[10]: https://fedoramagazine.org/wp-content/uploads/2020/06/image-8.png +[11]: https://fedoramagazine.org/wp-content/uploads/2020/06/image-9.png +[12]: https://en.wikibooks.org/wiki/LaTeX/List_Structures diff --git a/published/20200629 LaTeX typesetting part 2 (tables).md b/published/20200629 LaTeX typesetting part 2 (tables).md new file mode 100644 index 0000000000..6b134a2c06 --- /dev/null +++ b/published/20200629 LaTeX 
typesetting part 2 (tables).md @@ -0,0 +1,342 @@ +[#]: collector: (lujun9972) +[#]: translator: (Chao-zhi) +[#]: reviewer: (wxy) +[#]: publisher: (wxy) +[#]: url: (https://linux.cn/article-13146-1.html) +[#]: subject: (LaTeX typesetting part 2 \(tables\)) +[#]: via: (https://fedoramagazine.org/latex-typesetting-part-2-tables/) +[#]: author: (Earl Ramirez https://fedoramagazine.org/author/earlramirez/) + +LaTex 排版 (2):表格 +====== + +![][1] + +LaTeX 提供了许多工具来创建和定制表格,在本系列中,我们将使用 `tabular` 和 `tabularx` 环境来创建和定制表。 + +### 基础表格 + +要创建表,只需指定环境 `\begin{tabular}{列选项}`: + +``` +\begin{tabular}{c|c} + Release &Codename \\ \hline + Fedora Core 1 &Yarrow \\ + Fedora Core 2 &Tettnang \\ + Fedora Core 3 &Heidelberg \\ + Fedora Core 4 &Stentz \\ +\end{tabular} +``` + +![Basic Table][2] + +在上面的示例中,花括号中的 ”{c|c}” 表示文本在列中的位置。下表总结了位置参数及其说明。 + +参数 | 位置 +|:---:|:--- +`c` | 将文本置于中间 +`l` | 将文本左对齐 +`r` | 将文本右对齐 +`p{宽度}` | 文本对齐单元格顶部 +`m{宽度}` | 文本对齐单元格中间 +`b{宽度}` | 文本对齐单元格底部 + +> `m{宽度}` 和 `b{宽度}` 都要求在最前面指定数组包。 + +使用上面的例子,让我们来详细讲解使用的要点,并描述你将在本系列中看到的更多选项: + +选项 | 意义 +|:-:|:-| +`&` | 定义每个单元格,这个符号仅用于第二列 +`\\` | 这将终止该行并开始一个新行 +`|` | 指定表格中的垂直线(可选) +`\hline` | 指定表格中的水平线(可选) +`*{数量}{格式}` | 当你有许多列时,可以使用这个,并且是限制重复的有效方法 +`||` | 指定表格中垂直双线 + +### 定制表格 + +学会了这些选项,让我们使用这些选项创建一个表。 + +``` +\begin{tabular}{*{3}{|l|}} +\hline + \textbf{Version} &\textbf{Code name} &\textbf{Year released} \\ +\hline + Fedora 6 &Zod &2006 \\ \hline + Fedora 7 &Moonshine &2007 \\ \hline + Fedora 8 &Werewolf &2007 \\ +\hline +\end{tabular} +``` + +![Customise Table][3] + +### 管理长文本 + +如果列中有很多文本,那么它的格式就不好处理,看起来也不好看。 + +下面的示例显示了文本的格式长度,我们将在导言区中使用 `blindtext`,以便生成示例文本。 + +``` +\begin{tabular}{|l|l|}\hline + Summary &Description \\ \hline + Test &\blindtext \\ +\end{tabular} +``` + +![Default Formatting][4] + +正如你所看到的,文本超出了页面宽度;但是,有几个选项可以克服这个问题。 + + * 指定列宽,例如 `m{5cm}` + * 利用 `tablarx` 环境,这需要在导言区中引用 `tablarx` 宏包。 + +#### 使用列宽管理长文本 + +通过指定列宽,文本将被折行为如下示例所示的宽度。 + +``` +\begin{tabular}{|l|m{14cm}|} \hline + Summary &Description \\ \hline + 
Test &\blindtext \\ \hline +\end{tabular}\vspace{3mm} +``` + +![Column Width][5] + +#### 使用 tabularx 管理长文本 + +在我们利用表格之前,我们需要在导言区中加上它。`tabularx` 方法见以下示例:`\begin{tabularx}{宽度}{列选项}`。 + +``` +\begin{tabularx}{\textwidth}{|l|X|} \hline +Summary & Tabularx Description\\ \hline +Text &\blindtext \\ \hline +\end{tabularx} +``` + +![Tabularx][6] + +请注意,我们需要处理长文本的列在花括号中指定了大写 `X`。 + +### 合并行合并列 + +有时需要合并行或列。本节描述了如何完成。要使用 `multirow` 和 `multicolumn`,请将 `multirow` 添加到导言区。 + +#### 合并行 + +`multirow` 采用以下参数 `\multirow{行的数量}{宽度}{文本}`,让我们看看下面的示例。 + +``` +\begin{tabular}{|l|l|}\hline + Release &Codename \\ \hline + Fedora Core 4 &Stentz \\ \hline + \multirow{2}{*}{MultiRow} &Fedora 8 \\ + &Werewolf \\ \hline +\end{tabular} +``` + +![MultiRow][7] + +在上面的示例中,指定了两行,`*` 告诉 LaTeX 自动管理单元格的大小。 + +#### 合并列 + +`multicolumn` 参数是 `\multicolumn{列的数量}{单元格选项}{位置}{文本}`,下面的示例演示合并列。 + +``` +\begin{tabular}{|l|l|l|}\hline + Release &Codename &Date \\ \hline + Fedora Core 4 &Stentz &2005 \\ \hline + \multicolumn{3}{|c|}{Multi-Column} \\ \hline +\end{tabular} +``` + +![Multi-Column][8] + +### 使用颜色 + +可以为文本、单个单元格或整行指定颜色。此外,我们可以为每一行配置交替的颜色。 + +在给表添加颜色之前,我们需要在导言区引用 `\usepackage[table]{xcolor}`。我们还可以使用以下颜色参考 [LaTeX Color][9] 或在颜色前缀后面添加感叹号(从 0 到 100 的阴影)来定义颜色。例如,`gray!30`。 + +``` +\definecolor{darkblue}{rgb}{0.0, 0.0, 0.55} +\definecolor{darkgray}{rgb}{0.66, 0.66, 0.66} +``` + +下面的示例演示了一个具有各种颜色的表,`\rowcolors` 采用以下选项 `\rowcolors{起始行颜色}{偶数行颜色}{奇数行颜色}`。 + +``` +\rowcolors{2}{darkgray}{gray!20} +\begin{tabular}{c|c} + Release &Codename \\ \hline + Fedora Core 1 &Yarrow \\ + Fedora Core 2 &Tettnang \\ + Fedora Core 3 &Heidelberg \\ + Fedora Core 4 &Stentz \\ +\end{tabular} +``` + +![Alt colour table][10] + +除了上面的例子,`\rowcolor` 可以用来指定每一行的颜色,这个方法在有合并行时效果最好。以下示例显示将 `\rowcolors` 与合并行一起使用的影响以及如何解决此问题。 + +![Impact on multi-row][11] + +你可以看到,在合并行中,只有第一行能显示颜色。想要解决这个问题,需要这样做: + +``` +\begin{tabular}{|l|l|}\hline +    \rowcolor{darkblue}\textsc{\color{white}Release} &\textsc{\color{white}Codename} \\ \hline + 
\rowcolor{gray!10}Fedora Core 4 &Stentz \\ \hline +    \rowcolor{gray!40}&Fedora 8 \\ +    \rowcolor{gray!40}\multirow{-2}{*}{Multi-Row} &Werewolf \\ \hline +\end{tabular} +``` + +![Multi-row][12] + +让我们讲解一下为解决合并行替换颜色问题而实施的更改。 + +  * 第一行从合并行上方开始 +  * 行数从 `2` 更改为 `-2`,这意味着从上面的行开始读取 +  * `\rowcolor` 是为每一行指定的,更重要的是,多行必须具有相同的颜色,这样才能获得所需的结果。 + +关于颜色的最后一个注意事项是,要更改列的颜色,需要创建新的列类型并定义颜色。下面的示例说明了如何定义新的列颜色。 + +``` +\newcolumntype{g}{>{\columncolor{darkblue}}l} +``` + +我们把它分解一下: + +  * `\newcolumntype{g}`:将字母 `g` 定义为新列 +  * `{>{\columncolor{darkblue}}l}`:在这里我们选择我们想要的颜色,并且 `l` 告诉列左对齐,这可以用 `c` 或 `r` 代替。 + +``` +\begin{tabular}{g|l} + \textsc{Release} &\textsc{Codename} \\ \hline + Fedora Core 4 &Stentz \\ + &Fedora 8 \\ + \multirow{-2}{*}{Multi-Row} &Werewolf \\ +\end{tabular} +``` + +![Column Colour][13] + +### 横向表 + +有时,你的表可能有许多列,纵向排列会很不好看。在导言区加入 `rotating` 包,你将能够创建一个横向表。下面的例子说明了这一点。 + +对于横向表,我们将使用 `sidewaystable` 环境并在其中添加表格环境,我们还指定了其他选项。 + +  * `\centering` 可以将表格放置在页面中心 +  * `\caption{}` 为表命名 +  * `\label{}` 这使我们能够引用文档中的表 + +``` +\begin{sidewaystable} +\centering +\caption{Sideways Table} +\label{sidetable} +\begin{tabular}{ll} + \rowcolor{darkblue}\textsc{\color{white}Release} &\textsc{\color{white}Codename} \\ + \rowcolor{gray!10}Fedora Core 4 &Stentz \\ + \rowcolor{gray!40} &Fedora 8 \\ + \rowcolor{gray!40}\multirow{-2}{*}{Multi-Row} &Werewolf \\ +\end{tabular}\vspace{3mm} +\end{sidewaystable} +``` + +![Sideways Table][14] + +### 列表和表格 + +要将列表包含到表中,可以使用 `tabularx`,并将列表包含在指定的列中。另一个办法是使用表格格式,但必须指定列宽。 + +#### 用 tabularx 处理列表 + +``` +\begin{tabularx}{\textwidth}{|l|X|} \hline +  Fedora Version &Editions \\ \hline +  Fedora 32 &\begin{itemize}[noitemsep] +    \item CoreOS +    \item Silverblue +    \item IoT +    \end{itemize} \\ \hline +\end{tabularx}\vspace{3mm} +``` + +![List in tabularx][15] + +#### 用 tabular 处理列表 + +``` +\begin{tabular}{|l|m{6cm}|}\hline +        Fedora Version &Editions \\ \hline +    Fedora 32 &\begin{itemize}[noitemsep] +        \item CoreOS +        \item 
Silverblue +        \item IoT +    \end{itemize} \\ \hline +\end{tabular} +``` + +![List in tabular][16] + +### 总结 + +LaTeX 提供了许多使用 `tabular` 和 `tabularx` 自定义表的方法,你还可以在表环境 (`\begin{table}`) 中添加 `tabular` 和 `tabularx` 来添加表的名称和定位表。 + +### LaTeX 宏包 + +所需的宏包有如下这些: + +``` +\usepackage{fullpage} +\usepackage{blindtext} % add demo text +\usepackage{array} % used for column positions +\usepackage{tabularx} % adds tabularx which is used for text wrapping +\usepackage{multirow} % multi-row and multi-colour support +\usepackage[table]{xcolor} % add colour to the columns +\usepackage{rotating} % for landscape/sideways tables +``` + +### 额外的知识 + +这是一堂关于表的小课,有关表和 LaTeX 的更多高级信息,请访问 [LaTeX Wiki][17] + +![][13] + +-------------------------------------------------------------------------------- + +via: https://fedoramagazine.org/latex-typesetting-part-2-tables/ + +作者:[Earl Ramirez][a] +选题:[lujun9972][b] +译者:[Chao-zhi](https://github.com/Chao-zhi) +校对:[wxy](https://github.com/wxy) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://fedoramagazine.org/author/earlramirez/ +[b]: https://github.com/lujun9972 +[1]: https://fedoramagazine.org/wp-content/uploads/2020/06/latex-series-816x345.png +[2]: https://fedoramagazine.org/wp-content/uploads/2020/06/image-13.png +[3]: https://fedoramagazine.org/wp-content/uploads/2020/06/image-23.png +[4]: https://fedoramagazine.org/wp-content/uploads/2020/06/image-10.png +[5]: https://fedoramagazine.org/wp-content/uploads/2020/06/image-11.png +[6]: https://fedoramagazine.org/wp-content/uploads/2020/06/image-12.png +[7]: https://fedoramagazine.org/wp-content/uploads/2020/06/image-15.png +[8]: https://fedoramagazine.org/wp-content/uploads/2020/06/image-16.png +[9]: https://latexcolor.com +[10]: https://fedoramagazine.org/wp-content/uploads/2020/06/image-17.png +[11]: https://fedoramagazine.org/wp-content/uploads/2020/06/image-18.png +[12]: 
https://fedoramagazine.org/wp-content/uploads/2020/06/image-19.png +[13]: https://fedoramagazine.org/wp-content/uploads/2020/06/image-24.png +[14]: https://fedoramagazine.org/wp-content/uploads/2020/06/image-20.png +[15]: https://fedoramagazine.org/wp-content/uploads/2020/06/image-21.png +[16]: https://fedoramagazine.org/wp-content/uploads/2020/06/image-22.png +[17]: https://en.wikibooks.org/wiki/LaTeX/Tables diff --git a/published/20200922 Give Your GNOME Desktop a Tiling Makeover With Material Shell GNOME Extension.md b/published/20200922 Give Your GNOME Desktop a Tiling Makeover With Material Shell GNOME Extension.md new file mode 100644 index 0000000000..a645f460fb --- /dev/null +++ b/published/20200922 Give Your GNOME Desktop a Tiling Makeover With Material Shell GNOME Extension.md @@ -0,0 +1,126 @@ +[#]: collector: (lujun9972) +[#]: translator: (Chao-zhi) +[#]: reviewer: (wxy) +[#]: publisher: (wxy) +[#]: url: (https://linux.cn/article-13110-1.html) +[#]: subject: (Give Your GNOME Desktop a Tiling Makeover With Material Shell GNOME Extension) +[#]: via: (https://itsfoss.com/material-shell/) +[#]: author: (Abhishek Prakash https://itsfoss.com/author/abhishek/) + +使用 Material Shell 扩展将你的 GNOME 桌面打造成平铺式风格 +====== + +平铺式窗口的特性吸引了很多人的追捧。也许是因为它很好看,也许是因为它能提高 [Linux 快捷键][1] 玩家的效率。又或者是因为使用不同寻常的平铺式窗口是一种新奇的挑战。 + +![Tiling Windows in Linux | Image Source][2] + +从 i3 到 [Sway][3],Linux 桌面拥有各种各样的平铺式窗口管理器。配置一个平铺式窗口管理器需要一个陡峭的学习曲线。 + +这就是为什么像 [Regolith 桌面][4] 这样的项目会存在,给你预先配置好的平铺桌面,让你可以更轻松地开始使用平铺窗口。 + +让我给你介绍一个类似的项目 —— Material Shell。它可以让你用上平铺式桌面,甚至比 [Regolith][5] 还简单。 + +### Material Shell 扩展:将 GNOME 桌面转变成平铺式窗口管理器 + +[Material Shell][6] 是一个 GNOME 扩展,这就是它最好的地方。这意味着你不需要注销并登录其他桌面环境。你只需要启用或关闭这个扩展就可以自如的切换你的工作环境。 + +我会列出 Material Shell 的各种特性,但是也许视频更容易让你理解: + +- [video](https://youtu.be/Wc5mbuKrGDE) + +这个项目叫做 Material Shell 是因为它遵循 [Material Design][8] 原则。因此这个应用拥有一个美观的界面。这就是它最重要的一个特性。 + +#### 直观的界面 + +Material Shell 添加了一个左侧面板,以便快速访问。在此面板上,你可以在底部找到系统托盘,在顶部找到搜索和工作区。 + 
+所有新打开的应用都会添加到当前工作区中。你也可以创建新的工作区并切换到该工作区,以将正在运行的应用分类。其实这就是工作区最初的意义。 + +在 Material Shell 中,每个工作区都可以显示为具有多个应用程序的行列,而不是包含多个应用程序的程序框。 + +#### 平铺式窗口 + +在工作区中,你可以一直在顶部看到所有打开的应用程序。默认情况下,应用程序会像在 GNOME 桌面中那样铺满整个屏幕。你可以使用右上角的布局改变器来改变布局,将其分成两半、多列或多个应用网格。 + +这段视频一目了然的显示了以上所有功能: + +- [video](https://player.vimeo.com/video/460050750?dnt=1&app_id=122963) + +#### 固定布局和工作区 + +Material Shell 会记住你打开的工作区和窗口,这样你就不必重新组织你的布局。这是一个很好的特性,因为如果你对应用程序的位置有要求的话,它可以节省时间。 + +#### 热建/快捷键 + +像任何平铺窗口管理器一样,你可以使用键盘快捷键在应用程序和工作区之间切换。 + + * `Super+W` 切换到上个工作区; + * `Super+S` 切换到下个工作区; + * `Super+A` 切换到左边的窗口; + * `Super+D` 切换到右边的窗口; + * `Super+1`、`Super+2` … `Super+0` 切换到某个指定的工作区; + * `Super+Q` 关闭当前窗口; + * `Super+[鼠标拖动]` 移动窗口; + * `Super+Shift+A` 将当前窗口左移; + * `Super+Shift+D` 将当前窗口右移; + * `Super+Shift+W` 将当前窗口移到上个工作区; + * `Super+Shift+S` 将当前窗口移到下个工作区。 + +### 安装 Material Shell + +> 警告! +> +> 对于大多数用户来说,平铺式窗口可能会导致混乱。你最好先熟悉如何使用 GNOME 扩展。如果你是 Linux 新手或者你害怕你的系统发生翻天覆地的变化,你应当避免使用这个扩展。 + +Material Shell 是一个 GNOME 扩展。所以,请 [检查你的桌面环境][9],确保你运行的是 GNOME 3.34 或者更高的版本。 + +除此之外,我注意到在禁用 Material Shell 之后,它会导致 Firefox 的顶栏和 Ubuntu 的坞站消失。你可以在 GNOME 的“扩展”应用程序中禁用/启用 Ubuntu 的坞站扩展来使其变回原来的样子。我想这些问题也应该在系统重启后消失,虽然我没试过。 + +我希望你知道 [如何使用 GNOME 扩展][10]。最简单的办法就是 [在浏览器中打开这个链接][11],安装 GNOME 扩展浏览器插件,然后启用 Material Shell 扩展即可。 + +![][12] + +如果你不喜欢这个扩展,你也可以在同样的链接中禁用它。或者在 GNOME 的“扩展”应用程序中禁用它。 + +![][13] + +### 用不用平铺式? + +我使用多个电脑屏幕,我发现 Material Shell 不适用于多个屏幕的情况。这是开发者将来可以改进的地方。 + +除了这个毛病以外,Material Shell 是个让你开始使用平铺式窗口的好东西。如果你尝试了 Material Shell 并且喜欢它,请 [在 GitHub 上给它一个星标或赞助它][14] 来鼓励这个项目。 + +由于某些原因,平铺窗户越来越受欢迎。最近发布的 [Pop OS 20.04][15] 也增加了平铺窗口的功能。有一个类似的项目叫 PaperWM,也是这样做的。 + +但正如我前面提到的,平铺布局并不适合所有人,它可能会让很多人感到困惑。 + +你呢?你是喜欢平铺窗口还是喜欢经典的桌面布局? 
+ +-------------------------------------------------------------------------------- + +via: https://itsfoss.com/material-shell/ + +作者:[Abhishek Prakash][a] +选题:[lujun9972][b] +译者:[Chao-zhi](https://github.com/Chao-zhi) +校对:[wxy](https://github.com/wxy) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://itsfoss.com/author/abhishek/ +[b]: https://github.com/lujun9972 +[1]: https://itsfoss.com/ubuntu-shortcuts/ +[2]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2019/09/linux-ricing-example-800x450.jpg?resize=800%2C450&ssl=1 +[3]: https://itsfoss.com/sway-window-manager/ +[4]: https://itsfoss.com/regolith-linux-desktop/ +[5]: https://regolith-linux.org/ +[6]: https://material-shell.com +[7]: https://www.youtube.com/c/itsfoss?sub_confirmation=1 +[8]: https://material.io/ +[9]: https://itsfoss.com/find-desktop-environment/ +[10]: https://itsfoss.com/gnome-shell-extensions/ +[11]: https://extensions.gnome.org/extension/3357/material-shell/ +[12]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2020/09/install-material-shell.png?resize=800%2C307&ssl=1 +[13]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2020/09/material-shell-gnome-extension.png?resize=799%2C497&ssl=1 +[14]: https://github.com/material-shell/material-shell +[15]: https://itsfoss.com/pop-os-20-04-review/ diff --git a/published/20201001 Navigating your Linux files with ranger.md b/published/20201001 Navigating your Linux files with ranger.md new file mode 100644 index 0000000000..4c2387fde9 --- /dev/null +++ b/published/20201001 Navigating your Linux files with ranger.md @@ -0,0 +1,124 @@ +[#]: collector: (lujun9972) +[#]: translator: (wxy) +[#]: reviewer: (wxy) +[#]: publisher: (wxy) +[#]: url: (https://linux.cn/article-13137-1.html) +[#]: subject: (Navigating your Linux files with ranger) +[#]: via: (https://www.networkworld.com/article/3583890/navigating-your-linux-files-with-ranger.html) +[#]: author: (Sandra Henry-Stocker 
https://www.networkworld.com/author/Sandra-Henry_Stocker/) + +用 ranger 在 Linux 文件的海洋中导航 +===== + +> ranger 是一个很好的工具,它为你的 Linux 文件提供了一个多级视图,并允许你使用方向键和一些方便的命令进行浏览和更改。 + +![](https://img.linux.net.cn/data/attachment/album/202102/20/121918g5hqhjfcjyffh3lt.jpg) + +`ranger` 是一款独特且非常方便的文件系统导航器,它允许你在 Linux 文件系统中移动,进出子目录,查看文本文件内容,甚至可以在不离开该工具的情况下对文件进行修改。 + +它运行在终端窗口中,并允许你按下方向键进行导航。它提供了一个多级的文件显示,让你很容易看到你在哪里、在文件系统中移动、并选择特定的文件。 + +要安装 `ranger`,请使用标准的安装命令(例如,`sudo apt install ranger`)。要启动它,只需键入 `ranger`。它有一个很长的、非常详细的手册页面,但开始使用 `ranger` 非常简单。 + +### ranger 的显示方式 + +你需要马上习惯的最重要的一件事就是 `ranger` 的文件显示方式。一旦你启动了 `ranger`,你会看到四列数据。第一列是你启动 `ranger` 的位置的上一级。例如,如果你从主目录开始,`ranger` 将在第一列中列出所有的主目录。第二列将显示你的主目录(或者你开始的目录)中的目录和文件的第一屏内容。 + +这里的关键是超越你可能有的任何习惯,将每一行显示的细节看作是相关的。第二列中的所有条目与第一列中的单个条目相关,第四列中的内容与第二列中选定的文件或目录相关。 + +与一般的命令行视图不同的是,目录将被列在第一位(按字母数字顺序),文件将被列在第二位(也是按字母数字顺序)。从你的主目录开始,显示的内容可能是这样的: + +``` +shs@dragonfly /home/shs/backups <== current selection + bugfarm backups 0 empty + dory bin 59 + eel Buttons 15 + nemo Desktop 0 + shark Documents 0 + shs Downloads 1 + ^ ^ ^ ^ + | | | | + homes directories # files listing + in selected in each of files in + home directory selected directory +``` + +`ranger` 显示的最上面一行告诉你在哪里。在这个例子中,当前目录是 `/home/shs/backups`。我们看到高亮显示的是 `empty`,因为这个目录中没有文件。如果我们按下方向键选择 `bin`,我们会看到一个文件列表: + +``` +shs@dragonfly /home/shs/bin <== current selection + bugfarm backups 0 append + dory bin 59 calcPower + eel Buttons 15 cap + nemo Desktop 0 extract + shark Documents 0 finddups + shs Downloads 1 fix + ^ ^ ^ ^ + | | | | + homes directories # files listing + in selected in each of files in + home directory selected directory +``` + +每一列中高亮显示的条目显示了当前的选择。使用右方向键可移动到更深的目录或查看文件内容。 + +如果你继续按下方向键移动到列表的文件部分,你会注意到第三列将显示文件大小(而不是文件的数量)。“当前选择”行也会显示当前选择的文件名,而最右边的一列则会尽可能地显示文件内容。 + +``` +shs@dragonfly /home/shs/busy_wait.c <== current selection + bugfarm BushyRidge.zip 170 K /* + dory busy_wait.c 338 B * program that does a busy wait + eel camper.jpg 5.55 M * it's used to show ASLR, and that's it 
+ nemo check_lockscreen 80 B */ + shark chkrootkit-output 438 B #include + ^ ^ ^ ^ + | | | | + homes files sizes file content +``` + +在该显示的底行会显示一些文件和目录的详细信息: + +``` +-rw-rw-r—- shs shs 338B 2019-01-05 14:44 1.52G, 365G free 67/488 11% +``` + +如果你选择了一个目录并按下回车键,你将进入该目录。然后,在你的显示屏中最左边的一列将是你的主目录的内容列表,第二列将是该目录内容的文件列表。然后你可以检查子目录的内容和文件的内容。 + +按左方向键可以向上移动一级。 + +按 `q` 键退出 `ranger`。 + +### 做出改变 + +你可以按 `?` 键,在屏幕底部弹出一条帮助行。它看起来应该是这样的: + +``` +View [m]an page, [k]ey bindings, [c]commands or [s]ettings? (press q to abort) +``` + +按 `c` 键,`ranger` 将提供你可以在该工具内使用的命令信息。例如,你可以通过输入 `:chmod` 来改变当前文件的权限,后面跟着预期的权限。例如,一旦选择了一个文件,你可以输入 `:chmod 700` 将权限设置为 `rwx------`。 + +输入 `:edit` 可以在 `nano` 中打开该文件,允许你进行修改,然后使用 `nano` 的命令保存文件。 + +### 总结 + +使用 `ranger` 的方法比本篇文章所描述的更多。该工具提供了一种非常不同的方式来列出 Linux 系统上的文件并与之交互,一旦你习惯了它的多级的目录和文件列表方式,并使用方向键代替 `cd` 命令来移动,就可以很轻松地在 Linux 的文件中导航。 + +-------------------------------------------------------------------------------- + +via: https://www.networkworld.com/article/3583890/navigating-your-linux-files-with-ranger.html + +作者:[Sandra Henry-Stocker][a] +选题:[lujun9972][b] +译者:[wxy](https://github.com/wxy) +校对:[wxy](https://github.com/wxy) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://www.networkworld.com/author/Sandra-Henry_Stocker/ +[b]: https://github.com/lujun9972 +[1]: https://unsplash.com/photos/mHC0qJ7l-ls +[2]: https://creativecommons.org/publicdomain/zero/1.0/ +[3]: https://www.networkworld.com/newsletters/signup.html +[4]: https://www.facebook.com/NetworkWorld/ +[5]: https://www.linkedin.com/company/network-world diff --git a/published/20201013 My first day using Ansible.md b/published/20201013 My first day using Ansible.md new file mode 100644 index 0000000000..990ddc1f24 --- /dev/null +++ b/published/20201013 My first day using Ansible.md @@ -0,0 +1,297 @@ +[#]: collector: "lujun9972" +[#]: translator: "MjSeven" +[#]: reviewer: "wxy" +[#]: publisher: "wxy" +[#]: url: 
"https://linux.cn/article-13079-1.html" +[#]: subject: "My first day using Ansible" +[#]: via: "https://opensource.com/article/20/10/first-day-ansible" +[#]: author: "David Both https://opensource.com/users/dboth" + +使用 Ansible 的第一天 +====== + +> 一名系统管理员分享了如何使用 Ansible 在网络中配置计算机并把其带入实际工作的信息和建议。 + +![](https://img.linux.net.cn/data/attachment/album/202102/03/105826sn41jj0i8evu19nn.jpg) + +无论是第一次还是第五十次,启动并运行一台新的物理或虚拟计算机都非常耗时,而且需要大量的工作。多年来,我一直使用我创建的一系列脚本和 RPM 来安装所需的软件包,并为我喜欢的工具配置各种选项。这种方法效果很好,简化了我的工作,而且还减少了在键盘上输入命令的时间。 + +我一直在寻找更好的工作方式。近几年来,我一直在听到并且读到有关 [Ansible][2] 的信息,它是一个自动配置和管理系统的强大工具。Ansible 允许系统管理员在一个或多个剧本playbook中为每个主机指定一个特定状态,然后执行各种必要的任务,使主机进入该状态。这包括安装或删除各种资源,例如 RPM 或 Apt 软件包、配置文件和其它文件、用户、组等等。 + +因为一些琐事,我推迟了很长一段时间学习如何使用它。直到最近,我遇到了一个我认为 Ansible 可以轻松解决的问题。 + +这篇文章并不会完整地告诉你如何入门 Ansible,相反,它只是对我遇到的问题和我在一些隐秘的地方发现的信息的做了一些记录。我在各种在线讨论和问答小组中找到的有关 Ansible 的许多信息都是错误的。错误范围包括明显的老旧信息(没有任何日期或来源的迹象),还有一些是完全错误的信息。 + +本文所介绍的内容是有用的,尽管可能还有其它方法可以完成相同的事情,但我使用的是 Ansible 2.9.13 和 [Python][3] 3.8.5。 + +### 我的问题 + +我所有的最佳学习经历都始于我需要解决的问题,这次也不例外。 + +我一直在做一个小项目,修改 [Midnight Commander][4] 文件管理器的配置文件,并将它们推送到我网络上的各种系统中进行测试。尽管我有一个脚本可以自动执行这个操作,但它仍然需要使用命令行循环来提供我想要推送新代码的系统名称。我对配置文件进行了大量的更改,这使我必须频繁推送新的配置文件。但是,就在我以为我的新配置刚刚好时,我发现了一个问题,所以我需要在修复后再进行一次推送。 + +这种环境使得很难跟踪哪些系统有新文件,哪些没有。我还有几个主机需要区别对待。我对 Ansible 的一点了解表明,它可能能够满足我的全部或大部分工作。 + +### 开始 + +我读过许多有关 Ansible 的好文章和书籍,但从来没有在“我必须现在就把这个做好!”的情况下读过。而现在 —— 好吧,就是现在! 
+ +在重读这些文档时,我发现它们主要是在讨论如何从 GitHub 开始安装并使用 Ansible,这很酷。但是我真的只是想尽快开始,所以我使用 DNF 和 Fedora 仓库中的版本在我的 Fedora 工作站上安装了它,非常简单。 + +但是后来我开始寻找文件位置,并尝试确定需要修改哪些配置文件、将我的剧本保存在什么位置,甚至一个剧本怎么写以及它的作用,我脑海中有一大堆(到目前为止)悬而未决的问题。 + +因此,不不需要进一步描述我的困难的情况下,以下是我发现的东西以及促使我继续前进的东西。 + +### 配置 + +Ansible 的配置文件保存在 `/etc/ansible` 中,这很有道理,因为 `/etc/` 是系统程序应该保存配置文件的地方。我需要使用的两个文件是 `ansible.cfg` 和 `hosts`。 + +#### ansible.cfg + +在进行了从文档和线上找到的一些实践练习之后,我遇到了一些有关弃用某些较旧的 Python 文件的警告信息。因此,我在 `ansible.cfg` 中将 `deprecation_warnings` 设置为 `false`,这样那些愤怒的红色警告消息就不会出现了: + +``` +deprecation_warnings = False +``` + +这些警告很重要,所以我稍后将重新回顾它们,并弄清楚我需要做什么。但是现在,它们不会再扰乱屏幕,也不会让我混淆实际上需要关注的错误。 + +#### hosts 文件 + +与 `/etc/hosts` 文件不同,`hosts` 文件也被称为清单inventory文件,它列出了网络上的主机。此文件允许将主机分组到相关集合中,例如“servers”、“workstations”和任何你所需的名称。这个文件包含帮助和大量示例,所以我在这里就不详细介绍了。但是,有些事情你必须知道。 + +主机也可以列在组之外,但是组对于识别具有一个或多个共同特征的主机很有帮助。组使用 INI 格式,所以服务器组看起来像这样: + +``` +[servers] +server1 +server2 +...... +``` + +这个文件中必须有一个主机名,这样 Ansible 才能对它进行操作。即使有些子命令允许指定主机名,但除非主机名在 `hosts` 文件中,否则命令会失败。一个主机也可以放在多个组中。因此,除了 `[servers]` 组之外,`server1` 也可能是 `[webservers]` 组的成员,还可以是 `[ubuntu]` 组的成员,这样以区别于 Fedora 服务器。 + +Ansible 很智能。如果 `all` 参数用作主机名,Ansible 会扫描 `hosts` 文件并在它列出的所有主机上执行定义的任务。Ansible 只会尝试在每个主机上工作一次,不管它出现在多少个组中。这也意味着不需要定义 `all` 组,因为 Ansible 可以确定文件中的所有主机名,并创建自己唯一的主机名列表。 + +另一件需要注意的事情是单个主机的多个条目。我在 DNS 文件中使用 `CNAME` 记录来创建别名,这些别名指向某些主机的 [A 记录][5],这样,我可以将一个主机称为 `host1` 或 `h1` 或 `myhost`。如果你在 `hosts` 文件中为同一主机指定多个主机名,则 Ansible 将尝试在所有这些主机名上执行其任务,它无法知道它们指向同一主机。好消息是,这并不会影响整体结果;它只是多花了一点时间,因为 Ansible 会在次要主机名上工作,它会确定所有操作均已执行。 + +### Ansible 实情 + +我阅读过 Ansible 的大多数材料都谈到了 Ansible [实情][6]facts,它是与远程系统相关的数据,包括操作系统、IP 地址、文件系统等等。这些信息可以通过其它方式获得,如 `lshw`、`dmidecode` 或 `/proc` 文件系统等。但是 Ansible 会生成一个包含此信息的 JSON 文件。每次 Ansible 运行时,它都会生成这些实情数据。在这个数据流中,有大量的信息,都是以键值对形式出现的:`<"variable-name": "value">`。所有这些变量都可以在 Ansible 剧本中使用,理解大量可用信息的最好方法是实际显示一下: + +``` +# ansible -m setup | less +``` + +明白了吗?你想知道的有关主机硬件和 Linux 发行版的所有内容都在这里,它们都可以在剧本中使用。我还没有达到需要使用这些变量的地步,但是我相信在接下来的几天中会用到。 + +### 模块 + +上面的 `ansible` 
命令使用 `-m` 选项来指定 `setup` 模块。Ansible 已经内置了许多模块,所以你对这些模块不需要使用 `-m`。也可以安装许多下载的模块,但是内置模块可以完成我目前项目所需的一切。 + +### 剧本 + +剧本playbook几乎可以放在任何地方。因为我需要以 root 身份运行,所以我将它放在了 `/root/ansible` 下。当我运行 Ansible 时,只要这个目录是当前的工作目录(PWD),它就可以找到我的剧本。Ansible 还有一个选项,用于在运行时指定不同的剧本和位置。 + +剧本可以包含注释,但是我看到的文章或书籍很少提及此。但作为一个相信记录一切的系统管理员,我发现使用注释很有帮助。这并不是说在注释中做和任务名称同样的事情,而是要确定任务组的目的,并确保我以某种方式或顺序记录我做这些事情的原因。当我可能忘记最初的想法时,这可以帮助以后解决调试问题。 + +剧本只是定义主机所需状态的任务集合。在剧本的开头指定主机名或清单组,并定义 Ansible 将在其上运行剧本的主机。 + +以下是我的一个剧本示例: + +``` +################################################################################ +# This Ansible playbook updates Midnight commander configuration files.        # +################################################################################ +- name: Update midnight commander configuration files +  hosts: all +  +  tasks: +  - name: ensure midnight commander is the latest version +    dnf: +      name: mc +      state: present + +  - name: create ~/.config/mc directory for root +    file: +      path: /root/.config/mc +      state: directory +      mode: 0755 +      owner: root +      group: root + +  - name: create ~/.config/mc directory for dboth +    file: +      path: /home/dboth/.config/mc +      state: directory +      mode: 0755 +      owner: dboth +      group: dboth + +  - name: copy latest personal skin +    copy: +      src: /root/ansible/UpdateMC/files/MidnightCommander/DavidsGoTar.ini +      dest: /usr/share/mc/skins/DavidsGoTar.ini +      mode: 0644 +      owner: root +      group: root + +  - name: copy latest mc ini file +    copy: +      src: /root/ansible/UpdateMC/files/MidnightCommander/ini +      dest: /root/.config/mc/ini +      mode: 0644 +      owner: root +      group: root + +  - name: copy latest mc panels.ini file +    copy: +      src: /root/ansible/UpdateMC/files/MidnightCommander/panels.ini +      dest: /root/.config/mc/panels.ini +      mode: 0644 +      owner: root +      group: root +<截断> +``` + +剧本从它自己的名字和它将要操作的主机开始,在本文中,所有主机都在我的 `hosts` 文件中。`tasks` 
部分列出了使主机达到所需状态的特定任务。这个剧本从使用 DNF 更新 Midnight Commander 开始(如果它不是最新的版本的话)。下一个任务确保创建所需的目录(如果它们不存在),其余任务将文件复制到合适的位置,这些 `file` 和 `copy` 任务还可以为目录和文件设置所有权和文件模式。 + +剧本细节超出了本文的范围,但是我对这个问题使用了一点蛮力。还有其它方法可以确定哪些用户需要更新文件,而不是对每个用户的每个文件使用一个任务。我的下一个目标是简化这个剧本,使用一些更先进的技术。 + +运行剧本很容易,只需要使用 `ansible-playbook` 命令。`.yml` 扩展名代表 YAML,我看到过它的几种不同含义,但我认为它是“另一种标记语言Yet Another Markup Language”,尽管有些人声称 YAML 不是这种语言。 + +这个命令将会运行剧本,它会更新 Midnight Commander 文件: + +``` +# ansible-playbook -f 10 UpdateMC.yml +``` + +`-f` 选项指定 Ansible 使用 10 个线程来执行操作。这可以大大加快整个任务的完成速度,特别是在多台主机上工作时。 + +### 输出 + +剧本运行时会列出每个任务和执行结果。`ok` 代表任务管理的机器状态已经完成,因为在任务中定义的状态已经为真,所以 Ansible 不需要执行任何操作。 + +`changed` 表示 Ansible 已经执行了指定的任务。在这种情况下,任务中定义的机器状态不为真,所以执行指定的操作使其为真。在彩色终端上,`TASK` 行会以彩色显示。我的终端配色为“amber-on-black”,`TASK` 行显示为琥珀色,`changed` 是棕色,`ok` 为绿色,错误是红色。 + +下面的输出是我最终用于在新主机执行安装后配置的剧本: + +``` +PLAY [Post-installation updates, package installation, and configuration] + +TASK [Gathering Facts] +ok: [testvm2] + +TASK [Ensure we have connectivity] +ok: [testvm2] + +TASK [Install all current updates] +changed: [testvm2] + +TASK [Install a few command line tools] +changed: [testvm2] + +TASK [copy latest personal Midnight Commander skin to /usr/share] +changed: [testvm2] + +TASK [create ~/.config/mc directory for root] +changed: [testvm2] + +TASK [Copy the most current Midnight Commander configuration files to /root/.config/mc] +changed: [testvm2] => (item=/root/ansible/PostInstallMain/files/MidnightCommander/DavidsGoTar.ini) +changed: [testvm2] => (item=/root/ansible/PostInstallMain/files/MidnightCommander/ini) +changed: [testvm2] => (item=/root/ansible/PostInstallMain/files/MidnightCommander/panels.ini) + +TASK [create ~/.config/mc directory in /etc/skel] +changed: [testvm2] + +<截断> +``` + +### cowsay + +如果你的计算机上安装了 [cowsay][7] 程序,你会发现 `TASK` 的名字出现在奶牛的语音泡泡中: + +``` + ____________________________________ +< TASK [Ensure we have connectivity] > + ------------------------------------ +        \   ^__^ +         \  (oo)\\_______ +            
(__)\       )\/\ +                ||----w | +                ||     || +``` + +如果你没有这个有趣的程序,你可以使用发行版的包管理器安装 Cowsay 程序。如果你有这个程序但不想要它,可以通过在 `/etc/ansible/ansible.cfg` 文件中设置 `nocows=1` 将其禁用。 + +我喜欢这头奶牛,它很有趣,但是它会占用我的一部分屏幕。因此,在它开始妨碍我使用时,我就把它禁用了。 + +### 目录 + +与我的 Midnight Commander 任务一样,经常需要安装和维护各种类型的文件。创建用于存储剧本的目录树的“最佳实践”和系统管理员一样多,至少与编写有关 Ansible 书和文章的作者数量一样多。 + +我选择了一个对我有意义的简单结构: + +```bash +/root/ansible +└── UpdateMC +    ├── files +    │   └── MidnightCommander +    │       ├── DavidsGoTar.ini +    │       ├── ini +    │       └── panels.ini +    └── UpdateMC.yml +``` + +你可以使用任何结构。但是请注意,其它系统管理员可能需要使用你设置的剧本来工作,所以目录应该具有一定程度的逻辑。当我使用 RPM 和 Bash 脚本执行安装任务后,我的文件仓库有点分散,绝对没有任何逻辑结构。当我为许多管理任务创建剧本时,我将介绍一个更有逻辑的结构来管理我的目录。 + +### 多次运行剧本 + +根据需要或期望多次运行剧本是安全的。只有当主机状态与任务中指定的状态不匹配时,才会执行每个任务。这使得很容易从先前的剧本运行中遇到的错误中恢复。因为当剧本遇到错误时,它将停止运行。 + +在测试我的第一个剧本时,我犯了很多错误并改正了它们。假设我的修正正确,那么剧本每次运行,都会跳过那些状态已与指定状态匹配的任务,执行不匹配状态的任务。当我的修复程序起作用时,之前失败的任务就会成功完成,并且会执行此任务之后的任务 —— 直到遇到另一个错误。 + +这使得测试变得容易。我可以添加新任务,并且在运行剧本时,只有新任务会被执行,因为它们是唯一与测试主机期望状态不匹配的任务。 + +### 一些思考 + +有些任务不适合用 Ansible,因为有更好的方法可以实现特定的计算机状态。我想到的场景是使 VM 返回到初始状态,以便可以多次使用它来执行从已知状态开始的测试。让 VM 进入特定状态,然后对此时的计算机状态进行快照要容易得多。还原到该快照与 Ansible 将主机返回到之前状态相比,通常还原到快照通常会更容易且更快。在研究文章或测试新代码时,我每天都会做几次这样的事情。 + +在完成用于更新 Midnight Commander 的剧本之后,我创建了一个新的剧本,用于在新安装的 Fedora 主机上执行安装任务。我已经取得了不错的进步,剧本比我第一个的更加复杂,但没有那么粗暴。 + +在使用 Ansible 的第一天,我创建了一个解决问题的剧本,我还开始编写第二个剧本,它将解决安装后配置的大问题,在这个过程中我学到了很多东西。 + +尽管我真的很喜欢使用 [Bash][8] 脚本来管理任务,但是我发现 Ansible 可以完成我想要的一切,并且可以使系统保持在我所需的状态。只用了一天,我就成为了 Ansible 的粉丝。 + +### 资源 + +我找到的最完整、最有用的参考文档是 Ansible 网站上的[用户指南][9]。本文仅供参考,不作为入门文档。 + +多年来,我们已经发布了许多[有关 Ansible 的文章][10],我发现其中大多数对我的需求很有帮助。Enable Sysadmin 网站上也有很多对我有帮助 [Ansible 文章][11]。你可以通过查看本周(2020 年 10 月 13 日至 14 日)的 [AnsibleFest][12] 了解更多信息。该活动完全是线上的,可以免费注册。 + +-------------------------------------------------------------------------------- + +via: https://opensource.com/article/20/10/first-day-ansible + +作者:[David Both][a] +选题:[lujun9972][b] +译者:[MjSeven](https://github.com/MjSeven) 
+校对:[wxy](https://github.com/wxy) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensource.com/users/dboth +[b]: https://github.com/lujun9972 +[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/rh_003499_01_linux11x_cc.png?itok=XMDOouJR "People work on a computer server with devices" +[2]: https://www.ansible.com/ +[3]: https://www.python.org/ +[4]: https://midnight-commander.org/ +[5]: https://en.wikipedia.org/wiki/List_of_DNS_record_types +[6]: https://docs.ansible.com/ansible/latest/user_guide/playbooks_vars_facts.html#ansible-facts +[7]: https://en.wikipedia.org/wiki/Cowsay +[8]: https://opensource.com/downloads/bash-cheat-sheet +[9]: https://docs.ansible.com/ansible/latest/user_guide/index.html +[10]: https://opensource.com/tags/ansible +[11]: https://www.redhat.com/sysadmin/topics/ansible +[12]: https://www.ansible.com/ansiblefest diff --git a/published/20160302 Go channels are bad and you should feel bad.md b/published/202101/20160302 Go channels are bad and you should feel bad.md similarity index 100% rename from published/20160302 Go channels are bad and you should feel bad.md rename to published/202101/20160302 Go channels are bad and you should feel bad.md diff --git a/published/202101/20181009 GCC- Optimizing Linux, the Internet, and Everything.md b/published/202101/20181009 GCC- Optimizing Linux, the Internet, and Everything.md new file mode 100644 index 0000000000..641a77b740 --- /dev/null +++ b/published/202101/20181009 GCC- Optimizing Linux, the Internet, and Everything.md @@ -0,0 +1,93 @@ +GCC:优化 Linux、互联网和一切 +====== + +![](https://img.linux.net.cn/data/attachment/album/202101/22/122155ujfd62u6zbx3i4b3.jpg) + +软件如果不能被电脑运行,那么它就是无用的。而在处理运行时run-time性能的问题上,即使是最有才华的开发人员也会受编译器的支配 —— 因为如果没有可靠的编译器工具链,就无法构建任何重要的东西。GNU 编译器集合GNU Compiler Collection(GCC)提供了一个健壮、成熟和高性能的工具,以帮助你充分发挥你代码的潜能。经过数十年成千上万人的开发,GCC 成为了世界上最受尊敬的编译器之一。如果你在构建应用程序是没有使用 GCC,那么你可能错过了最佳解决方案。 + 
+根据 LLVM.org 的说法,GCC 是“如今事实上的标准开源编译器” [^1],也是用来构建完整系统的基础 —— 从内核开始。GCC 支持超过 60 种硬件平台,包括 ARM、Intel、AMD、IBM POWER、SPARC、HP PA-RISC 和 IBM Z,以及各种操作环境,包括 GNU、Linux、Windows、macOS、FreeBSD、NetBSD、OpenBSD、DragonFly BSD、Solaris、AIX、HP-UX 和 RTEMS。它提供了高度兼容的 C/C++ 编译器,并支持流行的 C 库,如 GNU C Library(glibc)、Newlib、musl 和各种 BSD 操作系统中包含的 C 库,以及 Fortran、Ada 和 GO 语言的前端。GCC 还可以作为一个交叉编译器,可以为运行编译器的平台以外的其他平台创建可执行代码。GCC 是紧密集成的 GNU 工具链的核心组件,由 GNU 项目产生,它包括 glibc、Binutils 和 GNU 调试器(GDB)。 + +“一直以来我最喜欢的 GNU 工具是 GCC,即GNU 编译器集合GNU Compiler Collection。在开发工具非常昂贵的时候,GCC 是第二个 GNU 工具,也是使社区能够编写和构建所有其他工具的工具。这个工具一手改变了这个行业,导致了自由软件运动的诞生,因为一个好的、自由的编译器是一个社区软件的先决条件。”—— Red Hat 开源和标准团队的 Dave Neary。[^2] + +### 优化 Linux + +作为 Linux 内核源代码的默认编译器,GCC 提供了可靠、稳定的性能以及正确构建内核所需的额外扩展。GCC 是流行的 Linux 发行版的标准组件,如 ArchLinux、CentOS、Debian、Fedora、openSUSE 和 Ubuntu 这些发行版中,GCC 通常用来编译支持系统的组件。这包括 Linux 使用的默认库(如 libc、libm、libintl、libssh、libssl、libcrypto、libexpat、libpthread 和 ncurses),这些库依赖于 GCC 来提供可靠性和高性能,并且使应用程序和系统程序可以访问 Linux 内核功能。发行版中包含的许多应用程序包也是用 GCC 构建的,例如 Python、Perl、Ruby、nginx、Apache HTTP 服务器、OpenStack、Docker 和 OpenShift。各个 Linux 发行版使用 GCC 构建的大量代码组成了内核、库和应用程序软件。对于 openSUSE 发行版,几乎 100% 的原生代码都是由 GCC 构建的,包括 6135 个源程序包、5705 个共享库和 38927 个可执行文件。这相当于每周编译 24540 个源代码包。[^3] + +Linux 发行版中包含的 GCC 的基本版本用于创建定义系统应用程序二进制接口Application Binary Interface(ABI)的内核和库。用户空间User space开发者可以选择下载 GCC 的最新稳定版本,以获得高级功能、性能优化和可用性改进。Linux 发行版提供安装说明或预构建的工具链,用于部署最新版本的 GCC 以及其他 GNU 工具,这些工具有助于提高开发人员的工作效率和缩短部署时间。 + +### 优化互联网 + +GCC 是嵌入式系统中被广泛采用的核心编译器之一,支持为日益增长的物联网设备开发软件。GCC 提供了许多扩展功能,使其非常适合嵌入式系统软件开发,包括使用编译器的内建函数、#语法、内联汇编和以应用程序为中心的命令行选项进行精细控制。GCC 支持广泛的嵌入式体系结构,包括 ARM、AMCC、AVR、Blackfin、MIPS、RISC-V、Renesas Electronics V850、NXP 和 Freescale Power 处理器,可以生成高效、高质量的代码。GCC提供的交叉编译能力对这个社区至关重要,而预制的交叉编译工具链 [^4] 是一个主要需求。例如,GNU ARM 嵌入式工具链是经过集成和验证的软件包,其中包含 ARM 嵌入式 GCC 编译器、库和其它裸机软件开发所需的工具。这些工具链可用于在 Windows、Linux 和 macOS 主机操作系统上对流行的 ARM Cortex-R 和 Cortex-M 处理器进行交叉编译,这些处理器已装载于数百亿台支持互联网的设备中。[^5] + +GCC 为云计算赋能,为需要直接管理计算资源的软件提供了可靠的开发平台,如数据库和 Web 服务引擎以及备份和安全软件。GCC 完全兼容 C++ 11 和 C++ 14,为 C++ 17 
和 C++ 2a 提供实验支持 [^6](LCTT 译注:本文原文发布于 2018 年),可以创建性能优异的对象代码,并提供可靠的调试信息。使用 GCC 的应用程序的一些例子包括:MySQL 数据库管理系统,它需要 Linux 的 GCC [^7];Apache HTTP 服务器,它建议使用 GCC [^8];Bacula,一个企业级网络备份工具,它需要 GCC。[^9] + +### 优化一切 + +对于高性能计算High Performance Computing(HPC)中使用的科学代码的研究和开发,GCC 提供了成熟的 C、C++ 和 Fortran 前端,以及对 OpenMP 和 OpenACC API的支持,用于基于指令的并行编程。因为 GCC 提供了跨计算环境的可移植性,它使得代码能够更容易地在各种新的和传统的客户机和服务器平台上进行测试。GCC 为 C、C++ 和 Fortran 编译器提供了 OpenMP 4.0 的完整支持,为 C 和 C++ 编译器提供了 OpenMP 4.5 完整支持。对于 OpenACC、 GCC 支持大部分 2.5 规范和性能优化,并且是唯一提供 [OpenACC][1] 支持的非商业、非学术编译器。 + +代码性能是这个社区的一个重要参数,GCC 提供了一个坚实的性能基础。Colfax Research 于 2017 年 11 月发表的一篇论文评估了 C++ 编译器在使用 OpenMP 4.x 指令并行化编译代码的速度和编译后代码的运行速度。图 1 描绘了不同编译器编译并使用单个线程运行时计算内核的相对性能。性能值经过了归一化处理,以 G++ 的性能为 1.0。 + +![performance][3] + +*图 1 为由不同编译器编译的每个内核的相对性能。(单线程,越高越好)。* + +他的论文总结道:“GNU 编译器在我们的测试中也做得很好。G++ 在六种情况中的三种情况下生成的代码速度是第二快的,并且在编译时间方面是最快的编译器之一。”[^10] + +### 谁在用 GCC? + +在 JetBrains 2018 年的开发者生态状况调查中,在接受调查的 6000 名开发者中,66% 的 C++ 程序员和 73% 的 C 程序员经常使用 GCC。[^11] 以下简要介绍 GCC 的优点,正是这些优点使它在开发人员社区中如此受欢迎。 + + * 对于需要为各种新的和遗留的计算平台和操作环境编写代码的开发人员,GCC 提供了对最广泛的硬件和操作环境的支持。硬件供应商提供的编译器主要侧重于对其产品的支持,而其他开源编译器在所支持的硬件和操作系统方面则受到很大限制。[^12] + * 有各种各样的基于 GCC 的预构建工具链,这对嵌入式系统开发人员特别有吸引力。这包括 GNU ARM 嵌入式工具链和 Bootlin 网站上提供的 138 个预编译交叉编译器工具链。[^13] 虽然其他开源编译器(如 Clang/LLVM)可以取代现有交叉编译工具链中的 GCC,但这些工具集需要开发者完全重新构建。[^14] + * GCC 通过成熟的编译器平台向应用程序开发人员提供可靠、稳定的性能。《在 AMD EPYC 平台上用 GCC 8/9 与 LLVM Clang 6/7 编译器基准测试》这篇文章提供了 49 个基准测试的结果,这些测试的编译器在三个优化级别上运行。使用 `-O3 -march=native` 级别的 GCC 8.2 RC1 在 34% 的时间里排在第一位,而在相同的优化级别 LLVM Clang 6.0 在 20% 的时间里赢得了第二位。[^15] + * GCC 为编译调试 [^16] 提供了改进的诊断方法,并为运行时调试提供了准确而有用的信息。GCC 与 GDB 紧密集成,GDB 是一个成熟且功能齐全的工具,它提供“不间断”调试,可以在断点处停止单个线程。 + * GCC 是一个得到良好支持的平台,它有一个活跃的、有责任感的社区,支持当前版本和以前的两个版本。由于每年都有发布计划,这为一个版本提供了两年的支持。 + +### GCC:仍然在继续优化 + +GCC 作为一个世界级的编译器继续向前发展。GCC 的最新版本是 8.2,于 2018 年 7 月发布(LCTT 译注:本文原文发表于 2018 年),增加了对即将推出的 Intel CPU、更多 ARM CPU 的硬件支持,并提高了 AMD 的 ZEN CPU 的性能。增加了对 C17 的初步支持,同时也对 C++2A 进行了初步工作。诊断功能继续得到增强,包括更好的发射诊断,改进了定位、定位范围和修复提示,特别是在 C++ 前端。Red Hat 的 David Malcolm 在 2018 年 3 
月撰写的博客概述了 GCC 8 中的可用性改进。[^17] + +新的硬件平台继续依赖 GCC 工具链进行软件开发,例如 RISC-V,这是一种自由开放的 ISA,机器学习、人工智能(AI)和物联网细分市场都对其感兴趣。GCC 仍然是 Linux 系统持续开发的关键组件。针对 Intel 架构的 Clear Linux 项目是一个为云、客户端和物联网用例构建的新兴发行版,它提供了一个很好的示例,说明如何使用和改进 GCC 编译器技术来提高基于 Linux 的系统的性能和安全性。GCC 还被用于微软 Azure Sphere 的应用程序开发,这是一个基于 Linux 的物联网应用程序操作系统,最初支持基于 ARM 的联发科 MT3620 处理器。在培养下一代程序员方面,GCC 也是树莓派的 Windows 工具链的核心组件,树莓派是一种运行基于 Debian 的 GNU/Linux 的低成本嵌入式板,用于促进学校和发展中国家的基础计算机科学教学。 + +GCC 由 GNU 项目的创始人理查德•斯托曼Richard Stallman首次发布 于 1987 年 3 月 22 日,由于它是第一个作为自由软件发布的可移植的 ANSI C 优化编译器,因此它被认为是一个重大突破。GCC 由来自世界各地的程序员组成的社区在指导委员会的指导下维护,以确保对项目进行广泛的、有代表性的监督。GCC 的社区方法是它的优势之一,它形成了一个由开发人员和用户组成的庞大而多样化的社区,他们为项目做出了贡献并提供支持。根据 Open Hub 的说法,“GCC 是世界上最大的开源团队之一,在 Open Hub 上的所有项目团队中排名前 2%。”[^18] + +关于 GCC 的许可问题,人们进行了大量的讨论,其中大多数是混淆而不是启发。GCC 在 GNU 通用公共许可证(GPL)版本 3 或更高版本下发布,但运行时库例外。这是一个左版许可,这意味着衍生作品只能在相同的许可条款下分发。GPLv3 旨在保护 GCC,防止其成为专有软件,并要求对 GCC 代码的更改可以自由公开地进行。对于“最终用户”来说,这个编译器与其他编译器完全相同;使用 GCC 对你为自己的代码所选择的任何许可都没有区别。[^19] + + [^1]: http://clang.llvm.org/features.html#gcccompat + [^2]: https://opensource.com/article/18/9/happy-birthday-gnu + [^3]: 由 SUSE 基于最近的构建统计提供的信息。在 openSUSE 中还有其他不生成可执行镜像的源码包,这些不包括在统计中。 + [^4]: https://community.arm.com/tools/b/blog/posts/gnu-toolchain-performance-in-2018 + [^5]: https://www.arm.com/products/processors/cortex-m + [^6]: https://gcc.gnu.org/projects/cxx-status.html#cxx17 + [^7]: https://mysqlserverteam.com/mysql-8-0-source-code-improvements/ + [^8]: http://httpd.apache.org/docs/2.4/install.html + [^9]: https://blog.bacula.org/what-is-bacula/system-requirements/ +[^10]: https://colfaxresearch.com/compiler-comparison/ +[^11]: https://www.jetbrains.com/research/devecosystem-2018/ +[^12]: http://releases.llvm.org/6.0.0/tools/clang/docs/UsersManual.html +[^13]: https://bootlin.com/blog/free-and-ready-to-use-cross-compilation-toolchains/ +[^14]: https://clang.llvm.org/docs/Toolchain.html +[^15]: https://www.phoronix.com/scan.php?page=article&item=gcclang-epyc-summer18&num=1 +[^16]: 
https://gcc.gnu.org/wiki/ClangDiagnosticsComparison +[^17]: https://developers.redhat.com/blog/2018/03/15/gcc-8-usability-improvements/ +[^18]: https://www.openhub.net/p/gcc/factoids#FactoidTeamSizeVeryLarge +[^19]: https://www.gnu.org/licenses/gcc-exception-3.1-faq.en.html + + +-------------------------------------------------------------------------------- + +via: https://www.linux.com/blog/2018/10/gcc-optimizing-linux-internet-and-everything + +作者:[Margaret Lewis][a] +选题:[lujun9972][b] +译者:[Chao-zhi](https://github.com/Chao-zhi) +校对:[wxy](https://github.com/wxy) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://www.linux.com/users/margaret-lewis +[b]: https://github.com/lujun9972 +[1]: https://www.openacc.org/tools +[2]: /files/images/gccjpg-0 +[3]: https://lcom.static.linuxfound.org/sites/lcom/files/gcc_0.jpg?itok=HbGnRqWX "performance" +[4]: https://www.linux.com/licenses/category/used-permission diff --git a/published/20181123 Three SSH GUI Tools for Linux.md b/published/202101/20181123 Three SSH GUI Tools for Linux.md similarity index 100% rename from published/20181123 Three SSH GUI Tools for Linux.md rename to published/202101/20181123 Three SSH GUI Tools for Linux.md diff --git a/published/20190104 Search, Study And Practice Linux Commands On The Fly.md b/published/202101/20190104 Search, Study And Practice Linux Commands On The Fly.md similarity index 100% rename from published/20190104 Search, Study And Practice Linux Commands On The Fly.md rename to published/202101/20190104 Search, Study And Practice Linux Commands On The Fly.md diff --git a/published/20190204 Getting started with Git- Terminology 101.md b/published/202101/20190204 Getting started with Git- Terminology 101.md similarity index 100% rename from published/20190204 Getting started with Git- Terminology 101.md rename to published/202101/20190204 Getting started with Git- Terminology 101.md diff --git a/published/202101/20190205 5 
Streaming Audio Players for Linux.md b/published/202101/20190205 5 Streaming Audio Players for Linux.md new file mode 100644 index 0000000000..82babefc3c --- /dev/null +++ b/published/202101/20190205 5 Streaming Audio Players for Linux.md @@ -0,0 +1,134 @@ +[#]: collector: (lujun9972) +[#]: translator: (Chao-zhi) +[#]: reviewer: (wxy) +[#]: publisher: (wxy) +[#]: url: (https://linux.cn/article-13029-1.html) +[#]: subject: (5 Streaming Audio Players for Linux) +[#]: via: (https://www.linux.com/blog/2019/2/5-streaming-audio-players-linux) +[#]: author: (Jack Wallen https://www.linux.com/users/jlwallen) + +5 个适用于 Linux 的流式音频播放器 +====== + +![](https://img.linux.net.cn/data/attachment/album/202101/18/220035k8mmbl1blmkb97f8.jpg) + +当我工作的时候,我会一直在后台播放音乐。大多数情况下,这些音乐是以黑胶唱片的形式在转盘上旋转。但有时我不想用这种单纯的方法听音乐时,我会选择听流媒体音频应用程序的方式。然而,由于我工作在 Linux 平台上,所以我只可以使用在我的操作系统上运行良好的软件。幸运的是,对于想在 Linux 桌面听流式音频的人来说,有很多工具可以选择。 + +事实上,Linux 为音乐流媒体提供了许多可靠的产品,我将重点介绍我最喜欢的五种用于此任务的工具。警告一句,并不是所有的玩意都是开源的。但是如果你不介意在你的开源桌面上运行一个专有的应用程序,你有一些非常棒的选择。让我们来看看有什么可用的。 + +### Spotify + +Linux 版的 Spotify 不是那种在你启动就闪退的愚蠢的、半生不熟的应用程序,也没有阉割什么功能。事实上,Spotify 的 Linux 版本与其他平台上的版本完全相同。使用 Spotify 流媒体客户端,你可以收听音乐和播客、创建播放列表、发现新的艺术家等等。Spotify 界面(图 1)非常易于导航和使用。 + +![Spotify][2] + +*图 1:Spotify 界面可以很容易地找到新的音乐和旧的收藏。* + +你可以使用 snap(使用 `sudo snap install Spotify` 命令)安装 Spotify,也可以使用以下命令从官方存储库安装 Spotify: + +``` +sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 931FF8E79F0876134EDDBDCCA87FF9DF48BF1C90 +sudo echo deb http://repository.spotify.com stable non-free | sudo tee /etc/apt/sources.list.d/spotify.list +sudo apt-get update +sudo apt-get install spotify-client +``` + +一旦安装,你就可以登录你的 Spotify 帐户,这样你就可以开始听好听的音乐,以帮助激励你完成你的工作。如果你已在其他设备上安装了 Spotify(并登录到同一帐户),则可以指定音乐应该流式传输到哪个设备(通过单击 Spotify 窗口右下角附近的“可用设备”图标)。 + +### Clementine + +Clementine 是 Linux 平台上最好的音乐播放器之一。Clementine 不仅允许用户播放本地存储的音乐,还可以连接到许多流媒体音频服务,例如: + + * Amazon Cloud Drive + * Box + * Dropbox + * Icecast + * Jamendo + * Magnatune + * RockRadio.com + * Radiotunes.com 
+ * SomaFM + * SoundCloud + * Spotify + * Subsonic + * Vk.com + * 或其他有趣的电台 + +使用 Clementine 有两个注意事项。首先,你必须使用最新版本(因为某些软件库中可用的构建版本已过时,并且不会安装必要的流式处理插件)。第二,即使是最新的构建版本,一些流媒体服务也不会像预期的那样工作。例如,接入 Spotify 频道时,你只能使用最热门的曲目(而无法使用播放列表,或搜索歌曲的功能)。 + +使用 Clementine 互联网流媒体服务时,你会发现其中有很多你从来没有听说过的音乐家和乐队(图 2)。 + +![Clementine][5] + +*图 2:Clementine 互联网广播是寻找新音乐的好方法。* + +### Odio + +Odio 是一个跨平台的专有应用程序(可用于 Linux、MacOS 和 Windows),它允许你流式传输各种类型的互联网音乐站。广播的内容是取自 [www.radio-browser.info][6],而应用程序本身在为你呈现流方面做了令人难以置信的工作(图 3)。 + +![Odio][8] + +*图 3:Odio 接口是你能找到的最好的接口之一。* + +Odio 让你很容易找到独特的互联网广播电台,甚至可以把你找到并收藏的电台添加到你的库中。目前,在 Linux 上安装 Odio 的唯一方法是通过 Snap。如果你的发行版支持 snap 软件包,请使用以下命令安装此流应用程序: + +``` +sudo snap install odio +``` + +安装后,你可以打开应用程序并开始使用它。无需登录(或创建)帐户。Odio 的设置非常有限。实际上,它只提供了在设置窗口中选择暗色主题或亮色主题的选项。然而,尽管它可能功能有限,但 Odio 是在 Linux 上播放互联网广播的最佳选择之一。 + +### StreamTuner2 + +Streamtuner2 是一个优秀的互联网电台 GUI 工具。使用它,你可以流式播放以下音乐: + + * Internet radio stations + * Jameno + * MyOggRadio + * Shoutcast.com + * SurfMusic + * TuneIn + * Xiph.org + * YouTube + +Streamtuner2 提供了一个很好的界面(如果不是有点过时的话),可以很容易地找到和播放你喜爱的音乐。StreamTuner2 的一个警告是,它实际上只是一个用于查找你想要听到的流媒体的 GUI。当你找到一个站点时,双击它打开与流相关的应用程序。这意味着你必须安装必要的应用程序,才能播放流媒体。如果你没有合适的应用程序,你就不能播放流媒体。因此,你将花费大量的时间来确定要为某些流媒体安装哪些应用程序(图 4)。 + +![Streamtuner2][10] + +*图4:配置 Streamtuner2 需要一个坚强的心脏。* + +### VLC + +很长一段时间以来,VLC 一直被称为 Linux 最好的媒体播放工具。这是有充分理由的,因为几乎所有你丢给它的东西它都能播放。其中包括流媒体广播电台。虽然你无法让 VLC 连接到 Spotify 这样的网站,但你可以直接访问互联网广播,点击播放列表,而 VLC 毫无疑问可以打开它。考虑到目前有很多互联网广播电台,你在寻找适合自己口味的音乐方面不会有任何问题。VLC 还包括可视化工具、均衡器(图 5)等工具。 + +![VLC ][12] + +*图 5:VLC 可视化工具和均衡器特性。* + +VLC 唯一需要注意的是,你必须有一个你希望听到的互联网广播的 URL,因为这个工具本身并不能进行管理。但是有了这些链接,你就找不到比 VLC 更好的媒体播放器了。 + +### 这些工具软件怎么来的 + +如果这五个工具都不太不适合你的需要,我建议你打开你发行版的应用商店,搜索一个适合你的。有很多工具可以制作流媒体音乐、播客等等,不仅可以在 Linux 上实现,而且很简单。 + +-------------------------------------------------------------------------------- + +via: https://www.linux.com/blog/2019/2/5-streaming-audio-players-linux + +作者:[Jack Wallen][a] +选题:[lujun9972][b] +译者:[Chao-zhi](https://github.com/Chao-zhi) 
+校对:[wxy](https://github.com/wxy) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://www.linux.com/users/jlwallen +[b]: https://github.com/lujun9972 +[2]: https://lcom.static.linuxfound.org/sites/lcom/files/spotify_0.jpg?itok=8-Ym-R61 (Spotify) +[3]: https://www.linux.com/licenses/category/used-permission +[5]: https://lcom.static.linuxfound.org/sites/lcom/files/clementine_0.jpg?itok=5oODJO3b (Clementine) +[6]: http://www.radio-browser.info +[8]: https://lcom.static.linuxfound.org/sites/lcom/files/odio.jpg?itok=sNPTSS3c (Odio) +[10]: https://lcom.static.linuxfound.org/sites/lcom/files/streamtuner2.jpg?itok=1MSbafWj (Streamtuner2) +[12]: https://lcom.static.linuxfound.org/sites/lcom/files/vlc_0.jpg?itok=QEOsq7Ii (VLC ) +[13]: https://training.linuxfoundation.org/linux-courses/system-administration-training/introduction-to-linux diff --git a/published/202101/20190205 Install Apache, MySQL, PHP (LAMP) Stack On Ubuntu 18.04 LTS.md b/published/202101/20190205 Install Apache, MySQL, PHP (LAMP) Stack On Ubuntu 18.04 LTS.md new file mode 100644 index 0000000000..4f74e02e9e --- /dev/null +++ b/published/202101/20190205 Install Apache, MySQL, PHP (LAMP) Stack On Ubuntu 18.04 LTS.md @@ -0,0 +1,436 @@ +[#]: collector: (lujun9972) +[#]: translator: (stevenzdg988) +[#]: reviewer: (wxy) +[#]: publisher: (wxy) +[#]: url: (https://linux.cn/article-13041-1.html) +[#]: subject: (Install Apache, MySQL, PHP \(LAMP\) Stack On Ubuntu 18.04 LTS) +[#]: via: (https://www.ostechnix.com/install-apache-mysql-php-lamp-stack-on-ubuntu-18-04-lts/) +[#]: author: (SK https://www.ostechnix.com/author/sk/) + +在 Ubuntu 中安装 Apache、MySQL、PHP(LAMP)套件 +====== + +![](https://www.ostechnix.com/wp-content/uploads/2019/02/lamp-720x340.jpg) + +**LAMP** 套件是一种流行的开源 Web 开发平台,可用于运行和部署动态网站和基于 Web 的应用程序。通常,LAMP 套件由 Apache Web 服务器、MariaDB/MySQL 数据库、PHP/Python/Perl 程序设计(脚本)语言组成。 LAMP 是 **L**inux,**M**ariaDB/**M**YSQL,**P**HP/**P**ython/**P**erl 的缩写。 本教程描述了如何在 
Ubuntu 18.04 LTS 服务器中安装 Apache、MySQL、PHP(LAMP 套件)。 + +就本教程而言,我们将使用以下 Ubuntu 测试。 + + * **操作系统**:Ubuntu 18.04.1 LTS Server Edition + * **IP 地址** :192.168.225.22/24 + +### 1. 安装 Apache Web 服务器 + +首先,利用下面命令更新 Ubuntu 服务器: + +``` +$ sudo apt update +$ sudo apt upgrade +``` + +然后,安装 Apache Web 服务器(命令如下): + +``` +$ sudo apt install apache2 +``` + +检查 Apache Web 服务器是否已经运行: + +``` +$ sudo systemctl status apache2 +``` + +输出结果大概是这样的: + +``` +● apache2.service - The Apache HTTP Server + Loaded: loaded (/lib/systemd/system/apache2.service; enabled; vendor preset: en + Drop-In: /lib/systemd/system/apache2.service.d + └─apache2-systemd.conf + Active: active (running) since Tue 2019-02-05 10:48:03 UTC; 1min 5s ago + Main PID: 2025 (apache2) + Tasks: 55 (limit: 2320) + CGroup: /system.slice/apache2.service + ├─2025 /usr/sbin/apache2 -k start + ├─2027 /usr/sbin/apache2 -k start + └─2028 /usr/sbin/apache2 -k start + +Feb 05 10:48:02 ubuntuserver systemd[1]: Starting The Apache HTTP Server... +Feb 05 10:48:03 ubuntuserver apachectl[2003]: AH00558: apache2: Could not reliably +Feb 05 10:48:03 ubuntuserver systemd[1]: Started The Apache HTTP Server. +``` + +祝贺你! Apache 服务已经启动并运行了!! + +#### 1.1 调整防火墙允许 Apache Web 服务器 + +默认情况下,如果你已在 Ubuntu 中启用 UFW 防火墙,则无法从远程系统访问 Apache Web 服务器。 必须按照以下步骤开启 `http` 和 `https` 端口。 + +首先,使用以下命令列出 Ubuntu 系统上可用的应用程序配置文件: + +``` +$ sudo ufw app list +``` + +输出结果: + +``` +Available applications: +Apache +Apache Full +Apache Secure +OpenSSH +``` + +如你所见,Apache 和 OpenSSH 应用程序已安装 UFW 配置文件。你可以使用 `ufw app info "Profile Name"` 命令列出有关每个配置文件及其包含的规则的信息。 + +让我们研究一下 “Apache Full” 配置文件。 为此,请运行: + +``` +$ sudo ufw app info "Apache Full" +``` + +输出结果: + +``` +Profile: Apache Full +Title: Web Server (HTTP,HTTPS) +Description: Apache v2 is the next generation of the omnipresent Apache web +server. 
+ +Ports: +80,443/tcp +``` + +如你所见,“Apache Full” 配置文件包含了启用经由端口 **80** 和 **443** 的传输规则: + +现在,运行以下命令配置允许 HTTP 和 HTTPS 传入通信: + +``` +$ sudo ufw allow in "Apache Full" +Rules updated +Rules updated (v6) +``` + +如果你不想允许 HTTP 通信,而只允许 HTTP(80) 通信,请运行: + +``` +$ sudo ufw app info "Apache" +``` + +#### 1.2 测试 Apache Web 服务器 + +现在,打开 Web 浏览器并导航到 来访问 Apache 测试页。 + +![](https://www.ostechnix.com/wp-content/uploads/2016/06/apache-2.png) + +如果看到上面类似的显示内容,那就成功了。 Apache 服务器正在工作! + +### 2. 安装 MySQL + +在 Ubuntu 安装 MySQL 请运行: + +``` +$ sudo apt install mysql-server +``` + +使用以下命令验证 MySQL 服务是否正在运行: + +``` +$ sudo systemctl status mysql +``` + +输出结果: + +``` +● mysql.service - MySQL Community Server +Loaded: loaded (/lib/systemd/system/mysql.service; enabled; vendor preset: enab +Active: active (running) since Tue 2019-02-05 11:07:50 UTC; 17s ago +Main PID: 3423 (mysqld) +Tasks: 27 (limit: 2320) +CGroup: /system.slice/mysql.service +└─3423 /usr/sbin/mysqld --daemonize --pid-file=/run/mysqld/mysqld.pid + +Feb 05 11:07:49 ubuntuserver systemd[1]: Starting MySQL Community Server... +Feb 05 11:07:50 ubuntuserver systemd[1]: Started MySQL Community Server. +``` + +MySQL 正在运行! + +#### 2.1 配置数据库管理用户(root)密码 + +默认情况下,MySQL root 用户密码为空。你需要通过运行以下脚本使你的 MySQL 服务器安全: + +``` +$ sudo mysql_secure_installation +``` + +系统将询问你是否要安装 “VALIDATE PASSWORD plugin(密码验证插件)”。该插件允许用户为数据库配置强密码凭据。如果启用,它将自动检查密码的强度并强制用户设置足够安全的密码。**禁用此插件是安全的**。但是,必须为数据库使用唯一的强密码凭据。如果不想启用此插件,只需按任意键即可跳过密码验证部分,然后继续其余步骤。 + +如果回答是 `y`,则会要求你选择密码验证级别。 + +``` +Securing the MySQL server deployment. + +Connecting to MySQL using a blank password. + +VALIDATE PASSWORD PLUGIN can be used to test passwords +and improve security. It checks the strength of password +and allows the users to set only those passwords which are +secure enough. Would you like to setup VALIDATE PASSWORD plugin? 
+ +Press y|Y for Yes, any other key for No y +``` + +可用的密码验证有 “low(低)”、 “medium(中)” 和 “strong(强)”。只需输入适当的数字(0 表示低,1 表示中,2 表示强密码)并按回车键。 + +``` +There are three levels of password validation policy: + +LOW Length >= 8 +MEDIUM Length >= 8, numeric, mixed case, and special characters +STRONG Length >= 8, numeric, mixed case, special characters and dictionary file + +Please enter 0 = LOW, 1 = MEDIUM and 2 = STRONG: +``` + +现在,输入 MySQL root 用户的密码。请注意,必须根据上一步中选择的密码策略,为 MySQL root 用户使用密码。如果你未启用该插件,则只需使用你选择的任意强度且唯一的密码即可。 + +``` +Please set the password for root here. + +New password: + +Re-enter new password: + +Estimated strength of the password: 50 +Do you wish to continue with the password provided?(Press y|Y for Yes, any other key for No) : y +``` + +两次输入密码后,你将看到密码强度(在此示例情况下为 50)。如果你确定可以,请按 `y` 继续提供的密码。如果对密码长度不满意,请按其他任意键并设置一个强密码。我现在的密码可以,所以我选择了`y`。 + +对于其余的问题,只需键入 `y` 并按回车键。这将删除匿名用户、禁止 root 用户远程登录并删除 `test`(测试)数据库。 + +``` +Remove anonymous users? (Press y|Y for Yes, any other key for No) : y +Success. + +Normally, root should only be allowed to connect from +'localhost'. This ensures that someone cannot guess at +the root password from the network. + +Disallow root login remotely? (Press y|Y for Yes, any other key for No) : y +Success. + +By default, MySQL comes with a database named 'test' that +anyone can access. This is also intended only for testing, +and should be removed before moving into a production +environment. + +Remove test database and access to it? (Press y|Y for Yes, any other key for No) : y +- Dropping test database... +Success. + +- Removing privileges on test database... +Success. + +Reloading the privilege tables will ensure that all changes +made so far will take effect immediately. + +Reload privilege tables now? (Press y|Y for Yes, any other key for No) : y +Success. + +All done! 
+``` + +以上就是为 MySQL root 用户设置密码。 + +#### 2.2 更改 MySQL 超级用户的身份验证方法 + +默认情况下,Ubuntu 系统的 MySQL root 用户为 MySQL 5.7 版本及更新的版本使用插件 `auth_socket` 设置身份验证。尽管它增强了安全性,但是当你使用任何外部程序(例如 phpMyAdmin)访问数据库服务器时,也会变得更困难。要解决此问题,你需要将身份验证方法从 `auth_socket` 更改为 `mysql_native_password`。为此,请使用以下命令登录到你的 MySQL 提示符下: + +``` +$ sudo mysql +``` + +在 MySQL 提示符下运行以下命令,找到所有 MySQL 当前用户帐户的身份验证方法: + +``` +SELECT user,authentication_string,plugin,host FROM mysql.user; +``` + +输出结果: + +``` ++------------------|-------------------------------------------|-----------------------|-----------+ +| user | authentication_string | plugin | host | ++------------------|-------------------------------------------|-----------------------|-----------+ +| root | | auth_socket | localhost | +| mysql.session | *THISISNOTAVALIDPASSWORDTHATCANBEUSEDHERE | mysql_native_password | localhost | +| mysql.sys | *THISISNOTAVALIDPASSWORDTHATCANBEUSEDHERE | mysql_native_password | localhost | +| debian-sys-maint | *F126737722832701DD3979741508F05FA71E5BA0 | mysql_native_password | localhost | ++------------------|-------------------------------------------|-----------------------|-----------+ +4 rows in set (0.00 sec) +``` + +![][2] + +如你所见,Mysql root 用户使用 `auth_socket` 插件进行身份验证。 + +要将此身份验证更改为 `mysql_native_password` 方法,请在 MySQL 提示符下运行以下命令。 别忘了用你选择的强大唯一的密码替换 `password`。 如果已启用 VALIDATION 插件,请确保已根据当前策略要求使用了强密码。 + +``` +ALTER USER 'root'@'localhost' IDENTIFIED WITH mysql_native_password BY 'password'; +``` + +使用以下命令更新数据库: + +``` +FLUSH PRIVILEGES; +``` + +使用命令再次检查身份验证方法是否已更改: + +``` +SELECT user,authentication_string,plugin,host FROM mysql.user; +``` + +输出结果: + +![][3] + +好!MySQL root 用户就可以使用密码进行身份验证来访问 `mysql shell`。 + +从 MySQL 提示符下退出: + +``` +exit +``` + +### 3. 
安装 PHP + +安装 PHP 请运行: + +``` +$ sudo apt install php libapache2-mod-php php-mysql +``` + +安装 PHP 后,在 Apache 文档根目录中创建 `info.php` 文件。通常,在大多数基于 Debian 的 Linux 发行版中,Apache 文档根目录为 `/var/www/html/` 或 `/var/www/`。Ubuntu 18.04 LTS 系统下,文档根目录是 `/var/www/html/`。 + +在 Apache 根目录中创建 `info.php` 文件: + +``` +$ sudo vi /var/www/html/info.php +``` + +在此文件中编辑如下内容: + +``` + +``` + +然后按下 `ESC` 键并且输入 `:wq` 保存并退出此文件。重新启动 Apache 服务使更改生效。 + +``` +$ sudo systemctl restart apache2 +``` + +#### 3.1 测试 PHP + +打开 Web 浏览器,然后导航到 URL 。 + +你就将看到 PHP 测试页面。 + +![](https://www.ostechnix.com/wp-content/uploads/2019/02/php-test-page.png) + +通常,当用户向 Web 服务器发出请求时,Apache 首先会在文档根目录中查找名为 `index.html` 的文件。如果你想将 Apache 更改为 `php` 文件提供服务而不是其他文件,请将 `dir.conf` 配置文件中的 `index.php` 移至第一个位置,如下所示: + +``` +$ sudo vi /etc/apache2/mods-enabled/dir.conf +``` + +上面的配置文件(`dir.conf`) 内容如下: + +``` + +DirectoryIndex index.html index.cgi index.pl index.php index.xhtml index.htm + + +# vim: syntax=apache ts=4 sw=4 sts=4 sr noet +``` + +将 `index.php` 移动到最前面。更改后,`dir.conf` 文件内容看起来如下所示。 + +``` + +DirectoryIndex index.php index.html index.cgi index.pl index.xhtml index.htm + + +# vim: syntax=apache ts=4 sw=4 sts=4 sr noet +``` + +然后按下 `ESC` 键并且输入 `:wq` 保存并关闭此文件。重新启动 Apache 服务使更改生效。 + +``` +$ sudo systemctl restart apache2 +``` + +#### 3.2 安装 PHP 模块 + +为了增加 PHP 的功能,可以安装一些其他的 PHP 模块。 + +要列出可用的 PHP 模块,请运行: + +``` +$ sudo apt-cache search php- | less +``` + +输出结果: + +![][4] + +使用方向键浏览结果。要退出,请输入 `q` 并按下回车键。 + +要查找任意 `php` 模块的详细信息,例如 `php-gd`,请运行: + +``` +$ sudo apt-cache show php-gd +``` + +安装 PHP 模块请运行: + +``` +$ sudo apt install php-gd +``` + +安装所有的模块(虽然没有必要),请运行: + +``` +$ sudo apt-get install php* +``` + +安装任何 `php` 模块后,请不要忘记重新启动 Apache 服务。要检查模块是否已加载,请在浏览器中打开 `info.php` 文件并检查是否存在。 + +接下来,你可能需要安装数据库管理工具,以通过 Web 浏览器轻松管理数据库。如果是这样,请按照以下链接中的说明安装 `phpMyAdmin`。 + +祝贺你!我们已经在 Ubuntu 服务器中成功配置了 LAMP 套件。 + +-------------------------------------------------------------------------------- + +via: 
https://www.ostechnix.com/install-apache-mysql-php-lamp-stack-on-ubuntu-18-04-lts/
+
+作者:[SK][a]
+选题:[lujun9972][b]
+译者:[stevenzdg988](https://github.com/stevenzdg988)
+校对:[wxy](https://github.com/wxy)
+
+本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
+
+[a]: https://www.ostechnix.com/author/sk/
+[b]: https://github.com/lujun9972
+[1]: data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7
+[2]: http://www.ostechnix.com/wp-content/uploads/2019/02/mysql-1.png
+[3]: http://www.ostechnix.com/wp-content/uploads/2019/02/mysql-2.png
+[4]: http://www.ostechnix.com/wp-content/uploads/2016/06/php-modules.png
diff --git a/published/202101/20190215 Make websites more readable with a shell script.md b/published/202101/20190215 Make websites more readable with a shell script.md
new file mode 100644
index 0000000000..a84cdfaa40
--- /dev/null
+++ b/published/202101/20190215 Make websites more readable with a shell script.md
@@ -0,0 +1,261 @@
+[#]: collector: (lujun9972)
+[#]: translator: (stevenzdg988)
+[#]: reviewer: (wxy)
+[#]: publisher: (wxy)
+[#]: url: (https://linux.cn/article-13052-1.html)
+[#]: subject: (Make websites more readable with a shell script)
+[#]: via: (https://opensource.com/article/19/2/make-websites-more-readable-shell-script)
+[#]: author: (Jim Hall https://opensource.com/users/jim-hall)
+
+利用 Shell 脚本让网站更具可读性
+======
+
+> 测算网站的文本和背景之间的对比度,以确保站点易于阅读。
+
+![](https://img.linux.net.cn/data/attachment/album/202101/25/231152ce5ufhjtufxj1eeu.jpg)
+
+如果希望人们发现你的网站实用,那么他们需要能够阅读它。为文本选择的颜色可能会影响网站的可读性。不幸的是,网页设计中的一种流行趋势是在打印输出文本时使用低对比度的颜色,就像在白色背景上的灰色文本。对于 Web 设计师来说,这也许看起来很酷,但对于许多阅读它的人来说确实很困难。
+
+W3C 提供了《Web 内容可访问性指南Web Content Accessibility Guidelines》,其中包括帮助 Web 设计人员选择易于区分文本和背景色的指导。这就是所谓的“对比度contrast ratio”。 W3C 定义的对比度需要进行一些计算:给定两种颜色,首先计算每种颜色的相对亮度,然后计算对比度。对比度在 1 到 21 的范围内(通常写为 1:1 到 21:1)。对比度越高,文本在背景下的突出程度就越高。例如,白色背景上的黑色文本非常醒目,对比度为 21:1。对比度为 1:1 的白色背景上的白色文本不可读。
+
+[W3C 说,正文][1] 的对比度至少应为 4.5:1,标题至少应为 
3:1。但这似乎是最低限度的要求。W3C 还建议正文至少 7:1,标题至少 4.5:1。 + +计算对比度可能比较麻烦,因此最好将其自动化。我已经用这个方便的 Bash 脚本做到了这一点。通常,脚本执行以下操作: + + 1. 获取文本颜色和背景颜色 + 2. 计算相对亮度 + 3. 计算对比度 + +### 获取颜色 + +你可能知道显示器上的每种颜色都可以用红色、绿色和蓝色(R、G 和 B)来表示。要计算颜色的相对亮度,脚本需要知道颜色的红、绿和蓝的各个分量。理想情况下,脚本会将这些信息读取为单独的 R、G 和 B 值。 Web 设计人员可能知道他们喜欢的颜色的特定 RGB 代码,但是大多数人不知道不同颜色的 RGB 值。作为一种替代的方法是,大多数人通过 “red” 或 “gold” 或 “maroon” 之类的名称来引用颜色。 + +幸运的是,GNOME 的 [Zenity][2] 工具有一个颜色选择器应用程序,可让你使用不同的方法选择颜色,然后用可预测的格式 `rgb(R,G,B)` 返回 RGB 值。使用 Zenity 可以轻松获得颜色值: + +``` +color=$( zenity --title 'Set text color' --color-selection --color='black' ) +``` + +如果用户(意外地)单击 “Cancel(取消)” 按钮,脚本将假定一种颜色: + +``` +if [ $? -ne 0 ] ; then +        echo '** color canceled .. assume black' +        color='rgb(0,0,0)' +fi +``` + +脚本对背景颜色值也执行了类似的操作,将其设置为 `$background`。 + +### 计算相对亮度 + +一旦你在 `$color` 中设置了前景色,并在 `$background` 中设置了背景色,下一步就是计算每种颜色的相对亮度。 [W3C 提供了一个算法][3] 用以计算颜色的相对亮度。 + +> 对于 sRGB 色彩空间,一种颜色的相对亮度定义为: +> +> L = 0.2126 * R + 0.7152 * G + 0.0722 * B +> +> R、G 和 B 定义为: +> +> if $R_{sRGB}$ <= 0.03928 then R = $R_{sRGB}$/12.92 +> +> else R = (($R_{sRGB}$+0.055)/1.055) $^{2.4}$ +> +> if $G_{sRGB}$ <= 0.03928 then G = $G_{sRGB}$/12.92 +> +> else G = (($G_{sRGB}$+0.055)/1.055) $^{2.4}$ +> +> if $B_{sRGB}$ <= 0.03928 then B = $B_{sRGB}$/12.92 +> +> else B = (($B_{sRGB}$+0.055)/1.055) $^{2.4}$ +> +> $R_{sRGB}$、$G_{sRGB}$ 和 $B_{sRGB}$ 定义为: +> +> $R_{sRGB}$ = $R_{8bit}$/255 +> +> $G_{sRGB}$ = $G_{8bit}$/255 +> +> $B_{sRGB}$ = $B_{8bit}$/255 + +由于 Zenity 以 `rgb(R,G,B)` 的格式返回颜色值,因此脚本可以轻松拉取分隔开的 R、B 和 G 的值以计算相对亮度。AWK 可以使用逗号作为字段分隔符(`-F,`),并使用 `substr()` 字符串函数从 `rgb(R,G,B)` 中提取所要的颜色值: + +``` +R=$( echo $color | awk -F, '{print substr($1,5)}' ) +G=$( echo $color | awk -F, '{print $2}' ) +B=$( echo $color | awk -F, '{n=length($3); print substr($3,1,n-1)}' ) +``` + +*有关使用 AWK 提取和显示数据的更多信息,[查看 AWK 备忘表][4]* + +最好使用 BC 计算器来计算最终的相对亮度。BC 支持计算中所需的简单 `if-then-else`,这使得这一过程变得简单。但是由于 BC 无法使用非整数指数直接计算乘幂,因此需要使用自然对数替代它做一些额外的数学运算: + +``` +echo "scale=4 +rsrgb=$R/255 +gsrgb=$G/255 +bsrgb=$B/255 
+if ( rsrgb <= 0.03928 ) r = rsrgb/12.92 else r = e( 2.4 * l((rsrgb+0.055)/1.055) ) +if ( gsrgb <= 0.03928 ) g = gsrgb/12.92 else g = e( 2.4 * l((gsrgb+0.055)/1.055) ) +if ( bsrgb <= 0.03928 ) b = bsrgb/12.92 else b = e( 2.4 * l((bsrgb+0.055)/1.055) ) +0.2126 * r + 0.7152 * g + 0.0722 * b" | bc -l +``` + +这会将一些指令传递给 BC,包括作为相对亮度公式一部分的 `if-then-else` 语句。接下来 BC 打印出最终值。 + +### 计算对比度 + +利用文本颜色和背景颜色的相对亮度,脚本就可以计算对比度了。 [W3C 确定对比度][5] 是使用以下公式: + +> (L1 + 0.05) / (L2 + 0.05),这里的 +> L1 是颜色较浅的相对亮度, +> L2 是颜色较深的相对亮度。 + +给定两个相对亮度值 `$r1` 和 `$r2`,使用 BC 计算器很容易计算对比度: + +``` +echo "scale=2 +if ( $r1 > $r2 ) { l1=$r1; l2=$r2 } else { l1=$r2; l2=$r1 } +(l1 + 0.05) / (l2 + 0.05)" | bc +``` + +使用 `if-then-else` 语句确定哪个值(`$r1` 或 `$r2`)是较浅还是较深的颜色。BC 执行结果计算并打印结果,脚本可以将其存储在变量中。 + +### 最终脚本 + +通过以上内容,我们可以将所有内容整合到一个最终脚本。 我使用 Zenity 在文本框中显示最终结果: + +``` +#!/bin/sh +# script to calculate contrast ratio of colors + +# read color and background color: +# zenity returns values like 'rgb(255,140,0)' and 'rgb(255,255,255)' + +color=$( zenity --title 'Set text color' --color-selection --color='black' ) +if [ $? -ne 0 ] ; then + echo '** color canceled .. assume black' + color='rgb(0,0,0)' +fi + +background=$( zenity --title 'Set background color' --color-selection --color='white' ) +if [ $? -ne 0 ] ; then + echo '** background canceled .. 
assume white' + background='rgb(255,255,255)' +fi + +# compute relative luminance: + +function luminance() +{ + R=$( echo $1 | awk -F, '{print substr($1,5)}' ) + G=$( echo $1 | awk -F, '{print $2}' ) + B=$( echo $1 | awk -F, '{n=length($3); print substr($3,1,n-1)}' ) + + echo "scale=4 +rsrgb=$R/255 +gsrgb=$G/255 +bsrgb=$B/255 +if ( rsrgb <= 0.03928 ) r = rsrgb/12.92 else r = e( 2.4 * l((rsrgb+0.055)/1.055) ) +if ( gsrgb <= 0.03928 ) g = gsrgb/12.92 else g = e( 2.4 * l((gsrgb+0.055)/1.055) ) +if ( bsrgb <= 0.03928 ) b = bsrgb/12.92 else b = e( 2.4 * l((bsrgb+0.055)/1.055) ) +0.2126 * r + 0.7152 * g + 0.0722 * b" | bc -l +} + +lum1=$( luminance $color ) +lum2=$( luminance $background ) + +# compute contrast + +function contrast() +{ + echo "scale=2 +if ( $1 > $2 ) { l1=$1; l2=$2 } else { l1=$2; l2=$1 } +(l1 + 0.05) / (l2 + 0.05)" | bc +} + +rel=$( contrast $lum1 $lum2 ) + +# print results + +( cat< 容器构建有两大趋势:使用基本镜像和从头开始构建。每个都有工程上的权衡。 + +![](https://img.linux.net.cn/data/attachment/album/202101/16/223919aubhguedlt8sk8i8.jpg) + +有人说 Linux 发行版不再与容器有关。像 distroless 和 scratch 容器等可替代的方法,似乎是风靡一时。看来,我们在考虑和做出技术决策时,更多的是基于时尚感和即时的情感满足,而不是考虑我们选择的次要影响。我们应该问这样的问题:这些选择将如何影响未来六个月的维护?工程权衡是什么?这种范式转换如何影响我们的大规模构建系统? 
+ +这真让人沮丧。如果我们忘记了工程是一个零和游戏,有可衡量的利弊权衡,有不同方法的成本和收益 —— 这样对我们自己不利,对雇主不利,对最终维护我们的代码的同事不利。最后,我们对所有的维护人员([向维护人员致敬!][1] )都是一种伤害,因为我们不欣赏他们所做的工作。 + +### 理解问题所在 + +为了理解这个问题,我们必须首先研究为什么我们使用 Linux 发行版。我将把原因分为两大类:内核和其他包。编译内核实际上相当容易。Slackware 和 Gentoo( 我的小心脏还是有点害怕)教会了我们这一点。 + +另一方面,对于一个可用的 Linux 系统需要打包大量的开发软件和应用软件,这可能会让人望而生畏。此外,确保数百万个程序包可以一起安装和工作的唯一方法是使用旧的范例:即编译它并将它作为一个工件(即 Linux 发行版)一起发布。那么,为什么 Linux 发行版要将内核和所有包一起编译呢?很简单:确保事情协调一致。 + +首先,我们来谈谈内核。内核很特别。在没有编译好的内核的情况下引导 Linux 系统有点困难。它是 Linux 操作系统的核心,也是我们在系统启动时首先依赖的。内核在编译时有很多不同的配置选项,这些选项会对硬件和软件如何在一个内核上运行产生巨大影响。这方面中的第二个问题是,系统软件(如编译器 、C 库和解释器)必须针对内核中内置的选项进行调优。Gentoo 的维基以一种发自内心的方式教我们这一点,它把每个人都变成了一个微型的开发版维护者。 + +令人尴尬的是(因为我在过去五年里一直在使用容器),我必须承认我最近才编译过内核。我必须让嵌套的 KVM 在 RHEL7 上工作,这样我才能在笔记本电脑上的 KVM 虚拟机中运行 [OpenShift on OpenStack][2] 虚拟机,以及我的 [Container Development Kit(CDK)][3]。我只想说,当时我在一个全新的 4.X 内核上启动了 RHEL7。和任何优秀的系统管理员一样,我有点担心自己错过了一些重要的配置选项和补丁。当然,我也的确错过了一些东西。比如睡眠模式无法正常工作,我的扩展底座无法正常工作,还有许多其他小的随机错误。但它在我的笔记本电脑上的一个 KVM 虚拟机上,对于 OpenStack 上的 OpenShift 的实时演示来说已经足够好了。来吧,这很有趣,对吧?但我离题了…… + +现在,我们来谈谈其他的软件包。虽然内核和相关的系统软件可能很难编译,但从工作负载的角度来看,更大的问题是编译成千上万的包,以提供一个可用的 Linux 系统。每个软件包都需要专业知识。有些软件只需要运行三个命令:`./configure`、`make` 和 `make install`。另一些则需要大量的专业知识,从在 `etc` 中添加用户和配置特定的默认值到运行安装后脚本和添加 systemd 单元文件。对于任何一个人来说,调试好你可能用得到的成千上万种不同软件所需要的一套技能都是令人望而生畏的。但是,如果你想要一个可以随时尝试新软件的可用系统,你必须学会如何编译和安装新软件,然后才能开始学习使用它。这就是没有 Linux 发行版的 Linux。当你放弃使用 Linux 发行版时,那么你就得自己编译软件。 + +关键是,你必须将所有内容构建在一起,以确保它能够以任何合理的可靠性级别协同工作,而且构建一个可用的包群需要大量的知识。这是任何一个开发人员或系统管理员都无法合理地学习和保留的知识。我描述的每个问题都适用于你的 [容器主机][4](内核和系统软件)和 [容器镜像][5](系统软件和所有其他包)——请注意:在容器镜像中还包含有编译器 、C 库、解释器和 JVM。 + +### 解决方案 + +所以你也看到了,其实使用 Linux 发行版就是解决方案。别再看了,给离你最近的软件包维护者(再次向维护人员致敬!)发张电子卡吧(等等,我是不是把我的年龄告诉别人了?)。不过说真的,这些人做了大量的工作,这真的是被低估了。Kubernetes、Istio、Prometheus,还有 Knative:我在看着你们。你们的时代要来了,到时候你们会进入维护模式,被过度使用,被低估。大约七到十年后,我将再次写这篇文章,可能是关于 Kubernetes 的。 + +### 容器构建的首要原则 + +从零开始构建和从基础镜像构建之间存在权衡。 + +#### 从基础镜像构建 + +从基础镜像构建的优点是,大多数构建操作只不过是安装或更新包。它依赖于 Linux 发行版中包维护人员所做的大量工作。它还有一个优点,即六个月甚至十年后的修补事件(使用 RHEL)是运维/系统管理员事件 (`yum 
update`),而不是开发人员事件(这需要通过代码找出某些函数参数不再工作的原因)。
+
+你想想,应用程序代码依赖于许多库,从 JSON mung 库到对象关系映射器。与 Linux 内核和 Glibc 不同,这些类型的库很少改变 API 兼容性。这意味着三年后你的修补事件可能会变成代码更改事件,而不是 yum 更新事件。因此,请好好体会一下这一点。开发人员,如果安全团队找不到防火墙黑客来阻止攻击,你将在凌晨 2 点收到短信。
+
+从基础镜像构建不是完美的;还有一些缺点,比如所有被拖入的依赖项的大小。这几乎总是会使容器镜像比从头开始构建的镜像更大。另一个缺点是你并不总是能够访问最新的上游代码。这可能会让开发人员感到沮丧,尤其是当你只想使用依赖项中的一部分功能时,但是你仍然不得不将你根本用不着的东西一起打包带走,因为上游的维护人员一直在改变这个库。
+
+如果你是一个 web 开发人员,正在对我翻白眼,我有一个词可以形容你:DevOps。那意味着你带着寻呼机,我的朋友。
+
+#### Scratch 构建
+
+Scratch 构建的优点是镜像非常小。当你不依赖容器中的 Linux 发行版时,你有很多控制权,这意味着你可以根据需要定制所有内容。这是一个最佳模型,在某些用例中是很有效的。另一个优势是你可以访问最新的软件包。你不必等待 Linux 发行版更新任何内容。是你自己在控制,所以你可以自行选择什么时候去费功夫纳入新的软件。
+
+记住,控制一切都是有代价的。通常,更新到具有新特性的新库会拖入不必要的 API 更改,这意味着修复代码中的不兼容(换句话说,这就像[给牦牛剪毛][6])。在凌晨 2 点应用程序不起作用的时候给牦牛剪毛是不好玩的。幸运的是,使用容器,你可以在下一个工作日回滚并给牦牛剪毛,但它仍会占用你为业务提供新价值、为应用程序提供新功能的时间。欢迎来到系统管理员的生活。
+
+好吧,也就是说,有些时候,Scratch 构建是有意义的。我完全承认,静态编译的 Golang 程序和 C 程序是 scratch/distroless 构建的两个不错的候选程序。对于这些类型的程序,每个容器构建都是一个编译事件。三年后你仍然需要担心 API 的损坏,但是如果你是一个 Golang 商店,你应该有能力随着时间的推移修复问题。
+
+### 结论
+
+基本上,Linux 发行版做了大量工作来节省你在常规 Linux 系统或容器上的时间。维护人员所拥有的知识是巨大的,而且没有得到真正的赞赏。容器的采用使得问题更加严重,因为它被进一步抽象了。
+
+通过容器主机,Linux 发行版可以让你访问广泛的硬件生态系统,从微型 ARM 系统到 128 核 CPU x86 巨型机箱,再到云服务商的虚拟机。他们提供可工作的容器引擎和开箱即用的容器运行时环境,所以你只需启动你的容器,让其他人担心事情的进展。
+
+对于容器镜像,Linux 发行版为你的项目提供了对大量软件的轻松访问。即使你从头开始构建,你也可能会看到一个包维护人员是如何构建和运送东西的 —— 一个好的艺术家是一个好的偷学者,所以不要低估这项工作的价值。
+
+所以,感谢 Fedora、RHEL(Frantisek,你是我的英雄 )、Debian、Gentoo 和其他 Linux 发行版的所有维护人员。我很感激你所做的工作,尽管我是个“容器工人”。
+
+--------------------------------------------------------------------------------
+
+via: https://opensource.com/article/19/2/linux-distributions-still-matter-containers
+
+作者:[Scott McCarty][a]
+选题:[lujun9972][b]
+译者:[Chao-zhi](https://github.com/Chao-zhi)
+校对:[wxy](https://github.com/wxy)
+
+本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
+
+[a]: https://opensource.com/users/fatherlinux
+[b]: https://github.com/lujun9972
+[1]: https://aeon.co/essays/innovation-is-overvalued-maintenance-often-matters-more
+[2]: 
https://blog.openshift.com/openshift-on-openstack-delivering-applications-better-together/ +[3]: https://developers.redhat.com/blog/2018/02/13/red-hat-cdk-nested-kvm/ +[4]: https://developers.redhat.com/blog/2018/02/22/container-terminology-practical-introduction/#h.8tyd9p17othl +[5]: https://developers.redhat.com/blog/2018/02/22/container-terminology-practical-introduction/#h.dqlu6589ootw +[6]: https://en.wiktionary.org/wiki/yak_shaving diff --git a/published/20190322 Printing from the Linux command line.md b/published/202101/20190322 Printing from the Linux command line.md similarity index 100% rename from published/20190322 Printing from the Linux command line.md rename to published/202101/20190322 Printing from the Linux command line.md diff --git a/published/20190516 Monitor and Manage Docker Containers with Portainer.io (GUI tool) - Part-2.md b/published/202101/20190516 Monitor and Manage Docker Containers with Portainer.io (GUI tool) - Part-2.md similarity index 100% rename from published/20190516 Monitor and Manage Docker Containers with Portainer.io (GUI tool) - Part-2.md rename to published/202101/20190516 Monitor and Manage Docker Containers with Portainer.io (GUI tool) - Part-2.md diff --git a/published/202101/20190924 Integrate online documents editors, into a Python web app using ONLYOFFICE.md b/published/202101/20190924 Integrate online documents editors, into a Python web app using ONLYOFFICE.md new file mode 100644 index 0000000000..b1404ac687 --- /dev/null +++ b/published/202101/20190924 Integrate online documents editors, into a Python web app using ONLYOFFICE.md @@ -0,0 +1,391 @@ +[#]: collector: (lujun9972) +[#]: translator: (stevenzdg988) +[#]: reviewer: (wxy) +[#]: publisher: (wxy) +[#]: url: (https://linux.cn/article-13037-1.html) +[#]: subject: (Integrate online documents editors, into a Python web app using ONLYOFFICE) +[#]: via: (https://opensourceforu.com/2019/09/integrate-online-documents-editors-into-a-python-web-app-using-onlyoffice/) 
+[#]: author: (Aashima Sharma https://opensourceforu.com/author/aashima-sharma/) + +利用 ONLYOFFICE 将在线文档编辑器集成到 Python Web 应用程序中 +====== + +![][1] + +[ONLYOFFICE][3] 是根据 GNU AGPL v.3 许可证条款分发的开源协作办公套件。它包含三个用于文本文档、电子表格和演示文稿的编辑器,并具有以下功能: + + * 查看,编辑和协同编辑 `.docx`、`.xlsx`、`.pptx` 文件。OOXML 作为一种核心格式,可确保与 Microsoft Word、Excel 和 PowerPoint 文件的高度兼容性。 + * 通过内部转换为 OOXML,编辑其他流行格式(`.odt`、`.rtf`、`.txt`、`.html`、`.ods`、`.csv`、`.odp`)。 + * 熟悉的选项卡式界面。 + * 协作工具:两种协同编辑模式(快速和严谨),跟踪更改,评论和集成聊天。 + * 灵活的访问权限管理:完全访问权限、只读、审阅、表单填写和评论。 + * 使用 API 构建附加组件。 + * 250 种可用语言和象形字母表。 + +通过 API,开发人员可以将 ONLYOFFICE 编辑器集成到网站和利用程序设计语言编写的应用程序中,并能配置和管理编辑器。 + +要集成 ONLYOFFICE 编辑器,我们需要一个集成应用程序来连接编辑器(ONLYOFFICE 文档服务器)和服务。 要在你的界面中使用编辑器,因该授予 ONLYOFFICE 以下权限: + + * 添加并执行自定义代码。 + * 用于下载和保存文件的匿名访问权限。这意味着编辑器仅与服务器端的服务通信,而不包括客户端的任何用户授权数据(浏览器 cookies)。 + * 在用户界面添加新按钮(例如,“在 ONLYOFFICE 中打开”、“在 ONLYOFFICE 中编辑”)。 + * 开启一个新页面,ONLYOFFICE 可以在其中执行脚本以添加编辑器。 + * 能够指定文档服务器连接设置。 + +流行的协作解决方案的成功集成案例有很多,如 Nextcloud、ownCloud、Alfresco、Confluence 和 SharePoint,都是通过 ONLYOFFICE 提供的官方即用型连接器实现的。 + +实际的集成案例之一是 ONLYOFFICE 编辑器与以 C# 编写的开源协作平台的集成。该平台具有文档和项目管理、CRM、电子邮件聚合器、日历、用户数据库、博客、论坛、调查、Wiki 和即时通讯程序的功能。 + +将在线编辑器与 CRM 和项目模块集成,你可以: + + * 文档关联到 CRM 时机和容器、项目任务和讨论,甚至创建一个单独的文件夹,其中包含与项目相关的文档、电子表格和演示文稿。 + * 直接在 CRM 或项目模块中创建新的文档、工作表和演示文稿。 + * 打开和编辑关联的文档,或者下载和删除。 + * 将联系人从 CSV 文件批量导入到 CRM 中,并将客户数据库导出为 CSV 文件。 + +在“邮件”模块中,你可以关联存储在“文档模块”中的文件,或者将指向所需文档的链接插入到邮件正文中。 当 ONLYOFFICE 用户收到带有附件的文档的消息时,他们可以:下载附件、在浏览器中查看文件、打开文件进行编辑或将其保存到“文档模块”。 如上所述,如果格式不同于 OOXML ,则文件将自动转换为 `.docx`、`.xlsx`、`.pptx`,并且其副本也将以原始格式保存。 + +在本文中,你将看到 ONLYOFFICE 与最流行的编程语言之一的 Python 编写的文档管理系统的集成过程。 以下步骤将向你展示如何创建所有必要的部分,以使在 DMS(文档管理系统Document Management System)界面内的文档中可以进行协同工作成为可能:查看、编辑、协同编辑、保存文件和用户访问管理,并可以作为服务的示例集成到 Python 应用程序中。 + +### 1、前置需求 + +首先,创建集成过程的关键组件:[ONLYOFFICE 文档服务器][4] 和用 Python 编写的文件管理系统。 + +#### 1.1、ONLYOFFICE 文档服务器 + +要安装 ONLYOFFICE 文档服务器,你可以从多个安装选项中进行选择:编译 GitHub 上可用的源代码,使用 `.deb` 或 `.rpm` 软件包亦或 Docker 镜像。 + +我们推荐使用下面这条命令利用 Docker 映像安装文档服务器和所有必需的依赖。请注意,选择此方法,你需要安装最新的 Docker 版本。 + 
+``` +docker run -itd -p 80:80 onlyoffice/documentserver-de +``` + +#### 1.2、利用 Python 开发 DMS + +如果已经拥有一个,请检查它是否满足以下条件: + + * 包含需要打开以查看/编辑的保留文件 + * 允许下载文件 + +对于该应用程序,我们将使用 Bottle 框架。我们将使用以下命令将其安装在工作目录中: + +``` +pip install bottle +``` + +然后我们创建应用程序代码 `main.py` 和模板 `index.tpl`。 + +我们将以下代码添加到 `main.py` 文件中: + +``` +from bottle import route, run, template, get, static_file # connecting the framework and the necessary components +@route('/') # setting up routing for requests for / +def index(): + return template('index.tpl') # showing template in response to request + +run(host="localhost", port=8080) # running the application on port 8080 +``` + +一旦我们运行该应用程序,点击 就会在浏览器上呈现一个空白页面 。 +为了使文档服务器能够创建新文档,添加默认文件并在模板中生成其名称列表,我们应该创建一个文件夹 `files` 并将3种类型文件(`.docx`、`.xlsx` 和 `.pptx`)放入其中。 + +要读取这些文件的名称,我们使用 `listdir` 组件(模块): + +``` +from os import listdir +``` + +现在让我们为文件夹中的所有文件名创建一个变量: + +``` +sample_files = [f for f in listdir('files')] +``` + +要在模板中使用此变量,我们需要通过 `template` 方法传递它: + +``` +def index(): + return template('index.tpl', sample_files=sample_files) +``` + +这是模板中的这个变量: + +``` +% for file in sample_files: +
+ {{file}} +
+% end +``` + +我们重新启动应用程序以查看页面上的文件名列表。 + +使这些文件可用于所有应用程序用户的方法如下: + +``` +@get("/files/") +def show_sample_files(filepath): + return static_file(filepath, root="files") +``` + +### 2、查看文档 + +所有组件准备就绪后,让我们添加函数以使编辑者可以利用应用接口操作。 + +第一个选项使用户可以打开和查看文档。连接模板中的文档编辑器 API : + +``` + +``` + +`editor_url` 是文档编辑器的链接接口。 + +打开每个文件以供查看的按钮: + +``` + +``` + +现在我们需要添加带有 `id` 的 `div` 标签,打开文档编辑器: + +``` +
+``` + +要打开编辑器,必须调用调用一个函数: + +``` + +``` + +DocEditor 函数有两个参数:将在其中打开编辑器的元素 `id` 和带有编辑器设置的 `JSON`。 +在此示例中,使用了以下必需参数: + + * `documentType` 由其格式标识(`.docx`、`.xlsx`、`.pptx` 用于相应的文本、电子表格和演示文稿) + * `document.url` 是你要打开的文件链接。 + * `editorConfig.mode`。 + +我们还可以添加将在编辑器中显示的 `title`。 + +接下来,我们可以在 Python 应用程序中查看文档。 + +### 3、编辑文档 + +首先,添加 “Edit”(编辑)按钮: + +``` + +``` + +然后创建一个新功能,打开文件进行编辑。类似于查看功能。 + +现在创建 3 个函数: + +``` + +``` + +`destroyEditor` 被调用以关闭一个打开的编辑器。 + +你可能会注意到,`edit()` 函数中缺少 `editorConfig` 参数,因为默认情况下它的值是:`{"mode":"edit"}`。 + +现在,我们拥有了打开文档以在 Python 应用程序中进行协同编辑的所有功能。 + +### 4、如何在 Python 应用中利用 ONLYOFFICE 协同编辑文档 + +通过在编辑器中设置对同一文档使用相同的 `document.key` 来实现协同编辑。 如果没有此键值,则每次打开文件时,编辑器都会创建编辑会话。 + +为每个文档设置唯一键,以使用户连接到同一编辑会话时进行协同编辑。 密钥格式应为以下格式:`filename +"_key"`。下一步是将其添加到当前文档的所有配置中。 + +``` +document: { + url: "host_url" + '/' + filepath, + title: filename, + key: filename + '_key' +}, +``` + +### 5、如何在 Python 应用中利用 ONLYOFFICE 保存文档 + +每次我们更改并保存文件时,ONLYOFFICE 都会存储其所有版本。 让我们仔细看看它是如何工作的。 关闭编辑器后,文档服务器将构建要保存的文件版本并将请求发送到 `callbackUrl` 地址。 该请求包含 `document.key`和指向刚刚构建的文件的链接。 + +`document.key` 用于查找文件的旧版本并将其替换为新版本。 由于这里没有任何数据库,因此仅使用 `callbackUrl` 发送文件名。 + +在 `editorConfig.callbackUrl` 的设置中指定 `callbackUrl` 参数并将其添加到 `edit()` 方法中: + +``` + function edit(filename) { + const filepath = 'files/' + filename; + if (editor) { + editor.destroyEditor() + } + editor = new DocsAPI.DocEditor("editor", + { + documentType: get_file_type(filepath), + document: { + url: "host_url" + '/' + filepath, + title: filename, + key: filename + '_key' + } + , + editorConfig: { + mode: 'edit', + callbackUrl: "host_url" + '/callback' + '&filename=' + filename // add file name as a request parameter + } + }); + } +``` + +编写一种方法,在获取到 POST 请求发送到 `/callback` 地址后将保存文件: + +``` +@post("/callback") # processing post requests for /callback +def callback(): + if request.json['status'] == 2: + file = requests.get(request.json['url']).content + with open('files/' + request.query['filename'], 'wb') as f: + f.write(file) + return 
"{\"error\":0}" +​ +``` + +`# status 2` 是已生成的文件,当我们关闭编辑器时,新版本的文件将保存到存储器中。 + +### 6、管理用户 + +如果应用中有用户,并且你需要查看谁在编辑文档,请在编辑器的配置中输入其标识符(`id`和`name`)。 + +在界面中添加选择用户的功能: + +``` + +``` + +如果在标记 ` + + + + +``` + +要在浏览器中运行此文件,请双击文件或打开你喜欢的浏览器,点击菜单,然后选择**文件->打开文件**。(如果使用 Brackets 软件,也可以使用角落处的闪电图标在浏览器中打开文件)。 + +### 生成伪随机数 + +猜谜游戏的第一步是为玩家生成一个数字供玩家猜测。JavaScript 包含几个内置的全局对象,可帮助你编写代码。要生成随机数,请使用 `Math` 对象。 + +JavaScript中的 [Math][17] 具有处理和数学相关的属性和功能。你将使用两个数学函数来生成随机数,供你的玩家猜测。 + +[Math.random()][18],会将生成一个介于 0 和 1 之间的伪随机数。(`Math.random` 包含 0 但不包含 1。这意味着该函数可以生成 0 ,永远不会产生 1) + +对于此游戏,请将随机数设置在 1 到 100 之间以缩小玩家的选择范围。取刚刚生成的小数,然后乘以 100,以产生一个介于 0 到……甚至不是 100 之间的小数。至此,你将需要其他步骤来解决这个问题。 + +现在,你的数字仍然是小数,但你希望它是一个整数。为此,你可以使用属于 `Math` 对象的另一个函数 [Math.floor()][19]。`Math.floor()` 的目的是返回小于或等于你作为参数指定的数字的最大整数,这意味着它会四舍五入为最接近的整数: + +``` +Math.floor(Math.random() * 100) +``` + +这样你将得到 0 到 99 之间的整数,这不是你想要的范围。你可以在最后一步修复该问题,即在结果中加 1。瞧!现在,你有一个(有点)随机生成的数字,介于 1 到 100 之间: + +``` +Math.floor(Math.random() * 100) + 1 +``` + +### 变量 + +现在,你需要存储随机生成的数字,以便可以将其与玩家的猜测进行比较。为此,你可以将其存储到一个 **变量**。 + +JavaScript 具有不同类型的变量,你可以选择这些类型,具体取决于你要如何使用该变量。对于此游戏,请使用 `const` 和 `let`。 + + * `let` 用于指示变量在整个程序中可以改变。 + * `const` 用于指示变量不应该被修改。 + +`const` 和 `let` 还有很多要说的,但现在知道这些就足够了。 + +随机数在游戏中仅生成一次,因此你将使用 `const` 变量来保存该值。你想给变量起一个清楚地表明要存储什么值的名称,因此将其命名为 `randomNumber`: + +``` +const randomNumber +``` + +有关命名的注意事项:JavaScript 中的变量和函数名称以驼峰形式编写。如果只有一个单词,则全部以小写形式书写。如果有多个单词,则第一个单词均为小写,其他任何单词均以大写字母开头,且单词之间没有空格。 + +### 打印到控制台 + +通常,你不想向任何人显示随机数,但是开发人员可能想知道生成的数字以使用它来帮助调试代码。 使用 JavaScript,你可以使用另一个内置函数 [console.log()][20] 将数字输出到浏览器的控制台。 + +大多数浏览器都包含开发人员工具,你可以通过按键盘上的 `F12` 键来打开它们。从那里,你应该看到一个 **控制台** 标签。打印到控制台的所有信息都将显示在此处。由于到目前为止编写的代码将在浏览器加载后立即运行,因此,如果你查看控制台,你应该会看到刚刚生成的随机数! 
+ +![Javascript game with console][21] + +### 函数 + +接下来,你需要一种方法来从数字输入字段中获得玩家的猜测,将其与你刚刚生成的随机数进行比较,并向玩家提供反馈,让他们知道他们是否正确猜到了。为此,编写一个函数。 **函数** 是执行一定任务的代码块。函数是可以重用的,这意味着如果你需要多次运行相同的代码,则可以调用函数,而不必重写执行任务所需的所有步骤。 + +根据你使用的 JavaScript 版本,有许多不同的方法来编写或声明函数。由于这是该语言的基础入门,因此请使用基本函数语法声明函数。 + +以关键字 `function` 开头,然后起一个函数名。好的做法是使用一个描述该函数的功能的名称。在这个例子中,你正在检查玩家的猜测的数,因此此函数的名字可以是 `checkGuess`。在函数名称之后,写上一组小括号,然后写上一组花括号。 你将在以下花括号之间编写函数的主体: + +``` +function checkGuess() {} +``` + +### 使用 DOM + +JavaScript 的目的之一是与网页上的 HTML 交互。它通过文档对象模型(DOM)进行此操作,DOM 是 JavaScript 用于访问和更改网页信息的对象。现在,你需要从 HTML 中获取数字输入字段中玩家的猜测。你可以使用分配给 HTML 元素的 `id` 属性(在这种情况下为 `guess`)来做到这一点: + +``` + +``` + +JavaScript 可以通过访问玩家输入到数字输入字段中的数来获取其值。你可以通过引用元素的 ID 并在末尾添加 `.value` 来实现。这次,使用 `let` 定义的变量来保存用户的猜测值: + +``` +let myGuess = guess.value +``` + +玩家在数字输入字段中输入的任何数字都将被分配给 `checkGuess` 函数中的 `myGuess` 变量。 + +### 条件语句 + +下一步是将玩家的猜测与游戏产生的随机数进行比较。你还想给玩家反馈,让他们知道他们的猜测是太高,太低还是正确。 + +你可以使用一系列条件语句来决定玩家将收到的反馈。**条件语句** 在运行代码块之前检查是否满足条件。如果不满足条件,则代码停止,继续检查下一个条件,或者继续执行其余代码,而无需执行条件块中的代码: + +``` +if (myGuess === randomNumber){ +  feedback.textContent = "You got it right!" +} +else if(myGuess > randomNumber) { +  feedback.textContent = "Your guess was " + myGuess + ". That's too high. Try Again!" +} +else if(myGuess < randomNumber) { +  feedback.textContent = "Your guess was " + myGuess + ". That's too low. Try Again!" +} +``` + +第一个条件块使用比较运算符 `===` 将玩家的猜测与游戏生成的随机数进行比较。比较运算符检查右侧的值,将其与左侧的值进行比较,如果匹配则返回布尔值 `true`,否则返回布尔值 `false`。 + +如果数字匹配(猜对了!),为了让玩家知道。通过将文本添加到具有 `id` 属性 `feedback` 的 `

` 标记中来操作 DOM。就像上面的 `guess.value` 一样,除了不是从 DOM 获取信息,而是更改其中的信息。`

` 元素没有像 `` 元素那样的值,而是具有文本,因此请使用 `.textContent` 访问元素并设置要显示的文本: + +``` +feedback.textContent = "You got it right!" +``` + +当然,玩家很有可能在第一次尝试时就猜错了,因此,如果 `myGuess` 和 `randomNumber` 不匹配,请给玩家一个线索,以帮助他们缩小猜测范围。如果第一个条件失败,则代码将跳过该 `if` 语句中的代码块,并检查下一个条件是否为 `true`。 这使你进入 `else if` 块: + +``` +else if(myGuess > randomNumber) { +  feedback.textContent = "Your guess was " + myGuess + ". That's too high. Try Again!" +} +``` + +如果你将其作为句子阅读,则可能是这样的:“如果玩家的猜测等于随机数,请让他们知道他们猜对了。否则,请检查玩家的猜测是否大于 `randomNumber`,如果是,则显示玩家的猜测并告诉他们太高了。” + +最后一种可能性是玩家的猜测低于随机数。 要检查这一点,再添加一个 `else if` 块: + +``` +else if(myGuess < randomNumber) { +  feedback.textContent = "Your guess was " + myGuess + ". That's too low. Try Again!" +} +``` + +### 用户事件和事件监听器 + +如果你看上面的代码,则会看到某些代码在页面加载时自动运行,但有些则不会。你想在玩游戏之前生成随机数,但是你不想在玩家将数字输入到数字输入字段并准备检查它之前检查其猜测。 + +生成随机数并将其打印到控制台的代码不在函数的范围内,因此它将在浏览器加载脚本时自动运行。 但是,要使函数内部的代码运行,你必须对其进行调用。 + +调用函数有几种方法。在此,你希望该函数在用户单击 “Check My Guess” 按钮时运行。单击按钮将创建一个用户事件,然后 JavaScript 可以 “监听” 这个事件,以便知道何时需要运行函数。 + +代码的最后一行将事件侦听器添加到按钮上,以在单击按钮时调用函数。当它“听到”该事件时,它将运行分配给事件侦听器的函数: + +``` +submitGuess.addEventListener('click', checkGuess) +``` + +就像访问 DOM 元素的其他实例一样,你可以使用按钮的 ID 告诉 JavaScript 与哪个元素进行交互。 然后,你可以使用内置的 `addEventListener` 函数来告诉 JavaScript 要监听的事件。 + +你已经看到了带有参数的函数,但花点时间看一下它是如何工作的。参数是函数执行其任务所需的信息。并非所有函数都需要参数,但是 `addEventListener` 函数需要两个参数。它采用的第一个参数是将为其监听的用户事件的名称。用户可以通过多种方式与 DOM 交互,例如键入、移动鼠标,键盘上的 `TAB` 键和粘贴文本。在这种情况下,你正在监听的用户事件是单击按钮,因此第一个参数将是 `click`。 + +`addEventListener`的第二个所需的信息是用户单击按钮时要运行的函数的名称。 这里我们需要 `checkGuess` 函数。 + +现在,当玩家按下 “Check My Guess” 按钮时,`checkGuess` 函数将获得他们在数字输入字段中输入的值,将其与随机数进行比较,并在浏览器中显示反馈,以使玩家知道他们猜的怎么样。 太棒了!你的游戏已准备就绪。 + +### 学习 JavaScript 以获取乐趣和收益 + +这一点点的平凡无奇的 JavaScript 只是这个庞大的生态系统所提供功能的一小部分。这是一种值得花时间投入学习的语言,我鼓励你继续挖掘并学习更多。 + +-------------------------------------------------------------------------------- + +via: https://opensource.com/article/21/1/learn-javascript + +作者:[Mandy Kendall][a] +选题:[lujun9972][b] +译者:[amwps290](https://github.com/amwps290) +校对:[wxy](https://github.com/wxy) + 
+本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensource.com/users/mkendall +[b]: https://github.com/lujun9972 +[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/code_javascript.jpg?itok=60evKmGl "Javascript code close-up with neon graphic overlay" +[2]: https://opensource.com/tags/javascript +[3]: https://opensource.com/article/20/11/reactjs-tutorial +[4]: https://opensource.com/article/18/9/open-source-javascript-chart-libraries +[5]: https://opensource.com/article/20/12/brackets +[6]: http://december.com/html/4/element/html.html +[7]: http://december.com/html/4/element/head.html +[8]: http://december.com/html/4/element/meta.html +[9]: http://december.com/html/4/element/title.html +[10]: http://december.com/html/4/element/body.html +[11]: http://december.com/html/4/element/h1.html +[12]: http://december.com/html/4/element/p.html +[13]: http://december.com/html/4/element/label.html +[14]: http://december.com/html/4/element/input.html +[15]: http://december.com/html/4/element/form.html +[16]: http://december.com/html/4/element/script.html +[17]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math +[18]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math/random +[19]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math/floor +[20]: https://developer.mozilla.org/en-US/docs/Web/API/Console/log +[21]: https://opensource.com/sites/default/files/javascript-game-with-console.png "Javascript game with console" diff --git a/published/20210121 How Nextcloud is the ultimate open source productivity suite.md b/published/20210121 How Nextcloud is the ultimate open source productivity suite.md new file mode 100644 index 0000000000..397dfea81f --- /dev/null +++ b/published/20210121 How Nextcloud is the ultimate open source productivity suite.md @@ -0,0 +1,70 @@ +[#]: collector: 
(lujun9972) +[#]: translator: (geekpi) +[#]: reviewer: (wxy) +[#]: publisher: (wxy) +[#]: url: (https://linux.cn/article-13077-1.html) +[#]: subject: (How Nextcloud is the ultimate open source productivity suite) +[#]: via: (https://opensource.com/article/21/1/nextcloud-productivity) +[#]: author: (Kevin Sonney https://opensource.com/users/ksonney) + +Nextcloud 是如何成为终极开源生产力套件的 +====== + +> Nextcloud 可以取代你用于协作、组织和任务管理的许多在线应用。 + +![](https://img.linux.net.cn/data/attachment/album/202102/02/121553uhl3pjljjkhj0h8p.jpg) + +在前几年,这个年度系列涵盖了单个的应用。今年,我们除了关注 2021 年的策略外,还将关注一体化解决方案。欢迎来到 2021 年 21 天生产力的第十一天。 + +基于 Web 的服务几乎可以在任何地方访问你的数据,它们每小时可以支持数百万用户。不过对于我们中的一些人来说,由于各种原因,运行自己的服务比使用大公司的服务更可取。也许我们的工作是受监管的或有明确安全要求。也许我们有隐私方面的考虑,或者只是喜欢能够自己构建、运行和修复事物。不管是什么情况,[Nextcloud][2] 都可以提供你所需要的大部分服务,但是是在你自己的硬件上。 + +![NextCloud Dashboard displaying service options][3] + +*Nextcloud 控制面板(Kevin Sonney, [CC BY-SA 4.0][4])* + +大多数时候,当我们想到 Nextcloud 时,我们会想到文件共享和同步,类似于 Dropbox、OneDrive 和 Google Drive 等商业产品。然而,如今,它是一个完整的生产力套件,拥有电子邮件客户端、日历、任务和笔记本。 + +有几种方法可以安装和运行 Nextcloud。你可以把它安装到裸机服务器上、在 Docker 容器中运行,或者作为虚拟机运行。如果可以考虑,还有一些托管服务将为你运行 Nextcloud。最后,有适用于所有主流操作系统的应用,包括移动应用,以便随时访问。 + +![Nextcloud virtual machine][5] + +*Nextcloud 虚拟机(Kevin Sonney, [CC BY-SA 4.0][4])* + +默认情况下,Nextcloud 会安装文件共享和其他一些相关应用(或附加组件)。你可以在管理界面中找到“应用”页面,这里允许你安装单个附加组件和一些预定义的相关应用捆绑。对我而言,我选择了 “Groupware Bundle”,其中包括“邮件”、“日历”、“联系人”和 “Deck”。“Deck” 是一个轻量级的看板,用于处理任务。我也安装了“记事本”和“任务”应用。 + +Nextcloud “邮件” 是一个非常直白的 IMAP 邮件客户端。虽然 Nextcloud 没有将 IMAP 或 SMTP 服务器作为软件包的一部分,但你可以很容易地在操作系统中添加一个或使用远程服务。“日历”应用是相当标准的,也允许你订阅远程日历。有一个缺点是,远程日历(例如,来自大型云提供商)是只读的,所以你可以查看但不能修改它们。 + +![NextCoud App Interface][6] + +*Nextcloud 应用界面 (Kevin Sonney, [CC BY-SA 4.0][4])* + +“记事本” 是一个简单的文本记事本,允许你创建和更新简短的笔记、日记和相关的东西。“任务” 是一款待办事项应用,支持多个列表、任务优先级、完成百分比以及其他一些用户期待的标准功能。如果你安装了 “Deck”,它的任务卡也会被列出来。每个看板都会显示自己的列表,所以你可以使用 “Deck” 或 “任务” 来跟踪完成的内容。 + +“Deck” 本身就是一个看板应用,将任务以卡片的形式呈现在流程中。如果你喜欢看板流程,它是一个追踪进度的优秀应用。 + +![Taking notes][7] + +*做笔记 (Kevin Sonney, [CC BY-SA 4.0][4])* + +Nextcloud 
中所有的应用都原生支持通过标准协议进行共享。与一些类似的解决方案不同,它的分享并不是为了完成功能列表中的一项而加上去的。分享是 Nextcloud 存在的主要原因之一,所以使用起来非常简单。你还可以将链接分享到社交媒体、通过电子邮件分享等。你可以用一个 Nextcloud 取代多个在线服务,它在任何地方都可以访问,以协作为先。 + +-------------------------------------------------------------------------------- + +via: https://opensource.com/article/21/1/nextcloud-productivity + +作者:[Kevin Sonney][a] +选题:[lujun9972][b] +译者:[geekpi](https://github.com/geekpi) +校对:[wxy](https://github.com/wxy) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensource.com/users/ksonney +[b]: https://github.com/lujun9972 +[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/team_dev_email_chat_video_work_wfm_desk_520.png?itok=6YtME4Hj (Working on a team, busy worklife) +[2]: https://nextcloud.com/ +[3]: https://opensource.com/sites/default/files/day11-image1_0.png +[4]: https://creativecommons.org/licenses/by-sa/4.0/ +[5]: https://opensource.com/sites/default/files/pictures/nextcloud-vm.png (Nextcloud virtual machine) +[6]: https://opensource.com/sites/default/files/pictures/nextcloud-app-interface.png (NextCoud App Interface) +[7]: https://opensource.com/sites/default/files/day11-image3.png (Taking notes in Nextcloud) diff --git a/published/20210122 3 tips for automating your email filters.md b/published/20210122 3 tips for automating your email filters.md new file mode 100644 index 0000000000..f3644bb0e5 --- /dev/null +++ b/published/20210122 3 tips for automating your email filters.md @@ -0,0 +1,60 @@ +[#]: collector: (lujun9972) +[#]: translator: (geekpi) +[#]: reviewer: (wxy) +[#]: publisher: (wxy) +[#]: url: (https://linux.cn/article-13073-1.html) +[#]: subject: (3 tips for automating your email filters) +[#]: via: (https://opensource.com/article/21/1/email-filter) +[#]: author: (Kevin Sonney https://opensource.com/users/ksonney) + +3 个自动化电子邮件过滤器的技巧 +====== + +> 通过这些简单的建议,减少你的电子邮件并让你的生活更轻松。 + 
+![](https://img.linux.net.cn/data/attachment/album/202102/01/103638ozdejmy6eycm6omx.jpg)
+
+在前几年,这个年度系列涵盖了单个的应用。今年,我们除了关注 2021 年的策略外,还将关注一体化解决方案。欢迎来到 2021 年 21 天生产力的第十二天。
+
+如果有一件事是我喜欢的,那就是自动化。只要有机会,我就会把小任务进行自动化。早起打开鸡舍的门?我买了一扇门,可以在日出和日落时开门和关门。每天从早到晚实时监控鸡群?用 Node-RED 和 [OBS-Websockets][2] 稍微花点时间,就能搞定。
+
+我们还有电子邮件。几天前,我写过关于处理邮件的文章,也写过关于标签和文件夹的文章。只要做一点前期的工作,你就可以在邮件进来的时候自动摆脱掉大量管理邮件的开销。
+
+![Author has 480 filters][3]
+
+*是的,我有很多过滤器。(Kevin Sonney, [CC BY-SA 4.0][4])*
+
+有两种主要方式来过滤你的电子邮件:在服务端或者客户端上。我更喜欢在服务端上做,因为我不断地在尝试新的和不同的电子邮件客户端。(不,真的,我光这个星期就已经使用了五个不同的客户端。我可能有问题。)
+
+无论哪种方式,我都喜欢用电子邮件过滤规则做几件事,以使我的电子邮件更容易浏览,并保持我的收件箱不混乱。
+
+ 1. 将不紧急的邮件移到“稍后阅读”文件夹中。对我而言,这包括来自社交网络、新闻简报和邮件列表的通知。
+ 2. 按列表或主题给消息贴上标签。我属于几个组织,虽然它们经常会被放在“稍后阅读”文件夹中,但我会添加第二个或第三个标签,以说明该来源或项目的内容,以帮助搜索时找到相关的东西。
+ 3. 不要把规则搞得太复杂。这一点我花了一段时间才弄明白。我曾想把邮件发送到某个文件夹的所有可能情况都加到一个规则里。如果有什么问题或需要添加或删除的东西,有一个大规则只是让它更难修复。
+
+![Unsubscribe from email][5]
+
+*点击它,点击它就行!(Kevin Sonney, [CC BY-SA 4.0][4])*
+
+说了这么多,还有一件事我一直在做,它有助于减少我花在电子邮件上的时间:退订邮件。两年前我感兴趣的那个邮件列表,现在已经不感兴趣了,所以就退订了。去年就已停止使用的产品发来的更新通讯?退订!这一直在积极解放我。我每年都会试着评估几次列表中的邮件信息是否(仍然)有用。
+
+过滤器和规则可以是非常强大的工具,让你的电子邮件保持集中,减少花在它们身上的时间。而点击取消订阅按钮是一种解放。试试就知道了!
+ +-------------------------------------------------------------------------------- + +via: https://opensource.com/article/21/1/email-filter + +作者:[Kevin Sonney][a] +选题:[lujun9972][b] +译者:[geekpi](https://github.com/geekpi) +校对:[wxy](https://github.com/wxy) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensource.com/users/ksonney +[b]: https://github.com/lujun9972 +[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/innovation_lightbulb_gears_devops_ansible.png?itok=TSbmp3_M (gears and lightbulb to represent innovation) +[2]: https://opensource.com/article/20/6/obs-websockets-streaming +[3]: https://opensource.com/sites/default/files/day12-image1_0.png +[4]: https://creativecommons.org/licenses/by-sa/4.0/ +[5]: https://opensource.com/sites/default/files/day12-image2_0.png diff --git a/published/20210125 Explore binaries using this full-featured Linux tool.md b/published/20210125 Explore binaries using this full-featured Linux tool.md new file mode 100644 index 0000000000..ffa4c8554b --- /dev/null +++ b/published/20210125 Explore binaries using this full-featured Linux tool.md @@ -0,0 +1,635 @@ +[#]: collector: (lujun9972) +[#]: translator: (wxy) +[#]: reviewer: (wxy) +[#]: publisher: (wxy) +[#]: url: (https://linux.cn/article-13074-1.html) +[#]: subject: (Explore binaries using this full-featured Linux tool) +[#]: via: (https://opensource.com/article/21/1/linux-radare2) +[#]: author: (Gaurav Kamathe https://opensource.com/users/gkamathe) + +全功能的二进制文件分析工具 Radare2 指南 +====== + +> Radare2 是一个为二进制分析定制的开源工具。 + +![](https://img.linux.net.cn/data/attachment/album/202102/01/112611baw4gpqlch10ps1c.jpg) + +在《[Linux 上分析二进制文件的 10 种方法][2]》中,我解释了如何使用 Linux 上丰富的原生工具集来分析二进制文件。但如果你想进一步探索你的二进制文件,你需要一个为二进制分析定制的工具。如果你是二进制分析的新手,并且大多使用的是脚本语言,这篇文章《[GNU binutils 里的九种武器][3]》可以帮助你开始学习编译过程和什么是二进制。 + +### 为什么我需要另一个工具? 
+ +如果现有的 Linux 原生工具也能做类似的事情,你自然会问为什么需要另一个工具。嗯,这和你用手机做闹钟、做笔记、做相机、听音乐、上网、偶尔打电话和接电话的原因是一样的。以前,使用单独的设备和工具处理这些功能 —— 比如拍照的实体相机,记笔记的小记事本,起床的床头闹钟等等。对用户来说,有一个设备来做多件(但相关的)事情是*方便的*。另外,杀手锏就是独立功能之间的*互操作性*。 + +同样,即使许多 Linux 工具都有特定的用途,但在一个工具中捆绑类似(和更好)的功能是非常有用的。这就是为什么我认为 [Radare2][4] 应该是你需要处理二进制文件时的首选工具。 + +根据其 [GitHub 简介][5],Radare2(也称为 r2)是一个“类 Unix 系统上的逆向工程框架和命令行工具集”。它名字中的 “2” 是因为这个版本从头开始重写的,使其更加模块化。 + +### 为什么选择 Radare2? + +有大量(非原生的)Linux 工具可用于二进制分析,为什么要选择 Radare2 呢?我的理由很简单。 + +首先,它是一个开源项目,有一个活跃而健康的社区。如果你正在寻找新颖的功能或提供着 bug 修复的工具,这很重要。 + +其次,Radare2 可以在命令行上使用,而且它有一个功能丰富的图形用户界面(GUI)环境,叫做 Cutter,适合那些对 GUI 比较熟悉的人。作为一个长期使用 Linux 的用户,我对习惯于在 shell 上输入。虽然熟悉 Radare2 的命令稍微有一点学习曲线,但我会把它比作 [学习 Vim][6]。你可以先学习基本的东西,一旦你掌握了它们,你就可以继续学习更高级的东西。很快,它就变成了肌肉记忆。 + +第三,Radare2 通过插件可以很好的支持外部工具。例如,最近开源的 [Ghidra][7] 二进制分析和逆向工具reversing tool很受欢迎,因为它的反编译器功能是逆向软件的关键要素。你可以直接从 Radare2 控制台安装 Ghidra 反编译器并使用,这很神奇,让你两全其美。 + +### 开始使用 Radare2 + +要安装 Radare2,只需克隆其存储库并运行 `user.sh` 脚本。如果你的系统上还没有一些预备软件包,你可能需要安装它们。一旦安装完成,运行 `r2 -v` 命令来查看 Radare2 是否被正确安装: + +``` +$ git clone https://github.com/radareorg/radare2.git +$ cd radare2 +$ ./sys/user.sh + +# version + +$ r2 -v +radare2 4.6.0-git 25266 @ linux-x86-64 git.4.4.0-930-g48047b317 +commit: 48047b3171e6ed0480a71a04c3693a0650d03543 build: 2020-11-17__09:31:03 +$ +``` + +#### 获取二进制测试样本 + +现在 `r2` 已经安装好了,你需要一个样本二进制程序来试用它。你可以使用任何系统二进制文件(`ls`、`bash` 等),但为了使本教程的内容简单,请编译以下 C 程序: + +``` +$ cat adder.c +``` + +``` +#include + +int adder(int num) { + return num + 1; +} + +int main() { + int res, num1 = 100; + res = adder(num1); + printf("Number now is : %d\n", res); + return 0; +} +``` + +``` +$ gcc adder.c -o adder +$ file adder +adder: ELF 64-bit LSB executable, x86-64, version 1 (SYSV), dynamically linked, interpreter /lib64/ld-linux-x86-64.so.2, for GNU/Linux 3.2.0, BuildID[sha1]=9d4366f7160e1ffb46b14466e8e0d70f10de2240, not stripped +$ ./adder +Number now is : 101 +``` + +#### 加载二进制文件 + +要分析二进制文件,你必须在 Radare2 中加载它。通过提供文件名作为 `r2` 命令的一个命令行参数来加载它。你会进入一个独立的 Radare2 控制台,这与你的 
shell 不同。要退出控制台,你可以输入 `Quit` 或 `Exit` 或按 `Ctrl+D`: + +``` +$ r2 ./adder + -- Learn pancake as if you were radare! +[0x004004b0]> quit +$ +``` + +#### 分析二进制 + +在你探索二进制之前,你必须让 `r2` 为你分析它。你可以通过在 `r2` 控制台中运行 `aaa` 命令来实现: + +``` +$ r2 ./adder + -- Sorry, radare2 has experienced an internal error. +[0x004004b0]> +[0x004004b0]> +[0x004004b0]> aaa +[x] Analyze all flags starting with sym. and entry0 (aa) +[x] Analyze function calls (aac) +[x] Analyze len bytes of instructions for references (aar) +[x] Check for vtables +[x] Type matching analysis for all functions (aaft) +[x] Propagate noreturn information +[x] Use -AA or aaaa to perform additional experimental analysis. +[0x004004b0]> +``` + +这意味着每次你选择一个二进制文件进行分析时,你必须在加载二进制文件后输入一个额外的命令 `aaa`。你可以绕过这一点,在命令后面跟上 `-A` 来调用 `r2`;这将告诉 `r2` 为你自动分析二进制: + +``` +$ r2 -A ./adder +[x] Analyze all flags starting with sym. and entry0 (aa) +[x] Analyze function calls (aac) +[x] Analyze len bytes of instructions for references (aar) +[x] Check for vtables +[x] Type matching analysis for all functions (aaft) +[x] Propagate noreturn information +[x] Use -AA or aaaa to perform additional experimental analysis. + -- Already up-to-date. 
+[0x004004b0]> +``` + +#### 获取一些关于二进制的基本信息 + +在开始分析一个二进制文件之前,你需要一些背景信息。在许多情况下,这可以是二进制文件的格式(ELF、PE 等)、二进制的架构(x86、AMD、ARM 等),以及二进制是 32 位还是 64 位。方便的 `r2` 的 `iI` 命令可以提供所需的信息: + +``` +[0x004004b0]> iI +arch x86 +baddr 0x400000 +binsz 14724 +bintype elf +bits 64 +canary false +class ELF64 +compiler GCC: (GNU) 8.3.1 20190507 (Red Hat 8.3.1-4) +crypto false +endian little +havecode true +intrp /lib64/ld-linux-x86-64.so.2 +laddr 0x0 +lang c +linenum true +lsyms true +machine AMD x86-64 architecture +maxopsz 16 +minopsz 1 +nx true +os linux +pcalign 0 +pic false +relocs true +relro partial +rpath NONE +sanitiz false +static false +stripped false +subsys linux +va true + +[0x004004b0]> +[0x004004b0]> +``` + +### 导入和导出 + +通常情况下,当你知道你要处理的是什么样的文件后,你就想知道二进制程序使用了什么样的标准库函数,或者了解程序的潜在功能。在本教程中的示例 C 程序中,唯一的库函数是 `printf`,用来打印信息。你可以通过运行 `ii` 命令看到这一点,它显示了该二进制所有导入的库: + +``` +[0x004004b0]> ii +[Imports] +nth vaddr bind type lib name +――――――――――――――――――――――――――――――――――――― +1 0x00000000 WEAK NOTYPE _ITM_deregisterTMCloneTable +2 0x004004a0 GLOBAL FUNC printf +3 0x00000000 GLOBAL FUNC __libc_start_main +4 0x00000000 WEAK NOTYPE __gmon_start__ +5 0x00000000 WEAK NOTYPE _ITM_registerTMCloneTable +``` + +该二进制也可以有自己的符号、函数或数据。这些函数通常显示在 `Exports` 下。这个测试的二进制导出了两个函数:`main` 和 `adder`。其余的函数是在编译阶段,当二进制文件被构建时添加的。加载器需要这些函数来加载二进制文件(现在不用太关心它们): + +``` +[0x004004b0]> +[0x004004b0]> iE +[Exports] + +nth paddr vaddr bind type size lib name +―――――――――――――――――――――――――――――――――――――――――――――――――――――― +82 0x00000650 0x00400650 GLOBAL FUNC 5 __libc_csu_fini +85 ---------- 0x00601024 GLOBAL NOTYPE 0 _edata +86 0x00000658 0x00400658 GLOBAL FUNC 0 _fini +89 0x00001020 0x00601020 GLOBAL NOTYPE 0 __data_start +90 0x00000596 0x00400596 GLOBAL FUNC 15 adder +92 0x00000670 0x00400670 GLOBAL OBJ 0 __dso_handle +93 0x00000668 0x00400668 GLOBAL OBJ 4 _IO_stdin_used +94 0x000005e0 0x004005e0 GLOBAL FUNC 101 __libc_csu_init +95 ---------- 0x00601028 GLOBAL NOTYPE 0 _end +96 0x000004e0 0x004004e0 GLOBAL FUNC 5 
_dl_relocate_static_pie +97 0x000004b0 0x004004b0 GLOBAL FUNC 47 _start +98 ---------- 0x00601024 GLOBAL NOTYPE 0 __bss_start +99 0x000005a5 0x004005a5 GLOBAL FUNC 55 main +100 ---------- 0x00601028 GLOBAL OBJ 0 __TMC_END__ +102 0x00000468 0x00400468 GLOBAL FUNC 0 _init + +[0x004004b0]> +``` + +### 哈希信息 + +如何知道两个二进制文件是否相似?你不能只是打开一个二进制文件并查看里面的源代码。在大多数情况下,二进制文件的哈希值(md5sum、sha1、sha256)是用来唯一识别它的。你可以使用 `it` 命令找到二进制的哈希值: + +``` +[0x004004b0]> it +md5 7e6732f2b11dec4a0c7612852cede670 +sha1 d5fa848c4b53021f6570dd9b18d115595a2290ae +sha256 13dd5a492219dac1443a816ef5f91db8d149e8edbf26f24539c220861769e1c2 +[0x004004b0]> +``` + +### 函数 + +代码按函数分组;要列出二进制中存在的函数,请运行 `afl` 命令。下面的列表显示了 `main` 函数和 `adder` 函数。通常,以 `sym.imp` 开头的函数是从标准库(这里是 glibc)中导入的: + +``` +[0x004004b0]> afl +0x004004b0    1 46           entry0 +0x004004f0    4 41   -> 34   sym.deregister_tm_clones +0x00400520    4 57   -> 51   sym.register_tm_clones +0x00400560    3 33   -> 32   sym.__do_global_dtors_aux +0x00400590    1 6            entry.init0 +0x00400650    1 5            sym.__libc_csu_fini +0x00400658    1 13           sym._fini +0x00400596    1 15           sym.adder +0x004005e0    4 101          loc..annobin_elf_init.c +0x004004e0    1 5            loc..annobin_static_reloc.c +0x004005a5    1 55           main +0x004004a0    1 6            sym.imp.printf +0x00400468    3 27           sym._init +[0x004004b0]> +``` + +### 交叉引用 + +在 C 语言中,`main` 函数是一个程序开始执行的地方。理想情况下,其他函数都是从 `main` 函数调用的,在退出程序时,`main` 函数会向操作系统返回一个退出状态。这在源代码中是很明显的,然而,二进制程序呢?如何判断 `adder` 函数的调用位置呢? 
+ +你可以使用 `axt` 命令,后面加上函数名,看看 `adder` 函数是在哪里调用的;如下图所示,它是从 `main` 函数中调用的。这就是所谓的交叉引用cross-referencing。但什么调用 `main` 函数本身呢?从下面的 `axt main` 可以看出,它是由 `entry0` 调用的(关于 `entry0` 的学习我就不说了,留待读者练习)。 + +``` +[0x004004b0]> axt sym.adder +main 0x4005b9 [CALL] call sym.adder +[0x004004b0]> +[0x004004b0]> axt main +entry0 0x4004d1 [DATA] mov rdi, main +[0x004004b0]> +``` + +### 寻找定位 + +在处理文本文件时,你经常通过引用行号和行或列号在文件内移动;在二进制文件中,你需要使用地址。这些是以 `0x` 开头的十六进制数字,后面跟着一个地址。要找到你在二进制中的位置,运行 `s` 命令。要移动到不同的位置,使用 `s` 命令,后面跟上地址。 + +函数名就像标签一样,内部用地址表示。如果函数名在二进制中(未剥离的),可以使用函数名后面的 `s` 命令跳转到一个特定的函数地址。同样,如果你想跳转到二进制的开始,输入 `s 0`: + +``` +[0x004004b0]> s +0x4004b0 +[0x004004b0]> +[0x004004b0]> s main +[0x004005a5]> +[0x004005a5]> s +0x4005a5 +[0x004005a5]> +[0x004005a5]> s sym.adder +[0x00400596]> +[0x00400596]> s +0x400596 +[0x00400596]> +[0x00400596]> s 0 +[0x00000000]> +[0x00000000]> s +0x0 +[0x00000000]> +``` + +### 十六进制视图 + +通常情况下,原始二进制没有意义。在十六进制模式下查看二进制及其等效的 ASCII 表示法会有帮助: + +``` +[0x004004b0]> s main +[0x004005a5]> +[0x004005a5]> px +- offset -   0 1  2 3  4 5  6 7  8 9  A B  C D  E F  0123456789ABCDEF +0x004005a5  5548 89e5 4883 ec10 c745 fc64 0000 008b  UH..H....E.d.... +0x004005b5  45fc 89c7 e8d8 ffff ff89 45f8 8b45 f889  E.........E..E.. +0x004005c5  c6bf 7806 4000 b800 0000 00e8 cbfe ffff  ..x.@........... +0x004005d5  b800 0000 00c9 c30f 1f40 00f3 0f1e fa41  .........@.....A +0x004005e5  5749 89d7 4156 4989 f641 5541 89fd 4154  WI..AVI..AUA..AT +0x004005f5  4c8d 2504 0820 0055 488d 2d04 0820 0053  L.%.. .UH.-.. .S +0x00400605  4c29 e548 83ec 08e8 57fe ffff 48c1 fd03  L).H....W...H... +0x00400615  741f 31db 0f1f 8000 0000 004c 89fa 4c89  t.1........L..L. +0x00400625  f644 89ef 41ff 14dc 4883 c301 4839 dd75  .D..A...H...H9.u +0x00400635  ea48 83c4 085b 5d41 5c41 5d41 5e41 5fc3  .H...[]A\A]A^A_. +0x00400645  9066 2e0f 1f84 0000 0000 00f3 0f1e fac3  .f.............. +0x00400655  0000 00f3 0f1e fa48 83ec 0848 83c4 08c3  .......H...H.... 
+0x00400665  0000 0001 0002 0000 0000 0000 0000 0000  ................ +0x00400675  0000 004e 756d 6265 7220 6e6f 7720 6973  ...Number now is +0x00400685  2020 3a20 2564 0a00 0000 0001 1b03 3b44    : %d........;D +0x00400695  0000 0007 0000 0000 feff ff88 0000 0020  ............... +[0x004005a5]> +``` + +### 反汇编 + +如果你使用的是编译后的二进制文件,则无法查看源代码。编译器将源代码转译成 CPU 可以理解和执行的机器语言指令;其结果就是二进制或可执行文件。然而,你可以查看汇编指令(的助记词)来理解程序正在做什么。例如,如果你想查看 `main` 函数在做什么,你可以使用 `s main` 寻找 `main` 函数的地址,然后运行 `pdf` 命令来查看反汇编的指令。 + +要理解汇编指令,你需要参考体系结构手册(这里是 x86),它的应用二进制接口(ABI,或调用惯例),并对堆栈的工作原理有基本的了解: + +``` +[0x004004b0]> s main +[0x004005a5]> +[0x004005a5]> s +0x4005a5 +[0x004005a5]> +[0x004005a5]> pdf +            ; DATA XREF from entry0 @ 0x4004d1 +┌ 55: int main (int argc, char **argv, char **envp); +│           ; var int64_t var_8h @ rbp-0x8 +│           ; var int64_t var_4h @ rbp-0x4 +│           0x004005a5      55             push rbp +│           0x004005a6      4889e5         mov rbp, rsp +│           0x004005a9      4883ec10       sub rsp, 0x10 +│           0x004005ad      c745fc640000.  
mov dword [var_4h], 0x64    ; 'd' ; 100 +│           0x004005b4      8b45fc         mov eax, dword [var_4h] +│           0x004005b7      89c7           mov edi, eax +│           0x004005b9      e8d8ffffff     call sym.adder +│           0x004005be      8945f8         mov dword [var_8h], eax +│           0x004005c1      8b45f8         mov eax, dword [var_8h] +│           0x004005c4      89c6           mov esi, eax +│           0x004005c6      bf78064000     mov edi, str.Number_now_is__:__d ; 0x400678 ; "Number now is  : %d\n" ; const char *format +│           0x004005cb      b800000000     mov eax, 0 +│           0x004005d0      e8cbfeffff     call sym.imp.printf         ; int printf(const char *format) +│           0x004005d5      b800000000     mov eax, 0 +│           0x004005da      c9             leave +└           0x004005db      c3             ret +[0x004005a5]> +``` + +这是 `adder` 函数的反汇编结果: + +``` +[0x004005a5]> s sym.adder +[0x00400596]> +[0x00400596]> s +0x400596 +[0x00400596]> +[0x00400596]> pdf +            ; CALL XREF from main @ 0x4005b9 +┌ 15: sym.adder (int64_t arg1); +│           ; var int64_t var_4h @ rbp-0x4 +│           ; arg int64_t arg1 @ rdi +│           0x00400596      55             push rbp +│           0x00400597      4889e5         mov rbp, rsp +│           0x0040059a      897dfc         mov dword [var_4h], edi     ; arg1 +│           0x0040059d      8b45fc         mov eax, dword [var_4h] +│           0x004005a0      83c001         add eax, 1 +│           0x004005a3      5d             pop rbp +└           0x004005a4      c3             ret +[0x00400596]> +``` + +### 字符串 + +查看二进制中存在哪些字符串可以作为二进制分析的起点。字符串是硬编码到二进制中的,通常会提供重要的提示,可以让你将重点转移到分析某些区域。在二进制中运行 `iz` 命令来列出所有的字符串。这个测试二进制中只有一个硬编码的字符串: + +``` +[0x004004b0]> iz +[Strings] +nth paddr      vaddr      len size section type  string +――――――――――――――――――――――――――――――――――――――――――――――――――――――― +0   0x00000678 0x00400678 20  21   .rodata ascii Number now is  : %d\n + +[0x004004b0]> +``` + +### 交叉引用字符串 
+ +和函数一样,你可以交叉引用字符串,看看它们是从哪里被打印出来的,并理解它们周围的代码: + +``` +[0x004004b0]> ps @ 0x400678 +Number now is  : %d + +[0x004004b0]> +[0x004004b0]> axt 0x400678 +main 0x4005c6 [DATA] mov edi, str.Number_now_is__:__d +[0x004004b0]> +``` + +### 可视模式 + +当你的代码很复杂,有多个函数被调用时,很容易迷失方向。如果能以图形或可视化的方式查看哪些函数被调用,根据某些条件采取了哪些路径等,会很有帮助。在移动到感兴趣的函数后,可以通过 `VV` 命令来探索 `r2` 的可视化模式。例如,对于 `adder` 函数: + +``` +[0x004004b0]> s sym.adder +[0x00400596]> +[0x00400596]> VV +``` + +![Radare2 Visual mode][8] + +*(Gaurav Kamathe, [CC BY-SA 4.0][9])* + +### 调试器 + +到目前为止,你一直在做的是静态分析 —— 你只是在看二进制文件中的东西,而没有运行它,有时你需要执行二进制文件,并在运行时分析内存中的各种信息。`r2` 的内部调试器允许你运行二进制文件、设置断点、分析变量的值、或者转储寄存器的内容。 + +用 `-d` 标志启动调试器,并在加载二进制时添加 `-A` 标志进行分析。你可以通过使用 `db ` 命令在不同的地方设置断点,比如函数或内存地址。要查看现有的断点,使用 `dbi` 命令。一旦你放置了断点,使用 `dc` 命令开始运行二进制文件。你可以使用 `dbt` 命令查看堆栈,它可以显示函数调用。最后,你可以使用 `drr` 命令转储寄存器的内容: + +``` +$ r2 -d -A ./adder +Process with PID 17453 started... += attach 17453 17453 +bin.baddr 0x00400000 +Using 0x400000 +asm.bits 64 +[x] Analyze all flags starting with sym. and entry0 (aa) +[x] Analyze function calls (aac) +[x] Analyze len bytes of instructions for references (aar) +[x] Check for vtables +[x] Type matching analysis for all functions (aaft) +[x] Propagate noreturn information +[x] Use -AA or aaaa to perform additional experimental analysis. + -- git checkout hamster +[0x7f77b0a28030]> +[0x7f77b0a28030]> db main +[0x7f77b0a28030]> +[0x7f77b0a28030]> db sym.adder +[0x7f77b0a28030]> +[0x7f77b0a28030]> dbi +0 0x004005a5 E:1 T:0 +1 0x00400596 E:1 T:0 +[0x7f77b0a28030]> +[0x7f77b0a28030]> afl | grep main +0x004005a5    1 55           main +[0x7f77b0a28030]> +[0x7f77b0a28030]> afl | grep sym.adder +0x00400596    1 15           sym.adder +[0x7f77b0a28030]> +[0x7f77b0a28030]> dc +hit breakpoint at: 0x4005a5 +[0x004005a5]> +[0x004005a5]> dbt +0  0x4005a5           sp: 0x0                 0    [main]  main sym.adder+15 +1  0x7f77b0687873     sp: 0x7ffe35ff6858      0    [??]  
section..gnu.build.attributes-1345820597 +2  0x7f77b0a36e0a     sp: 0x7ffe35ff68e8      144  [??]  map.usr_lib64_ld_2.28.so.r_x+65034 +[0x004005a5]> dc +hit breakpoint at: 0x400596 +[0x00400596]> dbt +0  0x400596           sp: 0x0                 0    [sym.adder]  rip entry.init0+6 +1  0x4005be           sp: 0x7ffe35ff6838      0    [main]  main+25 +2  0x7f77b0687873     sp: 0x7ffe35ff6858      32   [??]  section..gnu.build.attributes-1345820597 +3  0x7f77b0a36e0a     sp: 0x7ffe35ff68e8      144  [??]  map.usr_lib64_ld_2.28.so.r_x+65034 +[0x00400596]> +[0x00400596]> +[0x00400596]> dr +rax = 0x00000064 +rbx = 0x00000000 +rcx = 0x7f77b0a21738 +rdx = 0x7ffe35ff6948 +r8 = 0x7f77b0a22da0 +r9 = 0x7f77b0a22da0 +r10 = 0x0000000f +r11 = 0x00000002 +r12 = 0x004004b0 +r13 = 0x7ffe35ff6930 +r14 = 0x00000000 +r15 = 0x00000000 +rsi = 0x7ffe35ff6938 +rdi = 0x00000064 +rsp = 0x7ffe35ff6838 +rbp = 0x7ffe35ff6850 +rip = 0x00400596 +rflags = 0x00000202 +orax = 0xffffffffffffffff +[0x00400596]> +``` + +### 反编译器 + +能够理解汇编是二进制分析的前提。汇编语言总是与二进制建立和预期运行的架构相关。一行源代码和汇编代码之间从来没有 1:1 的映射。通常,一行 C 源代码会产生多行汇编代码。所以,逐行读取汇编代码并不是最佳的选择。 + +这就是反编译器的作用。它们试图根据汇编指令重建可能的源代码。这与用于创建二进制的源代码绝不完全相同,它是基于汇编的源代码的近似表示。另外,要考虑到编译器进行的优化,它会生成不同的汇编代码以加快速度,减小二进制的大小等,会使反编译器的工作更加困难。另外,恶意软件作者经常故意混淆代码,让恶意软件的分析人员望而却步。 + +Radare2 通过插件提供反编译器。你可以安装任何 Radare2 支持的反编译器。使用 `r2pm -l` 命令可以查看当前插件。使用 `r2pm install` 命令来安装一个示例的反编译器 `r2dec`: + +``` +$ r2pm  -l +$ +$ r2pm install r2dec +Cloning into 'r2dec'... +remote: Enumerating objects: 100, done. +remote: Counting objects: 100% (100/100), done. +remote: Compressing objects: 100% (97/97), done. +remote: Total 100 (delta 18), reused 27 (delta 1), pack-reused 0 +Receiving objects: 100% (100/100), 1.01 MiB | 1.31 MiB/s, done. +Resolving deltas: 100% (18/18), done. 
+Install Done For r2dec +gmake: Entering directory '/root/.local/share/radare2/r2pm/git/r2dec/p' +[CC] duktape/duktape.o +[CC] duktape/duk_console.o +[CC] core_pdd.o +[CC] core_pdd.so +gmake: Leaving directory '/root/.local/share/radare2/r2pm/git/r2dec/p' +$ +$ r2pm  -l +r2dec +$ +``` + +### 反编译器视图 + +要反编译一个二进制文件,在 `r2` 中加载二进制文件并自动分析它。在本例中,使用 `s sym.adder` 命令移动到感兴趣的 `adder` 函数,然后使用 `pdda` 命令并排查看汇编和反编译后的源代码。阅读这个反编译后的源代码往往比逐行阅读汇编更容易: + +``` +$ r2 -A ./adder +[x] Analyze all flags starting with sym. and entry0 (aa) +[x] Analyze function calls (aac) +[x] Analyze len bytes of instructions for references (aar) +[x] Check for vtables +[x] Type matching analysis for all functions (aaft) +[x] Propagate noreturn information +[x] Use -AA or aaaa to perform additional experimental analysis. + -- What do you want to debug today? +[0x004004b0]> +[0x004004b0]> s sym.adder +[0x00400596]> +[0x00400596]> s +0x400596 +[0x00400596]> +[0x00400596]> pdda +    ; assembly                               | /* r2dec pseudo code output */ +                                             | /* ./adder @ 0x400596 */ +                                             | #include <stdint.h> +                                             |   +    ; (fcn) sym.adder ()                     | int32_t adder (int64_t arg1) { +                                             |     int64_t var_4h; +                                             |     rdi = arg1; +    0x00400596 push rbp                      |     +    0x00400597 mov rbp, rsp                  |     +    0x0040059a mov dword [rbp - 4], edi      |     *((rbp - 4)) = edi; +    0x0040059d mov eax, dword [rbp - 4]      |     eax = *((rbp - 4)); +    0x004005a0 add eax, 1                    |     eax++; +    0x004005a3 pop rbp                       |     +    0x004005a4 ret                           |     return eax; +                                             | } +[0x00400596]> +``` + +### 配置设置 + +随着你对 Radare2 的使用越来越熟悉,你会想改变它的配置,以适应你的工作方式。你可以使用 `e` 命令查看 `r2` 
的默认配置。要设置一个特定的配置,在 `e` 命令后面添加 `config = value`: + +``` +[0x004005a5]> e | wc -l +593 +[0x004005a5]> e | grep syntax +asm.syntax = intel +[0x004005a5]> +[0x004005a5]> e asm.syntax = att +[0x004005a5]> +[0x004005a5]> e | grep syntax +asm.syntax = att +[0x004005a5]> +``` + +要使配置更改永久化,请将它们放在 `r2` 启动时读取的名为 `.radare2rc` 的启动文件中。这个文件通常在你的主目录下,如果没有,你可以创建一个。一些示例配置选项包括: + +``` +$ cat ~/.radare2rc +e asm.syntax = att +e scr.utf8 = true +eco solarized +e cmd.stack = true +e stack.size = 256 +$ +``` + +### 探索更多 + +你已经看到了足够多的 Radare2 功能,对这个工具有了一定的了解。因为 Radare2 遵循 Unix 哲学,即使你可以从它的主控台做各种事情,它也会在下面使用一套独立的二进制来完成它的任务。 + +探索下面列出的独立二进制文件,看看它们是如何工作的。例如,用 `iI` 命令在控制台看到的二进制信息也可以用 `rabin2 ` 命令找到: + +``` +$ cd bin/ +$ +$ ls +prefix  r2agent    r2pm  rabin2   radiff2  ragg2    rarun2   rasm2 +r2      r2-indent  r2r   radare2  rafind2  rahash2  rasign2  rax2 +$ +``` + +你觉得 Radare2 怎么样?请在评论中分享你的反馈。 + +-------------------------------------------------------------------------------- + +via: https://opensource.com/article/21/1/linux-radare2 + +作者:[Gaurav Kamathe][a] +选题:[lujun9972][b] +译者:[wxy](https://github.com/wxy) +校对:[wxy](https://github.com/wxy) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensource.com/users/gkamathe +[b]: https://github.com/lujun9972 +[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/binary_code_computer_screen.png?itok=7IzHK1nn (Binary code on a computer screen) +[2]: https://linux.cn/article-12187-1.html +[3]: https://linux.cn/article-11441-1.html +[4]: https://rada.re/n/ +[5]: https://github.com/radareorg/radare2 +[6]: https://opensource.com/article/19/3/getting-started-vim +[7]: https://ghidra-sre.org/ +[8]: https://opensource.com/sites/default/files/uploads/radare2_visual-mode_0.png (Radare2 Visual mode) +[9]: https://creativecommons.org/licenses/by-sa/4.0/ diff --git a/published/20210125 Use Joplin to find your notes faster.md b/published/20210125 Use Joplin to 
find your notes faster.md new file mode 100644 index 0000000000..13e6be8bdf --- /dev/null +++ b/published/20210125 Use Joplin to find your notes faster.md @@ -0,0 +1,63 @@ +[#]: collector: (lujun9972) +[#]: translator: (geekpi) +[#]: reviewer: (wxy) +[#]: publisher: (wxy) +[#]: url: (https://linux.cn/article-13080-1.html) +[#]: subject: (Use Joplin to find your notes faster) +[#]: via: (https://opensource.com/article/21/1/notes-joplin) +[#]: author: (Kevin Sonney https://opensource.com/users/ksonney) + +使用 Joplin 更快地找到你的笔记 +====== + +> 在多个手写和数字平台上整理笔记是一个严峻的挑战。这里有一个小技巧,可以更好地组织你的笔记,并快速找到你需要的东西。 + +![](https://img.linux.net.cn/data/attachment/album/202102/03/120141dkiqil1vlqiz6wql.jpg) + +在前几年,这个年度系列涵盖了单个的应用。今年,我们除了关注 2021 年的策略外,还将关注一体化解决方案。欢迎来到 2021 年 21 天生产力的第十五天。 + +保持生产力也意味着(在某种程度上)要有足够的组织能力,以便找到笔记并在需要时参考它们。这不仅是对我自己的挑战,也是与我交谈的很多人的挑战。 + +多年来,我在应用中单独或使用数字笔记、纸质笔记、便签、数字便签、Word 文档、纯文本文件以及一堆我忘记的其他格式的组合。这不仅让寻找笔记变得困难,而且知道把它们放在哪里是一个更大的挑战。 + +![Stacks of paper notes on a desk][2] + +*一堆笔记 (Jessica Cherry, [CC BY-SA 4.0][3])* + +还有就是做笔记最重要的一点:如果你以后找不到它,笔记就没有任何价值。知道含有你所需信息的笔记存在于你保存笔记的*某处*,根本没有任何帮助。 + +我是如何为自己解决这个问题的呢?正如他们所说,这是一个过程,我希望这也是一个对其他人有效的过程。 + +我首先看了看自己所做的笔记种类。不同的主题需要用不同的方式保存吗?由于我为我的播客手写笔记,而几乎所有其他的东西都使用纯文本笔记,我需要两种不同的方式来维护它们。对于手写的笔记,我把它们都放在一个文件夹里,方便我参考。 + +![Man holding a binder full of notes][4] + +*三年多的笔记 (Kevin Sonney, [CC BY-SA 4.0][3])* + +为了保存我的数字笔记,我需要将它们全部集中到一个地方。这个工具需要能够从多种设备上访问,具有有用的搜索功能,并且能够导出或共享我的笔记。在尝试了很多很多不同的选项之后,我选择了 [Joplin][5]。Joplin 可以让我用 Markdown 写笔记,有一个相当不错的搜索功能,有适用于所有操作系统(包括手机)的应用,并支持几种不同的设备同步方式。另外,它还有文件夹*和*标签,因此我可以按照对我有意义的方式将笔记分组。 + +![Organized Joplin notes management page][6] + +*我的 Joplin* + +我花了一些时间才把所有的东西都放在我想要的地方,但最后,这真的是值得的。现在,我可以找到我所做的笔记,而不是让它们散落在我的办公室、不同的机器和各种服务中。 + +-------------------------------------------------------------------------------- + +via: https://opensource.com/article/21/1/notes-joplin + +作者:[Kevin Sonney][a] +选题:[lujun9972][b] +译者:[geekpi](https://github.com/geekpi) +校对:[wxy](https://github.com/wxy) + +本文由 
[LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensource.com/users/ksonney +[b]: https://github.com/lujun9972 +[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/wfh_work_home_laptop_work.png?itok=VFwToeMy (Working from home at a laptop) +[2]: https://opensource.com/sites/default/files/day15-image1.jpg +[3]: https://creativecommons.org/licenses/by-sa/4.0/ +[4]: https://opensource.com/sites/default/files/day15-image2.png +[5]: https://joplinapp.org/ +[6]: https://opensource.com/sites/default/files/day15-image3.png diff --git a/published/20210125 Why you need to drop ifconfig for ip.md b/published/20210125 Why you need to drop ifconfig for ip.md new file mode 100644 index 0000000000..c4370c6f6d --- /dev/null +++ b/published/20210125 Why you need to drop ifconfig for ip.md @@ -0,0 +1,201 @@ +[#]: collector: "lujun9972" +[#]: translator: "MjSeven" +[#]: reviewer: "wxy" +[#]: publisher: "wxy" +[#]: url: "https://linux.cn/article-13089-1.html" +[#]: subject: "Why you need to drop ifconfig for ip" +[#]: via: "https://opensource.com/article/21/1/ifconfig-ip-linux" +[#]: author: "Rajan Bhardwaj https://opensource.com/users/rajabhar" + +放弃 ifconfig,拥抱 ip 命令 +====== + +> 开始使用现代方法配置 Linux 网络接口。 + +![](https://img.linux.net.cn/data/attachment/album/202102/05/233847lpg1lnz7kl2czgfj.jpg) + +在很长一段时间内,`ifconfig` 命令是配置网络接口的默认方法。它为 Linux 用户提供了很好的服务,但是网络很复杂,所以配置网络的命令必须健壮。`ip` 命令是现代系统中新的默认网络命令,在本文中,我将向你展示如何使用它。 + +`ip` 命令工作在 [OSI 网络栈][2] 的两个层上:第二层(数据链路层)和第三层(网络 或 IP)层。它做了之前 `net-tools` 包的所有工作。 + +### 安装 ip + +`ip` 命令包含在 `iproute2util` 包中,它可能已经在你的 Linux 发行版中安装了。如果没有,你可以从发行版的仓库中进行安装。 + +### ifconfig 和 ip 使用对比 + +`ip` 和 `ifconfig` 命令都可以用来配置网络接口,但它们做事方法不同。接下来,作为对比,我将用它们来执行一些常见的任务。 + +#### 查看网口和 IP 地址 + +如果你想查看主机的 IP 地址或网络接口信息,`ifconfig` (不带任何参数)命令提供了一个很好的总结。 + +``` +$ ifconfig + +eth0: flags=4099 mtu 1500 + ether bc:ee:7b:5e:7d:d8 txqueuelen 1000 (Ethernet) + RX packets 0 bytes 0 (0.0 B) + RX 
errors 0 dropped 0 overruns 0 frame 0 + TX packets 0 bytes 0 (0.0 B) + TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0 + +lo: flags=73 mtu 65536 + inet 127.0.0.1 netmask 255.0.0.0 + inet6 ::1 prefixlen 128 scopeid 0x10 + loop txqueuelen 1000 (Local Loopback) + RX packets 41 bytes 5551 (5.4 KiB) + RX errors 0 dropped 0 overruns 0 frame 0 + TX packets 41 bytes 5551 (5.4 KiB) + TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0 + +wlan0: flags=4163 mtu 1500 + inet 10.1.1.6 netmask 255.255.255.224 broadcast 10.1.1.31 + inet6 fdb4:f58e:49f:4900:d46d:146b:b16:7212 prefixlen 64 scopeid 0x0 + inet6 fe80::8eb3:4bc0:7cbb:59e8 prefixlen 64 scopeid 0x20 + ether 08:71:90:81:1e:b5 txqueuelen 1000 (Ethernet) + RX packets 569459 bytes 779147444 (743.0 MiB) + RX errors 0 dropped 0 overruns 0 frame 0 + TX packets 302882 bytes 38131213 (36.3 MiB) + TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0 +``` + +新的 `ip` 命令提供了类似的结果,但命令是 `ip address show`,或者简写为 `ip a`: + +``` +$ ip a + +1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000 + link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 + inet 127.0.0.1/8 scope host lo + valid_lft forever preferred_lft forever + inet6 ::1/128 scope host + valid_lft forever preferred_lft forever +2: eth0: mtu 1500 qdisc pfifo_fast state DOWN group default qlen 1000 + link/ether bc:ee:7b:5e:7d:d8 brd ff:ff:ff:ff:ff:ff +3: wlan0: mtu 1500 qdisc noqueue state UP group default qlen 1000 + link/ether 08:71:90:81:1e:b5 brd ff:ff:ff:ff:ff:ff + inet 10.1.1.6/27 brd 10.1.1.31 scope global dynamic wlan0 + valid_lft 83490sec preferred_lft 83490sec + inet6 fdb4:f58e:49f:4900:d46d:146b:b16:7212/64 scope global noprefixroute dynamic + valid_lft 6909sec preferred_lft 3309sec + inet6 fe80::8eb3:4bc0:7cbb:59e8/64 scope link + valid_lft forever preferred_lft forever +``` + +#### 添加 IP 地址 + +使用 `ifconfig` 命令添加 IP 地址命令为: + +``` +$ ifconfig eth0 add 192.9.203.21 +``` + +`ip` 类似: + +``` +$ ip address add 192.9.203.21 dev eth0 +``` + +`ip` 
中的子命令可以缩短,所以下面这个命令同样有效: + +``` +$ ip addr add 192.9.203.21 dev eth0 +``` + +你甚至可以更短些: + +``` +$ ip a add 192.9.203.21 dev eth0 +``` + +#### 移除一个 IP 地址 + +添加 IP 地址与删除 IP 地址正好相反。 + +使用 `ifconfig`,命令是: + +``` +$ ifconfig eth0 del 192.9.203.21 +``` + +`ip` 命令的语法是: + +``` +$ ip a del 192.9.203.21 dev eth0 +``` + +#### 启用或禁用组播 + +使用 `ifconfig` 接口来启用或禁用 [组播][3]multicast: + +``` +# ifconfig eth0 multicast +``` + +对于 `ip`,使用 `set` 子命令与设备(`dev`)以及一个布尔值和 `multicast` 选项: + +``` +# ip link set dev eth0 multicast on +``` + +#### 启用或禁用网络 + +每个系统管理员都熟悉“先关闭,然后打开”这个技巧来解决问题。对于网络接口来说,即打开或关闭网络。 + +`ifconfig` 命令使用 `up` 或 `down` 关键字来实现: + +``` +# ifconfig eth0 up +``` + +或者你可以使用一个专用命令: + +``` +# ifup eth0 +``` + +`ip` 命令使用 `set` 子命令将网络设置为 `up` 或 `down` 状态: + +``` +# ip link set eth0 up +``` + +#### 开启或关闭地址解析功能(ARP) + +使用 `ifconfig`,你可以通过声明它来启用: + +``` +# ifconfig eth0 arp +``` + +使用 `ip`,你可以将 `arp` 属性设置为 `on` 或 `off`: + +``` +# ip link set dev eth0 arp on +``` + +### ip 和 ipconfig 的优缺点 + +`ip` 命令比 `ifconfig` 更通用,技术上也更有效,因为它使用的是 `Netlink` 套接字,而不是 `ioctl` 系统调用。 + +`ip` 命令可能看起来比 `ifconfig` 更详细、更复杂,但这是它拥有更多功能的一个原因。一旦你开始使用它,你会了解它的内部逻辑(例如,使用 `set` 而不是看起来随意混合的声明或设置)。 + +最后,`ifconfig` 已经过时了(例如,它缺乏对网络命名空间的支持),而 `ip` 是为现代网络而生的。尝试并学习它,使用它,你会由衷高兴的! 
+ +-------------------------------------------------------------------------------- + +via: https://opensource.com/article/21/1/ifconfig-ip-linux + +作者:[Rajan Bhardwaj][a] +选题:[lujun9972][b] +译者:[MjSeven](https://github.com/MjSeven) +校对:[wxy](https://github.com/wxy) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensource.com/users/rajabhar +[b]: https://github.com/lujun9972 +[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/gears_devops_learn_troubleshooting_lightbulb_tips_520.png?itok=HcN38NOk "Tips and gears turning" +[2]: https://en.wikipedia.org/wiki/OSI_model +[3]: https://en.wikipedia.org/wiki/Multicast diff --git a/published/20210126 Use your Raspberry Pi as a productivity powerhouse.md b/published/20210126 Use your Raspberry Pi as a productivity powerhouse.md new file mode 100644 index 0000000000..d7deca1036 --- /dev/null +++ b/published/20210126 Use your Raspberry Pi as a productivity powerhouse.md @@ -0,0 +1,75 @@ +[#]: collector: (lujun9972) +[#]: translator: (geekpi) +[#]: reviewer: (wxy) +[#]: publisher: (wxy) +[#]: url: (https://linux.cn/article-13084-1.html) +[#]: subject: (Use your Raspberry Pi as a productivity powerhouse) +[#]: via: (https://opensource.com/article/21/1/raspberry-pi-productivity) +[#]: author: (Kevin Sonney https://opensource.com/users/ksonney) + +将你的树莓派用作生产力源泉 +====== + +> 树莓派已经从主要为黑客和业余爱好者服务,成为了小型生产力工作站的可靠选择。 + +![](https://img.linux.net.cn/data/attachment/album/202102/04/103826pjbxb7j1m8ok6ezf.jpg) + +在前几年,这个年度系列涵盖了单个的应用。今年,我们除了关注 2021 年的策略外,还将关注一体化解决方案。欢迎来到 2021 年 21 天生产力的第十六天。 + +[树莓派][2]是一台相当棒的小电脑。它体积小,功能却出奇的强大,而且非常容易设置和使用。我曾将它们用于家庭自动化项目、面板和专用媒体播放器。但它也能成为生产力的动力源泉么? 
+ +答案相当简单:是的。 + +![Geary and Calendar apps on the Raspberry Pi][3] + +*Geary 和 Calendar 应用 (Kevin Sonney, [CC BY-SA 4.0][4])* + +基本的 [Raspbian][5] 安装包括 [Claw Mail][6],这是一个轻量级的邮件客户端。它的用户界面有点过时了,而且非常的简陋。如果你是一个 [Mutt 用户][7],它可能会满足你的需求。 + +我更喜欢安装 [Geary][8],因为它也是轻量级的,而且有一个现代化的界面。另外,与 Claws 不同的是,Geary 默认支持富文本 (HTML) 邮件。我不喜欢富文本电子邮件,但它已经成为必要的,所以对它有良好的支持是至关重要的。 + +默认的 Raspbian 安装不包含日历,所以我添加了 [GNOME 日历][9] ,因为它可以与远程服务通信(因为我的几乎所有日历都在云提供商那里)。 + +![GTG and GNote open on Raspberry Pi][10] + +*GTG 和 GNote(Kevin Sonney, [CC BY-SA 4.0][4])* + +那笔记和待办事项清单呢?有很多选择,但我喜欢用 [GNote][11] 来做笔记,用 [Getting-Things-GNOME!][12] 来做待办事项。两者都相当轻量级,并且可以相互同步,也可以同步到其他服务。 + +你会注意到,我在这里使用了不少 GNOME 应用。为什么不直接安装完整的 GNOME 桌面呢?在内存为 4Gb(或 8Gb)的树莓派 4 上,GNOME 工作得很好。你需要采取一些额外的步骤来禁用 Raspbian 上的默认 wifi 设置,并用 Network Manager 来代替它,但这个在网上有很好的文档,而且真的很简单。 + +GNOME 中包含了 [Evolution][13],它将邮件、日历、笔记、待办事项和联系人管理整合到一个应用中。与 Geary 和 GNOME Calendar 相比,它有点重,但在树莓派 4 上却很稳定。这让我很惊讶,因为我习惯了 Evolution 有点消耗资源,但树莓派 4 却和我的品牌笔记本一样运行良好,而且资源充足。 + +![Evolution on Raspbian][14] + +*Raspbian 上的 Evolution (Kevin Sonney, [CC BY-SA 4.0][4])* + +树莓派在过去的几年里进步很快,已经从主要为黑客和业余爱好者服务,成为了小型生产力工作站的可靠选择。 + +-------------------------------------------------------------------------------- + +via: https://opensource.com/article/21/1/raspberry-pi-productivity + +作者:[Kevin Sonney][a] +选题:[lujun9972][b] +译者:[geekpi](https://github.com/geekpi) +校对:[wxy](https://github.com/wxy) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensource.com/users/ksonney +[b]: https://github.com/lujun9972 +[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/todo_checklist_team_metrics_report.png?itok=oB5uQbzf (Team checklist and to dos) +[2]: https://www.raspberrypi.org/ +[3]: https://opensource.com/sites/default/files/day16-image1.png +[4]: https://creativecommons.org/licenses/by-sa/4.0/ +[5]: https://www.raspbian.org/ +[6]: https://www.claws-mail.org/ +[7]: http://www.mutt.org/ +[8]: 
https://wiki.gnome.org/Apps/Geary +[9]: https://wiki.gnome.org/Apps/Calendar +[10]: https://opensource.com/sites/default/files/day16-image2.png +[11]: https://wiki.gnome.org/Apps/Gnote +[12]: https://wiki.gnome.org/Apps/GTG +[13]: https://opensource.com/business/18/1/desktop-email-clients +[14]: https://opensource.com/sites/default/files/day16-image3.png diff --git a/published/20210126 Write GIMP scripts to make image processing faster.md b/published/20210126 Write GIMP scripts to make image processing faster.md new file mode 100644 index 0000000000..4caa8357ff --- /dev/null +++ b/published/20210126 Write GIMP scripts to make image processing faster.md @@ -0,0 +1,234 @@ +[#]: collector: (lujun9972) +[#]: translator: (amwps290) +[#]: reviewer: (wxy) +[#]: publisher: (wxy) +[#]: url: (https://linux.cn/article-13093-1.html) +[#]: subject: (Write GIMP scripts to make image processing faster) +[#]: via: (https://opensource.com/article/21/1/gimp-scripting) +[#]: author: (Cristiano L. Fontana https://opensource.com/users/cristianofontana) + +编写 GIMP 脚本使图像处理更快 +====== + +> 通过向一批图像添加效果来学习 GIMP 的脚本语言 Script-Fu。 + +![](https://img.linux.net.cn/data/attachment/album/202102/06/231011c0xhvxitxjv899qv.jpg) + +前一段时间,我想给方程图片加一个黑板式的外观。我开始是使用 [GIMP][2] 来处理的,我对结果很满意。问题是我必须对图像执行几个操作,当我想再次使用此样式,不想对所有图像重复这些步骤。此外,我确信我会很快忘记这些步骤。 + +![Fourier transform equations][3] + +*傅立叶变换方程式(Cristiano Fontana,[CC BY-SA 4.0] [4])* + +GIMP 是一个很棒的开源图像编辑器。尽管我已经使用了多年,但从未研究过其批处理功能或 [Script-Fu][5] 菜单。这是探索它们的绝好机会。 + +### 什么是 Script-Fu? + +[Script-Fu][6] 是 GIMP 内置的脚本语言。是一种基于 [Scheme][7] 的编程语言。如果你从未使用过 Scheme,请尝试一下,因为它可能非常有用。我认为 Script-Fu 是一个很好的入门方法,因为它对图像处理具有立竿见影的效果,所以你可以很快感觉到自己的工作效率的提高。你也可以使用 [Python][8] 编写脚本,但是 Script-Fu 是默认选项。 + +为了帮助你熟悉 Scheme,GIMP 的文档提供了深入的 [教程][9]。Scheme 是一种类似于 [Lisp][10] 的语言,因此它的主要特征是使用 [前缀][11] 表示法和 [许多括号][12]。函数和运算符通过前缀应用到操作数列表中: + +``` +(函数名 操作数 操作数 ...) 
+ +(+ 2 3) +↳ 返回 5 + +(list 1 2 3 5) +↳ 返回一个列表,包含 1、 2、 3 和 5 +``` + +我花了一些时间才找到完整的 GIMP 函数列表文档,但实际上很简单。在 **Help** 菜单中,有一个 **Procedure Browser**,其中包含所有可用的函数的丰富详尽文档。 + +![GIMP Procedure Browser][13] + +### 使用 GIMP 的批处理模式 + +你可以使用 `-b` 选项以批处理的方式启动 GIMP。`-b` 选项的参数可以是你想要运行的脚本,或者用一个 `-` 来让 GIMP 进入交互模式而不是命令行模式。正常情况下,当你启动 GIMP 的时候,它会启动图形界面,但是你可以使用 `-i` 选项来禁用它。 + +### 开始编写你的第一个脚本 + +创建一个名为 `chalk.scm` 的文件,并把它保存在 **Preferences** 窗口中 **Folders** 选项下的 **Script** 中指定的 `script` 文件夹下。就我而言,是在 `$HOME/.config/GIMP/2.10/scripts`。 + +在 `chalk.scm` 文件中,写入下面的内容: + +``` +(define (chalk filename grow-pixels spread-amount percentage) + (let* ((image (car (gimp-file-load RUN-NONINTERACTIVE filename filename))) + (drawable (car (gimp-image-get-active-layer image))) + (new-filename (string-append "modified_" filename))) + (gimp-image-select-color image CHANNEL-OP-REPLACE drawable '(0 0 0)) + (gimp-selection-grow image grow-pixels) + (gimp-context-set-foreground '(0 0 0)) + (gimp-edit-bucket-fill drawable BUCKET-FILL-FG LAYER-MODE-NORMAL 100 255 TRUE 0 0) + (gimp-selection-none image) + (plug-in-spread RUN-NONINTERACTIVE image drawable spread-amount spread-amount) + (gimp-drawable-invert drawable TRUE) + (plug-in-randomize-hurl RUN-NONINTERACTIVE image drawable percentage 1 TRUE 0) + (gimp-file-save RUN-NONINTERACTIVE image drawable new-filename new-filename) + (gimp-image-delete image))) +``` + +### 定义脚本变量 + +在脚本中, `(define (chalk filename grow-pixels spread-amound percentage) ...)` 函数定义了一个名叫 `chalk` 的新函数。它的函数参数是 `filename`、`grow-pixels`、`spread-amound` 和 `percentage`。在 `define` 中的所有内容都是 `chalk` 函数的主体。你可能已经注意到,那些名字比较长的变量中间都有一个破折号来分割。这是类 Lisp 语言的惯用风格。 + +`(let* ...)` 函数是一个特殊过程procedure,可以让你定义一些只有在这个函数体中才有效的临时变量。临时变量有 `image`、`drawable` 以及 `new-filename`。它使用 `gimp-file-load` 来载入图片,这会返回它所包含的图片的一个列表。并通过 `car` 函数来选取第一项。然后,它选择第一个活动层并将其引用存储在 `drawable` 变量中。最后,它定义了包含图像新文件名的字符串。 + +为了帮助你更好地了解该过程,我将对其进行分解。首先,启动带 GUI 的 GIMP,然后你可以通过依次点击 **Filters → Script-Fu → Console** 来打开 Script-Fu 控制台。 在这种情况下,不能使用 
`let *`,因为变量必须是持久的。使用 `define` 函数定义 `image` 变量,并为其提供查找图像的正确路径: + +``` +(define image (car (gimp-file-load RUN-NONINTERACTIVE "Fourier.png" "Fourier.png"))) +``` + +似乎在 GUI 中什么也没有发生,但是图像已加载。 你需要通过以下方式来让图像显示: + +``` +(gimp-display-new image) +``` + +![GUI with the displayed image][14] + +现在,获取活动层并将其存储在 `drawable` 变量中: + +``` +(define drawable (car (gimp-image-get-active-layer image))) +``` + +最后,定义图像的新文件名: + +``` +(define new-filename "modified_Fourier.png") +``` + +运行命令后,你将在 Script-Fu 控制台中看到以下内容: + +![Script-Fu console][15] + +在对图像执行操作之前,需要定义将在脚本中作为函数参数的变量: + +``` +(define grow-pixels 2) +(define spread-amount 4) +(define percentage 3) +``` + +### 处理图片 + +现在,所有相关变量都已定义,你可以对图像进行操作了。 脚本的操作可以直接在控制台上执行。第一步是在活动层上选择黑色。颜色被写成一个由三个数字组成的列表,即 `(list 0 0 0)` 或者是 `'(0 0 0)`: + +``` +(gimp-image-select-color image CHANNEL-OP-REPLACE drawable '(0 0 0)) +``` + +![Image with the selected color][16] + +扩大选取两个像素: + +``` +(gimp-selection-grow image grow-pixels) +``` + +![Image with the selected color][17] + +将前景色设置为黑色,并用它填充选区: + +``` +(gimp-context-set-foreground '(0 0 0)) +(gimp-edit-bucket-fill drawable BUCKET-FILL-FG LAYER-MODE-NORMAL 100 255 TRUE 0 0) +``` + +![Image with the selection filled with black][18] + +删除选区: + +``` +(gimp-selection-none image) +``` + +![Image with no selection][19] + +随机移动像素: + +``` +(plug-in-spread RUN-NONINTERACTIVE image drawable spread-amount spread-amount) +``` + +![Image with pixels moved around][20] + +反转图像颜色: + +``` +(gimp-drawable-invert drawable TRUE) +``` + +![Image with pixels moved around][21] + +随机化像素: + +``` +(plug-in-randomize-hurl RUN-NONINTERACTIVE image drawable percentage 1 TRUE 0) +``` + +![Image with pixels moved around][22] + +将图像保存到新文件: + +``` +(gimp-file-save RUN-NONINTERACTIVE image drawable new-filename new-filename) +``` + +![Equations of the Fourier transform and its inverse][23] + +*傅立叶变换方程 (Cristiano Fontana, [CC BY-SA 4.0][4])* + +### 以批处理模式运行脚本 + +现在你知道了脚本的功能,可以在批处理模式下运行它: + +``` +gimp -i -b '(chalk "Fourier.png" 2 4 3)' -b 
'(gimp-quit 0)' +``` + +在运行 `chalk` 函数之后,它将使用 `-b` 选项调用第二个函数 `gimp-quit` 来告诉 GIMP 退出。 + +### 了解更多 + +本教程向你展示了如何开始使用 GIMP 的内置脚本功能,并介绍了 GIMP 的 Scheme 实现:Script-Fu。如果你想继续前进,建议你查看官方文档及其[入门教程][9]。如果你不熟悉 Scheme 或 Lisp,那么一开始的语法可能有点吓人,但我还是建议你尝试一下。这可能是一个不错的惊喜。 + +-------------------------------------------------------------------------------- + +via: https://opensource.com/article/21/1/gimp-scripting + +作者:[Cristiano L. Fontana][a] +选题:[lujun9972][b] +译者:[amwps290](https://github.com/amwps290) +校对:[wxy](https://github.com/wxy) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensource.com/users/cristianofontana +[b]: https://github.com/lujun9972 +[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/painting_computer_screen_art_design_creative.png?itok=LVAeQx3_ (Painting art on a computer screen) +[2]: https://www.gimp.org/ +[3]: https://opensource.com/sites/default/files/uploads/fourier.png (Fourier transform equations) +[4]: https://creativecommons.org/licenses/by-sa/4.0/ +[5]: https://docs.gimp.org/en/gimp-filters-script-fu.html +[6]: https://docs.gimp.org/en/gimp-concepts-script-fu.html +[7]: https://en.wikipedia.org/wiki/Scheme_(programming_language) +[8]: https://docs.gimp.org/en/gimp-filters-python-fu.html +[9]: https://docs.gimp.org/en/gimp-using-script-fu-tutorial.html +[10]: https://en.wikipedia.org/wiki/Lisp_%28programming_language%29 +[11]: https://en.wikipedia.org/wiki/Polish_notation +[12]: https://xkcd.com/297/ +[13]: https://opensource.com/sites/default/files/uploads/procedure_browser.png (GIMP Procedure Browser) +[14]: https://opensource.com/sites/default/files/uploads/gui01_image.png (GUI with the displayed image) +[15]: https://opensource.com/sites/default/files/uploads/console01_variables.png (Script-Fu console) +[16]: https://opensource.com/sites/default/files/uploads/gui02_selected.png (Image with the selected color) +[17]: 
https://opensource.com/sites/default/files/uploads/gui03_grow.png (Image with the selected color) +[18]: https://opensource.com/sites/default/files/uploads/gui04_fill.png (Image with the selection filled with black) +[19]: https://opensource.com/sites/default/files/uploads/gui05_no_selection.png (Image with no selection) +[20]: https://opensource.com/sites/default/files/uploads/gui06_spread.png (Image with pixels moved around) +[21]: https://opensource.com/sites/default/files/uploads/gui07_invert.png (Image with pixels moved around) +[22]: https://opensource.com/sites/default/files/uploads/gui08_hurl.png (Image with pixels moved around) +[23]: https://opensource.com/sites/default/files/uploads/modified_fourier.png (Equations of the Fourier transform and its inverse) diff --git a/published/20210127 3 email mistakes and how to avoid them.md b/published/20210127 3 email mistakes and how to avoid them.md new file mode 100644 index 0000000000..4e0b3c8cc9 --- /dev/null +++ b/published/20210127 3 email mistakes and how to avoid them.md @@ -0,0 +1,77 @@ +[#]: collector: (lujun9972) +[#]: translator: (geekpi) +[#]: reviewer: (wxy) +[#]: publisher: (wxy) +[#]: url: (https://linux.cn/article-13086-1.html) +[#]: subject: (3 email mistakes and how to avoid them) +[#]: via: (https://opensource.com/article/21/1/email-mistakes) +[#]: author: (Kevin Sonney https://opensource.com/users/ksonney) + +3 个电子邮件错误以及如何避免它们 +====== + +> 自动化是美好的,但也不总是那样。确保你的电子邮件自动回复和抄送配置正确,这样你就不会浪费大家的时间。 + +![](https://img.linux.net.cn/data/attachment/album/202102/05/090335a888nqn7pcolblzn.jpg) + +在前几年,这个年度系列涵盖了单个的应用。今年,我们除了关注 2021 年的策略外,还将关注一体化解决方案。欢迎来到 2021 年 21 天生产力的第十七天。 + +好了,我们已经谈到了一些我们应该对电子邮件做的事情:[不要再把它当作即时通讯工具][2]、[优先处理事情][3]、[努力达到收件箱 0 新邮件][4],以及[有效过滤][5]。但哪些事情是我们不应该做的呢? 
+
+![Automated email reply][6]
+
+*你真幸运 (Kevin Sonney, [CC BY-SA 4.0][7])*
+
+### 1、请不要对所有事情自动回复
+
+邮件列表中总有些人,他们去度假了,并设置了一个“我在度假”的自动回复信息。然而,他们没有正确地设置,所以它对列表上的每一封邮件都会回复“我在度假”,直到管理员将其屏蔽或取消订阅。
+
+我们都感受到了这种痛苦,我承认过去至少有一次,我就是那个人。
+
+从我的错误中吸取教训,并确保你的自动回复器或假期信息对它们将回复谁和多久回复一次有限制。
+
+![An actual email with lots of CC'd recipients][8]
+
+*这是一封真实的电子邮件 (Kevin Sonney, [CC BY-SA 4.0][7])*
+
+### 2、请不要抄送给所有人
+
+我们都至少做过一次。我们需要发送邮件的人员众多,因此我们只需抄送他们*所有人*。有时这是有必要的,但大多数时候,它并不是。当然,你邀请每个人在庭院吃生日蛋糕,或者你的表姐要结婚了,或者公司刚拿到一个大客户,这都是好事。如果你有邮件列表的话,请用邮件列表,如果没有的话,请给每个人密送。说真的,密送是你的朋友。
+
+### 3、回复所有人不是你的朋友
+
+![Reply options in Kmail][9]
+
+这一条与上一条是相辅相成的。我不知道有多少次看到有人向一个名单(或者是一个大群的人)发送信息,而这个信息本来是要发给一个人的。我见过这样发送的相对好的邮件,以及随之而来的纪律处分邮件。
+
+认真地说,除非必须,不要使用“回复全部”按钮。即使是这样,也要确保你*真的*需要这样做。
+
+有些电子邮件应用比其他应用管理得更好。Kmail,[KDE Kontact][10] 的电子邮件组件,在**回复**工具栏按钮的子菜单中,有几个不同的回复选项。你可以选择只回复给**发件人**字段中的任何实体(通常是一个人,但有时是一个邮件列表),或者回复给作者(在抄送或密送中去除每一个人),或者只回复给一个邮件列表,或者回复*所有人*(不要这样做)。看到明确列出的选项,的确可以帮助你了解谁会收到你要发送的邮件副本,这有时比你想象的更发人深省。我曾经发现,当我意识到一个评论并不一定会对一个复杂的讨论的最终目标有所帮助时,我就会把邮件的收件人改为只是作者,而不是整个列表。
+
+(另外,如果你写的邮件可能会给你的 HR 或公司带来麻烦,请在点击发送之前多考虑下。)
+
+希望你已经*不再*在电子邮件中这么做了。如果你认识的人是这样的呢?欢迎与他们分享。
+
+--------------------------------------------------------------------------------
+
+via: https://opensource.com/article/21/1/email-mistakes
+
+作者:[Kevin Sonney][a]
+选题:[lujun9972][b]
+译者:[geekpi](https://github.com/geekpi)
+校对:[wxy](https://github.com/wxy)
+
+本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
+
+[a]: https://opensource.com/users/ksonney
+[b]: https://github.com/lujun9972
+[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/browser_screen_windows_files.png?itok=kLTeQUbY (Computer screen with files or windows open)
+[2]: https://opensource.com/article/21/1/email-rules
+[3]: https://opensource.com/article/21/1/prioritize-tasks
+[4]: https://opensource.com/article/21/1/inbox-zero
+[5]: https://opensource.com/article/21/1/email-filter
+[6]: 
https://opensource.com/sites/default/files/day17-image1.png +[7]: https://creativecommons.org/licenses/by-sa/4.0/ +[8]: https://opensource.com/sites/default/files/day17-image2.png +[9]: https://opensource.com/sites/default/files/kmail-replies.jpg (Reply options in Kmail) +[10]: https://opensource.com/article/21/1/kde-kontact diff --git a/published/20210127 Why I use the D programming language for scripting.md b/published/20210127 Why I use the D programming language for scripting.md new file mode 100644 index 0000000000..63618522ea --- /dev/null +++ b/published/20210127 Why I use the D programming language for scripting.md @@ -0,0 +1,176 @@ +[#]: collector: (lujun9972) +[#]: translator: (geekpi) +[#]: reviewer: (wxy) +[#]: publisher: (wxy) +[#]: url: (https://linux.cn/article-13100-1.html) +[#]: subject: (Why I use the D programming language for scripting) +[#]: via: (https://opensource.com/article/21/1/d-scripting) +[#]: author: (Lawrence Aberba https://opensource.com/users/aberba) + +我为什么要用 D 语言写脚本? 
+====== + +> D 语言以系统编程语言而闻名,但它也是编写脚本的一个很好的选择。 + +![](https://img.linux.net.cn/data/attachment/album/202102/09/134351j4m3hrhll0h38plp.jpg) + +D 语言由于其静态类型和元编程能力,经常被宣传为系统编程语言。然而,它也是一种非常高效的脚本语言。 + +由于 Python 在自动化任务和快速实现原型想法方面的灵活性,它通常被选为脚本语言。这使得 Python 对系统管理员、[管理者][2]和一般的开发人员非常有吸引力,因为它可以自动完成他们可能需要手动完成的重复性任务。 + +我们自然也可以期待任何其他的脚本编写语言具有 Python 的这些特性和能力。以下是我认为 D 是一个不错的选择的两个原因。 + +### 1、D 很容易读和写 + +作为一种类似于 C 的语言,D 应该是大多数程序员所熟悉的。任何使用 JavaScript、Java、PHP 或 Python 的人对 D 语言都很容易上手。 + +如果你还没有安装 D,请[安装 D 编译器][3],这样你就可以[运行本文中的 D 代码][4]。你也可以使用[在线 D 编辑器][5]。 + +下面是一个 D 代码的例子,它从一个名为 `words.txt` 的文件中读取单词,并在命令行中打印出来: + +``` +open +source +is +cool +``` + +用 D 语言写脚本: + +``` +#!/usr/bin/env rdmd +// file print_words.d + +// import the D standard library +import std; + +void main(){ + // open the file + File("./words.txt") + + //iterate by line + .byLine + + // print each number + .each!writeln; +} +``` + +这段代码以 [释伴][6] 开头,它将使用 [rdmd][7] 来运行这段代码,`rdmd` 是 D 编译器自带的编译和运行代码的工具。假设你运行的是 Unix 或 Linux,在运行这个脚本之前,你必须使用` chmod` 命令使其可执行: + +``` +chmod u+x print_words.d +``` + +现在脚本是可执行的,你可以运行它: + +``` +./print_words.d +``` + +这将在你的命令行中打印以下内容: + +``` +open +source +is +cool +``` + +恭喜你,你写了第一个 D 语言脚本。你可以看到 D 是如何让你按顺序链式调用函数,这让阅读代码的感觉很自然,类似于你在头脑中思考问题的方式。这个[功能让 D 成为我最喜欢的编程语言][8]。 + +试着再写一个脚本:一个非营利组织的管理员有一个捐款的文本文件,每笔金额都是单独的一行。管理员想把前 10 笔捐款相加,然后打印出金额: + +``` +#!/usr/bin/env rdmd +// file sum_donations.d + +import std; + +void main() +{ + double total = 0; + + // open the file + File("monies.txt") + + // iterate by line + .byLine + + // pick first 10 lines + .take(10) + + // remove new line characters (\n) + .map!(strip) + + // convert each to double + .map!(to!double) + + // add element to total + .tee!((x) { total += x; }) + + // print each number + .each!writeln; + + // print total + writeln("total: ", total); +} +``` + +与 `each` 一起使用的 `!` 操作符是[模板参数][9]的语法。 + +### 2、D 是快速原型设计的好帮手 + +D 是灵活的,它可以快速地将代码敲打在一起,并使其发挥作用。它的标准库中包含了丰富的实用函数,用于执行常见的任务,如操作数据(JSON、CSV、文本等)。它还带有一套丰富的通用算法,用于迭代、搜索、比较和 mutate 
数据。这些巧妙的算法通过定义通用的 [基于范围的接口][10] 而按照序列进行处理。 + +上面的脚本显示了 D 中的链式调用函数如何提供顺序处理和操作数据的要领。D 的另一个吸引人的地方是它不断增长的用于执行普通任务的第三方包的生态系统。一个例子是,使用 [Vibe.d][11] web 框架构建一个简单的 web 服务器很容易。下面是一个例子: + +``` +#!/usr/bin/env dub +/+ dub.sdl: +dependency "vibe-d" version="~>0.8.0" ++/ +void main() +{ + import vibe.d; + listenHTTP(":8080", (req, res) { + res.writeBody("Hello, World: " ~ req.path); + }); + runApplication(); +} +``` + +它使用官方的 D 软件包管理器 [Dub][12],从 [D 软件包仓库][13]中获取 vibe.d Web 框架。Dub 负责下载 Vibe.d 包,然后在本地主机 8080 端口上编译并启动一个 web 服务器。 + +### 尝试一下 D 语言 + +这些只是你可能想用 D 来写脚本的几个原因。 + +D 是一种非常适合开发的语言。你可以很容易从 D 下载页面安装,因此下载编译器,看看例子,并亲自体验 D 语言。 + +-------------------------------------------------------------------------------- + +via: https://opensource.com/article/21/1/d-scripting + +作者:[Lawrence Aberba][a] +选题:[lujun9972][b] +译者:[geekpi](https://github.com/geekpi) +校对:[wxy](https://github.com/wxy) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensource.com/users/aberba +[b]: https://github.com/lujun9972 +[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/lenovo-thinkpad-laptop-concentration-focus-windows-office.png?itok=-8E2ihcF (Woman using laptop concentrating) +[2]: https://opensource.com/article/20/3/automating-community-management-python +[3]: https://tour.dlang.org/tour/en/welcome/install-d-locally +[4]: https://tour.dlang.org/tour/en/welcome/run-d-program-locally +[5]: https://run.dlang.io/ +[6]: https://en.wikipedia.org/wiki/Shebang_(Unix) +[7]: https://dlang.org/rdmd.html +[8]: https://opensource.com/article/20/7/d-programming +[9]: http://ddili.org/ders/d.en/templates.html +[10]: http://ddili.org/ders/d.en/ranges.html +[11]: https://vibed.org +[12]: https://dub.pm/getting_started +[13]: https://code.dlang.org diff --git a/published/20210128 4 tips for preventing notification fatigue.md b/published/20210128 4 tips for preventing notification fatigue.md new file mode 100644 index 
0000000000..843364bb91
--- /dev/null
+++ b/published/20210128 4 tips for preventing notification fatigue.md
@@ -0,0 +1,75 @@
+[#]: collector: (lujun9972)
+[#]: translator: (geekpi)
+[#]: reviewer: (wxy)
+[#]: publisher: (wxy)
+[#]: url: (https://linux.cn/article-13094-1.html)
+[#]: subject: (4 tips for preventing notification fatigue)
+[#]: via: (https://opensource.com/article/21/1/alert-fatigue)
+[#]: author: (Kevin Sonney https://opensource.com/users/ksonney)
+
+防止通知疲劳的 4 个技巧
+======
+
+> 不要让提醒淹没自己:设置重要的提醒,让其它提醒消失。你会感觉更好,工作效率更高。
+
+![](https://img.linux.net.cn/data/attachment/album/202102/06/234924mo3okotjlv7lo3yo.jpg)
+
+在前几年,这个年度系列涵盖了单个的应用。今年,我们除了关注 2021 年的策略外,还将关注一体化解决方案。欢迎来到 2021 年 21 天生产力的第十八天。
+
+当我和人们谈论生产力时,我注意到一件事,那就是几乎每个人都是为了保持更清晰的头脑。我们不是把所有的约会都记在脑子里,而是把它们放在一个数字日历上,在事件发生前提醒我们。我们有数字或实体笔记,这样我们就不必记住某件事的每一个小细节。我们有待办事项清单,提醒我们去做该做的事情。
+
+![Text box offering to send notifications][2]
+
+*NOPE(Kevin Sonney, [CC BY-SA 4.0][3])*
+
+如此多的应用、网站和服务想要提醒我们每一件小事,我们很容易就把它们全部调出来。而且,如果我们不这样做,我们将开始遭受**提醒疲劳**的困扰 —— 这时我们处于边缘的状态,只是等待下一个提醒,并生活在恐惧之中。
+
+提醒疲劳在那些因工作而被随叫随到的人中非常常见。它也发生在那些 **FOMO** (错失恐惧症)的人身上,从而对每一个关键词、标签或在社交媒体上提到他们感兴趣的事情都会设置提醒。
+
+此时,设置能引起我们的注意,但不会被忽略的提醒是件棘手的事情。不过,我确实有一些有用的提示,这样重要的提醒可能会在这个忙碌的世界中越过我们自己的心理过滤器。
+
+![Alert for a task][4]
+
+*我可以忽略这个,对吧?(Kevin Sonney, [CC BY-SA 4.0][3])*
+
+ 1. 弄清楚什么更适合你:视觉提醒或声音提醒。我使用视觉弹出和声音的组合,但这对我是有效的。有些人需要触觉提醒。比如手机或手表的震动。找到适合你的那一种。
+ 2. 为重要的提醒指定独特的音调或视觉效果。我有一个朋友,他的工作页面的铃声最响亮、最讨厌。这旨在吸引他的注意力,让他看到提醒。我的显示器上有一盏灯,当我在待命时收到工作提醒时,它就会闪烁红灯,以及发送通知到我手机上。
+ 3. 关掉那些实际上无关紧要的警报。社交网络、网站和应用都希望得到你的关注。它们不会在意你是否错过会议、约会迟到,或者熬夜到凌晨 4 点。关掉那些不重要的,让那些重要的可以被看到。
+ 
每隔一段时间就改变一下。上个月有效的东西,下个月可能就不行了。我们会适应、习惯一些东西,然后我们会忽略。如果有些东西不奏效,就换个东西试试吧!它不会伤害你,即使无法解决问题,也许你也会学到一些新知识。 + +![Blue alert indicators light][5] + +*蓝色是没问题。红色是有问题。(Kevin Sonney, [CC BY-SA 4.0][3])* + +### 开源和选择 + +一个好的应用可以通知提供很多选择。我最喜欢的一个是 Android 的 Etar 日历应用。[Etar 可以从开源 F-droid 仓库中获得][6]。 + +Etar 和许多开源应用一样,为你提供了很多选项,尤其是通知设置。 + +![Etar][7] + +通过 Etar,你可以激活或停用弹出式通知,设置打盹时间、打盹延迟、是否提醒你已拒绝的事件等。结合有计划的日程安排策略,你可以通过控制数字助手对你需要做的事情进行提示的频率来改变你一天的进程。 + +提醒和警报真的很有用,只要我们收到重要的提醒并予以注意即可。这可能需要做一些实验,但最终,少一些噪音是好事,而且更容易注意到真正需要我们注意的提醒。 + +-------------------------------------------------------------------------------- + +via: https://opensource.com/article/21/1/alert-fatigue + +作者:[Kevin Sonney][a] +选题:[lujun9972][b] +译者:[geekpi](https://github.com/geekpi) +校对:[wxy](https://github.com/wxy) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensource.com/users/ksonney +[b]: https://github.com/lujun9972 +[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/team_dev_email_chat_video_work_wfm_desk_520.png?itok=6YtME4Hj (Working on a team, busy worklife) +[2]: https://opensource.com/sites/default/files/day18-image1.png +[3]: https://creativecommons.org/licenses/by-sa/4.0/ +[4]: https://opensource.com/sites/default/files/day18-image2.png +[5]: https://opensource.com/sites/default/files/day18-image3.png +[6]: https://f-droid.org/en/packages/ws.xsoh.etar/ +[7]: https://opensource.com/sites/default/files/etar.jpg (Etar) diff --git a/published/20210128 How to Run a Shell Script in Linux -Essentials Explained for Beginners.md b/published/20210128 How to Run a Shell Script in Linux -Essentials Explained for Beginners.md new file mode 100644 index 0000000000..f2c5fd298c --- /dev/null +++ b/published/20210128 How to Run a Shell Script in Linux -Essentials Explained for Beginners.md @@ -0,0 +1,168 @@ +[#]: collector: (lujun9972) +[#]: translator: (robsean) +[#]: reviewer: (wxy) +[#]: publisher: (wxy) +[#]: url: 
(https://linux.cn/article-13106-1.html) +[#]: subject: (How to Run a Shell Script in Linux [Essentials Explained for Beginners]) +[#]: via: (https://itsfoss.com/run-shell-script-linux/) +[#]: author: (Abhishek Prakash https://itsfoss.com/author/abhishek/) + +基础:如何在 Linux 中运行一个 Shell 脚本 +====== + +![](https://img.linux.net.cn/data/attachment/album/202102/10/235325tkv7h8dvlp4makkk.jpg) + +在 Linux 中有两种运行 shell 脚本的方法。你可以使用: + +``` +bash script.sh +``` + +或者,你可以像这样执行 shell 脚本: + +``` +./script.sh +``` + +这可能很简单,但没太多解释。不要担心,我将使用示例来进行必要的解释,以便你能理解为什么在运行一个 shell 脚本时要使用给定的特定语法格式。 + +我将使用这一行 shell 脚本来使需要解释的事情变地尽可能简单: + +``` +abhishek@itsfoss:~/Scripts$ cat hello.sh + +echo "Hello World!" +``` + +### 方法 1:通过将文件作为参数传递给 shell 以运行 shell 脚本 + +第一种方法涉及将脚本文件的名称作为参数传递给 shell 。 + +考虑到 bash 是默认 shell,你可以像这样运行一个脚本: + +``` +bash hello.sh +``` + +你知道这种方法的优点吗?**你的脚本不需要执行权限**。对于简单的任务非常方便快速。 + +![在 Linux 中运行一个 Shell 脚本][1] + +如果你还不熟悉,我建议你 [阅读我的 Linux 文件权限详细指南][2] 。 + +记住,将其作为参数传递的需要是一个 shell 脚本。一个 shell 脚本是由命令组成的。如果你使用一个普通的文本文件,它将会抱怨错误的命令。 + +![运行一个文本文件为脚本][3] + +在这种方法中,**你要明确地具体指定你想使用 bash 作为脚本的解释器** 。 + +shell 只是一个程序,并且 bash 只是 Shell 的一种实现。还有其它的 shell 程序,像 ksh 、[zsh][4] 等等。如果你安装有其它的 shell ,你也可以使用它们来代替 bash 。 + +例如,我已安装了 zsh ,并使用它来运行相同的脚本: + +![使用 Zsh 来执行 Shell 脚本][5] + +### 方法 2:通过具体指定 shell 脚本的路径来执行脚本 + +另外一种运行一个 shell 脚本的方法是通过提供它的路径。但是要这样做之前,你的文件必须是可执行的。否则,当你尝试执行脚本时,你将会得到 “权限被拒绝” 的错误。 + +因此,你首先需要确保你的脚本有可执行权限。你可以 [使用 chmod 命令][8] 来给予你自己脚本的这种权限,像这样: + +``` +chmod u+x script.sh +``` + +使你的脚本是可执行之后,你只需输入文件的名称及其绝对路径或相对路径。大多数情况下,你都在同一个目录中,因此你可以像这样使用它: + +``` +./script.sh +``` + +如果你与你的脚本不在同一个目录中,你可以具体指定脚本的绝对路径或相对路径: + +![在其它的目录中运行 Shell 脚本][9] + +在脚本前的这个 `./` 是非常重要的(当你与脚本在同一个目录中)。 + +![][10] + +为什么当你在同一个目录下,却不能使用脚本名称?这是因为你的 Linux 系统会在 `PATH` 环境变量中指定的几个目录中查找可执行的文件来运行。 + +这里是我的系统的 `PATH` 环境变量的值: + +``` +abhishek@itsfoss:~$ echo $PATH +/home/abhishek/.local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin +``` + 
+这意味着在下面目录中具有可执行权限的任意文件都可以在系统的任何位置运行: + + * `/home/abhishek/.local/bin` + * `/usr/local/sbin` + * `/usr/local/bin` + * `/usr/sbin` + * `/usr/bin` + * `/sbin` + * `/bin` + * `/usr/games` + * `/usr/local/games` + * `/snap/bin` + +Linux 命令(像 `ls`、`cat` 等)的二进制文件或可执行文件都位于这些目录中的其中一个。这就是为什么你可以在你系统的任何位置通过使用命令的名称来运作这些命令的原因。看看,`ls` 命令就是位于 `/usr/bin` 目录中。 + +![][11] + +当你使用脚本而不具体指定其绝对路径或相对路径时,系统将不能在 `PATH` 环境变量中找到提及的脚本。 + +> 为什么大多数 shell 脚本在其头部包含 #! /bin/bash ? +> +> 记得我提过 shell 只是一个程序,并且有 shell 程序的不同实现。 +> +> 当你使用 `#! /bin/bash` 时,你是具体指定 bash 作为解释器来运行脚本。如果你不这样做,并且以 `./script.sh` 的方式运行一个脚本,它通常会在你正在运行的 shell 中运行。 +> +> 有问题吗?可能会有。看看,大多数的 shell 语法是大多数种类的 shell 中通用的,但是有一些语法可能会有所不同。 +> +> 例如,在 bash 和 zsh 中数组的行为是不同的。在 zsh 中,数组索引是从 1 开始的,而不是从 0 开始。 +> +>![Bash Vs Zsh][12] +> +> 使用 `#! /bin/bash` 来标识该脚本是 bash 脚本,并且应该使用 bash 作为脚本的解释器来运行,而不受在系统上正在使用的 shell 的影响。如果你使用 zsh 的特殊语法,你可以通过在脚本的第一行添加 `#! /bin/zsh` 的方式来标识其是 zsh 脚本。 +> +> 在 `#!` 和 `/bin/bash` 之间的空格是没有影响的。你也可以使用 `#!/bin/bash` 。 + +### 它有帮助吗? + +我希望这篇文章能够增加你的 Linux 知识。如果你还有问题或建议,请留下评论。 + +专家用户可能依然会挑出我遗漏的东西。但这种初级题材的问题是,要找到信息的平衡点,避免细节过多或过少,并不容易。 + +如果你对学习 bash 脚本感兴趣,在我们专注于系统管理的网站 [Linux Handbook][14] 上,我们有一个 [完整的 Bash 初学者系列][13] 。如果你想要,你也可以 [购买带有附加练习的电子书][15] ,以支持 Linux Handbook。 + +-------------------------------------------------------------------------------- + +via: https://itsfoss.com/run-shell-script-linux/ + +作者:[Abhishek Prakash][a] +选题:[lujun9972][b] +译者:[robsean](https://github.com/robsean) +校对:[wxy](https://github.com/wxy) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://itsfoss.com/author/abhishek/ +[b]: https://github.com/lujun9972 +[1]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2021/01/run-a-shell-script-linux.png?resize=741%2C329&ssl=1 +[2]: https://linuxhandbook.com/linux-file-permissions/ +[3]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2021/01/running-text-file-as-script.png?resize=741%2C329&ssl=1 +[4]: https://www.zsh.org +[5]: 
https://i2.wp.com/itsfoss.com/wp-content/uploads/2021/01/execute-shell-script-with-zsh.png?resize=741%2C253&ssl=1 +[6]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2020/09/run-multiple-commands-in-linux.png?fit=800%2C450&ssl=1 +[7]: https://itsfoss.com/run-multiple-commands-linux/ +[8]: https://linuxhandbook.com/chmod-command/ +[9]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2021/01/running-shell-script-in-other-directory.png?resize=795%2C272&ssl=1 +[10]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2021/01/executing-shell-scripts-linux.png?resize=800%2C450&ssl=1 +[11]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2021/01/locating-command-linux.png?resize=795%2C272&ssl=1 +[12]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2021/01/bash-vs-zsh.png?resize=795%2C386&ssl=1 +[13]: https://linuxhandbook.com/tag/bash-beginner/ +[14]: https://linuxhandbook.com +[15]: https://www.buymeacoffee.com/linuxhandbook diff --git a/published/20210129 Manage containers with Podman Compose.md b/published/20210129 Manage containers with Podman Compose.md new file mode 100644 index 0000000000..d1d3ce6286 --- /dev/null +++ b/published/20210129 Manage containers with Podman Compose.md @@ -0,0 +1,174 @@ +[#]: collector: (lujun9972) +[#]: translator: (geekpi) +[#]: reviewer: (wxy) +[#]: publisher: (wxy) +[#]: url: (https://linux.cn/article-13125-1.html) +[#]: subject: (Manage containers with Podman Compose) +[#]: via: (https://fedoramagazine.org/manage-containers-with-podman-compose/) +[#]: author: (Mehdi Haghgoo https://fedoramagazine.org/author/powergame/) + +用 Podman Compose 管理容器 +====== + +![][1] + +容器很棒,让你可以将你的应用连同其依赖项一起打包,并在任何地方运行。从 2013 年的 Docker 开始,容器已经让软件开发者的生活变得更加轻松。 + +Docker 的一个缺点是它有一个中央守护进程,它以 root 用户的身份运行,这对安全有影响。但这正是 Podman 的用武之地。Podman 是一个 [无守护进程容器引擎][2],用于开发、管理和在你的 Linux 系统上以 root 或无 root 模式运行 OCI 容器。 + +下面这些文章可以用来了解更多关于 Podman 的信息: + + * [使用 Podman 以非 root 用户身份运行 Linux 容器][11] + * [在 Fedora 上使用 Podman 的 Pod][3] + * [在 Fedora 中结合权能使用 Podman][4] + 
+如果你使用过 Docker,你很可能也知道 Docker Compose,它是一个用于编排多个可能相互依赖的容器的工具。要了解更多关于 Docker Compose 的信息,请看它的[文档][5]。 + +### 什么是 Podman Compose? + +[Podman Compose][6] 项目的目标是作为 Docker Compose 的替代品,而不需要对 docker-compose.yaml 文件进行任何修改。由于 Podman Compose 使用吊舱pod 工作,所以最好看下“吊舱”的最新定义。 + +> 一个“吊舱pod ”(如一群鲸鱼或豌豆荚)是由一个或多个[容器][7]组成的组,具有共享的存储/网络资源,以及如何运行容器的规范。 +> +> [Pods - Kubernetes 文档][8] + +(LCTT 译注:容器技术领域大量使用了航海比喻,pod 一词,意为“豆荚”,在航海领域指“吊舱” —— 均指盛装多个物品的容器。常不翻译,考虑前后文,可译做“吊舱”。) + +Podman Compose 的基本思想是,它选中 `docker-compose.yaml` 文件里面定义的服务,为每个服务创建一个容器。Docker Compose 和 Podman Compose 的一个主要区别是,Podman Compose 将整个项目的容器添加到一个单一的吊舱中,而且所有的容器共享同一个网络。如你在例子中看到的,在创建容器时使用 `--add-host` 标志,它甚至用和 Docker Compose 一样的方式命名容器。 + +### 安装 + +Podman Compose 的完整安装说明可以在[项目页面][6]上找到,它有几种方法。要安装最新的开发版本,使用以下命令: + +``` +pip3 install https://github.com/containers/podman-compose/archive/devel.tar.gz +``` + +确保你也安装了 [Podman][9],因为你也需要它。在 Fedora 上,使用下面的命令来安装Podman: + +``` +sudo dnf install podman +``` + +### 例子:用 Podman Compose 启动一个 WordPress 网站 + +想象一下,你的 `docker-compose.yaml` 文件在一个叫 `wpsite` 的文件夹里。一个典型的 WordPress 网站的 `docker-compose.yaml` (或 `docker-compose.yml`) 文件是这样的: + +``` +version: "3.8" +services: + web: + image: wordpress + restart: always + volumes: + - wordpress:/var/www/html + ports: + - 8080:80 + environment: + WORDPRESS_DB_HOST: db + WORDPRESS_DB_USER: magazine + WORDPRESS_DB_NAME: magazine + WORDPRESS_DB_PASSWORD: 1maGazine! + WORDPRESS_TABLE_PREFIX: cz + WORDPRESS_DEBUG: 0 + depends_on: + - db + networks: + - wpnet + db: + image: mariadb:10.5 + restart: always + ports: + - 6603:3306 + + volumes: + - wpdbvol:/var/lib/mysql + + environment: + MYSQL_DATABASE: magazine + MYSQL_USER: magazine + MYSQL_PASSWORD: 1maGazine! + MYSQL_ROOT_PASSWORD: 1maGazine! 
+ networks: + - wpnet +volumes: + wordpress: {} + wpdbvol: {} + +networks: + wpnet: {} +``` + +如果你用过 Docker,你就会知道你可运行 `docker-compose up` 来启动这些服务。Docker Compose 会创建两个名为 `wpsite_web_1` 和 `wpsite_db_1` 的容器,并将它们连接到一个名为 `wpsite_wpnet` 的网络。 + +现在,看看当你在项目目录下运行 `podman-compose up` 时会发生什么。首先,一个以执行命令的目录命名的吊舱被创建。接下来,它寻找 YAML 文件中定义的任何名称的卷,如果它们不存在,就创建卷。然后,在 YAML 文件的 `services` 部分列出的每个服务都会创建一个容器,并添加到吊舱中。 + +容器的命名与 Docker Compose 类似。例如,为你的 web 服务创建一个名为 `wpsite_web_1` 的容器。Podman Compose 还为每个命名的容器添加了 `localhost` 别名。之后,容器仍然可以通过名字互相解析,尽管它们并不像 Docker 那样在一个桥接网络上。要做到这一点,使用选项 `-add-host`。例如,`-add-host web:localhost`。 + +请注意,`docker-compose.yaml` 包含了一个从主机 8080 端口到容器 80 端口的 Web 服务的端口转发。现在你应该可以通过浏览器访问新 WordPress 实例,地址为 `http://localhost:8080`。 + +![WordPress Dashboard][10] + +### 控制 pod 和容器 + +要查看正在运行的容器,使用 `podman ps`,它可以显示 web 和数据库容器以及吊舱中的基础设施容器。 + +``` +CONTAINER ID  IMAGE                               COMMAND               CREATED      STATUS          PORTS                                         NAMES +a364a8d7cec7  docker.io/library/wordpress:latest  apache2-foregroun...  
2 hours ago  Up 2 hours ago  0.0.0.0:8080-&gt;80/tcp, 0.0.0.0:6603-&gt;3306/tcp  wpsite_web_1 +c447024aa104  docker.io/library/mariadb:10.5      mysqld                2 hours ago  Up 2 hours ago  0.0.0.0:8080-&gt;80/tcp, 0.0.0.0:6603-&gt;3306/tcp  wpsite_db_1 +12b1e3418e3e  k8s.gcr.io/pause:3.2 +``` + +你也可以验证 Podman 已经为这个项目创建了一个吊舱,以你执行命令的文件夹命名。 + +``` +POD ID        NAME             STATUS    CREATED      INFRA ID      # OF CONTAINERS +8a08a3a7773e  wpsite           Degraded  2 hours ago  12b1e3418e3e  3 +``` + +要停止容器,在另一个命令窗口中输入以下命令: + +``` +podman-compose down +``` + +你也可以通过停止和删除吊舱来实现。这实质上是停止并移除所有的容器,然后再删除包含的吊舱。所以,同样的事情也可以通过这些命令来实现: + +``` +podman pod stop podname +podman pod rm podname +``` + +请注意,这不会删除你在 `docker-compose.yaml` 中定义的卷。所以,你的 WordPress 网站的状态被保存下来了,你可以通过运行这个命令来恢复它。 + +``` +podman-compose up +``` + +总之,如果你是一个 Podman 粉丝,并且用 Podman 做容器工作,你可以使用 Podman Compose 来管理你的开发和生产中的容器。 + +-------------------------------------------------------------------------------- + +via: https://fedoramagazine.org/manage-containers-with-podman-compose/ + +作者:[Mehdi Haghgoo][a] +选题:[lujun9972][b] +译者:[geekpi](https://github.com/geekpi) +校对:[wxy](https://github.com/wxy) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://fedoramagazine.org/author/powergame/ +[b]: https://github.com/lujun9972 +[1]: https://fedoramagazine.org/wp-content/uploads/2021/01/podman-compose-1-816x345.jpg +[2]: https://podman.io +[3]: https://fedoramagazine.org/podman-pods-fedora-containers/ +[4]: https://linux.cn/article-12859-1.html +[5]: https://docs.docker.com/compose/ +[6]: https://github.com/containers/podman-compose +[7]: https://kubernetes.io/docs/concepts/containers/ +[8]: https://kubernetes.io/docs/concepts/workloads/pods/ +[9]: https://podman.io/getting-started/installation +[10]: https://fedoramagazine.org/wp-content/uploads/2021/01/Screenshot-from-2021-01-08-06-27-29-1024x767.png +[11]: https://linux.cn/article-10156-1.html \ No 
newline at end of file diff --git a/published/20210131 3 wishes for open source productivity in 2021.md b/published/20210131 3 wishes for open source productivity in 2021.md new file mode 100644 index 0000000000..4517bae151 --- /dev/null +++ b/published/20210131 3 wishes for open source productivity in 2021.md @@ -0,0 +1,64 @@ +[#]: collector: (lujun9972) +[#]: translator: (geekpi) +[#]: reviewer: (wxy) +[#]: publisher: (wxy) +[#]: url: (https://linux.cn/article-13113-1.html) +[#]: subject: (3 wishes for open source productivity in 2021) +[#]: via: (https://opensource.com/article/21/1/productivity-wishlist) +[#]: author: (Kevin Sonney https://opensource.com/users/ksonney) + +2021 年开源生产力的 3 个愿望 +====== + +> 2021年,开源世界可以拓展的有很多。这是我特别感兴趣的三个领域。 + +![Looking at a map for career journey][1] + +在前几年,这个年度系列涵盖了单个的应用。今年,我们除了关注 2021 年的策略外,还将关注一体化解决方案。欢迎来到 2021 年 21 天生产力的最后一天。 + +我们已经到了又一个系列的结尾处。因此,让我们谈谈我希望在 2021 年看到的更多事情。 + +### 断网 + +![Large Lego set built by the author][2] + +*我在假期期间制作的(Kevin Sonney, [CC BY-SA 4.0][3])* + +对*许多、许多的*人来说,2020 年是非常困难的一年。疫情大流行、各种政治事件、24 小时的新闻轰炸等等,都对我们的精神健康造成了伤害。虽然我确实谈到了 [抽出时间进行自我护理][4],但我只是想断网:也就是关闭提醒、手机、平板等,暂时无视这个世界。我公司的一位经理实际上告诉我们,如果放假或休息一天,就把所有与工作有关的东西都关掉(除非我们在值班)。我最喜欢的“断网”活动之一就是听音乐和搭建大而复杂的乐高。 + +### 可访问性 + +尽管我谈论的许多技术都是任何人都可以做的,但是软件方面的可访问性都有一定难度。相对于自由软件运动之初,Linux 和开源世界在辅助技术方面已经有了长足发展。但是,仍然有太多的应用和系统不会考虑有些用户没有与设计者相同的能力。我一直在关注这一领域的发展,因为每个人都应该能够访问事物。 + +### 更多的一体化选择 + +![JPilot all in one organizer software interface][5] + +*JPilot(Kevin Sonney, [CC BY-SA 4.0][3])* + +在 FOSS 世界中,一体化的个人信息管理解决方案远没有商业软件世界中那么多。总体趋势是使用单独的应用,它们必须通过配置来相互通信或通过中介服务(如 CalDAV 服务器)。移动市场在很大程度上推动了这一趋势,但我仍然向往像 [JPilot][6] 这样无需额外插件或服务就能完成几乎所有我需要的事情的日子。 + +非常感谢大家阅读这个年度系列。如果你认为我错过了什么,或者明年需要注意什么,请在下方评论。 + +就像我在 [生产力炼金术][7] 上说的那样,尽最大努力保持生产力! 
+ +-------------------------------------------------------------------------------- + +via: https://opensource.com/article/21/1/productivity-wishlist + +作者:[Kevin Sonney][a] +选题:[lujun9972][b] +译者:[geekpi](https://github.com/geekpi) +校对:[wxy](https://github.com/wxy) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensource.com/users/ksonney +[b]: https://github.com/lujun9972 +[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/career_journey_road_gps_path_map_520.png?itok=PpL6jJgY (Looking at a map for career journey) +[2]: https://opensource.com/sites/default/files/day21-image1.png +[3]: https://creativecommons.org/licenses/by-sa/4.0/ +[4]: https://opensource.com/article/21/1/self-care +[5]: https://opensource.com/sites/default/files/day21-image2.png +[6]: http://www.jpilot.org/ +[7]: https://productivityalchemy.com diff --git a/published/20210201 Generate QR codes with this open source tool.md b/published/20210201 Generate QR codes with this open source tool.md new file mode 100644 index 0000000000..bc554bf5cb --- /dev/null +++ b/published/20210201 Generate QR codes with this open source tool.md @@ -0,0 +1,81 @@ +[#]: collector: (lujun9972) +[#]: translator: (geekpi) +[#]: reviewer: (wxy) +[#]: publisher: (wxy) +[#]: url: (https://linux.cn/article-13097-1.html) +[#]: subject: (Generate QR codes with this open source tool) +[#]: via: (https://opensource.com/article/21/2/zint-barcode-generator) +[#]: author: (Don Watkins https://opensource.com/users/don-watkins) + +Zint:用这个开源工具生成二维码 +====== + +> Zint 可以轻松生成 50 多种类型的自定义条码。 + +![](https://img.linux.net.cn/data/attachment/album/202102/07/231854y8ffstg0m6l2fcmz.jpg) + +二维码是一种很好的可以向人们提供信息的方式,且没有打印的麻烦和费用。大多数人的智能手机都支持二维码扫描,无论其操作系统是什么。 + +你可能想使用二维码的原因有很多。也许你是一名教师,希望通过补充材料来测试你的学生,以增强学习效果,或者是一家餐厅,需要在遵守社交距离准则的同时提供菜单。我经常行走于自然小径,那里贴有树木和其他植物的标签。用二维码来补充这些小标签是一种很好的方式,它可以提供关于公园展品的额外信息,而无需花费和维护标识牌。在这些和其他情况下,二维码是非常有用的。 + 
+在互联网上搜索一个简单的、开源的方法来创建二维码时,我发现了 [Zint][2]。Zint 是一个优秀的开源 (GPLv3.0) 生成条码的解决方案。根据该项目的 [GitHub 仓库][3]:“Zint 是一套可以方便地对任何一种公共领域条形码标准的数据进行编码的程序,并允许你将这种功能集成到你自己的程序中。” + +Zint 支持 50 多种类型的条形码,包括二维码(ISO 18004),你可以轻松地创建这些条形码,然后复制和粘贴到 word 文档、博客、维基和其他数字媒体中。人们可以用智能手机扫描这些二维码,快速链接到信息。 + +### 安装 Zint + +Zint 适用于 Linux、macOS 和 Windows。 + +你可以在基于 Ubuntu 的 Linux 发行版上使用 `apt` 安装 Zint 命令: + +``` +$ sudo apt install zint +``` + +我还想要一个图形用户界面(GUI),所以我安装了 Zint-QT: + +``` +$ sudo apt install zint-qt +``` + +请参考手册的[安装部分][4],了解 macOS 和 Windows 的说明。 + +### 用 Zint 生成二维码 + +安装好后,我启动了它,并创建了我的第一个二维码,这是一个指向 Opensource.com 的链接。 + +![Generating QR code with Zint][5] + +Zint 的 50 多个其他条码选项包括许多国家的邮政编码、DotCode、EAN、EAN-14 和通用产品代码 (UPC)。[项目文档][2]中包含了它可以渲染的所有代码的完整列表。 + +你可以将任何条形码复制为 BMP 或 SVG,或者将输出保存为你应用中所需要的任何尺寸的图像文件。这是我的 77x77 像素的二维码。 + +![QR code][7] + +该项目维护了一份出色的用户手册,其中包含了在[命令行][8]和 [GUI][9] 中使用 Zint 的说明。你甚至可以[在线][10]试用 Zint。对于功能请求或错误报告,请[访问网站][11]或[发送电子邮件][12]。 + +-------------------------------------------------------------------------------- + +via: https://opensource.com/article/21/2/zint-barcode-generator + +作者:[Don Watkins][a] +选题:[lujun9972][b] +译者:[geekpi](https://github.com/geekpi) +校对:[wxy](https://github.com/wxy) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensource.com/users/don-watkins +[b]: https://github.com/lujun9972 +[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/wfh_work_home_laptop_work.png?itok=VFwToeMy (Working from home at a laptop) +[2]: http://www.zint.org.uk/ +[3]: https://github.com/zint/zint +[4]: http://www.zint.org.uk/Manual.aspx?type=p&page=2 +[5]: https://opensource.com/sites/default/files/uploads/zintqrcode_generation.png (Generating QR code with Zint) +[6]: https://creativecommons.org/licenses/by-sa/4.0/ +[7]: https://opensource.com/sites/default/files/uploads/zintqrcode_77px.png (QR code) +[8]: http://zint.org.uk/Manual.aspx?type=p&page=4 +[9]: 
http://zint.org.uk/Manual.aspx?type=p&page=3 +[10]: http://www.barcode-generator.org/ +[11]: https://lists.sourceforge.net/lists/listinfo/zint-barcode +[12]: mailto:zint-barcode@lists.sourceforge.net diff --git a/published/20210202 Filmulator is a Simple, Open Source, Raw Image Editor for Linux Desktop.md b/published/20210202 Filmulator is a Simple, Open Source, Raw Image Editor for Linux Desktop.md new file mode 100644 index 0000000000..12f6aa8801 --- /dev/null +++ b/published/20210202 Filmulator is a Simple, Open Source, Raw Image Editor for Linux Desktop.md @@ -0,0 +1,86 @@ +[#]: collector: (lujun9972) +[#]: translator: (geekpi) +[#]: reviewer: (wxy) +[#]: publisher: (wxy) +[#]: url: (https://linux.cn/article-13119-1.html) +[#]: subject: (Filmulator is a Simple, Open Source, Raw Image Editor for Linux Desktop) +[#]: via: (https://itsfoss.com/filmulator/) +[#]: author: (Abhishek Prakash https://itsfoss.com/author/abhishek/) + +Filmulator:一个简单的、开源的 Raw 图像编辑器 +====== + +![](https://img.linux.net.cn/data/attachment/album/202102/15/100616o54wb5h4aqgmq4qe.jpg) + +> Filmulator 是一个开源的具有库管理功能的 raw 照片编辑应用,侧重于简单、易用和简化的工作流程。 + +### Filmulator:适用于 Linux(和 Windows)的 raw 图像编辑器 + +[Linux 中有一堆 raw 照片编辑器][1],[Filmulator][2] 就是其中之一。Filmulator 的目标是仅提供基本要素,从而使 raw 图像编辑变得简单。它还增加了库处理的功能,如果你正在为你的相机图像寻找一个不错的应用,这是一个加分项。 + +对于那些不知道 raw 的人来说,[raw 图像文件][3]是一个最低限度处理、未压缩的文件。换句话说,它是未经压缩的数字文件,并且只经过了最低限度的处理。专业摄影师更喜欢用 raw 文件拍摄照片,并自行处理。普通人从智能手机拍摄照片,它通常被压缩为 JPEG 格式或被过滤。 + +让我们来看看在 Filmulator 编辑器中会有什么功能。 + +### Filmulator 的功能 + +![Filmulator interface][4] + +Filmulator 宣称,它不是典型的“胶片效果滤镜” —— 这只是复制了胶片的外在特征。相反,Filmulator 从根本上解决了胶片的魅力所在:显影过程。 + +它模拟了胶片的显影过程:从胶片的“曝光”,到每个像素内“银晶”的生长,再到“显影剂”在相邻像素之间与储槽中大量显影剂的扩散。 + +Fimulator 开发者表示,这种模拟带来了以下好处: + + * 大的明亮区域变得更暗,压缩了输出动态范围。 + * 小的明亮区域使周围环境变暗,增强局部对比度。 + * 在明亮区域,饱和度得到增强,有助于保留蓝天、明亮肤色和日落的色彩。 + * 在极度饱和的区域,亮度会被减弱,有助于保留细节,例如花朵。 + +以下是经 Filmulator 处理后的 raw 图像的对比,以自然的方式增强色彩,而不会引起色彩剪切。 + +![原图][5] + +![处理后][10] + +### 在 Ubuntu/Linux 上安装 Filmulator + +Filmulator 有一个 
AppImage 可用,这样你就可以在 Linux 上轻松使用它。使用 [AppImage 文件][6]真的很简单。下载后,使它可执行,然后双击运行。 + +- [下载 Filmulator for Linux][7] + +对 Windows 用户也有一个 Windows 版本。除此之外,你还可以随时前往[它的 GitHub 仓库][8]查看它的源代码。 + +有一份[小文档][9]来帮助你开始使用 Fimulator。 + +### 总结 + +Fimulator 的设计理念是为任何工作提供最好的工具,而且只有这一个工具。这意味着牺牲了灵活性,但获得了一个大大简化和精简的用户界面。 + +我连业余摄影师都不是,更别说是专业摄影师了。我没有单反或其他高端摄影设备。因此,我无法测试和分享我对 Filmulator 的实用性的经验。 + +如果你有更多处理 raw 图像的经验,请尝试下 Filmulator,并分享你的意见。有一个 AppImage 可以让你快速测试它,看看它是否适合你的需求。 + +-------------------------------------------------------------------------------- + +via: https://itsfoss.com/filmulator/ + +作者:[Abhishek Prakash][a] +选题:[lujun9972][b] +译者:[geekpi](https://github.com/geekpi) +校对:[wxy](https://github.com/wxy) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://itsfoss.com/author/abhishek/ +[b]: https://github.com/lujun9972 +[1]: https://itsfoss.com/raw-image-tools-linux/ +[2]: https://filmulator.org/ +[3]: https://www.findingtheuniverse.com/what-is-raw-in-photography/ +[4]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2021/02/Filmulate.jpg?resize=799%2C463&ssl=1 +[5]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2021/02/image-without-filmulator.jpeg?ssl=1 +[6]: https://itsfoss.com/use-appimage-linux/ +[7]: https://filmulator.org/download/ +[8]: https://github.com/CarVac/filmulator-gui +[9]: https://github.com/CarVac/filmulator-gui/wiki +[10]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2021/02/image-with-filmulator.jpeg?ssl=1 \ No newline at end of file diff --git a/published/20210203 Paru - A New AUR Helper and Pacman Wrapper Based on Yay.md b/published/20210203 Paru - A New AUR Helper and Pacman Wrapper Based on Yay.md new file mode 100644 index 0000000000..c30c45aaab --- /dev/null +++ b/published/20210203 Paru - A New AUR Helper and Pacman Wrapper Based on Yay.md @@ -0,0 +1,153 @@ +[#]: collector: (lujun9972) +[#]: translator: (geekpi) +[#]: reviewer: (wxy) +[#]: publisher: (wxy) +[#]: url: 
(https://linux.cn/article-13122-1.html) +[#]: subject: (Paru – A New AUR Helper and Pacman Wrapper Based on Yay) +[#]: via: (https://itsfoss.com/paru-aur-helper/) +[#]: author: (Dimitrios Savvopoulos https://itsfoss.com/author/dimitrios/) + +Paru:基于 Yay 的新 AUR 助手 +====== + +![](https://img.linux.net.cn/data/attachment/album/202102/16/101301ldekk9kkpqlplke6.jpg) + +[用户选择 Arch Linux][1] 或 [基于 Arch 的 Linux 发行版][2]的主要原因之一就是 [Arch 用户仓库(AUR)][3]。 + +遗憾的是,[pacman][4],也就是 Arch 的包管理器,不能以类似官方仓库的方式访问 AUR。AUR 中的包是以 [PKGBUILD][5] 的形式存在的,需要手动过程来构建。 + +AUR 助手可以自动完成这个过程。毫无疑问,[yay][6] 是最受欢迎和备受青睐的 AUR 助手之一。 + +最近,`yay` 的两位开发者之一的 [Morganamilo][7][宣布][8]将退出 `yay` 的维护工作,以开始自己的 AUR 助手 [paru][9]。`paru` 是用 [Rust][10] 编写的,而 `yay` 是用 [Go][11] 编写的,它的设计是基于 yay 的。 + +请注意,`yay` 还没有结束支持,它仍然由 [Jguer][12] 积极维护。他还[评论][13]说,`paru` 可能适合那些寻找丰富功能的 AUR 助手的用户。因此我推荐大家尝试一下。 + +### 安装 Paru AUR 助手 + +要安装 `paru`,打开你的终端,逐一输入以下命令: + +``` +sudo pacman -S --needed base-devel +git clone https://aur.archlinux.org/paru.git +cd paru +makepkg -si +``` + +现在已经安装好了,让我们来看看如何使用它。 + +### 使用 Paru AUR 助手的基本命令 + +在我看来,这些都是 `paru` 最基本的命令。你可以在 [GitHub][9] 的官方仓库中探索更多。 + + * `paru <用户输入>`:搜索并安装“用户输入” + * `paru -`:`paru -Syu` 的别名 + * `paru -Sua`:仅升级 AUR 包。 + * `paru -Qua`:打印可用的 AUR 更新 + * `paru -Gc <用户输入>`:显示“用户输入”的 AUR 评论 + +### 充分使用 Paru AUR 助手 + +你可以在 GitHub 上访问 `paru` 的[更新日志][14]来查看完整的变更日志历史,或者你可以在[首次发布][15]中查看对 `yay` 的变化。 + +#### 在 Paru 中启用颜色 + +要在 `paru` 中启用颜色,你必须先在 `pacman` 中启用它。所有的[配置文件][16]都在 `/etc` 目录下。在此例中,我[使用 Nano 文本编辑器][17],但是,你可以选择使用任何[基于终端的文本编辑器][18]。 + +``` +sudo nano /etc/pacman.conf +``` + +打开 `pacman` 配置文件后,取消 `Color` 的注释,即可启用此功能。 + +![][19] + +#### 反转搜索顺序 + +根据你的搜索条件,最相关的包通常会显示在搜索结果的顶部。在 `paru` 中,你可以反转搜索顺序,使你的搜索更容易。 + +与前面的例子类似,打开 `paru` 配置文件: + +``` +sudo nano /etc/paru.conf +``` + +取消注释 `BottomUp` 项,然后保存文件。 + +![][20] + +如你所见,顺序是反转的,第一个包出现在了底部。 + +![][21] + +#### 编辑 PKGBUILD (对于高级用户) + +如果你是一个有经验的 Linux 用户,你可以通过 `paru` 编辑 AUR 包。要做到这一点,你需要在 `paru` 配置文件中启用该功能,并设置你所选择的文件管理器。 + +在此例中,我将使用配置文件中的默认值,即 vifm 
文件管理器。如果你还没有使用过它,你可能需要安装它。 + +``` +sudo pacman -S vifm +sudo nano /etc/paru.conf +``` + +打开配置文件,如下所示取消注释。 + +![][22] + +让我们回到 [Google Calendar][23] 的 AUR 包,并尝试安装它。系统会提示你审查该软件包。输入 `Y` 并按下回车。 + +![][24] + +从文件管理器中选择 PKGBUILD,然后按下回车查看软件包。 + +![][25] + +你所做的任何改变都将是永久性的,下次升级软件包时,你的改变将与上游软件包合并。 + +![][26] + +### 总结 + +`paru` 是 [AUR 助手家族][27]的又一个有趣的新成员,前途光明。此时,我不建议更换 `yay`,因为它还在维护,但一定要试试 `paru`。你可以把它们两个都安装到你的系统中,然后得出自己的结论。 + +-------------------------------------------------------------------------------- + +via: https://itsfoss.com/paru-aur-helper/ + +作者:[Dimitrios Savvopoulos][a] +选题:[lujun9972][b] +译者:[geekpi](https://github.com/geekpi) +校对:[wxy](https://github.com/wxy) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://itsfoss.com/author/dimitrios/ +[b]: https://github.com/lujun9972 +[1]: https://itsfoss.com/why-arch-linux/ +[2]: https://itsfoss.com/arch-based-linux-distros/ +[3]: https://itsfoss.com/aur-arch-linux/ +[4]: https://itsfoss.com/pacman-command/ +[5]: https://wiki.archlinux.org/index.php/PKGBUILD +[6]: https://news.itsfoss.com/qt-6-released/ +[7]: https://github.com/Morganamilo +[8]: https://www.reddit.com/r/archlinux/comments/jjn1c1/paru_v100_and_stepping_away_from_yay/ +[9]: https://github.com/Morganamilo/paru +[10]: https://www.rust-lang.org/ +[11]: https://golang.org/ +[12]: https://github.com/Jguer +[13]: https://aur.archlinux.org/packages/yay/#pinned-788241 +[14]: https://github.com/Morganamilo/paru/releases +[15]: https://github.com/Morganamilo/paru/releases/tag/v1.0.0 +[16]: https://linuxhandbook.com/linux-directory-structure/#-etc-configuration-files +[17]: https://itsfoss.com/nano-editor-guide/ +[18]: https://itsfoss.com/command-line-text-editors-linux/ +[19]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2021/01/pacman.conf-color.png?resize=800%2C480&ssl=1 +[20]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2021/01/paru.conf-bottomup.png?resize=800%2C480&ssl=1 +[21]: 
https://i2.wp.com/itsfoss.com/wp-content/uploads/2021/01/paru.conf-bottomup-2.png?resize=800%2C480&ssl=1 +[22]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2021/01/paru.conf-vifm.png?resize=732%2C439&ssl=1 +[23]: https://aur.archlinux.org/packages/gcalcli/ +[24]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2021/01/paru-proceed-for-review.png?resize=800%2C480&ssl=1 +[25]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2021/01/paru-proceed-for-review-2.png?resize=800%2C480&ssl=1 +[26]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2021/01/paru-proceed-for-review-3.png?resize=800%2C480&ssl=1 +[27]: https://itsfoss.com/best-aur-helpers/ +[28]: https://news.itsfoss.com/ \ No newline at end of file diff --git a/published/20210204 A hands-on tutorial of SQLite3.md b/published/20210204 A hands-on tutorial of SQLite3.md new file mode 100644 index 0000000000..057e0ce98e --- /dev/null +++ b/published/20210204 A hands-on tutorial of SQLite3.md @@ -0,0 +1,255 @@ +[#]: collector: "lujun9972" +[#]: translator: "amwps290" +[#]: reviewer: "wxy" +[#]: publisher: "wxy" +[#]: url: "https://linux.cn/article-13117-1.html" +[#]: subject: "A hands-on tutorial of SQLite3" +[#]: via: "https://opensource.com/article/21/2/sqlite3-cheat-sheet" +[#]: author: "Klaatu https://opensource.com/users/klaatu" + +SQLite3 实践教程 +====== + +> 开始使用这个功能强大且通用的数据库吧。 + +![](https://img.linux.net.cn/data/attachment/album/202102/14/131146jsx2kvyobwxwswct.jpg) + +应用程序经常需要保存数据。无论你的用户是创建简单的文本文档、复杂的图形布局、游戏进度还是错综复杂的客户和订单号列表,软件通常都意味着生成数据。有很多方法可以存储数据以供重复使用。你可以将文本转储为 INI、[YAML][2]、XML 或 JSON 等配置格式,可以输出原始的二进制数据,也可以将数据存储在结构化数据库中。SQLite 是一个自包含的、轻量级数据库,可轻松创建、解析、查询、修改和传输数据。 + +- 下载 [SQLite3 备忘录][3] + +SQLite 专用于 [公共领域][4],[从技术上讲,这意味着它没有版权,因此不需要许可证][5]。如果你需要许可证,则可以 [购买所有权担保][6]。SQLite 非常常见,大约有 1 万亿个 SQLite 数据库正在使用中。在每个基于 Webkit 的 Web 浏览器,现代电视机,汽车多媒体系统以及无数其他软件应用程序中,Android 和 iOS 设备, macOS 和 Windows 10 计算机,大多数 Linux 系统上都包含多个这种数据库。 + +总而言之,它是用于存储和组织数据的一个可靠而简单的系统。 + +### 安装 + +你的系统上可能已经有 SQLite 
库,但是你需要安装其命令行工具才能直接使用它。在 Linux上,你可能已经安装了这些工具。该工具提供的命令是 `sqlite3` (而不仅仅是 sqlite)。 + +如果没有在你的 Linux 或 BSD 上安装 SQLite,你则可以从软件仓库中或 ports 树中安装 SQLite,也可以从源代码或已编译的二进制文件进行[下载并安装][7]。 + +在 macOS 或 Windows 上,你可以从 [sqlite.org][7] 下载并安装 SQLite 工具。 + +### 使用 SQLite + +通过编程语言与数据库进行交互是很常见的。因此,像 Java、Python、Lua、PHP、Ruby、C++ 以及其他编程语言都提供了 SQLite 的接口(或“绑定”)。但是,在使用这些库之前,了解数据库引擎的实际情况以及为什么你对数据库的选择很重要是有帮助的。本文向你介绍 SQLite 和 `sqlite3` 命令,以便你熟悉该数据库如何处理数据的基础知识。 + +### 与 SQLite 交互 + +你可以使用 `sqlite3` 命令与 SQLite 进行交互。 该命令提供了一个交互式的 shell 程序,以便你可以查看和更新数据库。 + +``` +$ sqlite3 +SQLite version 3.34.0 2020-12-01 16:14:00 +Enter ".help" for usage hints. +Connected to a transient in-memory database. +Use ".open FILENAME" to reopen on a persistent database. +sqlite> +``` + +该命令将使你处于 SQLite 的子 shell 中,因此现在的提示符是 SQLite 的提示符。你以前使用的 Bash 命令在这里将不再适用。你必须使用 SQLite 命令。要查看 SQLite 命令列表,请输入 `.help`: + +``` +sqlite> .help +.archive ... Manage SQL archives +.auth ON|OFF SHOW authorizer callbacks +.backup ?DB? FILE Backup DB (DEFAULT "main") TO FILE +.bail ON|off Stop after hitting an error. DEFAULT OFF +.binary ON|off Turn BINARY output ON OR off. DEFAULT OFF +.cd DIRECTORY CHANGE the working directory TO DIRECTORY +[...] 
+``` + +这些命令中的其中一些是二进制的,而其他一些则需要唯一的参数(如文件名、路径等)。这些是 SQLite Shell 的管理命令,不是用于数据库查询。数据库以结构化查询语言(SQL)进行查询,许多 SQLite 查询与你从 [MySQL][8] 和 [MariaDB][9] 数据库中已经知道的查询相同。但是,数据类型和函数有所不同,因此,如果你熟悉另一个数据库,请特别注意细微的差异。 + +### 创建数据库 + +启动 SQLite 时,可以打开内存数据库,也可以选择要打开的数据库: + +``` +$ sqlite3 mydatabase.db +``` + +如果还没有数据库,则可以在 SQLite 提示符下创建一个数据库: + +``` +sqlite> .open mydatabase.db +``` + +现在,你的硬盘驱动器上有一个空文件,可以用作 SQLite 数据库。 文件扩展名 `.db` 是任意的。你也可以使用 `.sqlite` 或任何你想要的后缀。 + +### 创建一个表 + +数据库包含一些table,可以将其可视化为电子表格。有许多的行(在数据库中称为记录record)和列。行和列的交集称为字段field。 + +结构化查询语言(SQL)以其提供的内容而命名:一种以可预测且一致的语法查询数据库内容以接收有用的结果的方法。SQL 读起来很像普通的英语句子,即使有点机械化。当前,你的数据库是一个没有任何表的空数据库。 + +你可以使用 `CREATE` 来创建一个新表,你可以和 `IF NOT EXISTS` 结合使用。以便不会破坏现在已有的同名的表。 + +你无法在 SQLite 中创建一个没有任何字段的空表,因此在尝试 `CREATE` 语句之前,必须考虑预期表将存储的数据类型。在此示例中,我将使用以下列创建一个名为 `member` 的表: + + * 唯一标识符 + * 人名 + * 记录创建的时间和日期 + +#### 唯一标识符 + +最好用唯一的编号来引用记录,幸运的是,SQLite 认识到这一点,创建一个名叫 `rowid` 的列来为你自动实现这一点。 + +无需 SQL 语句即可创建此字段。 + +#### 数据类型 + +对于我的示例表中,我正在创建一个 `name` 列来保存 `TEXT` 类型的数据。为了防止在没有指定字段数据的情况下创建记录,可以添加 `NOT NULL` 指令。 + +用 `name TEXT NOT NULL` 语句来创建。 + +SQLite 中有五种数据类型(实际上是 _储存类别_): + + * `TEXT`:文本字符串 + * `INTEGER`:一个数字 + * `REAL`:一个浮点数(小数位数无限制) + * `BLOB`:二进制数据(例如,.jpeg 或 .webp 图像) + * `NULL`:空值 + +#### 日期和时间戳 + +SQLite 有一个方便的日期和时间戳功能。它本身不是数据类型,而是 SQLite 中的一个函数,它根据所需的格式生成字符串或整数。 在此示例中,我将其保留为默认值。 + +创建此字段的 SQL 语句是:`datestamp DATETIME DEFAULT CURRENT_TIMESTAMP`。 + +### 创建表的语句 + +在 SQLite 中创建此示例表的完整 SQL: + +``` +sqlite> CREATE TABLE +...> IF NOT EXISTS +...> member (name TEXT NOT NULL, +...> datestamp DATETIME DEFAULT CURRENT_TIMESTAMP); +``` + +在此代码示例中,我在语句的分句后按了回车键。以使其更易于阅读。除非以分号(`;`)终止,否则 SQLite 不会运行你的 SQL 语句。 + +你可以使用 SQLite 命令 `.tables` 验证表是否已创建: + +``` +sqlite> .tables +member +``` + +### 查看表中的所有列 + +你可以使用 `PRAGMA` 语句验证表包含哪些列和行: + +``` +sqlite> PRAGMA table_info(member); +0|name|TEXT|1||0 +1|datestamp|DATETIME|0|CURRENT_TIMESTAMP|0 +``` + +### 数据输入 + +你可以使用 `INSERT` 语句将一些示例数据填充到表中: + +``` +> INSERT INTO member (name) VALUES ('Alice'); +> INSERT INTO 
member (name) VALUES ('Bob'); +> INSERT INTO member (name) VALUES ('Carol'); +> INSERT INTO member (name) VALUES ('David'); +``` + +查看表中的数据: + +``` +> SELECT * FROM member; +Alice|2020-12-15 22:39:00 +Bob|2020-12-15 22:39:02 +Carol|2020-12-15 22:39:05 +David|2020-12-15 22:39:07 +``` + +#### 添加多行数据 + +现在创建第二个表: + +``` +> CREATE TABLE IF NOT EXISTS linux ( +...> distro TEXT NOT NULL); +``` + +填充一些示例数据,这一次使用小的 `VALUES` 快捷方式,因此你可以在一个命令中添加多行。关键字 `VALUES` 期望以括号形式列出列表,而用多个逗号分隔多个列表: + +``` +> INSERT INTO linux (distro) +...> VALUES ('Slackware'), ('RHEL'), +...> ('Fedora'),('Debian'); +``` + +### 修改表结构 + +你现在有两个表,但是到目前为止,两者之间没有任何关系。它们每个都包含独立的数据,但是可能你可能需要将第一个表的成员与第二个表中列出的特定项相关联。 + +为此,你可以为第一个表创建一个新列,该列对应于第二个表。由于两个表都设计有唯一标识符(这要归功于 SQLite 的自动创建),所以连接它们的最简单方法是将其中一个的 `rowid` 字段用作另一个的选择器。 + +在第一个表中创建一个新列,以存储第二个表中的值: + +``` +> ALTER TABLE member ADD os INT; +``` + +使用 `linux` 表中的唯一标识符作为 `member` 表中每一条记录中 `os` 字段的值。因为记录已经存在。因此你可以使用 `UPDATE` 语句而不是使用 `INSERT` 语句来更新数据。需要特别注意的是,你首先需要选中特定的一行来然后才能更新其中的某个字段。从句法上讲,这有点相反,更新首先发生,选择匹配最后发生: + +``` +> UPDATE member SET os=1 WHERE name='Alice'; +``` + +对 `member` 表中的其他行重复相同的过程。更新 `os` 字段,为了数据多样性,在四行记录上分配三种不同的发行版(其中一种加倍)。 + +### 联接表 + +现在,这两个表相互关联,你可以使用 SQL 显示关联的数据。数据库中有多种 _联接方式_,但是一旦掌握了基础知识,就可以尝试所有的联接形式。这是一个基本联接,用于将 `member` 表的 `os` 字段中的值与 linux 表的 `rowid` 字段相关联: + +``` +> SELECT * FROM member INNER JOIN linux ON member.os=linux.rowid; +Alice|2020-12-15 22:39:00|1|Slackware +Bob|2020-12-15 22:39:02|3|Fedora +Carol|2020-12-15 22:39:05|3|Fedora +David|2020-12-15 22:39:07|4|Debian +``` + +`os` 和 `rowid` 字段形成了关联。 + +在一个图形应用程序中,你可以想象 `os` 字段是一个下拉选项菜单,其中的值是 `linux` 表中 `distro` 字段中的数据。将相关的数据集通过唯一的字段相关联,可以确保数据的一致性和有效性,并且借助 SQL,你可以在以后动态地关联它们。 + +### 了解更多 + +SQLite 是一个非常有用的自包含的、可移植的开源数据库。学习以交互方式使用它是迈向针对 Web 应用程序进行管理或通过编程语言库使用它的重要的第一步。 + +如果你喜欢 SQLite,也可以尝试由同一位作者 Richard Hipp 博士的 [Fossil][10]。 + +在学习和使用 SQLite 时,有一些常用命令可能会有所帮助,所以请立即下载我们的 [SQLite3 备忘单][3]! 
+ +-------------------------------------------------------------------------------- + +via: https://opensource.com/article/21/2/sqlite3-cheat-sheet + +作者:[Klaatu][a] +选题:[lujun9972][b] +译者:[amwps290](https://github.com/amwps290) +校对:[wxy](https://github.com/wxy) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensource.com/users/klaatu +[b]: https://github.com/lujun9972 +[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/coverimage_cheat_sheet.png?itok=lYkNKieP "Cheat Sheet cover image" +[2]: https://www.redhat.com/sysadmin/yaml-beginners +[3]: https://opensource.com/downloads/sqlite-cheat-sheet +[4]: https://sqlite.org/copyright.html +[5]: https://directory.fsf.org/wiki/License:PublicDomain +[6]: https://www.sqlite.org/purchase/license? +[7]: https://www.sqlite.org/download.html +[8]: https://www.mysql.com/ +[9]: https://mariadb.org/ +[10]: https://opensource.com/article/20/11/fossil diff --git a/published/20210207 Why the success of open source depends on empathy.md b/published/20210207 Why the success of open source depends on empathy.md new file mode 100644 index 0000000000..6905d890a1 --- /dev/null +++ b/published/20210207 Why the success of open source depends on empathy.md @@ -0,0 +1,58 @@ +[#]: collector: (lujun9972) +[#]: translator: (scvoet) +[#]: reviewer: (wxy) +[#]: publisher: (wxy) +[#]: url: (https://linux.cn/article-13120-1.html) +[#]: subject: (Why the success of open source depends on empathy) +[#]: via: (https://opensource.com/article/21/2/open-source-empathy) +[#]: author: (Bronagh Sorota https://opensource.com/users/bsorota) + +为何开源的成功取决于同理心? 
+====== + +> 随着对同理心认识的提高和传播同理心的激励,开源生产力将得到提升,协作者将会聚拢,可以充分激发开源软件开发的活力。 + +![](https://img.linux.net.cn/data/attachment/album/202102/15/110606rc48qf05904m9n7p.jpg) + +开源开发的协调创新精神和社区精神改变了世界。Jim Whitehurst 在[《开放式组织》][2]中解释说,开源的成功源于“将人们视为社区的一份子,从交易思维转变为基于承诺基础的思维方式”。 但是,开源开发模型的核心仍然存在障碍:它经常性地缺乏人类的同理心empathy。 + +同理心是理解或感受他人感受的能力。在开源社区中,面对面的人际互动和协作是很少的。任何经历过 GitHub 拉取请求Pull request议题Issue的开发者都曾收到过来自他们可能从未见过的人的评论,这些人往往身处地球的另一端,而他们的交流也可能同样遥远。现代开源开发就是建立在这种异步、事务性的沟通基础之上。因此,人们在社交媒体平台上所经历的同类型的网络欺凌和其他虐待行为,在开源社区中也不足为奇。 + +当然,并非所有开源交流都会事与愿违。许多人在工作中发展出了尊重并秉持着良好的行为标准。但是很多时候,人们的沟通也常常缺乏常识性的礼仪,他们将人们像机器而非人类一般对待。这种行为是激发开源创新模型全部潜力的障碍,因为它让许多潜在的贡献者望而却步,并扼杀了灵感。 + +### 恶意交流的历史 + +代码审查中存在的敌意言论对开源社区来说并不新鲜,它多年来一直被社区所容忍。开源教父莱纳斯·托瓦尔兹Linus Torvalds经常在代码不符合他的标准时[抨击][3] Linux 社区,并将贡献者赶走。埃隆大学计算机科学教授 Megan Squire 借助[机器学习][4]分析了托瓦尔兹的侮辱行为,发现它们在四年内的数量高达数千次。2018 年,莱纳斯因自己的不良行为而自我放逐,责成自己学习同理心,道歉并为 Linux 社区制定了行为准则。 + +2015 年,[Sage Sharp][5] 虽然在技术上受人尊重,但因其缺乏对个人的尊重,被辞去了 FOSS 女性外展计划中的 Linux 内核协调员一职。 + +PR 审核中存在的贬低性评论对开发者会造成深远的影响。它导致开发者在提交 PR 时产生畏惧感,让他们对预期中的反馈感到恐惧。这吞噬了开发者对自己能力的信心。它逼迫工程师每次都只能追求完美,从而减缓了开发速度,这与许多社区采用的敏捷方法论背道而驰。 + +### 如何缩小开源中的同理心差距? 
+ +通常情况下,冒犯的评论常是无意间的,而通过一些指导,作者则可以学会如何在不带负面情绪的情况下表达意见。GitHub 不会监控议题和 PR 的评论是否有滥用内容,相反,它提供了一些工具,使得社区能够对其内容进行审查。仓库的所有者可以删除评论和锁定对话,所有贡献者可以报告滥用和阻止用户。 + +制定社区行为准则可为所有级别的贡献者提供一个安全且包容的环境,并且能让所有级别的贡献者参与并定义降低协作者之间冲突的过程。 + +我们能够克服开源中存在的同理心问题。面对面的辩论比文字更有利于产生共鸣,所以尽可能选择视频通话。以同理心的方式分享反馈,树立榜样。如果你目睹了一个尖锐的评论,请做一个指导者而非旁观者。如果你是受害者,请大声说出来。在面试候选人时,评估同理心能力,并将同理心能力与绩效评估和奖励挂钩。界定并执行社区行为准则,并管理好你的社区。 + +随着对同理心认识的提高和传播同理心的激励,开源生产力将得到提升,协作者将会聚拢,可以充分激发开源软件开发的活力。 + +-------------------------------------------------------------------------------- + +via: https://opensource.com/article/21/2/open-source-empathy + +作者:[Bronagh Sorota][a] +选题:[lujun9972][b] +译者:[scvoet](https://github.com/scvoet) +校对:[wxy](https://github.com/wxy) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensource.com/users/bsorota +[b]: https://github.com/lujun9972 +[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/practicing-empathy.jpg?itok=-A7fj6NF (Practicing empathy) +[2]: https://www.redhat.com/en/explore/the-open-organization-book +[3]: https://arstechnica.com/information-technology/2013/07/linus-torvalds-defends-his-right-to-shame-linux-kernel-developers/ +[4]: http://flossdata.syr.edu/data/insults/hicssInsultsv2.pdf +[5]: https://en.wikipedia.org/wiki/Sage_Sharp diff --git a/published/20210208 3 open source tools that make Linux the ideal workstation.md b/published/20210208 3 open source tools that make Linux the ideal workstation.md new file mode 100644 index 0000000000..82a71b31f7 --- /dev/null +++ b/published/20210208 3 open source tools that make Linux the ideal workstation.md @@ -0,0 +1,76 @@ +[#]: collector: (lujun9972) +[#]: translator: (wxy) +[#]: reviewer: (wxy) +[#]: publisher: (wxy) +[#]: url: (https://linux.cn/article-13133-1.html) +[#]: subject: (3 open source tools that make Linux the ideal workstation) +[#]: via: (https://opensource.com/article/21/2/linux-workday) +[#]: author: (Seth Kenlon 
https://opensource.com/users/seth) + +让 Linux 成为理想的工作站的 3 个开源工具 +====== + +> Linux 不但拥有你认为所需的一切,还有更多可以让你高效工作的工具。 + +![](https://img.linux.net.cn/data/attachment/album/202102/19/134935qhe252ifbvbpnzxk.jpg) + +在 2021 年,有更多让人们喜欢 Linux 的理由。在这个系列中,我将分享 21 种使用 Linux 的不同理由。今天,我将与你分享为什么 Linux 是你工作的最佳选择。 + +每个人都希望在工作期间提高工作效率。如果你的工作通常涉及到文档、演示文稿和电子表格的工作,那么你可能已经习惯了特定的例行工作。问题在于,这个*惯常的例行工作*通常是由一两个特定的应用程序决定的,无论是某个办公套件还是桌面操作系统。当然,习惯并不意味着它是理想的,但是它往往会毫无疑义地持续存在,甚至影响到企业的运作架构。 + +### 更聪明地工作 + +如今,许多办公应用程序都在云端运行,因此如果你愿意的话,你可以在 Linux 上使用相同的方式。然而,由于许多典型的知名办公应用程序并不符合 Linux 上的文化预期,因此你可能会发现自己受到启发,想去探索其他的选择。正如任何渴望走出“舒适区”的人所知道的那样,这种微妙的打破可能会出奇的有用。很多时候,你不知道自己效率低下,因为你实际上并没有尝试过以不同的方式做事。强迫自己去探索其他方式,你永远不知道会发现什么。你甚至不必完全知道要寻找的内容。 + +### LibreOffice + +Linux(或任何其他平台)上显而易见的开源办公主力之一是 [LibreOffice][2]。它具有多个组件,包括文字处理器、演示软件、电子表格、关系型数据库界面、矢量绘图等。它可以从其他流行的办公应用程序中导入许多文档格式,因此从其他工具过渡到 LibreOffice 通常很容易。 + +然而,LibreOffice 不仅仅是一个出色的办公套件。LibreOffice 支持宏,所以机智的用户可以自动完成重复性任务。它还具有终端命令的功能,因此你可以在不启动 LibreOffice 界面的情况下执行许多任务。 + +想象一下,比如要打开 21 个文档,导航到**文件**菜单,到**导出**或**打印**菜单项,并将文件导出为 PDF 或 EPUB。这至少需要 84 次以上的点击,可能要花费一个小时的时间。相比之下,打开一个文档文件夹,并转换所有文件为 PDF 或 EPUB,只需要执行一个迅速的命令或菜单操作。转换将在后台运行,而你可以处理其他事情。只需要四分之一的时间,可能更少。 + +``` +$ libreoffice --headless --convert-to epub *.docx +``` + +这是一个小改进,是由 Linux 工具集和你可以自定义环境和工作流程的便利性所潜在带来的鼓励。 + +### Abiword 和 Gnumeric + +有时,你并不需要一个大而全的办公套件。如果你喜欢简化你的办公室工作,那么使用一个轻量级和针对特定任务的应用程序可能更好。例如,我大部分时间都是用文本编辑器写文章,因为我知道在转换为 HTML 的过程中,所有的样式都会被丢弃。但有些时候,文字处理器是很有用的,无论是打开别人发给我的文档,还是因为我想用一种快速简单的方法来生成一些样式漂亮的文本。 + +[Abiword][3] 是一款简单的文字处理器,它基本支持流行的文档格式,并具备你所期望的文字处理器的所有基本功能。它并不意味着是一个完整的办公套件,这是它最大的特点。虽然没有太多的选择,但我们仍然处于信息过载的时代,这正是一个完整的办公套件或文字处理器有时会犯的错误。如果你想避免这种情况,那就用一些简单的东西来代替。 + +同样,[Gnumeric][4] 项目提供了一个简单的电子表格应用程序。Gnumeric 避免了任何严格意义上的电子表格所不需要的功能,所以你仍然可以获得强大的公式语法、大量的函数,以及样式和操作单元格的所有选项。我不怎么使用电子表格,所以我发现自己在极少数需要查看或处理分类账中的数据时,对 Gnumeric 相当满意。 + +### Pandoc + +通过专门的命令和文件处理程序,可以最小化。`pandoc` 命令专门用于文件转换。它就像 `libreoffice --headless` 命令一样,只是要处理的文档格式数量是它的十倍。你甚至可以用它来生成演示文稿! 
如果你的工作之一是从一个文档中提取源文本,并以多种格式交付它,那么 Pandoc 是必要的,所以你应该[下载我们的攻略][5]看看。 + +广义上讲,Pandoc 代表的是一种完全不同的工作方式。它让你脱离了办公应用的束缚。它将你从试图将你的想法写成文字,并同时决定这些文字应该使用什么字体的工作中分离出来。在纯文本中工作,然后转换为各种交付格式,让你可以使用任何你想要的应用程序,无论是移动设备上的记事本,还是你碰巧坐在电脑前的简单文本编辑器,或者是云端的文本编辑器。 + +### 寻找替代品 + +Linux 有很多意想不到的替代品。你可以从你正在做的事情中退后一步,分析你的工作流程,评估你所需的结果,并调查那些声称可以完全你的需求的新应用程序来找到它们。 + +改变你所使用的工具、工作流程和日常工作可能会让你迷失方向,特别是当你不知道你要找的到底是什么的时候。但 Linux 的优势在于,你有机会重新评估你在多年的计算机使用过程中潜意识里形成的假设。如果你足够努力地寻找答案,你最终会意识到问题所在。通常,你最终会欣赏你学到的东西。 + +-------------------------------------------------------------------------------- + +via: https://opensource.com/article/21/2/linux-workday + +作者:[Seth Kenlon][a] +选题:[lujun9972][b] +译者:[wxy](https://github.com/wxy) +校对:[wxy](https://github.com/wxy) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensource.com/users/seth +[b]: https://github.com/lujun9972 +[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/laptop_screen_desk_work_chat_text.png?itok=UXqIDRDD (Person using a laptop) +[2]: http://libreoffice.org +[3]: https://www.abisource.com +[4]: http://www.gnumeric.org +[5]: https://opensource.com/article/20/5/pandoc-cheat-sheet diff --git a/published/20210208 Why choose Plausible for an open source alternative to Google Analytics.md b/published/20210208 Why choose Plausible for an open source alternative to Google Analytics.md new file mode 100644 index 0000000000..ed909d31a1 --- /dev/null +++ b/published/20210208 Why choose Plausible for an open source alternative to Google Analytics.md @@ -0,0 +1,80 @@ +[#]: collector: (lujun9972) +[#]: translator: (geekpi) +[#]: reviewer: (wxy) +[#]: publisher: (wxy) +[#]: url: (https://linux.cn/article-13135-1.html) +[#]: subject: (Why choose Plausible for an open source alternative to Google Analytics) +[#]: via: (https://opensource.com/article/21/2/plausible) +[#]: author: (Ben Rometsch https://opensource.com/users/flagsmith) + +为什么选择 Plausible 作为 Google 
Analytics 的开源替代品? +====== + +> Plausible 作为 Google Analytics 的可行、有效的替代方案正在引起用户的关注。 + +![](https://img.linux.net.cn/data/attachment/album/202102/19/233627sb7mvtt7hn93lvvr.jpg) + +替代 Google Analytics 似乎是一个巨大的挑战。实际上,你可以说这听起来似乎不合理(LCTT 译注:Plausible 意即“貌似合理”)。但这正是 [Plausible.io][2] 取得巨大成功的原因,自 2018 年以来已注册了数千名新用户。 + +Plausible 的联合创始人 Uku Taht 和 Marko Saric 最近出现在 [The Craft of Open Source][3] 播客上,谈论了这个项目以及他们是如何: + +* 创建了一个可行的替代 Google Analytics 的方案 +* 在不到两年的时间里获得了如此大的发展势头 +* 通过开源他们的项目实现其目标 + +请继续阅读他们与播客主持人和 Flagsmith 创始人 Ben Rometsch 的对话摘要。 + +### Plausible 是如何开始的 + +2018 年冬天,Uku 开始编写一个他认为急需的项目:一个可行的、有效的 Google Analytics 替代方案。因为他对 Google 产品的发展方向感到失望,而且所有其他数据解决方案似乎都把 Google 当作“数据处理中间人”。 + +Uku 的第一直觉是利用现有的数据库解决方案专注于分析方面的工作。马上,他就遇到了一些挑战。开始尝试使用了 PostgreSQL,这在技术上很幼稚,因为它很快就变得不堪重负,效率低下。因此,他的目标蜕变成了做一个分析产品,可以处理大量的数据点,而且性能不会有明显的下降。简而言之,Uku 成功了,Plausible 现在每月可以收取超过 8000 万条记录。 + +Plausible 的第一个版本于 2019 年夏天发布。2020 年 3 月,Marko 加入,负责项目的传播和营销方面的工作。从那时起,它它的受欢迎程度有了很大的增长。 + +### 为什么要开源? + +Uku 热衷于遵循“独立黑客”的软件开发路线:创建一个产品,把它投放出去,然后看看它如何成长。开源在这方面是有意义的,因为你可以迅速发展一个社区并获得人气。 + +但 Plausible 一开始并不是开源的。Uku 最初担心软件的敏感代码,比如计费代码,但他很快就发布了,因为这对没有 API 令牌的人来说是没有用的。 + +现在,Plausible 是在 [AGPL][4] 下完全开源的,他们选择了 AGPL 而不是 MIT 许可。Uku 解释说,在 MIT 许可下,任何人都可以不受限制地对代码做任何事情。在 AGPL 下,如果有人修改代码,他们必须将他们的修改开源,并将代码回馈给社区。这意味着,大公司不能拿着原始代码在此基础上进行构建,然后获得所有的回报。他们必须共享,使得竞争环境更加公平。例如,如果一家公司想插入他们的计费或登录系统,他们有法律义务发布代码。 + +在播客中,Uku 向我询问了关于 Flagsmith 的授权,目前 Flagsmith 的授权采用 BSD 三句版许可,该许可证是高度开放的,但我即将把一些功能移到更严格的许可后面。到目前为止,Flagsmith 社区已经理解了这一变化,因为他们意识到这将带来更多更好的功能。 + +### Plausible vs. 
Google Analytics + +Uku 说,在他看来,开源的精神是,代码应该是开放的,任何人都可以进行商业使用,并与社区共享,但你可以把一个闭源的 API 模块作为专有附加组件保留下来。这样一来,Plausible 和其他公司就可以通过创建和销售定制的 API 附加许可来满足不同的使用场景。 + +Marko 职位上是一名开发者,但从营销方面来说,他努力让这个项目在 Hacker News 和 Lobster 等网站上得到报道,并建立了 Twitter 帐户以帮助产生动力。这种宣传带来的热潮也意味着该项目在 GitHub 上起飞,从 500 颗星到 4300 颗星。随着流量的增长,Plausible 出现在 GitHub 的趋势列表中,这让其受欢迎程度像滚雪球一样。 + +Marko 还非常注重发布和推广博客文章。这一策略得到了回报,在最初的 6 个月里,有四五篇文章进入了病毒式传播,他利用这些峰值来放大营销信息,加速了增长。 + +Plausible 成长过程中最大的挑战是让人们从 Google Analytics 上转换过来。这个项目的主要目标是创建一个有用、高效、准确的网络分析产品。它还需要符合法规,并为企业和网站访问者提供高度的隐私。 + +Plausible 现在已经在 8000 多个网站上运行。通过与客户的交谈,Uku 估计其中约 90% 的客户运行过 Google Analytics。 + +Plausible 以标准的软件即服务 (SaaS) 订阅模式运行。为了让事情更公平,它按月页面浏览量收费,而不是按网站收费。对于季节性网站来说,这可能会有麻烦,比如说电子商务网站在节假日会激增,或者美国大选网站每四年激增一次。这些可能会导致月度订阅模式下的定价问题,但它通常对大多数网站很好。 + +### 查看播客 + +想要了解更多关于 Uku 和 Marko 如何以惊人的速度发展开源 Plausible 项目,并使其获得商业上的成功,请[收听播客][3],并查看[其他剧集][5],了解更多关于“开源软件社区的来龙去脉”。 + +-------------------------------------------------------------------------------- + +via: https://opensource.com/article/21/2/plausible + +作者:[Ben Rometsch][a] +选题:[lujun9972][b] +译者:[geekpi](https://github.com/geekpi) +校对:[wxy](https://github.com/wxy) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensource.com/users/flagsmith +[b]: https://github.com/lujun9972 +[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/analytics-graphs-charts.png?itok=sersoqbV (Analytics: Charts and Graphs) +[2]: https://plausible.io/ +[3]: https://www.flagsmith.com/podcast/02-plausible +[4]: https://www.gnu.org/licenses/agpl-3.0.en.html +[5]: https://www.flagsmith.com/podcast \ No newline at end of file diff --git a/published/20210209 Viper Browser- A Lightweight Qt5-based Web Browser With A Focus on Privacy and Minimalism.md b/published/20210209 Viper Browser- A Lightweight Qt5-based Web Browser With A Focus on Privacy and Minimalism.md new file mode 100644 index 0000000000..78f9f8bc07 --- /dev/null +++ 
b/published/20210209 Viper Browser- A Lightweight Qt5-based Web Browser With A Focus on Privacy and Minimalism.md @@ -0,0 +1,106 @@ +[#]: collector: (lujun9972) +[#]: translator: (geekpi) +[#]: reviewer: (wxy) +[#]: publisher: (wxy) +[#]: url: (https://linux.cn/article-13139-1.html) +[#]: subject: (Viper Browser: A Lightweight Qt5-based Web Browser With A Focus on Privacy and Minimalism) +[#]: via: (https://itsfoss.com/viper-browser/) +[#]: author: (Ankush Das https://itsfoss.com/author/ankush/) + +Viper 浏览器:一款注重隐私和简约的轻量级 Qt5 浏览器 +====== + +![](https://img.linux.net.cn/data/attachment/album/202102/21/110148d7r3hlurczc1ci73.jpg) + +> Viper 浏览器是一个基于 Qt 的浏览器,它提供了简单易用的用户体验,同时考虑到隐私问题。 + +虽然大多数流行的浏览器都运行在 Chromium 之上,但像 [Firefox][1]、[Beaker 浏览器][2]以及其他一些 [chrome 替代品][3]这样独特的替代品不应该停止存在。 + +尤其是考虑到谷歌最近可能想到从 Chromium 中剥离[谷歌浏览器特有的功能][4],并给出了滥用的借口。 + +在寻找更多的 Chrome 替代品时,我在 [Mastodon][6] 上看到了一个有趣的项目 “[Viper 浏览器][5]”。 + +### Viper 浏览器:一个基于 Qt5 的开源浏览器 + +**注意**:Viper 浏览器是一个只有几个贡献者的相当新的项目。它缺乏某些功能,我将在下文提及。 + +Viper 是一款有趣的 Web 浏览器,在利用 [QtWebEngine][8] 的同时,它专注于成为一个强大而又轻巧的选择。 + +QtWebEngine 借用了 Chromium 的代码,但它不包括连接到 Google 平台的二进制文件和服务。 + +我花了一些时间使用它并进行一些日常浏览活动,我必须说,我对它相当感兴趣。不仅仅是因为它是一个简单易用的东西(浏览器可以那么复杂),而且它还专注于增强你的隐私,为你提供添加不同的广告阻止选项以及一些有用的选项。 + +![][9] + +虽然我认为它并不是为所有人准备的,但还是值得一看的。在你继续尝试之前,让我简单介绍一下它的功能。 + +### Viper 浏览器的功能 + +![][10] + +我将列出一些你会发现有用的关键功能: + +* 管理 cookies 的能力 +* 多个预设选项以选择不同的广告屏蔽器网络。 +* 简单且易于使用 +* 隐私友好的默认搜索引擎 - [Startpage][11] (你可以更改) +* 能够添加用户脚本 +* 能够添加新的 user-agent +* 禁用 JavaScript 的选项 +* 能够阻止图像加载 + +除了这些亮点之外,你还可以轻松地调整隐私设置,以删除你的历史记录、清理已有 cookies,以及一些更多的选项。 + +![][12] + +### 在 Linux 上安装 Viper 浏览器 + +它只是在[发布页][13]提供了一个 AppImage 文件,你可以利用它在任何 Linux 发行版上进行测试。 + +如果你需要帮助,你也可以参考我们的[在 Linux 上使用 AppImage 文件][14]指南。如果你好奇,你可以在 [GitHub][5] 上探索更多关于它的内容。 + +- [Viper 浏览器][5] + +### 我对使用 Viper 浏览器的看法 + +我不认为这是一个可以立即取代你当前浏览器的东西,但如果你有兴趣测试尝试提供 Chrome 替代品的新项目,这肯定是其中之一。 + +当我试图登录我的谷歌账户时,它阻止了我,说它可能是一个不安全的浏览器或不支持的浏览器。因此,如果你依赖你的谷歌帐户,这是一个令人失望的消息。 + +但是,其他社交媒体平台也可以与 YouTube 一起正常运行(无需登录)。不支持 
Netflix,但总体上浏览体验是相当快速和可用的。 + +你可以安装用户脚本,但 Chrome 扩展还不支持。当然,这要么是有意为之,要么是在开发过程中特别考虑到它是一款隐私友好型的网络浏览器。 + +### 总结 + +考虑到这是一个鲜为人知但对某些人来说很有趣的东西,你对我们有什么建议吗? 是一个值得关注的开源项目么? + +请在下面的评论中告诉我。 + +-------------------------------------------------------------------------------- + +via: https://itsfoss.com/viper-browser/ + +作者:[Ankush Das][a] +选题:[lujun9972][b] +译者:[geekpi](https://github.com/geekpi) +校对:[wxy](https://github.com/wxy) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://itsfoss.com/author/ankush/ +[b]: https://github.com/lujun9972 +[1]: https://www.mozilla.org/en-US/firefox/new/ +[2]: https://itsfoss.com/beaker-browser-1-release/ +[3]: https://itsfoss.com/open-source-browsers-linux/ +[4]: https://www.bleepingcomputer.com/news/google/google-to-kill-chrome-sync-feature-in-third-party-browsers/ +[5]: https://github.com/LeFroid/Viper-Browser +[6]: https://mastodon.social/web/accounts/199851 +[7]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2021/02/viper-browser.png?resize=800%2C583&ssl=1 +[8]: https://wiki.qt.io/QtWebEngine +[9]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2021/02/viper-browser-setup.jpg?resize=793%2C600&ssl=1 +[10]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2021/02/viper-preferences.jpg?resize=800%2C660&ssl=1 +[11]: https://www.startpage.com +[12]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2021/02/viper-browser-tools.jpg?resize=800%2C262&ssl=1 +[13]: https://github.com/LeFroid/Viper-Browser/releases +[14]: https://itsfoss.com/use-appimage-linux/ diff --git a/published/20210215 A practical guide to JavaScript closures.md b/published/20210215 A practical guide to JavaScript closures.md new file mode 100644 index 0000000000..cebf757a5a --- /dev/null +++ b/published/20210215 A practical guide to JavaScript closures.md @@ -0,0 +1,136 @@ +[#]: collector: (lujun9972) +[#]: translator: (wxy) +[#]: reviewer: (wxy) +[#]: publisher: (wxy) +[#]: url: 
(https://linux.cn/article-13140-1.html) +[#]: subject: (A practical guide to JavaScript closures) +[#]: via: (https://opensource.com/article/21/2/javascript-closures) +[#]: author: (Nimisha Mukherjee https://opensource.com/users/nimisha) + +JavaScript 闭包实践 +====== + +> 通过深入了解 JavaScript 的高级概念之一:闭包,更好地理解 JavaScript 代码的工作和执行方式。 + +![](https://img.linux.net.cn/data/attachment/album/202102/21/162941ogyb74z3ahfbfe35.jpg) + +在《[JavaScript 如此受欢迎的 4 个原因][2]》中,我提到了一些高级 JavaScript 概念。在本文中,我将深入探讨其中的一个概念:闭包closure。 + +根据 [Mozilla 开发者网络][3](MDN),“闭包是将一个函数和对其周围的状态(词法环境)的引用捆绑在一起(封闭)的组合。”简而言之,这意味着在一个函数内部的函数可以访问其外部(父)函数的变量。 + +为了更好地理解闭包,可以看看作用域及其执行上下文。 + +下面是一个简单的代码片段: + +``` +var hello = "Hello"; + +function sayHelloWorld() { + var world = "World"; +    function wish() { +        var year = "2021"; +        console.log(hello + " " + world + " "+ year); + } + wish(); +} +sayHelloWorld(); +``` + +下面是这段代码的执行上下文: + +![JS 代码的执行上下文][4] + +每次创建函数时(在函数创建阶段)都会创建闭包。每个闭包有三个作用域。 + + * 本地作用域(自己的作用域) + * 外部函数范围 + * 全局范围 + +我稍微修改一下上面的代码来演示一下闭包: + +``` +var hello = "Hello"; + +var sayHelloWorld = function() { + var world = "World"; +    function wish() { +        var year = "2021"; +        console.log(hello + " " + world + " "+ year); + } + return wish; +} +var callFunc = sayHelloWorld(); +callFunc(); +``` + +内部函数 `wish()` 在执行之前就从外部函数返回。这是因为 JavaScript 中的函数形成了**闭包**。 + + * 当 `sayHelloWorld` 运行时,`callFunc` 持有对函数 `wish` 的引用。 + * `wish` 保持对其周围(词法)环境的引用,其中存在变量 `world`。 + +### 私有变量和方法 + +本身,JavaScript 不支持创建私有变量和方法。闭包的一个常见和实用的用途是模拟私有变量和方法,并允许数据隐私。在闭包范围内定义的方法是有特权的。 + +这个代码片段捕捉了 JavaScript 中闭包的常用编写和使用方式: + +``` +var resourceRecord = function(myName, myAddress) { +  var resourceName = myName; +  var resourceAddress = myAddress; +  var accessRight = "HR"; +  return { +    changeName: function(updateName, privilege) { +      // only HR can change the name +      if (privilege === accessRight ) { +        resourceName = updateName; +        return true; +      } else { +        return false; +      } +    
},   +    changeAddress: function(newAddress) { +      // any associate can change the address +      resourceAddress = newAddress;           +    },   +    showResourceDetail: function() { +      console.log ("Name:" + resourceName + " ; Address:" + resourceAddress); +    } +  } +} +// Create first record +var resourceRecord1 = resourceRecord("Perry","Office"); +// Create second record +var resourceRecord2 = resourceRecord("Emma","Office"); +// Change the address on the first record +resourceRecord1.changeAddress("Home"); +resourceRecord1.changeName("Perry Berry", "Associate"); // Output is false as only an HR can change the name +resourceRecord2.changeName("Emma Freeman", "HR"); // Output is true as HR changes the name +resourceRecord1.showResourceDetail(); // Output - Name:Perry ; Address:Home +resourceRecord2.showResourceDetail(); // Output - Name:Emma Freeman ; Address:Office +``` + +资源记录(`resourceRecord1` 和 `resourceRecord2`)相互独立。每个闭包通过自己的闭包引用不同版本的 `resourceName` 和 `resourceAddress` 变量。你也可以应用特定的规则来处理私有变量,我添加了一个谁可以修改 `resourceName` 的检查。 + +### 使用闭包 + +理解闭包是很重要的,因为它可以更深入地了解变量和函数之间的关系,以及 JavaScript 代码如何工作和执行。 + +-------------------------------------------------------------------------------- + +via: https://opensource.com/article/21/2/javascript-closures + +作者:[Nimisha Mukherjee][a] +选题:[lujun9972][b] +译者:[wxy](https://github.com/wxy) +校对:[wxy](https://github.com/wxy) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensource.com/users/nimisha +[b]: https://github.com/lujun9972 +[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/programming-code-keyboard-laptop-music-headphones.png?itok=EQZ2WKzy (Woman programming) +[2]: https://linux.cn/article-12830-1.html +[3]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Closures +[4]: https://opensource.com/sites/default/files/uploads/execution-context.png (Execution context for JS code) +[5]: 
https://creativecommons.org/licenses/by-sa/4.0/ diff --git a/published/20210217 Use this bootable USB drive on Linux to rescue Windows users.md b/published/20210217 Use this bootable USB drive on Linux to rescue Windows users.md new file mode 100644 index 0000000000..73d93d1246 --- /dev/null +++ b/published/20210217 Use this bootable USB drive on Linux to rescue Windows users.md @@ -0,0 +1,82 @@ +[#]: collector: (lujun9972) +[#]: translator: (geekpi) +[#]: reviewer: (wxy) +[#]: publisher: (wxy) +[#]: url: (https://linux.cn/article-13143-1.html) +[#]: subject: (Use this bootable USB drive on Linux to rescue Windows users) +[#]: via: (https://opensource.com/article/21/2/linux-woeusb) +[#]: author: (Don Watkins https://opensource.com/users/don-watkins) + +如何在 Linux 中创建 USB 启动盘来拯救 Windows 用户 +====== + +> WoeUSB 可以在 Linux 中制作 Windows 启动盘,并帮助你的朋友解锁他们罢工的机器。 + +![](https://img.linux.net.cn/data/attachment/album/202102/22/143829x0gm1gkmw1yb1zu8.jpg) + +人们经常要求我帮助他们恢复被锁死或损坏的 Windows 电脑。有时,我可以使用 Linux USB 启动盘来挂载 Windows 分区,然后从损坏的系统中传输和备份文件。 + +有的时候,客户丢失了他们的密码或以其他方式锁死了他们的登录账户凭证。解锁账户的一种方法是创建一个 Windows 启动盘来修复计算机。微软允许你从网站下载 Windows 的副本,并提供创建 USB 启动盘的工具。但要使用它们,你需要一台 Windows 电脑,这意味着,作为一个 Linux 用户,我需要其他方法来创建一个 DVD 或 USB 启动盘。我发现在 Linux 上创建 Windows USB 很困难。我的可靠工具,如 [Etcher.io][2]、[Popsicle][3](适用于 Pop!_OS)和 [UNetbootin][4],或者从命令行使用 `dd` 来创建可启动媒体,都不是很成功。 + +直到我发现了 [WoeUSB-ng][5],一个 [GPL 3.0][6] 许可的 Linux 工具,它可以为 Windows Vista、7、8 和 10 创建一个 USB 启动盘。这个开源软件有两个程序:一个命令行工具和一个图形用户界面 (GUI) 版本。 + +### 安装 WoeUSB-ng + +GitHub 仓库包含了在 Arch、Ubuntu、Fedora 或使用 pip3 [安装][7] WoeUSB-ng 的说明。 + +如果你是受支持的 Linux 发行版,你可以使用你的包管理器安装 WoeUSB-ng。或者,你可以使用 Python 的包管理器 [pip][8] 来安装应用程序。这在任何 Linux 发行版中都是通用的。这些方法在功能上没有区别,所以使用你熟悉的任何一种。 + +我运行的是 Pop!_OS,它是 Ubuntu 的衍生版本,但由于对 Python 很熟悉,我选择了 pip3 安装: + +``` +$ sudo pip3 install WoeUSB-ng +``` + +### 创建一个启动盘 + +你可以从命令行或 GUI 版本使用 WoeUSB-ng。 + +要从命令行创建一个启动盘,语法要求命令包含 Windows ISO 文件的路径和一个设备。(本例中是 `/dev/sdX`。使用 `lsblk` 命令来确定你的驱动器) + +``` +$ sudo woeusb --device Windows.iso 
/dev/sdX +``` + +你也可以启动该程序,以获得简单易用的界面。在 WoeUSB-ng 应用程序窗口中,找到 `Windows.iso` 文件并选择它。选择你的 USB 目标设备(你想变成 Windows 启动盘的驱动器)。这将会删除这个驱动器上的所有信息,所以要谨慎选择,然后仔细检查(再三检查)你的选择! + +当你确认正确选择目标驱动器后,点击 **Install** 按钮。 + +![WoeUSB-ng UI][9] + +创建该介质需要 5 到 10 分钟,这取决于你的 Linux 电脑的处理器、内存、USB 端口速度等。请耐心等待。 + +当这个过程完成并验证后,你将有可用的 Windows USB 启动盘,以帮助其他人修复 Windows 计算机。 + +### 帮助他人 + +开源就是为了帮助他人。很多时候,你可以通过使用基于 Linux 的[系统救援 CD][11] 来帮助 Windows 用户。但有时,唯一的帮助方式是直接从 Windows 中获取,而 WoeUSB-ng 是一个很好的开源工具,它可以让这成为可能。 + +-------------------------------------------------------------------------------- + +via: https://opensource.com/article/21/2/linux-woeusb + +作者:[Don Watkins][a] +选题:[lujun9972][b] +译者:[geekpi](https://github.com/geekpi) +校对:[wxy](https://github.com/wxy) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensource.com/users/don-watkins +[b]: https://github.com/lujun9972 +[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/puzzle_computer_solve_fix_tool.png?itok=U0pH1uwj (Puzzle pieces coming together to form a computer screen) +[2]: https://etcher.io/ +[3]: https://github.com/pop-os/popsicle +[4]: https://github.com/unetbootin/unetbootin +[5]: https://github.com/WoeUSB/WoeUSB-ng +[6]: https://github.com/WoeUSB/WoeUSB-ng/blob/master/COPYING +[7]: https://github.com/WoeUSB/WoeUSB-ng#installation +[8]: https://opensource.com/downloads/pip-cheat-sheet +[9]: https://opensource.com/sites/default/files/uploads/woeusb-ng-gui.png (WoeUSB-ng UI) +[10]: https://creativecommons.org/licenses/by-sa/4.0/ +[11]: https://www.system-rescue.org/ diff --git a/published/20210218 5 must-have Linux media players.md b/published/20210218 5 must-have Linux media players.md new file mode 100644 index 0000000000..1125465ae2 --- /dev/null +++ b/published/20210218 5 must-have Linux media players.md @@ -0,0 +1,90 @@ +[#]: collector: (lujun9972) +[#]: translator: (geekpi) +[#]: reviewer: (wxy) +[#]: publisher: (wxy) +[#]: url: 
(https://linux.cn/article-13148-1.html) +[#]: subject: (5 must-have Linux media players) +[#]: via: (https://opensource.com/article/21/2/linux-media-players) +[#]: author: (Seth Kenlon https://opensource.com/users/seth) + +5 款值得拥有的 Linux 媒体播放器 +====== + +> 无论是电影还是音乐,Linux 都能为你提供一些优秀的媒体播放器。 + +![](https://img.linux.net.cn/data/attachment/album/202102/24/101806k2g26zfcamiffhlb.jpg) + +在 2021 年,人们有更多的理由喜欢 Linux。在这个系列中,我将分享 21 个使用 Linux 的不同理由。媒体播放是我最喜欢使用 Linux 的理由之一。 + +你可能更喜欢黑胶唱片和卡带,或者录像带和激光影碟,但你很有可能还是在数字设备上播放你喜欢的大部分媒体。电脑上的媒体有一种无法比拟的便利性,这主要是因为我们大多数人一天中的大部分时间都在电脑附近。许多现代电脑用户并没有过多考虑有哪些应用可以用来听音乐和看电影,因为大多数操作系统都默认提供了媒体播放器,或者因为他们订阅了流媒体服务,因此并没有把媒体文件放在自己身边。但如果你的口味超出了通常的热门音乐和节目列表,或者你以媒体工作为乐趣或利润,那么你就会有你想要播放的本地文件。你可能还对现有用户界面有意见。在 Linux 上,*选择*是一种权利,因此你可以选择无数种播放媒体的方式。 + +以下是我在 Linux 上必备的五个媒体播放器。 + +### 1、mpv + +![mpv interface][2] + +一个现代、干净、简约的媒体播放器。得益于它的 Mplayer、[ffmpeg][3] 和 `libmpv` 后端,它可以播放你可能会扔给它的任何类型媒体。我说“扔给它”,是因为播放一个文件的最快捷、最简单的方法就是把文件拖到 mpv 窗口中。如果你拖动多个文件,mpv 会为你创建一个播放列表。 + +当你把鼠标放在上面时,它提供了直观的覆盖控件,但最好还是通过键盘操作界面。例如,`Alt+1` 会使 mpv 窗口变成全尺寸,而 `Alt+0` 会使其缩小到一半大小。你可以使用 `,` 和 `.` 键逐帧浏览视频,`[` 和 `]` 键调整播放速度,`/` 和 `*` 调整音量,`m` 静音等等。这些主控功能可以让你快速调整,一旦你学会了这些功能,你几乎可以在想到要调整播放的时候快速调整。无论是工作还是娱乐,mpv 都是我播放媒体的首选。 + +### 2、Kaffeine 和 Rhythmbox + +![Kaffeine interface][4] + +KDE Plasma 和 GNOME 桌面都提供了音乐应用([Kaffeine][5] 和 [Rhythmbox][]),可以作为你个人音乐库的前端。它们会让你为你的音乐文件提供一个标准的位置,然后扫描你的音乐收藏,这样你就可以根据专辑、艺术家等来浏览。这两款软件都很适合那些你无法完全决定你想听什么,而又想用一种简单的方式来浏览现有音乐的时候。 + +[Kaffeine][5] 其实不仅仅是一个音乐播放器。它可以播放视频文件、DVD、CD,甚至数字电视(假设你有输入信号)。我已经整整几天没有关闭 Kaffeine 了,因为不管我是想听音乐还是看电影,Kaffeine 都能让我轻松地开始播放。 + +### 3、Audacious + +![Audacious interface][6] + +[Audacious][7] 媒体播放器是一个轻量级的应用,它可以播放你的音乐文件(甚至是 MIDI 文件)或来自互联网的流媒体音乐。对我来说,它的主要吸引力在于它的模块化架构,它鼓励开发插件。这些插件可以播放几乎所有你能想到的音频媒体格式,用图形均衡器调整声音,应用效果,甚至可以重塑整个应用,改变其界面。 + +很难把 Audacious 仅仅看作是一个应用,因为它很容易让它变成你想要的应用。无论你是 Linux 上的 XMMS、Windows 上的 WinAmp,还是任何其他替代品,你大概都可以用 Audacious 来近似它们。Audacious 还提供了一个终端命令,`audtool`,所以你可以从命令行控制一个正在运行的 Audacious 实例,所以它甚至可以近似于一个终端媒体播放器! 
+ +### 4、VLC + +![vlc interface][8] + +[VLC][9] 播放器可能是向用户介绍开源的应用之首。作为一款久经考验的多媒体播放器,VLC 可以播放音乐、视频、光盘。它还可以通过网络摄像头或麦克风进行流式传输和录制,从而使其成为捕获快速视频或语音消息的简便方法。像 mpv 一样,大多数情况下都可以通过按单个字母的键盘操作来控制它,但它也有一个有用的右键菜单。它可以将媒体从一种格式转换为另一种格式、创建播放列表、跟踪你的媒体库等。VLC 是最好的,大多数播放器甚至无法在功能上与之匹敌。无论你在什么平台上,它都是一款必备的应用。 + +### 5、Music player daemon + +![mpd with the ncmpc interface][10] + +[music player daemon(mpd)][11] 是一个特别有用的播放器,因为它在服务器上运行。这意味着你可以在 [树莓派][12] 上启动它,然后让它处于空闲状态,这样你就可以在任何时候播放一首曲子。mpd 的客户端有很多,但我用的是 [ncmpc][13]。有了 ncmpc 或像 [netjukebox][14] 这样的 Web 客户端,我可以从本地主机或远程机器上连接 mpd,选择一张专辑,然后从任何地方播放它。 + +### Linux 上的媒体播放 + +在 Linux 上播放媒体是很容易的,这要归功于它出色的编解码器支持和惊人的播放器选择。我只提到了我最喜欢的五个播放器,但还有更多的播放器供你探索。试试它们,找到最好的,然后坐下来放松一下。 + +-------------------------------------------------------------------------------- + +via: https://opensource.com/article/21/2/linux-media-players + +作者:[Seth Kenlon][a] +选题:[lujun9972][b] +译者:[geekpi](https://github.com/geekpi) +校对:[wxy](https://github.com/wxy) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensource.com/users/seth +[b]: https://github.com/lujun9972 +[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/LIFE_film.png?itok=aElrLLrw (An old-fashioned video camera) +[2]: https://opensource.com/sites/default/files/mpv_0.png +[3]: https://opensource.com/article/17/6/ffmpeg-convert-media-file-formats +[4]: https://opensource.com/sites/default/files/kaffeine.png +[5]: https://apps.kde.org/en/kaffeine +[6]: https://opensource.com/sites/default/files/audacious.png +[7]: https://audacious-media-player.org/ +[8]: https://opensource.com/sites/default/files/vlc_0.png +[9]: http://videolan.org +[10]: https://opensource.com/sites/default/files/mpd-ncmpc.png +[11]: https://www.musicpd.org/ +[12]: https://opensource.com/article/21/1/raspberry-pi-hifi +[13]: https://www.musicpd.org/clients/ncmpc/ +[14]: http://www.netjukebox.nl/ +[15]: https://wiki.gnome.org/Apps/Rhythmbox \ No newline 
at end of file diff --git a/published/20210219 7 Ways to Customize Cinnamon Desktop in Linux -Beginner-s Guide.md b/published/20210219 7 Ways to Customize Cinnamon Desktop in Linux -Beginner-s Guide.md new file mode 100644 index 0000000000..5fd5bdcd1a --- /dev/null +++ b/published/20210219 7 Ways to Customize Cinnamon Desktop in Linux -Beginner-s Guide.md @@ -0,0 +1,132 @@ +[#]: collector: (lujun9972) +[#]: translator: (geekpi) +[#]: reviewer: (wxy) +[#]: publisher: (wxy) +[#]: url: (https://linux.cn/article-13145-1.html) +[#]: subject: (7 Ways to Customize Cinnamon Desktop in Linux [Beginner’s Guide]) +[#]: via: (https://itsfoss.com/customize-cinnamon-desktop/) +[#]: author: (Dimitrios https://itsfoss.com/author/dimitrios/) + +初级:在 Linux 中自定义 Cinnamon 桌面的 7 种方法 +====== + +![](https://img.linux.net.cn/data/attachment/album/202102/23/095703u8t88l0rpf4o4p5o.jpg) + +Linux Mint 是最好的 [适合初学者的 Linux 发行版][1] 之一。尤其是想 [转战 Linux][2] 的 Windows 用户,会发现它的 Cinnamon 桌面环境非常熟悉。 + +Cinnamon 给人一种传统的桌面体验,很多用户喜欢它的样子。这并不意味着你必须满足于它提供的东西。Cinnamon 提供了几种自定义桌面的方式。 + +在阅读了 [MATE][3] 和 [KDE 自定义][4] 指南后,很多读者要求为 Linux Mint Cinnamon 也提供类似的教程。因此,我创建了这个关于调整 Cinnamon 桌面的外观和感觉的基本指南。 + +### 7 种自定义 Cinnamon 桌面的不同方法 + +在本教程中,我使用的是 [Linux Mint Debian Edition][5](LMDE 4)。你可以在任何运行 Cinnamon 的 Linux 发行版上使用这篇文章的方法。如果你不确定,这里有 [如何检查你使用的桌面环境][6]。 + +当需要改变 Cinnamon 桌面外观时,我发现非常简单,因为只需点击两下即可。如下图所示,点击菜单图标,然后点击“设置”。 + +![][7] + +所有的外观设置都放在该窗口的顶部。在“系统设置”窗口上的一切都显得整齐划一。 + +![][8] + +#### 1、效果 + +效果选项简单,不言自明,一目了然。你可以开启或关闭桌面不同元素的特效,或者通过改变特效风格来改变窗口过渡。如果你想改变效果的速度,可以通过自定义标签来实现。 + +![][9] + +#### 2、字体选择 + +在这个部分,你可以区分整个系统中使用的字体大小和类型,通过字体设置,你可以对外观进行微调。 + +![][10] + +#### 3、主题和图标 + +我曾经做了几年的 Linux Mint 用户,一个原因是你不需要到处去改变你想要的东西。窗口管理器、图标和面板定制都在一个地方! 
+ +你可以将面板改成深色或浅色,窗口边框也可以根据你要的而改变。默认的 Cinnamon 外观设置在我眼里是最好的,我甚至在测试 [Ubuntu Cinnamon Remix][11] 时也应用了一模一样的设置,不过是橙色的。 + +![][12] + +#### 4、Cinnamon 小程序 + +Cinnamon 小程序是所有包含在底部面板的元素,如日历或键盘布局切换器。在管理选项卡中,你可以添加/删除已经安装的小程序。 + +你一定要探索一下可以下载的小程序,如天气和 [CPU 温度][13]指示小程序是我额外选择的。 + +![][14] + +#### 5、Cinnamon Desklets + +Cinnamon Desklets 是可以直接放置在桌面上的应用。和其他所有的自定义选项一样,Desklets 可以从设置菜单中访问,各种各样的选择可以吸引任何人的兴趣。谷歌日历是一个方便的应用,可以直接在桌面上跟踪你的日程安排。 + +![][15] + +#### 6、桌面壁纸 + +要改变 Cinnamon 桌面的背景,只需在桌面上点击右键,选择“改变桌面背景”。它将打开一个简单易用的窗口,在左侧列出了可用的背景系统文件夹,右侧有每个文件夹内的图片预览。 + +![][16] + +你可以通过点击加号(`+`)并选择路径来添加自己的文件夹。在“设置”选项卡中,你可以选择你的背景是静态还是幻灯片,以及背景在屏幕上的位置。 + +![][17] + +#### 7、自定义桌面屏幕上的内容 + +背景并不是你唯一可以改变的桌面元素。如果你在桌面上点击右键,然后点击“自定义”,你可以找到更多的选项。 + +![][18] + +你可以改变图标的大小,将摆放方式从垂直改为水平,并改变它们在两个轴上的间距。如果你不喜欢你所做的,点击重置网格间距回到默认值。 + +![][19] + +此外,如果你点击“桌面设置”,将显示更多的选项。你可以禁用桌面上的图标,将它们放在主显示器或副显示器上,甚至两个都可以。如你所见,你可以选择一些图标出现在桌面上。 + +![][20] + +### 总结 + +Cinnamon 桌面是最好的选择之一,尤其是当你正在 [从 windows 切换到 Linux][21] 的时候,同时对一个简单而优雅的桌面追求者也是如此。 + +Cinnamon 桌面非常稳定,在我手上从来没有崩溃过,这也是在各种 Linux 发行版之间,我使用它这么久的主要原因之一。 + +我没有讲得太详细,但给了你足够的指导,让你自己去探索设置。欢迎反馈你对 Cinnamon 的定制。 + +-------------------------------------------------------------------------------- + +via: https://itsfoss.com/customize-cinnamon-desktop/ + +作者:[Dimitrios][a] +选题:[lujun9972][b] +译者:[geekpi](https://github.com/geekpi) +校对:[wxy](https://github.com/wxy) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://itsfoss.com/author/dimitrios/ +[b]: https://github.com/lujun9972 +[1]: https://itsfoss.com/best-linux-beginners/ +[2]: https://itsfoss.com/reasons-switch-linux-windows-xp/ +[3]: https://itsfoss.com/ubuntu-mate-customization/ +[4]: https://itsfoss.com/kde-customization/ +[5]: https://itsfoss.com/lmde-4-release/ +[6]: https://itsfoss.com/find-desktop-environment/ +[7]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2021/02/6-Cinnamon-settings.png?resize=800%2C680&ssl=1 +[8]: 
https://i0.wp.com/itsfoss.com/wp-content/uploads/2021/02/7-Cinnamon-Settings.png?resize=800%2C630&ssl=1 +[9]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2021/02/8-cinnamon-effects.png?resize=800%2C630&ssl=1 +[10]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2021/02/11-font-selection.png?resize=800%2C650&ssl=1 +[11]: https://itsfoss.com/ubuntu-cinnamon-remix-review/ +[12]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2021/02/10-cinnamon-themes-and-icons.png?resize=800%2C630&ssl=1 +[13]: https://itsfoss.com/check-laptop-cpu-temperature-ubuntu/ +[14]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2021/02/12-cinnamon-applets.png?resize=800%2C630&ssl=1 +[15]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2021/02/13-cinnamon-desklets.png?resize=800%2C630&ssl=1 +[16]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2021/02/1.-Cinnamon-change-desktop-background.png?resize=800%2C400&ssl=1 +[17]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2021/02/2-Cinnamon-change-desktop-background.png?resize=800%2C630&ssl=1 +[18]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2021/02/1.-desktop-additional-customization.png?resize=800%2C400&ssl=1 +[19]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2021/02/4-desktop-additional-customization.png?resize=800%2C480&ssl=1 +[20]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2021/02/5-desktop-additional-customization.png?resize=800%2C630&ssl=1 +[21]: https://itsfoss.com/guide-install-linux-mint-16-dual-boot-windows/ diff --git a/published/20210219 Unlock your Chromebook-s hidden potential with Linux.md b/published/20210219 Unlock your Chromebook-s hidden potential with Linux.md new file mode 100644 index 0000000000..7612fa716f --- /dev/null +++ b/published/20210219 Unlock your Chromebook-s hidden potential with Linux.md @@ -0,0 +1,127 @@ +[#]: collector: (lujun9972) +[#]: translator: (max27149) +[#]: reviewer: (wxy) +[#]: publisher: (wxy) +[#]: url: (https://linux.cn/article-13149-1.html) +[#]: subject: 
(Unlock your Chromebook's hidden potential with Linux) +[#]: via: (https://opensource.com/article/21/2/chromebook-linux) +[#]: author: (Seth Kenlon https://opensource.com/users/seth) + +用 Linux 释放你 Chromebook 的隐藏潜能 +====== + +> Chromebook 是令人惊叹的工具,但通过解锁它内部的 Linux 系统,你可以让它变得更加不同凡响。 + +![](https://img.linux.net.cn/data/attachment/album/202102/24/114254qstdq1dhj288jh1z.jpg) + +Google Chromebook 运行在 Linux 系统之上,但通常它运行的 Linux 系统对普通用户而言,并不是十分容易就能访问得到。Linux 被用作基于开源的 [Chromium OS][2] 运行时环境的后端技术,然后 Google 将其转换为 Chrome OS。大多数用户体验到的界面是一个电脑桌面,可以用来运行 Chrome 浏览器及其应用程序。然而,在这一切的背后,有一个 Linux 系统等待被你发现。如果你知道怎么做,你可以在 Chromebook 上启用 Linux,把一台可能价格相对便宜、功能相对基础的电脑变成一个严谨的笔记本,获取数百个应用和你需要的所有能力,使它成为一个通用计算机。 + +### 什么是 Chromebook? + +Chromebook 是专为 Chrome OS 创造的笔记本电脑,它本身专为特定的笔记本电脑型号而设计。Chrome OS 不是像 Linux 或 Windows 这样的通用操作系统,而是与 Android 或 iOS 有更多的共同点。如果你决定购买 Chromebook,你会发现有许多不同制造商的型号,包括惠普、华硕和联想等等。有些是为学生而设计,而另一些是为家庭或商业用户而设计的。主要的区别通常分别集中在电池功率或处理能力上。 + +无论你决定买哪一款,Chromebook 都会运行 Chrome OS,并为你提供现代计算机所期望的基本功能。有连接到互联网的网络管理器、蓝牙、音量控制、文件管理器、桌面等等。 + +![Chrome OS desktop][3] + +*Chrome OS 桌面截图* + +不过,想从这个简单易用的操作系统中获得更多,你只需要激活 Linux。 + +### 启用 Chromebook 的开发者模式 + +如果我让你觉得启用 Linux 看似简单,那是因为它确实简单但又有欺骗性。之所以说有欺骗性,是因为在启用 Linux 之前,你*必须*备份数据。 + +这个过程虽然简单,但它确实会将你的计算机重置回出厂默认状态。你必须重新登录到你的笔记本电脑中,如果你有数据存储在 Google 云盘帐户上,你必须得把它重新同步回计算机中。启用 Linux 还需要为 Linux 预留硬盘空间,因此无论你的 Chromebook 硬盘容量是多少,都将减少一半或四分之一(自主选择)。 + +在 Chromebook 上接入 Linux 仍被 Google 视为测试版功能,因此你必须选择使用开发者模式。开发者模式的目的是允许软件开发者测试新功能,安装新版本的操作系统等等,但它可以为你解锁仍在开发中的特殊功能。 + +要启用开发者模式,请首先关闭你的 Chromebook。假定你已经备份了设备上的所有重要信息。 + +接下来,按下键盘上的 `ESC` 和 `⟳`,再按 **电源键** 启动 Chromebook。 + +![ESC and refresh buttons][4] + +*ESC 键和 ⟳ 键* + +当提示开始恢复时,按键盘上的 `Ctrl+D`。 + +恢复结束后,你的 Chromebook 已重置为出厂设置,且没有默认的使用限制。 + +### 开机启动进入开发者模式 + +在开发者模式下运行意味着每次启动 Chromebook 时,都会提醒你处于开发者模式。你可以按 `Ctrl+D` 跳过启动延迟。有些 Chromebook 会在几秒钟后发出蜂鸣声来提醒你处于开发者模式,使得 `Ctrl+D` 操作几乎是强制的。从理论上讲,这个操作很烦人,但在实践中,我不经常启动我的 Chromebook,因为我只是唤醒它,所以当我需要这样做的时候,`Ctrl+D` 只不过是整个启动过程中小小的一步。 + 
+启用开发者模式后的第一次启动时,你必须重新设置你的设备,就好像它是全新的一样。你只需要这样做一次(除非你在未来某个时刻停用开发者模式)。 + +### 启用 Chromebook 上的 Linux + +现在,你已经运行在开发者模式下,你可以激活 Chrome OS 中的 **Linux Beta** 功能。要做到这一点,请打开 **设置**,然后单击左侧列表中的 **Linux Beta**。 + +激活 **Linux Beta**,并为你的 Linux 系统和应用程序分配一些硬盘空间。在最糟糕的时候,Linux 是相当轻量级的,所以你真的不需要分配太多硬盘空间,但它显然取决于你打算用 Linux 来做多少事。4 GB 的空间对于 Linux 以及几百个终端命令还有二十多个图形应用程序是足够的。我的 Chromebook 有一个 64 GB 的存储卡,我给了 Linux 系统 30 GB,那是因为我在 Chromebook 上所做的大部分事情都是在 Linux 内完成的。 + +一旦你的 **Linux Beta** 环境准备就绪,你可以通过按键盘上的**搜索**按钮和输入 `terminal` 来启动终端。如果你还是 Linux 新手,你可能不知道当前进入的终端能用来安装什么。当然,这取决于你想用 Linux 来做什么。如果你对 Linux 编程感兴趣,那么你可能会从 Bash(它已经在终端中安装和运行了)和 Python 开始。如果你对 Linux 中的那些迷人的开源应用程序感兴趣,你可以试试 GIMP、MyPaint、LibreOffice 或 Inkscape 等等应用程序。 + +Chrome OS 的 **Linux Beta** 模式不包含图形化的软件安装程序,但 [应用程序可以从终端安装][5]。可以使用 `sudo apt install` 命令安装应用程序。 + + * `sudo` 命令可以允许你使用超级管理员权限来执行某些命令(即 Linux 中的 `root`)。 + * `apt` 命令是一个应用程序的安装工具。 + * `install` 是命令选项,即告诉 `apt` 命令要做什么。 + +你还必须把想要安装的软件包的名字和 `apt` 命令写在一起。以安装 LibreOffice 举例: + +``` +sudo apt install libreoffice +``` +当有提示是否继续时,输入 `y`(代表“确认”),然后按 **回车键**。 + +一旦应用程序安装完毕,你可以像在 Chrome OS 上启动任何应用程序一样启动它:只需要在应用程序启动器输入它的名字。 + +了解 Linux 应用程序的名字和它的包名需要花一些时间,但你也可以用 `apt search` 命令来搜索。例如,可以用以下的方法是找到关于照片的应用程序: + +``` +apt search photo +``` + +因为 Linux 中有很多的应用程序,所以你可以找一些感兴趣的东西,然后尝试一下! 
+ +### 与 Linux 共享文件和设备 + + **Linux Beta** 环境运行在 [容器][7] 中,因此 Chrome OS 需要获得访问 Linux 文件的权限。要授予 Chrome OS 与你在 Linux 上创建的文件的交互权限,请右击要共享的文件夹并选择 **管理 Linux 共享**。 + +![Chrome OS Manage Linux sharing interface][8] + +*Chrome OS 的 Linux 管理共享界面* + +你可以通过 Chrome OS 的 **设置** 程序来管理共享设置以及其他设置。 + +![Chrome OS Settings menu][9] + +*Chrome OS 设置菜单* + +### 学习 Linux + +如果你肯花时间学习 Linux,你不仅能够解锁你 Chromebook 中隐藏的潜力,还能最终学到很多关于计算机的知识。Linux 是一个有价值的工具,一个非常有趣的玩具,一个通往比常规计算更令人兴奋的事物的大门。去了解它吧,你可能会惊讶于你自己和你 Chromebook 的无限潜能。 + +--- + +源自: https://opensource.com/article/21/2/chromebook-linux + +作者:[Seth Kenlon][a] +选题:[lujun9972][b] +译者:[max27149](https://github.com/max27149) +校对:[wxy](https://github.com/wxy) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensource.com/users/seth +[b]: https://github.com/lujun9972 +[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/wfh_work_home_laptop_work.png?itok=VFwToeMy (Working from home at a laptop) +[2]: https://www.chromium.org/chromium-os +[3]: https://opensource.com/sites/default/files/chromeos.png +[4]: https://opensource.com/sites/default/files/esc-refresh.png +[5]: https://opensource.com/article/18/1/how-install-apps-linux +[6]: https://opensource.com/tags/linux +[7]: https://opensource.com/resources/what-are-linux-containers +[8]: https://opensource.com/sites/default/files/chromeos-manage-linux-sharing.png +[9]: https://opensource.com/sites/default/files/chromeos-beta-linux.png diff --git a/scripts/check/collect.sh b/scripts/check/collect.sh index 3f8e0f0388..fdfb32bc01 100644 --- a/scripts/check/collect.sh +++ b/scripts/check/collect.sh @@ -11,11 +11,11 @@ set -e echo "[收集] 计算 PR 分支与目标分支的分叉点……" -TARGET_BRANCH="${TRAVIS_BRANCH:-master}" +TARGET_BRANCH="${GITHUB_BASE_REF:-master}" echo "[收集] 目标分支设定为:${TARGET_BRANCH}" MERGE_BASE='HEAD^' -[ "$TRAVIS_PULL_REQUEST" != 'false' ] \ +[ "$PULL_REQUEST_ID" != 'false' ] \ && MERGE_BASE="$(git merge-base 
"$TARGET_BRANCH" HEAD)" echo "[收集] 找到分叉节点:${MERGE_BASE}" diff --git a/sources/news/20200820 Rejoice KDE Lovers- MX Linux Joins the KDE Bandwagon and Now You Can Download MX Linux KDE Edition.md b/sources/news/20200820 Rejoice KDE Lovers- MX Linux Joins the KDE Bandwagon and Now You Can Download MX Linux KDE Edition.md deleted file mode 100644 index d5fcf0d379..0000000000 --- a/sources/news/20200820 Rejoice KDE Lovers- MX Linux Joins the KDE Bandwagon and Now You Can Download MX Linux KDE Edition.md +++ /dev/null @@ -1,91 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Rejoice KDE Lovers! MX Linux Joins the KDE Bandwagon and Now You Can Download MX Linux KDE Edition) -[#]: via: (https://itsfoss.com/mx-linux-kde-edition/) -[#]: author: (Ankush Das https://itsfoss.com/author/ankush/) - -Rejoice KDE Lovers! MX Linux Joins the KDE Bandwagon and Now You Can Download MX Linux KDE Edition -====== - -Debian-based [MX Linux][1] is already an impressive Linux distribution with [Xfce desktop environment][2] as the default. Even though it works good and is suitable to run with minimal hardware configuration, it still isn’t the best Linux distribution in terms of eye candy. - -That’s where KDE comes to the rescue. Of late, KDE Plasma has reduced a lot of weight and it uses fewer system resources without compromising on the modern looks. No wonder KDE Plasma is one of [the best desktop environments][3] out there. - -![][4] - -With [MX Linux 19.2][5], they began testing a KDE edition and have finally released their first KDE version. - -Also, the KDE edition comes with Advanced Hardware Support (AHS) enabled. Here’s what they have mentioned in their release notes: - -> MX-19.2 KDE is an **Advanced Hardware Support (AHS) **enabled **64-bit only** version of MX featuring the KDE/plasma desktop. Applications utilizing Qt library frameworks are given a preference for inclusion on the iso. 
- -As I mentioned it earlier, this is MX Linux’s first KDE edition ever, and they’ve also shed some light on it with the announcement as well: - -> This will be first officially supported MX/antiX family iso utilizing the KDE/plasma desktop since the halting of the predecessor MEPIS project in 2013. - -Personally, I enjoyed the experience of using MX Linux until I started using [Pop OS 20.04][6]. So, I’ll give you some key highlights of MX Linux 19.2 KDE edition along with my impressions of testing it. - -### MX Linux 19.2 KDE: Overview - -![][7] - -Out of the box, MX Linux looks cleaner and more attractive with KDE desktop on board. Unlike KDE Neon, it doesn’t feature the latest and greatest KDE stuff, but it looks to be doing the job intended. - -Of course, you will get the same options that you expect from a KDE-powered distro to customize the look and feel of your desktop. In addition to the obvious KDE perks, you will also get the usual MX tools, antiX-live-usb-system, and snapshot feature that comes baked in the Xfce edition. - -It’s a great thing to have the best of both worlds here, as stated in their announcement: - -> MX-19.2 KDE includes the usual MX tools, antiX-live-usb-system, and snapshot technology that our users have come to expect from our standard flagship Xfce releases. Adding KDE/plasma to the existing Xfce/MX-fluxbox desktops will provide for a wider range user needs and wants. - -I haven’t performed a great deal of tests but I did have some issues with extracting archives (it didn’t work the first try) and copy-pasting a file to a new location. Not sure if those are some known bugs — but I thought I should let you know here. - -![][8] - -Other than that, it features every useful tool you’d want to have and works great. With KDE on board, it actually feels more polished and smooth in my case. 
- -Along with KDE Plasma 5.14.5 on top of Debian 10 “buster”, it also comes with GIMP 2.10.12, MESA, Debian (AHS) 5.6 Kernel, Firefox browser, and few other goodies like VLC, Thunderbird, LibreOffice, and Clementine music player. - -You can also look for more stuff in the MX repositories. - -![][9] - -There are some known issues with the release like the System clock settings not being able adjustable via KDE settings. You can check their [announcement post][10] for more information or their [bug list][11] to make sure everything’s fine before trying it out on your production system. - -### Wrapping Up - -MX Linux 19.2 KDE edition is definitely more impressive than its Xfce offering in my opinion. It would take a while to iron out the bugs for this first KDE release — but it’s not a bad start. - -Speaking of KDE, I recently tested out KDE Neon, the official KDE distribution. I shared my experience in this video. I’ll try to do a video on MX Linux KDE flavor as well. - -[Subscribe to our YouTube channel for more Linux videos][12] - -Have you tried it yet? Let me know your thoughts in the comments below! 
- --------------------------------------------------------------------------------- - -via: https://itsfoss.com/mx-linux-kde-edition/ - -作者:[Ankush Das][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://itsfoss.com/author/ankush/ -[b]: https://github.com/lujun9972 -[1]: https://mxlinux.org/ -[2]: https://www.xfce.org/ -[3]: https://itsfoss.com/best-linux-desktop-environments/ -[4]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2020/08/mx-linux-kde-edition.jpg?resize=800%2C450&ssl=1 -[5]: https://mxlinux.org/blog/mx-19-2-now-available/ -[6]: https://itsfoss.com/pop-os-20-04-review/ -[7]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2020/08/mx-linux-19-2-kde.jpg?resize=800%2C452&ssl=1 -[8]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2020/08/mx-linux-19-2-kde-filemanager.jpg?resize=800%2C452&ssl=1 -[9]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2020/08/mx-linux-19-2-kde-info.jpg?resize=800%2C452&ssl=1 -[10]: https://mxlinux.org/blog/mx-19-2-kde-now-available/ -[11]: https://bugs.mxlinux.org/ -[12]: https://www.youtube.com/c/itsfoss?sub_confirmation=1 diff --git a/sources/news/20200822 Cisco open-source code boosts performance of Kubernetes apps over SD-WAN.md b/sources/news/20200822 Cisco open-source code boosts performance of Kubernetes apps over SD-WAN.md deleted file mode 100644 index 72ce014ff4..0000000000 --- a/sources/news/20200822 Cisco open-source code boosts performance of Kubernetes apps over SD-WAN.md +++ /dev/null @@ -1,66 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Cisco open-source code boosts performance of Kubernetes apps over SD-WAN) -[#]: via: (https://www.networkworld.com/article/3572310/cisco-open-source-code-boosts-performance-of-kubernetes-apps-over-sd-wan.html) -[#]: author: (Michael 
Cooney https://www.networkworld.com/author/Michael-Cooney/) - -Cisco open-source code boosts performance of Kubernetes apps over SD-WAN -====== -Cisco's Cloud-Native SD-WAN project marries SD-WANs to Kubernetes applications to cut down on the manual work needed to optimize latency and packet loss. -Thinkstock - -Cisco has introduced an open-source project that it says could go a long way toward reducing the manual work involved in optimizing performance of Kubernetes-applications across [SD-WANs][1]. - -Cisco said it launched the Cloud-Native SD-WAN (CN-WAN) project to show how Kubernetes applications can be automatically mapped to SD-WAN with the result that the applications perform better over the WAN. - -**More about SD-WAN**: [How to buy SD-WAN technology: Key questions to consider when selecting a supplier][2] • [How to pick an off-site data-backup method][3] •  [SD-Branch: What it is and why you’ll need it][4] • [What are the options for security SD-WAN?][5] - -“In many cases, enterprises deploy an SD-WAN to connect a Kubernetes cluster with users or workloads that consume cloud-native applications. In a typical enterprise, NetOps teams leverage their network expertise to program SD-WAN policies to optimize general connectivity to the Kubernetes hosted applications, with the goal to reduce latency, reduce packet loss, etc.” wrote John Apostolopoulos, vice president and CTO of Cisco’s intent-based networking group in a group [blog][6]. - -“The enterprise usually also has DevOps teams that maintain and optimize the Kubernetes infrastructure. However, despite the efforts of NetOps and DevOps teams, today Kubernetes and SD-WAN operate mostly like ships in the night, often unaware of each other. Integration between SD-WAN and Kubernetes typically involves time-consuming manual coordination between the two teams.” - -Current SD-WAN offering often have APIs that let customers programmatically influence how their traffic is handled over the WAN. 
This enables interesting and valuable opportunities for automation and application optimization, Apostolopoulos stated.  “We believe there is an opportunity to pair the declarative nature of Kubernetes with the programmable nature of modern SD-WAN solutions,”  he stated. - -Enter CN-WAN, which defines a set of components that can be used to integrate an SD-WAN package, such as Cisco Viptela SD-WAN, with Kubernetes to enable DevOps teams to express the WAN needs of the microservices they deploy in a Kubernetes cluster, while simultaneously letting NetOps automatically render the microservices needs to optimize the application performance over the WAN, Apostolopoulos stated. - -Apostolopoulos wrote that CN-WAN is composed of a Kubernetes Operator, a Reader, and an Adaptor. It works like this: The CN-WAN Operator runs in the Kubernetes cluster, actively monitoring the deployed services. DevOps teams can use standard Kubernetes annotations on the services to define WAN-specific metadata, such as the traffic profile of the application. The CN-WAN Operator then automatically registers the service along with the metadata in a service registry. In a demo at KubeCon EU this week Cisco used Google Service Directory as the service registry. - -Earlier this year [Cisco and Google][7] deepened their relationship with a turnkey package that lets customers mesh SD-WAN connectivity with applications running in a private [data center][8], Google Cloud or another cloud or SaaS application. That jointly developed platform, called Cisco SD-WAN Cloud Hub with Google Cloud, combines Cisco’s SD-WAN policy-, telemetry- and security-setting capabilities with Google's software-defined backbone to ensure that application service-level agreement, security and compliance policies are extended across the network. 
- -Meanwhile, on the SD-WAN side, the CN-WAN Reader connects to the service registry to learn about how Kubernetes is exposing the services and the associated WAN metadata extracted by the CN-WAN operator, Cisco stated. When new or updated services or metadata are detected, the CN-WAN Reader sends a message towards the CN-WAN Adaptor so SD-WAN policies can be updated. - -Finally, the CN-WAN Adaptor, maps the service-associated metadata into the detailed SD-WAN policies programmed by NetOps in the SD-WAN controller. The SD-WAN controller automatically renders the SD-WAN policies, specified by the NetOps for each metadata type, into specific SD-WAN data-plane optimizations for the service, Cisco stated.   - -“The SD-WAN may support multiple types of access at both sender and receiver (e.g., wired Internet, MPLS, wireless 4G or 5G), as well as multiple service options and prioritizations per access network, and of course multiple paths between source and destination,” Apostolopoulos stated. - -The code for the CN-WAN project is available as open-source in [GitHub][9]. - -Join the Network World communities on [Facebook][10] and [LinkedIn][11] to comment on topics that are top of mind. 
- --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3572310/cisco-open-source-code-boosts-performance-of-kubernetes-apps-over-sd-wan.html - -作者:[Michael Cooney][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Michael-Cooney/ -[b]: https://github.com/lujun9972 -[1]: https://www.networkworld.com/article/3031279/sd-wan-what-it-is-and-why-you-ll-use-it-one-day.html -[2]: https://www.networkworld.com/article/3323407/sd-wan/how-to-buy-sd-wan-technology-key-questions-to-consider-when-selecting-a-supplier.html -[3]: https://www.networkworld.com/article/3328488/backup-systems-and-services/how-to-pick-an-off-site-data-backup-method.html -[4]: https://www.networkworld.com/article/3250664/lan-wan/sd-branch-what-it-is-and-why-youll-need-it.html -[5]: https://www.networkworld.com/article/3285728/sd-wan/what-are-the-options-for-securing-sd-wan.html -[6]: https://blogs.cisco.com/networking/introducing-the-cloud-native-sd-wan-project -[7]: https://www.networkworld.com/article/3539252/cisco-integrates-sd-wan-connectivity-with-google-cloud.html -[8]: https://www.networkworld.com/article/3223692/what-is-a-data-centerhow-its-changed-and-what-you-need-to-know.html -[9]: https://github.com/CloudNativeSDWAN/cnwan-docs -[10]: https://www.facebook.com/NetworkWorld/ -[11]: https://www.linkedin.com/company/network-world diff --git a/sources/news/20201014 KDE Plasma 5.20 is Here With Exciting Improvements.md b/sources/news/20201014 KDE Plasma 5.20 is Here With Exciting Improvements.md deleted file mode 100644 index 2572d75bba..0000000000 --- a/sources/news/20201014 KDE Plasma 5.20 is Here With Exciting Improvements.md +++ /dev/null @@ -1,122 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: 
publisher: ( ) -[#]: url: ( ) -[#]: subject: (KDE Plasma 5.20 is Here With Exciting Improvements) -[#]: via: (https://itsfoss.com/kde-plasma-5-20/) -[#]: author: (Ankush Das https://itsfoss.com/author/ankush/) - -KDE Plasma 5.20 is Here With Exciting Improvements -====== - -KDE Plasma 5.20 is finally here and there’s a lot of things to be excited about, including the new wallpaper ‘**Shell’** by Lucas Andrade. - -It is worth noting that it is not an LTS release unlike [KDE Plasma 5.18][1] and will be maintained for the next 4 months or so. So, if you want the latest and greatest, you can surely go ahead and give it a try. - -In this article, I shall mention the key highlights of KDE Plasma 5.20 from [my experience with it on KDE Neon][2] (Testing Edition). - -![][3] - -### Plasma 5.20 Features - -If you like to see things in action, we made a feature overview video for you. - -[Subscribe to our YouTube channel for more Linux videos][4] - -#### Icon-only Taskbar - -![][5] - -You must be already comfortable with a taskbar that mentions the title of the window along the icon. However, that takes a lot of space in the taskbar, which looks bad when you want to have a clean look with multiple applications/windows opened. - -Not just limited to that, if you launch several windows of the same application, it will group them together and let you cycle through it from a single icon on the task bar. - -So, with this update, you get an icon-only taskbar by default which makes it look a lot cleaner and you can have more things in the taskbar at a glance. - -#### Digital Clock Applet with Date - -![][6] - -If you’ve used any KDE-powered distro, you must have noticed that the digital clock applet (in the bottom-right corner) displays the time but not the date by default. - -It’s always a good choice to have the date and time as well (at least I prefer that). So, with KDE Plasma 5.20, the applet will have both time and date. 
- -#### Get Notified When your System almost Runs out of Space - -I know this is not a big addition, but a necessary one. No matter whether your home directory is on a different partition, you will be notified when you’re about to run out of space. - -#### Set the Charge Limit Below 100% - -You are in for a treat if you are a laptop user. To help you preserve the battery health, you can now set a charge limit below 100%. I couldn’t show it to you because I use a desktop. - -#### Workspace Improvements - -Working with the workspaces on KDE desktop was already an impressive experience, now with the latest update, several tweaks have been made to take the user experience up a notch. - -To start with, the system tray has been overhauled with a grid-like layout replacing the list view. - -The default shortcut has been re-assigned with Meta+drag instead of Alt+drag to move/re-size windows to avoid conflicts with some other productivity apps with Alt+drag keybind support. You can also use the key binds like Meta + up/left/down arrow to corner-tile windows. - -![][7] - -It is also easier to list all the disks using the old “**Device Notifier**” applet, which has been renamed to “**Disks & Devices**“. - -If that wasn’t enough, you will also find improvements to [KRunner][8], which is the essential application launcher or search utility for users. It will now remember the search text history and you can also have it centered on the screen instead of having it on top of the screen. - -#### System Settings Improvements - -The look and feel of the system setting is the same but it is more useful now. You will notice a new “**Highlight changed settings**” option which will show you the recent/modified changes when compared to the default values. - -So, in that way, you can monitor any changes that you did accidentally or if someone else did it. - -![][9] - -In addition to that, you also get to utilize S.M.A.R.T monitoring and disk failure notifications. 
- -#### Wayland Support Improvements - -If you prefer to use a Wayland session, you will be happy to know that it now supports [Klipper][10] and you can also middle-click to paste (on KDE apps only for the time being). - -The much-needed screencasting support has also been added. - -#### Other Improvements - -Of course, you will notice some subtle visual improvements or adjustments for the look and feel. You may notice a smooth transition effect when changing the brightness. Similarly, when changing the brightness or volume, the on-screen display that pops up is now less obtrusive - -Options like controlling the scroll speed of mouse/touchpad have been added to give you finer controls. - -You can find the detailed list of changes in its [official changelog][11], if you’re curious. - -### Wrapping Up - -The changes are definitely impressive and should make the KDE experience better than ever before. - -If you’re running KDE Neon, you should get the update soon. But, if you are on Kubuntu, you will have to try the 20.10 ISO to get your hands on Plasma 5.20. - -What do you like the most among the list of changes? Have you tried it yet? Let me know your thoughts in the comments below. 
- --------------------------------------------------------------------------------- - -via: https://itsfoss.com/kde-plasma-5-20/ - -作者:[Ankush Das][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://itsfoss.com/author/ankush/ -[b]: https://github.com/lujun9972 -[1]: https://itsfoss.com/kde-plasma-5-18-release/ -[2]: https://itsfoss.com/kde-neon-review/ -[3]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2020/10/kde-plasma-5-20-feat.png?resize=800%2C394&ssl=1 -[4]: https://www.youtube.com/c/itsfoss?sub_confirmation=1 -[5]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2020/10/kde-plasma-5-20-taskbar.jpg?resize=472%2C290&ssl=1 -[6]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2020/10/kde-plasma-5-20-clock.jpg?resize=372%2C224&ssl=1 -[7]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2020/10/kde-plasma-5-20-notify.jpg?resize=800%2C692&ssl=1 -[8]: https://docs.kde.org/trunk5/en/kde-workspace/plasma-desktop/krunner.html -[9]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2020/10/plasma-disks-smart.png?resize=800%2C539&ssl=1 -[10]: https://userbase.kde.org/Klipper -[11]: https://kde.org/announcements/plasma-5.20.0 diff --git a/sources/talk/20160921 lawyer The MIT License, Line by Line.md b/sources/talk/20160921 lawyer The MIT License, Line by Line.md deleted file mode 100644 index 78abc6b9f1..0000000000 --- a/sources/talk/20160921 lawyer The MIT License, Line by Line.md +++ /dev/null @@ -1,296 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (lawyer The MIT License, Line by Line) -[#]: via: (https://writing.kemitchell.com/2016/09/21/MIT-License-Line-by-Line.html) -[#]: author: (Kyle E. 
Mitchell https://kemitchell.com/) - -lawyer The MIT License, Line by Line -====== - -### The MIT License, Line by Line - -[The MIT License][1] is the most popular open-source software license. Here’s one read of it, line by line. - -#### Read the License - -If you’re involved in open-source software and haven’t taken the time to read the license from top to bottom—it’s only 171 words—you need to do so now. Especially if licenses aren’t your day-to-day. Make a mental note of anything that seems off or unclear, and keep trucking. I’ll repeat every word again, in chunks and in order, with context and commentary. But it’s important to have the whole in mind. - -> The MIT License (MIT) -> -> Copyright (c) -> -> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: -> -> The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. -> -> The Software is provided “as is”, without warranty of any kind, express or implied, including but not limited to the warranties of merchantability, fitness for a particular purpose and noninfringement. In no event shall the authors or copyright holders be liable for any claim, damages or other liability, whether in an action of contract, tort or otherwise, arising from, out of or in connection with the software or the use or other dealings in the Software. 
- -The license is arranged in five paragraphs, but breaks down logically like this: - - * **Header** - * **License Title** : “The MIT License” - * **Copyright Notice** : “Copyright (c) …” - * **License Grant** : “Permission is hereby granted …” - * **Grant Scope** : “… to deal in the Software …” - * **Conditions** : “… subject to …” - * **Attribution and Notice** : “The above … shall be included …” - * **Warranty Disclaimer** : “The software is provided ‘as is’ …” - * **Limitation of Liability** : “In no event …” - - - -Here we go: - -#### Header - -##### License Title - -> The MIT License (MIT) - -“The MIT License” is not a single license, but a family of license forms derived from language prepared for releases from the Massachusetts Institute of Technology. It has seen a lot of changes over the years, both for the original projects that used it, and also as a model for other projects. The Fedora Project maintains a [kind of cabinet of MIT license curiosities][2], with insipid variations preserved in plain text like anatomical specimens in formaldehyde, tracing a wayward kind of evolution. - -Fortunately, the [Open Source Initiative][3] and [Software Package Data eXchange][4] groups have standardized a generic MIT-style license form as “The MIT License”. OSI in turn has adopted SPDX’ standardized [string identifiers][5] for common open-source licenses, with `MIT` pointing unambiguously to the standardized form “MIT License”. If you want MIT-style terms for a new project, use [the standardized form][1]. - -Even if you include “The MIT License” or “SPDX:MIT” in a `LICENSE` file, any responsible reviewer will still run a comparison of the text against the standard form, just to be sure. While various license forms calling themselves “MIT License” vary only in minor details, the looseness of what counts as an “MIT License” has tempted some authors into adding bothersome “customizations”. 
The canonical horrible, no good, very bad example of this is [the JSON license][6], an MIT-family license plus “The Software shall be used for Good, not Evil.”. This kind of thing might be “very Crockford”. It is definitely a pain in the ass. Maybe the joke was supposed to be on the lawyers. But they laughed all the way to the bank. - -Moral of the story: “MIT License” alone is ambiguous. Folks probably have a good idea what you mean by it, but you’re only going to save everyone—yourself included—time by copying the text of the standard MIT License form into your project. If you use metadata, like the `license` property in package manager metadata files, to designate the `MIT` license, make sure your `LICENSE` file and any header comments use the standard form text. All of this can be [automated][7]. - -##### Copyright Notice - -> Copyright (c) - -Until the 1976 Copyright Act, United States copyright law required specific actions, called “formalities”, to secure copyright in creative works. If you didn’t follow those formalities, your rights to sue others for unauthorized use of your work were limited, often completely lost. One of those formalities was “notice”: Putting marks on your work and otherwise making it known to the market that you were claiming copyright. The © is a standard symbol for marking copyrighted works, to give notice of copyright. The ASCII character set doesn’t have the © symbol, but `Copyright (c)` gets the same point across. - -The 1976 Copyright Act, which “implemented” many requirements of the international Berne Convention, eliminated formalities for securing copyright. At least in the United States, copyright holders still need to register their copyrighted works before suing for infringement, with potentially higher damages if they register before infringement begins. In practice, however, many register copyright right before bringing suit against someone in particular. 
You don’t lose your copyright just by failing to put notices on it, registering, sending a copy to the Library of Congress, and so on. - -Even if copyright notices aren’t as absolutely necessary as they used to be, they are still plenty useful. Stating the year a work was authored and who the copyright belonged to give some sense of when copyright in the work might expire, bringing the work into the public domain. The identity of the author or authors is also useful: United States law calculates copyright terms differently for individual and “corporate” authors. Especially in business use, it may also behoove a company to think twice about using software from a known competitor, even if the license terms give very generous permission. If you’re hoping others will see your work and want to license it from you, copyright notices serve nicely for attribution. - -As for “copyright holder”: Not all standard form licenses have a space to write this out. More recent license forms, like [Apache 2.0][8] and [GPL 3.0][9], publish `LICENSE` texts that are meant to be copied verbatim, with header comments and separate files elsewhere to indicate who owns copyright and is giving the license. Those approaches neatly discourage changes to the “standard” texts, accidental or intentional. They also make automated license identification more reliable. - -The MIT License descends from language written for releases of code by institutions. For institutional releases, there was just one clear “copyright holder”, the institution releasing the code. Other institutions cribbed these licenses, replacing “MIT” with their own names, leading eventually to the generic forms we have now. 
This process repeated for other short-form institutional licenses of the era, notably the [original four-clause BSD License][10] for the University of California, Berkeley, now used in [three-clause][11] and [two-clause][12] variants, as well as [The ISC License][13] for the Internet Systems Consortium, an MIT variant. - -In each case, the institution listed itself as the copyright holder in reliance on rules of copyright ownership, called “[works made for hire][14]” rules, that give employers and clients ownership of copyright in some work their employees and contractors do on their behalf. These rules don’t usually apply to distributed collaborators submitting code voluntarily. This poses a problem for project-steward foundations, like the Apache Foundation and Eclipse Foundation, that accept contributions from a more diverse group of contributors. The usual foundation approach thus far has been to use a house license that states a single copyright holder—[Apache 2.0][8] and [EPL 1.0][15]—backed up by contributor license agreements—[Apache CLAs][16] and [Eclipse CLAs][17]—to collect rights from contributors. Collecting copyright ownership in one place is even more important under “copyleft” licenses like the GPL, which rely on copyright owners to enforce license conditions to promote software-freedom values. - -These days, loads of projects without any kind of institutional or business steward use MIT-style license terms. SPDX and OSI have helped these use cases by standardizing forms of licenses like MIT and ISC that don’t refer to a specific entity or institutional copyright holder. Armed with those forms, the prevailing practice of project authors is to fill their own name in the copyright notice of the form very early on … and maybe bump the year here and there. At least under United States copyright law, the resulting copyright notice doesn’t give a full picture. - -The original owner of a piece of software retains ownership of their work. 
But while MIT-style license terms give others rights to build on and change the software, creating what the law calls “derivative works”, they don’t give the original author ownership of copyright in others’ contributions. Rather, each contributor has copyright in any [even marginally creative][18] work they make using the existing code as a starting point. - -Most of these projects also balk at the idea of taking contributor license agreements, to say nothing of signed copyright assignments. That’s both naive and understandable. Despite the assumption of some newer open-source developers that sending a pull request on GitHub “automatically” licenses the contribution for distribution on the terms of the project’s existing license, United States law doesn’t recognize any such rule. Strong copyright protection, not permissive licensing, is the default. - -Update: GitHub later changed its site-wide terms of service to include an attempt to flip this default, at least on GitHub.com. I’ve written up some thoughts on that development, not all of them positive, in [another post][19]. - -To fill the gap between legally effective, well-documented grants of rights in contributions and no paper trail at all, some projects have adopted the [Developer Certificate of Origin][20], a standard statement contributors allude to using `Signed-Off-By` metadata tags in their Git commits. The Developer Certificate of Origin was developed for Linux kernel development in the wake of the infamous SCO lawsuits, which alleged that chunks of Linux’ code derived from SCO-owned Unix source. As a means of creating a paper trail showing that each line of Linux came from a contributor, the Developer Certificate of Origin functions nicely. While the Developer Certificate of Origin isn’t a license, it does provide lots of good evidence that those submitting code expected the project to distribute their code, and for others to use it under the kernel’s existing license terms. 
The kernel also maintains a machine-readable `CREDITS` file listing contributors with name, affiliation, contribution area, and other metadata. I’ve done [some][21] [experiments][22] adapting that approach for projects that don’t use the kernel’s development flow. - -#### License Grant - -> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), - -The meat of The MIT License is, you guessed it, a license. In general terms, a license is permission that one person or legal entity—the “licensor”—gives another—the “licensee”—to do something the law would otherwise let them sue for. The MIT License is a promise not to sue. - -The law sometimes distinguishes licenses from promises to give licenses. If someone breaks a promise to give a license, you may be able to sue them for breaking their promise, but you may not end up with a license. “Hereby” is one of those hokey, archaic-sounding words lawyers just can’t get rid of. It’s used here to show that the license text itself gives the license, and not just a promise of a license. It’s a legal [IIFE][23]. - -While many licenses give permission to a specific, named licensee, The MIT License is a “public license”. Public licenses give everybody—the public at large—permission. This is one of the three great ideas in open-source licensing. The MIT License captures this idea by giving a license “to any person obtaining a copy of … the Software”. As we’ll see later, there is also a condition to receiving this license that ensures others will learn about their permission, too. - -The parenthetical with a capitalized term in quotation marks (a “Definition”), is the standard way to give terms specific meanings in American-style legal documents. Courts will reliably look back to the terms of the definition when they see a defined, capitalized term used elsewhere in the document. 
- -##### Grant Scope - -> to deal in the Software without restriction, - -From the licensee’s point of view, these are the seven most important words in The MIT License. The key legal concerns are getting sued for copyright infringement and getting sued for patent infringement. Neither copyright law nor patent law uses “to deal in” as a term of art; it has no specific meaning in court. As a result, any court deciding a dispute between a licensor and a licensee would ask what the parties meant and understood by this language. What the court will see is that the language is intentionally broad and open-ended. It gives licensees a strong argument against any claim by a licensor that they didn’t give permission for the licensee to do that specific thing with the software, even if the thought clearly didn’t occur to either side when the license was given. - -> including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, - -No piece of legal writing is perfect, “fully settled in meaning”, or unmistakably clear. Beware anyone who pretends otherwise. This is the least perfect part of The MIT License. There are three main issues: - -First, “including without limitation” is a legal antipattern. It crops up in any number of flavors: - - * “including, without limitation” - * “including, without limiting the generality of the foregoing” - * “including, but not limited to” - * many, many pointless variations - - - -All of these share a common purpose, and they all fail to achieve it reliably. Fundamentally, drafters who use them try to have their cake and eat it, too. In The MIT License, that means introducing specific examples of “dealing in the Software”—“use, copy, modify” and so on—without implying that licensee action has to be something like the examples given to count as “dealing in”. 
The trouble is that, if you end up needing a court to review and interpret the terms of a license, the court will see its job as finding out what those fighting meant by the language. If the court needs to decide what “deal in” means, it cannot “unsee” the examples, even if you tell it to. I’d argue that “deal in the Software without restriction” alone would be better for licensees. Also shorter. - -Second, the verbs given as examples of “deal in” are a hodgepodge. Some have specific meanings under copyright or patent law, others almost do or just plain don’t: - - * use appears in [United States Code title 35, section 271(a)][24], the patent law’s list of what patent owners can sue others for doing without permission. - - * copy appears in [United States Code title 17, section 106][25], the copyright law’s list of what copyright owners can sue others for doing without permission. - - * modify doesn’t appear in either copyright or patent statute. It is probably closest to “prepare derivative works” under the copyright statute, but may also implicate improving or otherwise derivative inventions. - - * merge doesn’t appear in either copyright or patent statute. “Merger” has a specific meaning in copyright, but that’s clearly not what’s intended here. Rather, a court would probably read “merge” according to its meaning in industry, as in “to merge code”. - - * publish doesn’t appear in either copyright or patent statute. Since “the Software” is what’s being published, it probably hews closest to “distribute” under the [copyright statute][25]. That statute also covers rights to perform and display works “publicly”, but those rights apply only to specific kinds of copyrighted work, like plays, sound recordings, and motion pictures. - - * distribute appears in the [copyright statute][25]. - - * sublicense is a general term of intellectual property law. 
The right to sublicense means the right to give others licenses of their own, to do some or all of what you have permission to do. The MIT License’s right to sublicense is actually somewhat unusual in open-source licenses generally. The norm is what Heather Meeker calls a “direct licensing” approach, where everyone who gets a copy of the software and its license terms gets a license direct from the owner. Anyone who might get a sublicense under the MIT License will probably end up with a copy of the license telling them they have a direct license, too. - - * sell copies of is a mongrel. It is close to “offer to sell” and “sell” in the [patent statute][24], but refers to “copies”, a copyright concept. On the copyright side, it seems close to “distribute”, but the [copyright statute][25] makes no mention of sales. - - * permit persons to whom the Software is furnished to do so seems redundant of “sublicense”. It’s also unnecessary to the extent folks who get copies also get a direct license. - - - - -Lastly, as a result of this mishmash of legal, industry, general-intellectual-property, and general-use terms, it isn’t clear whether The MIT License includes a patent license. The general language “deal in” and some of the example verbs, especially “use”, point toward a patent license, albeit a very unclear one. The fact that the license comes from the copyright holder, who may or may not have patent rights in inventions in the software, as well as most of the example verbs and the definition of “the Software” itself, all point strongly toward a copyright license. More recent permissive open-source licenses, like [Apache 2.0][8], address copyright, patent, and even trademark separately and specifically. - -##### Three License Conditions - -> subject to the following conditions: - -There’s always a catch! MIT has three! - -If you don’t follow The MIT License’s conditions, you don’t get the permission the license offers. 
So failing to do what the conditions say at least theoretically leaves you open to a lawsuit, probably a copyright lawsuit. - -Using the value of the software to the licensee to motivate compliance with conditions, even though the licensee paid nothing for the license, is the second great idea of open-source licensing. The last, not found in The MIT License, builds off license conditions: “Copyleft” licenses like the [GNU General Public License][9] use license conditions to control how those making changes can license and distribute their changed versions. - -##### Notice Condition - -> The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -If you give someone a copy of the software, you need to include the license text and any copyright notice. This serves a few critical purposes: - - 1. Gives others notice that they have permission for the software under the public license. This is a key part of the direct-licensing model, where each user gets a license direct from the copyright holder. - - 2. Makes known who’s behind the software, so they can be showered in praises, glory, and cold, hard cash donations. - - 3. Ensures the warranty disclaimer and limitation of liability (coming up next) follow the software around. Everyone who gets a copy should get a copy of those licensor protections, too. - - - - -There’s nothing to stop you charging for providing a copy, or even a copy in compiled form, without source code. But when you do, you can’t pretend that the MIT code is your own proprietary code, or provided under some other license. Those receiving get to know their rights under the “public license”. - -Frankly, compliance with this condition is breaking down. Nearly every open-source license has such an “attribution” condition. 
Makers of system and installed software often understand they’ll need to compile a notices file or “license information” screen, with copies of license texts for libraries and components, for each release of their own. The project-steward foundations have been instrumental in teaching those practices. But web developers, as a whole, haven’t got the memo. It can’t be explained away by a lack of tooling—there is plenty—or the highly modular nature of packages from npm and other repositories—which uniformly standardize metadata formats for license information. All the good JavaScript minifiers have command-line flags for preserving license header comments. Other tools will concatenate `LICENSE` files from package trees. There’s really no excuse. - -##### Warranty Disclaimer - -> The Software is provided “as is”, without warranty of any kind, express or implied, including but not limited to the warranties of merchantability, fitness for a particular purpose and noninfringement. - -Nearly every state in the United States has enacted a version of the Uniform Commercial Code, a model statute of laws governing commercial transactions. Article 2 of the UCC—“Division 2” in California—governs contracts for sales of goods, from used automobiles bought off the lot to large shipments of industrial chemicals to manufacturing plants. - -Some of the UCC’s rules about sales contracts are mandatory. These rules always apply, whether those buying and selling like them or not. Others are just “defaults”. Unless buyers and sellers opt out in writing, the UCC implies that they want the baseline rule found in the UCC’s text for their deal. Among the default rules are implied “warranties”, or promises by sellers to buyers about the quality and usability of the goods being sold. 
- -There is a big theoretical debate about whether public licenses like The MIT License are contracts—enforceable agreements between licensors and licensees—or just licenses, which go one way, but may come with strings attached, their conditions. There is less debate about whether software counts as “goods”, triggering the UCC’s rules. There is no debate among licensors on liability: They don’t want to get sued for lots of money if the software they give away for free breaks, causes problems, doesn’t work, or otherwise causes trouble. That’s exactly the opposite of what three default rules for “implied warranties” do: - - 1. The implied warranty of “merchantability” under [UCC section 2-314][26] is a promise that “the goods”—the Software—are of at least average quality, properly packaged and labeled, and fit for the ordinary purposes they are intended to serve. This warranty applies only if the one giving the software is a “merchant” with respect to the software, meaning they deal in software and hold themselves out as skilled in software. - - 2. The implied warranty of “fitness for a particular purpose” under [UCC section 2-315][27] kicks in when the seller knows the buyer is relying on them to provide goods for a particular purpose. The goods need to actually be “fit” for that purpose. - - 3. The implied warranty of “noninfringement” is not part of the UCC, but is a common feature of general contract law. This implied promise protects the buyer if it turns out the goods they received infringe somebody else’s intellectual property rights. That would be the case if the software under The MIT License didn’t actually belong to the one trying to license it, or if it fell under a patent owned by someone else. - - - - -[Section 2-316(3)][28] of the UCC requires language opting out of, or “excluding”, implied warranties of merchantability and fitness for a particular purpose to be conspicuous. 
“Conspicuous” in turn means written or formatted to call attention to itself, the opposite of microscopic fine print meant to slip past unwary consumers. State law may impose a similar attention-grabbing requirement for disclaimers of noninfringement. - -Lawyers have long suffered under the delusion that writing anything in `ALL-CAPS` meets the conspicuous requirement. That isn’t true. Courts have criticized the Bar for pretending as much, and most everyone agrees all-caps does more to discourage reading than compel it. All the same, most open-source-license forms set their warranty disclaimers in all-caps, in part because that’s the only obvious way to make it stand out in plain-text `LICENSE` files. I’d prefer to use asterisks or other ASCII art, but that ship sailed long, long ago. - -##### Limitation of Liability - -> In no event shall the authors or copyright holders be liable for any claim, damages or other liability, whether in an action of contract, tort or otherwise, arising from, out of or in connection with the Software or the use or other dealings in the Software. - -The MIT License gives permission for software “free of charge”, but the law does not assume that folks receiving licenses free of charge give up their rights to sue when things go wrong and the licensor is to blame. “Limitations of liability”, often paired with “damages exclusions”, work a lot like licenses, as promises not to sue. But these are protections for the licensor against lawsuits by licensees. - -In general, courts read limitations of liability and damages exclusions warily, since they can shift an incredible amount of risk from one side to another. To protect the community’s vital interest in giving folks a way to redress wrongs done in court, they “strictly construe” language limiting liability, reading it against the one protected by it where possible. Limitations of liability have to be specific to stand up. 
Especially in “consumer” contracts and other situations where those giving up the right to sue lack sophistication or bargaining power, courts have sometimes refused to honor language that seemed buried out of sight. Partly for that reason, partly by sheer force of habit, lawyers tend to give limits of liability the all-caps treatment, too. - -Drilling down a bit, the “limitation of liability” part is a cap on the amount of money a licensee can sue for. In open-source licenses, that limit is always no money at all, $0, “not liable”. By contrast, in commercial licenses, it’s often a multiple of license fees paid in the last 12-month period, though it’s often negotiated. - -The “exclusion” part lists, specifically, kinds of legal claims—reasons to sue for damages—the licensor cannot use. Like many, many legal forms, The MIT License mentions actions “of contract”—for breaching a contract—and “of tort”. Tort rules are general rules against carelessly or maliciously harming others. If you run someone down on the road while texting, you have committed a tort. If your company sells faulty headphones that burn peoples’ ears off, your company has committed a tort. If a contract doesn’t specifically exclude tort claims, courts sometimes read exclusion language in a contract to prevent only contract claims. For good measure, The MIT License throws in “or otherwise”, just to catch the odd admiralty law or other, exotic kind of legal claim. - -The phrase “arising from, out of or in connection with” is a recurring tick symptomatic of the legal draftsman’s inherent, anxious insecurity. The point is that any lawsuit having anything to do with the software is covered by the limitation and exclusions. On the off chance something can “arise from”, but not “out of”, or “in connection with”, it feels better to have all three in the form, so pack ‘em in. 
Never mind that any court forced to split hairs in this part of the form will have to come up with different meanings for each, on the assumption that a professional drafter wouldn’t use different words in a row to mean the same thing. Never mind that in practice, where courts don’t feel good about a limitation that’s disfavored to begin with, they’ll be more than ready to read the scope trigger narrowly. But I digress. The same language appears in literally millions of contracts. - -#### Overall - -All these quibbles are a bit like spitting out gum on the way into church. The MIT License is a legal classic. The MIT License works. It is by no means a panacea for all software IP ills, in particular the software patent scourge, which it predates by decades. But MIT-style licenses have served admirably, fulfilling a narrow purpose—reversing troublesome default rules of copyright, sales, and contract law—with a minimal combination of discreet legal tools. In the greater context of computing, its longevity is astounding. The MIT License has outlasted and will outlast the vast majority of software licensed under it. We can only guess how many decades of faithful legal service it will have given when it finally loses favor. It’s been especially generous to those who couldn’t have afforded their own lawyer. - -We’ve seen how the The MIT License we know today is a specific, standardized set of terms, bringing order at long last to a chaos of institution-specific, haphazard variations. - -We’ve seen how its approach to attribution and copyright notice informed intellectual property management practices for academic, standards, commercial, and foundation institutions. - -We’ve seen how The MIT Licenses grants permission for software to all, for free, subject to conditions that protect licensors from warranties and liability. 
- -We’ve seen that despite some crusty verbiage and lawyerly affectation, one hundred and seventy one little words can get a hell of a lot of legal work done, clearing a path for open-source software through a dense underbrush of intellectual property and contract. - -I’m so grateful for all who’ve taken the time to read this rather long post, to let me know they found it useful, and to help improve it. As always, I welcome your comments via [e-mail][29], [Twitter][30], and [GitHub][31]. - -A number of folks have asked where they can read more, or find run-downs of other licenses, like the GNU General Public License or the Apache 2.0 license. No matter what your particular continuing interest may be, I heartily recommend the following books: - - * Andrew M. St. Laurent’s [Understanding Open Source & Free Software Licensing][32], from O’Reilly. - -I start with this one because, while it’s somewhat dated, its approach is also closest to the line-by-line approach used above. O’Reilly has made it [available online][33]. - - * Heather Meeker’s [Open (Source) for Business][34] - -In my opinion, by far the best writing on the GNU General Public License and copyleft more generally. This book covers the history, the licenses, their development, as well as compatibility and compliance. It’s the book I lend to clients considering or dealing with the GPL. - - * Larry Rosen’s [Open Source Licensing][35], from Prentice Hall. - -A great first book, also available for free [online][36]. This is the best introduction to open-source licensing and related law for programmers starting from scratch. This one is also a bit dated in some specific details, but Larry’s taxonomy of licenses and succinct summary of open-source business models stand the test of time. - - - - -All of these were crucial to my own education as an open-source licensing lawyer. Their authors are professional heroes of mine. Have a read! 
— K.E.M - -I license this article under a [Creative Commons Attribution-ShareAlike 4.0 license][37]. - - --------------------------------------------------------------------------------- - -via: https://writing.kemitchell.com/2016/09/21/MIT-License-Line-by-Line.html - -作者:[Kyle E. Mitchell][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://kemitchell.com/ -[b]: https://github.com/lujun9972 -[1]: http://spdx.org/licenses/MIT -[2]: https://fedoraproject.org/wiki/Licensing:MIT?rd=Licensing/MIT -[3]: https://opensource.org -[4]: https://spdx.org -[5]: http://spdx.org/licenses/ -[6]: https://spdx.org/licenses/JSON -[7]: https://www.npmjs.com/package/licensor -[8]: https://www.apache.org/licenses/LICENSE-2.0 -[9]: https://www.gnu.org/licenses/gpl-3.0.en.html -[10]: http://spdx.org/licenses/BSD-4-Clause -[11]: https://spdx.org/licenses/BSD-3-Clause -[12]: https://spdx.org/licenses/BSD-2-Clause -[13]: http://www.isc.org/downloads/software-support-policy/isc-license/ -[14]: http://worksmadeforhire.com/ -[15]: https://www.eclipse.org/legal/epl-v10.html -[16]: https://www.apache.org/licenses/#clas -[17]: https://wiki.eclipse.org/ECA -[18]: https://en.wikipedia.org/wiki/Feist_Publications,_Inc.,_v._Rural_Telephone_Service_Co. 
-[19]: https://writing.kemitchell.com/2017/02/16/Against-Legislating-the-Nonobvious.html -[20]: http://developercertificate.org/ -[21]: https://github.com/berneout/berneout-pledge -[22]: https://github.com/berneout/authors-certificate -[23]: https://en.wikipedia.org/wiki/Immediately-invoked_function_expression -[24]: https://www.govinfo.gov/app/details/USCODE-2017-title35/USCODE-2017-title35-partIII-chap28-sec271 -[25]: https://www.govinfo.gov/app/details/USCODE-2017-title17/USCODE-2017-title17-chap1-sec106 -[26]: https://leginfo.legislature.ca.gov/faces/codes_displaySection.xhtml?sectionNum=2314.&lawCode=COM -[27]: https://leginfo.legislature.ca.gov/faces/codes_displaySection.xhtml?sectionNum=2315.&lawCode=COM -[28]: https://leginfo.legislature.ca.gov/faces/codes_displaySection.xhtml?sectionNum=2316.&lawCode=COM -[29]: mailto:kyle@kemitchell.com -[30]: https://twitter.com/kemitchell -[31]: https://github.com/kemitchell/writing/tree/master/_posts/2016-09-21-MIT-License-Line-by-Line.md -[32]: https://lccn.loc.gov/2006281092 -[33]: http://www.oreilly.com/openbook/osfreesoft/book/ -[34]: https://www.amazon.com/dp/1511617772 -[35]: https://lccn.loc.gov/2004050558 -[36]: http://www.rosenlaw.com/oslbook.htm -[37]: https://creativecommons.org/licenses/by-sa/4.0/legalcode diff --git a/sources/talk/20170928 The Lineage of Man.md b/sources/talk/20170928 The Lineage of Man.md index de2cc7515e..b51d67f4f9 100644 --- a/sources/talk/20170928 The Lineage of Man.md +++ b/sources/talk/20170928 The Lineage of Man.md @@ -92,7 +92,7 @@ via: https://twobithistory.org/2017/09/28/the-lineage-of-man.html 作者:[Two-Bit History][a] 选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) +译者:[bestony](https://github.com/bestony) 校对:[校对者ID](https://github.com/校对者ID) 本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 diff --git a/sources/talk/20181218 The Rise and Demise of RSS.md b/sources/talk/20181218 The Rise and Demise of RSS.md index 
2dfea2074c..e260070c5c 100644 --- a/sources/talk/20181218 The Rise and Demise of RSS.md +++ b/sources/talk/20181218 The Rise and Demise of RSS.md @@ -1,5 +1,5 @@ [#]: collector: (lujun9972) -[#]: translator: (beamrolling) +[#]: translator: ( ) [#]: reviewer: ( ) [#]: publisher: ( ) [#]: url: ( ) diff --git a/sources/talk/20190111 What metrics matter- A guide for open source projects.md b/sources/talk/20190111 What metrics matter- A guide for open source projects.md deleted file mode 100644 index 1438450a99..0000000000 --- a/sources/talk/20190111 What metrics matter- A guide for open source projects.md +++ /dev/null @@ -1,95 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (What metrics matter: A guide for open source projects) -[#]: via: (https://opensource.com/article/19/1/metrics-guide-open-source-projects) -[#]: author: (Gordon Haff https://opensource.com/users/ghaff) - -What metrics matter: A guide for open source projects -====== -5 principles for deciding what to measure. -![](https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/metrics_data_dashboard_system_computer_analytics.png?itok=oxAeIEI-) - -"Without data, you're just a person with an opinion." - -Those are the words of W. Edwards Deming, the champion of statistical process control, who was credited as one of the inspirations for what became known as the Japanese post-war economic miracle of 1950 to 1960. Ironically, Japanese manufacturers like Toyota were far more receptive to Deming’s ideas than General Motors and Ford were. - -Community management is certainly an art. It’s about mentoring. It’s about having difficult conversations with people who are hurting the community. It’s about negotiation and compromise. It’s about interacting with other communities. It’s about making connections. In the words of Red Hat’s Diane Mueller, it’s about "nurturing conversations." 
- -However, it’s also about metrics and data. - -Some have much in common with software development projects more broadly. Others are more specific to the management of the community itself. I think of deciding what to measure and how as adhering to five principles. - -### 1. Recognize that behaviors aren't independent of the measurements you choose to highlight. - -In 2008, Daniel Ariely published Predictably Irrational, one of a number of books written around that time that introduced behavioral psychology and behavioral economics to the general public. One memorable quote from that book is the following: “Human beings adjust behavior based on the metrics they’re held against. Anything you measure will impel a person to optimize his score on that metric. What you measure is what you’ll get. Period.” - -This shouldn’t be surprising. It’s a finding that’s been repeatedly confirmed by research. It should also be familiar to just about anyone with business experience. It’s certainly not news to anyone in sales management, for example. Base sales reps’ (or their managers’) bonuses solely on revenue, and they’ll try to discount whatever it takes to maximize revenue, even if it puts margin in the toilet. Conversely, want the sales force to push a new product line—which will probably take extra effort—but skip the spiffs? Probably not happening. - -And lest you think I’m unfairly picking on sales, this behavior is pervasive, all the way up to the CEO, as Ariely describes in a 2010 Harvard Business Review article: “CEOs care about stock value because that’s how we measure them. If we want to change what they care about, we should change what we measure.” - -Developers and other community members are not immune. - -### 2. You need to choose relevant metrics. - -There’s a lot of folk wisdom floating around about what’s relevant and important that’s not necessarily true. 
My colleague [Dave Neary offers an example from baseball][1]: “In the late '90s, the key measurements that were used to measure batter skill were RBI (runs batted in) and batting average (how often a player got on base with a hit, divided by the number of at-bats). The Oakland A’s were the first major league team to recruit based on a different measurement of player performance: on-base percentage. This measures how often they get to first base, regardless of how it happens.” - -Indeed, the whole revolution of sabermetrics in baseball and elsewhere, which was popularized in Michael Lewis’ Moneyball, often gets talked about in terms of introducing data in a field that historically was more about gut feel and personal experience. But it was also about taking a game that had actually always been fairly numbers-obsessed and coming up with new metrics based on mostly existing data to better measure player value. (The data revolution going on in sports today is more about collecting much more data through video and other means than was previously available.) - -### 3. Quantity may not lead to quality. - -As a corollary, collecting lots of tangential but easy-to-capture data isn’t better than just selecting a few measurements you’ve determined are genuinely useful. In a world where online behavior can be tracked with great granularity and displayed in colorful dashboards, it’s tempting to be distracted by sheer data volume, even when it doesn’t deliver any great insight into community health and trajectory. - -This may seem like an obvious point: Why measure something that isn’t relevant? In practice, metrics often get chosen because they’re easy to measure, not because they’re particularly useful. They tend to be more about inputs than outputs: The number of developers. The number of forum posts. The number of commits. Collectively, measures like this often get called vanity metrics. 
They’re ubiquitous, but most people involved with community management don’t think much of them. - -Number of downloads may be the worst of the bunch. It’s true that, at some level, they’re an indication of interest in a project. That’s something. But it’s sufficiently distant from actively using the project, much less engaging with the project deeply, that it’s hard to view downloads as a very useful number. - -Is there any harm in these vanity metrics? Yes, to the degree that you start thinking that they’re something to base action on. Probably more seriously, stakeholders like company management or industry observers can come to see them as meaningful indicators of project health. - -### 4. Understand what measurements really mean and how they relate to each other. - -Neary makes this point to caution against myopia. “In one project I worked on,” he says, ”some people were concerned about a recent spike in the number of bug reports coming in because it seemed like the project must have serious quality issues to resolve. However, when we looked at the numbers, it turned out that many of the bugs were coming in because a large company had recently started using the project. The increase in bug reports was actually a proxy for a big influx of new users, which was a good thing.” - -In practice, you often have to measure through proxies. This isn’t an inherent problem, but the further you get between what you want to measure and what you’re actually measuring, the harder it is to connect the dots. It’s fine to track progress in closing bugs, writing code, and adding new features. However, those don’t necessarily correlate with how happy users are or whether the project is doing a good job of working towards its long-term objectives, whatever those may be. - -### 5. Different measurements serve different purposes. - -Some measurements may be non-obvious but useful for tracking the success of a project and community relative to internal goals. 
Others may be better suited for a press release or other external consumption. For example, as a community manager, you may really care about the number of meetups, mentoring sessions, and virtual briefings your community has held over the past three months. But it’s the number of contributions and contributors that are more likely to grab the headlines. You probably care about those too. But maybe not as much, depending upon your current priorities. - -Still, other measurements may relate to the goals of any sponsoring organizations. The measurements most relevant for projects tied to commercial products are likely to be different from pure community efforts. - -Because communities differ and goals differ, it’s not possible to simply compile a metrics checklist, but here are some ideas to think about: - -Consider qualitative metrics in addition to quantitative ones. Conducting surveys and other studies can be time-consuming, especially if they’re rigorous enough to yield better-than-anecdotal data. It also requires rigor to construct studies so that they can be used to track changes over time. In other words, it’s a lot easier to measure quantitative contributor activity than it is to suss out if the community members are happier about their participation today than they were a year ago. However, given the importance of culture to the health of a community, measuring it in a systematic way can be a worthwhile exercise. - -Breadth of community, including how many are unaffiliated with commercial entities, is important for many projects. The greater the breadth, the greater the potential leverage of the open source development process. It can also be instructive to see how companies and individuals are contributing. Projects can be explicitly designed to better accommodate casual contributors. - -Are new contributors able to have an impact, or are they ignored? How long does it take for code contributions to get committed? 
How long does it take for a reported bug to be fixed or otherwise responded to? If they asked a question in a forum, did anyone answer them? In other words, are you letting contributors contribute? - -Advancement within the project is also an important metric. [Mikeal Rogers of the Node.js community][2] explains: “The shift that we made was to create a support system and an education system to take a user and turn them into a contributor, first at a very low level, and educate them to bring them into the committer pool and eventually into the maintainer pool. The end result of this is that we have a wide range of skill sets. Rather than trying to attract phenomenal developers, we’re creating new phenomenal developers.” - -Whatever metrics you choose, don’t forget why you made them metrics in the first place. I find a helpful question to ask is: “What am I going to do with this number?” If the answer is to just put it in a report or in a press release, that’s not a great answer. Metrics should be measurements that tell you either that you’re on the right path or that you need to take specific actions to course-correct. - -For this reason, Stormy Peters, who handles community leads at Red Hat, [argues for keeping it simple][3]. She writes, “It’s much better to have one or two key metrics than to worry about all the possible metrics. You can capture all the possible metrics, but as a project, you should focus on moving one. It’s also better to have a simple metric that correlates directly to something in the real world than a metric that is a complicated formula or ration between multiple things. As project members make decisions, you want them to be able to intuitively feel whether or not it will affect the project’s key metric in the right direction.” - -The article is adapted from [How Open Source Ate Software][4] by Gordon Haff (Apress 2018). 
- --------------------------------------------------------------------------------- - -via: https://opensource.com/article/19/1/metrics-guide-open-source-projects - -作者:[Gordon Haff][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://opensource.com/users/ghaff -[b]: https://github.com/lujun9972 -[1]: https://community.redhat.com/blog/2014/07/when-metrics-go-wrong/ -[2]: https://opensource.com/article/17/3/nodejs-community-casual-contributors -[3]: https://medium.com/open-source-communities/3-important-things-to-consider-when-measuring-your-success-50e21ad82858 -[4]: https://www.apress.com/us/book/9781484238936 diff --git a/sources/talk/20190115 What happens when a veteran teacher goes to an open source conference.md b/sources/talk/20190115 What happens when a veteran teacher goes to an open source conference.md deleted file mode 100644 index e16505c36c..0000000000 --- a/sources/talk/20190115 What happens when a veteran teacher goes to an open source conference.md +++ /dev/null @@ -1,68 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (What happens when a veteran teacher goes to an open source conference) -[#]: via: (https://opensource.com/open-organization/19/1/educator-at-open-source-conference) -[#]: author: (Ben Owens https://opensource.com/users/engineerteacher) - -What happens when a veteran teacher goes to an open source conference -====== -Sometimes feeling like a fish out of water is precisely what educators need. 
- -![](https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/rh_003588_01_rd3os.combacktoschoolseriesgen_rh_032x_0.png?itok=cApG9aB4) - -"Change is going to be continual, and today is the slowest day society will ever move."—[Tony Fadell][1] - -If ever there was an experience that brought the above quotation home for me, it was my experience at the [All Things Open conference][2] in Raleigh, NC last October. Thousands of people from all over the world attended the conference, and many (if not most), worked as open source coders and developers. As one of the relatively few educators in attendance, I saw and heard things that were completely foreign to me—terms like as Istio, Stack Overflow, Ubuntu, Sidecar, HyperLedger, and Kubernetes tossed around for days. - -I felt like a fish out of water. But in the end, that was the perfect dose of reality I needed to truly understand how open principles can reshape our approach to education. - -### Not-so-strange attractors - -All Things Open attracted me to Raleigh for two reasons, both of which have to do with how our schools must do a better job of creating environments that truly prepare students for a rapidly changing world. - -The first is my belief that schools should embrace the ideals of the [open source way][3]. The second is that educators have to periodically force themselves out of their relatively isolated worlds of "doing school" in order to get a glimpse of what the world is actually doing. - -When I was an engineer for 20 years, I developed a deep sense of the power of an open exchange of ideas, of collaboration, and of the need for rapid prototyping of innovations. Although we didn't call these ideas "open source" at the time, my colleagues and I constantly worked together to identify and solve problems using tools such as [Design Thinking][4] so that our businesses remained competitive and met market demands. 
When I became a science and math teacher at a small [public school][5] in rural Appalachia, my goal was to adapt these ideas to my classrooms and to the school at large as a way to blur the lines between a traditional school environment and what routinely happens in the "real world." - -Through several years of hard work and many iterations, my fellow teachers and I were eventually able to develop a comprehensive, school-wide project-based learning model, where students worked in collaborative teams on projects that [made real connections][6] between required curriculum and community-based applications. Doing so gave these students the ability to develop skills they can use for a lifetime, rather than just on the next test—skills such as problem solving, critical thinking, oral and written communication, perseverance through setbacks, and adapting to changing conditions, as well as how to have routine conversations with adult mentors form the community. Only after reading [The Open Organization][7] did I realize that what we had been doing essentially embodied what Jim Whitehurst had described. In our case, of course, we applied open principles to an educational context (that model, called Open Way Learning, is the subject of a [book][8] published in December). - -I felt like a fish out of water. But in the end, that was the perfect dose of reality I needed to truly understand how open principles can reshape our approach to education. - -As good as this model is in terms of pushing students into a relevant, engaging, and often unpredictable learning environments, it can only go so far if we, as educators who facilitate this type of project-based learning, do not constantly stay abreast of changing technologies and their respective lexicon. Even this unconventional but proven approach will still leave students ill-prepared for a global, innovation economy if we aren't constantly pushing ourselves into areas outside our own comfort zones. 
My experience at the All Things Open conference was a perfect example. While humbling, it also forced me to confront what I didn't know so that I can learn from it to help the work I do with other teachers and schools. - -### A critical decision - -I made this point to others when I shared a picture of the All Things Open job board with dozens of colleagues all over the country. I shared it with the caption: "What did you do in your school today to prepare your students for this reality tomorrow?" The honest answer from many was, unfortunately, "not much." That has to change. - -![](https://opensource.com/sites/default/files/images/open-org/owens_1.jpg) -![](https://opensource.com/sites/default/files/images/open-org/owens_2.jpg) -(Images courtesy of Ben Owens, CC BY-SA) - -People in organizations everywhere have to make a critical decision: either embrace the rapid pace of change that is a fact of life in our world or face the hard reality of irrelevance. Our systems in education are at this same crossroads—even ones who think of themselves as being innovative. It involves admitting to students, "I don't know, but I'm willing to learn." That's the kind of teaching and learning experience our students deserve. - -It can happen, but it will take pioneering educators who are willing to move away from comfortable, back-of-the-book answers to help students as they work on difficult and messy challenges. You may very well be a veritable fish out of water. 
- --------------------------------------------------------------------------------- - -via: https://opensource.com/open-organization/19/1/educator-at-open-source-conference - -作者:[Ben Owens][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://opensource.com/users/engineerteacher -[b]: https://github.com/lujun9972 -[1]: https://en.wikipedia.org/wiki/Tony_Fadell -[2]: https://allthingsopen.org/ -[3]: https://opensource.com/open-source-way -[4]: https://dschool.stanford.edu/resources-collections/a-virtual-crash-course-in-design-thinking -[5]: https://www.tricountyearlycollege.org/ -[6]: https://www.bie.org/about/what_pbl -[7]: https://www.redhat.com/en/explore/the-open-organization-book -[8]: https://www.amazon.com/Open-Up-Education-Learning-Transform/dp/1475842007/ref=tmm_pap_swatch_0?_encoding=UTF8&qid=&sr= diff --git a/sources/talk/20190131 4 confusing open source license scenarios and how to navigate them.md b/sources/talk/20190131 4 confusing open source license scenarios and how to navigate them.md deleted file mode 100644 index fd93cdd9a6..0000000000 --- a/sources/talk/20190131 4 confusing open source license scenarios and how to navigate them.md +++ /dev/null @@ -1,59 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (4 confusing open source license scenarios and how to navigate them) -[#]: via: (https://opensource.com/article/19/1/open-source-license-scenarios) -[#]: author: (P.Kevin Nelson https://opensource.com/users/pkn4645) - -4 confusing open source license scenarios and how to navigate them -====== - -Before you begin using a piece of software, make sure you fully understand the terms of its license. 
- -![](https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/LAW_openisopen.png?itok=FjmDxIaL) - -As an attorney running an open source program office for a Fortune 500 corporation, I am often asked to look into a product or component where there seems to be confusion as to the licensing model. Under what terms can the code be used, and what obligations run with such use? This often happens when the code or the associated project community does not clearly indicate availability under a [commonly accepted open source license][1]. The confusion is understandable as copyright owners often evolve their products and services in different directions in response to market demands. Here are some of the scenarios I commonly discover and how you can approach each situation. - -### Multiple licenses - -The product is truly open source with an [Open Source Initiative][2] (OSI) open source-approved license, but has changed licensing models at least once if not multiple times throughout its lifespan. This scenario is fairly easy to address; the user simply has to decide if the latest version with its attendant features and bug fixes is worth the conditions to be compliant with the current license. If so, great. If not, then the user can move back in time to a version released under a more palatable license and start from that fork, understanding that there may not be an active community for support and continued development. - -### Old open source - -This is a variation on the multiple licenses model with the twist that current licensing is proprietary only. You have to use an older version to take advantage of open source terms and conditions. Most often, the product was released under a valid open source license up to a certain point in its development, but then the copyright holder chose to evolve the code in a proprietary fashion and offer new releases only under proprietary commercial licensing terms. 
So, if you want the newest capabilities, you have to purchase a proprietary license, and you most likely will not get a copy of the underlying source code. Most often the open source community that grew up around the original code line falls away once the members understand there will be no further commitment from the copyright holder to the open source branch. While this scenario is understandable from the copyright holder's perspective, it can be seen as "burning a bridge" to the open source community. It would be very difficult to again leverage the benefits of the open source contribution models once a project owner follows this path. - -### Open core - -By far the most common discovery is that a product has both an open source-licensed "community edition" and a proprietary-licensed commercial offering, commonly referred to as open core. This is often encouraging to potential consumers, as it gives them a "try before you buy" option or even a chance to influence both versions of the product by becoming an active member of the community. I usually encourage clients to begin with the community version, get involved, and see what they can achieve. Then, if the product becomes a crucial part of their business plan, they have the option to upgrade to the proprietary level at any time. - -### Freemium - -The component is not open source at all, but instead it is released under some version of the "freemium" model. A version with restricted or time-limited functionality can be downloaded with no immediate purchase required. However, since the source code is usually not provided and its accompanying license does not allow perpetual use, the creation of derivative works, nor further distribution, it is definitely not open source. In this scenario, it is usually best to pass unless you are prepared to purchase a proprietary license and accept all attendant terms and conditions of use. 
Users are often the most disappointed in this outcome as it has somewhat of a deceptive feel. - -### OSI compliant - -Of course, the happy path I haven't mentioned is to discover the project has a single, clear, OSI-compliant license. In those situations, open source software is as easy as downloading and going forward within appropriate use. - -Each of the more complex scenarios described above can present problems to potential development projects, but consultation with skilled procurement or intellectual property professionals with regard to licensing lineage can reveal excellent opportunities. - -An earlier version of this article was published on [OSS Law][3] and is republished with the author's permission. - - --------------------------------------------------------------------------------- - -via: https://opensource.com/article/19/1/open-source-license-scenarios - -作者:[P.Kevin Nelson][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://opensource.com/users/pkn4645 -[b]: https://github.com/lujun9972 -[1]: https://opensource.org/licenses -[2]: https://opensource.org/licenses/category -[3]: http://www.pknlaw.com/2017/06/i-thought-that-was-open-source.html diff --git a/sources/talk/20190131 OOP Before OOP with Simula.md b/sources/talk/20190131 OOP Before OOP with Simula.md deleted file mode 100644 index cae9d9bd3a..0000000000 --- a/sources/talk/20190131 OOP Before OOP with Simula.md +++ /dev/null @@ -1,203 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (OOP Before OOP with Simula) -[#]: via: (https://twobithistory.org/2019/01/31/simula.html) -[#]: author: (Sinclair Target https://twobithistory.org) - -OOP Before OOP with Simula -====== - -Imagine that you are sitting on the grassy bank of a river. 
Ahead of you, the water flows past swiftly. The afternoon sun has put you in an idle, philosophical mood, and you begin to wonder whether the river in front of you really exists at all. Sure, large volumes of water are going by only a few feet away. But what is this thing that you are calling a “river”? After all, the water you see is here and then gone, to be replaced only by more and different water. It doesn’t seem like the word “river” refers to any fixed thing in front of you at all. - -In 2009, Rich Hickey, the creator of Clojure, gave [an excellent talk][1] about why this philosophical quandary poses a problem for the object-oriented programming paradigm. He argues that we think of an object in a computer program the same way we think of a river—we imagine that the object has a fixed identity, even though many or all of the object’s properties will change over time. Doing this is a mistake, because we have no way of distinguishing between an object instance in one state and the same object instance in another state. We have no explicit notion of time in our programs. We just breezily use the same name everywhere and hope that the object is in the state we expect it to be in when we reference it. Inevitably, we write bugs. - -The solution, Hickey concludes, is that we ought to model the world not as a collection of mutable objects but a collection of processes acting on immutable data. We should think of each object as a “river” of causally related states. In sum, you should use a functional language like Clojure. - -![][2] -The author, on a hike, pondering the ontological commitments -of object-oriented programming. - -Since Hickey gave his talk in 2009, interest in functional programming languages has grown, and functional programming idioms have found their way into the most popular object-oriented languages. Even so, most programmers continue to instantiate objects and mutate them in place every day. 
And they have been doing it for so long that it is hard to imagine that programming could ever look different. - -I wanted to write an article about Simula and imagined that it would mostly be about when and how object-oriented constructs we are familiar with today were added to the language. But I think the more interesting story is about how Simula was originally so unlike modern object-oriented programming languages. This shouldn’t be a surprise, because the object-oriented paradigm we know now did not spring into existence fully formed. There were two major versions of Simula: Simula I and Simula 67. Simula 67 brought the world classes, class hierarchies, and virtual methods. But Simula I was a first draft that experimented with other ideas about how data and procedures could be bundled together. The Simula I model is not a functional model like the one Hickey proposes, but it does focus on processes that unfold over time rather than objects with hidden state that interact with each other. Had Simula 67 stuck with more of Simula I’s ideas, the object-oriented paradigm we know today might have looked very different indeed—and that contingency should teach us to be wary of assuming that the current paradigm will dominate forever. - -### Simula 0 Through 67 - -Simula was created by two Norwegians, Kristen Nygaard and Ole-Johan Dahl. - -In the late 1950s, Nygaard was employed by the Norwegian Defense Research Establishment (NDRE), a research institute affiliated with the Norwegian military. While there, he developed Monte Carlo simulations used for nuclear reactor design and operations research. These simulations were at first done by hand and then eventually programmed and run on a Ferranti Mercury. Nygaard soon found that he wanted a higher-level way to describe these simulations to a computer. 
- -The kind of simulation that Nygaard commonly developed is known as a “discrete event model.” The simulation captures how a sequence of events change the state of a system over time—but the important property here is that the simulation can jump from one event to the next, since the events are discrete and nothing changes in the system between events. This kind of modeling, according to a paper that Nygaard and Dahl presented about Simula in 1966, was increasingly being used to analyze “nerve networks, communication systems, traffic flow, production systems, administrative systems, social systems, etc.” So Nygaard thought that other people might want a higher-level way to describe these simulations too. He began looking for someone that could help him implement what he called his “Simulation Language” or “Monte Carlo Compiler.” - -Dahl, who had also been employed by NDRE, where he had worked on language design, came aboard at this point to play Wozniak to Nygaard’s Jobs. Over the next year or so, Nygaard and Dahl worked to develop what has been called “Simula 0.” This early version of the language was going to be merely a modest extension to ALGOL 60, and the plan was to implement it as a preprocessor. The language was then much less abstract than what came later. The primary language constructs were “stations” and “customers.” These could be used to model certain discrete event networks; Nygaard and Dahl give an example simulating airport departures. But Nygaard and Dahl eventually came up with a more general language construct that could represent both “stations” and “customers” and also model a wider range of simulations. This was the first of two major generalizations that took Simula from being an application-specific ALGOL package to a general-purpose programming language. 
- -In Simula I, there were no “stations” or “customers,” but these could be recreated using “processes.” A process was a bundle of data attributes associated with a single action known as the process’ operating rule. You might think of a process as an object with only a single method, called something like `run()`. This analogy is imperfect though, because each process’ operating rule could be suspended or resumed at any time—the operating rules were a kind of coroutine. A Simula I program would model a system as a set of processes that conceptually all ran in parallel. Only one process could actually be “current” at any time, but once a process suspended itself the next queued process would automatically take over. As the simulation ran, behind the scenes, Simula would keep a timeline of “event notices” that tracked when each process should be resumed. In order to resume a suspended process, Simula needed to keep track of multiple call stacks. This meant that Simula could no longer be an ALGOL preprocessor, because ALGOL had only one call stack. Nygaard and Dahl were committed to writing their own compiler. - -In their paper introducing this system, Nygaard and Dahl illustrate its use by implementing a simulation of a factory with a limited number of machines that can serve orders. The process here is the order, which starts by looking for an available machine, suspends itself to wait for one if none are available, and then runs to completion once a free machine is found. There is a definition of the order process that is then used to instantiate several different order instances, but no methods are ever called on these instances. The main part of the program just creates the processes and sets them running. - -The first Simula I compiler was finished in 1965. The language grew popular at the Norwegian Computer Center, where Nygaard and Dahl had gone to work after leaving NDRE. 
Implementations of Simula I were made available to UNIVAC users and to Burroughs B5500 users. Nygaard and Dahl did a consulting deal with a Swedish company called ASEA that involved using Simula to run job shop simulations. But Nygaard and Dahl soon realized that Simula could be used to write programs that had nothing to do with simulation at all. - -Stein Krogdahl, a professor at the University of Oslo that has written about the history of Simula, claims that “the spark that really made the development of a new general-purpose language take off” was [a paper called “Record Handling”][3] by the British computer scientist C.A.R. Hoare. If you read Hoare’s paper now, this is easy to believe. I’m surprised that you don’t hear Hoare’s name more often when people talk about the history of object-oriented languages. Consider this excerpt from his paper: - -> The proposal envisages the existence inside the computer during the execution of the program, of an arbitrary number of records, each of which represents some object which is of past, present or future interest to the programmer. The program keeps dynamic control of the number of records in existence, and can create new records or destroy existing ones in accordance with the requirements of the task in hand. - -> Each record in the computer must belong to one of a limited number of disjoint record classes; the programmer may declare as many record classes as he requires, and he associates with each class an identifier to name it. A record class name may be thought of as a common generic term like “cow,” “table,” or “house” and the records which belong to these classes represent the individual cows, tables, and houses. - -Hoare does not mention subclasses in this particular paper, but Dahl credits him with introducing Nygaard and himself to the concept. Nygaard and Dahl had noticed that processes in Simula I often had common elements. Using a superclass to implement those common elements would be convenient. 
This also raised the possibility that the “process” idea itself could be implemented as a superclass, meaning that not every class had to be a process with a single operating rule. This then was the second great generalization that would make Simula 67 a truly general-purpose programming language. It was such a shift of focus that Nygaard and Dahl briefly considered changing the name of the language so that people would know it was not just for simulations. But “Simula” was too much of an established name for them to risk it. - -In 1967, Nygaard and Dahl signed a contract with Control Data to implement this new version of Simula, to be known as Simula 67. A conference was held in June, where people from Control Data, the University of Oslo, and the Norwegian Computing Center met with Nygaard and Dahl to establish a specification for this new language. This conference eventually led to a document called the [“Simula 67 Common Base Language,”][4] which defined the language going forward. - -Several different vendors would make Simula 67 compilers. The Association of Simula Users (ASU) was founded and began holding annual conferences. Simula 67 soon had users in more than 23 different countries. - -### 21st Century Simula - -Simula is remembered now because of its influence on the languages that have supplanted it. You would be hard-pressed to find anyone still using Simula to write application programs. But that doesn’t mean that Simula is an entirely dead language. You can still compile and run Simula programs on your computer today, thanks to [GNU cim][5]. - -The cim compiler implements the Simula standard as it was after a revision in 1986. But this is mostly the Simula 67 version of the language. You can write classes, subclass, and virtual methods just as you would have with Simula 67. So you could create a small object-oriented program that looks a lot like something you could easily write in Python or Ruby: - -``` -! dogs.sim ; -Begin - Class Dog; - ! 
The cim compiler requires virtual procedures to be fully specified ; - Virtual: Procedure bark Is Procedure bark;; - Begin - Procedure bark; - Begin - OutText("Woof!"); - OutImage; ! Outputs a newline ; - End; - End; - - Dog Class Chihuahua; ! Chihuahua is "prefixed" by Dog ; - Begin - Procedure bark; - Begin - OutText("Yap yap yap yap yap yap"); - OutImage; - End; - End; - - Ref (Dog) d; - d :- new Chihuahua; ! :- is the reference assignment operator ; - d.bark; -End; -``` - -You would compile and run it as follows: - -``` -$ cim dogs.sim -Compiling dogs.sim: -gcc -g -O2 -c dogs.c -gcc -g -O2 -o dogs dogs.o -L/usr/local/lib -lcim -$ ./dogs -Yap yap yap yap yap yap -``` - -(You might notice that cim compiles Simula to C, then hands off to a C compiler.) - -This was what object-oriented programming looked like in 1967, and I hope you agree that aside from syntactic differences this is also what object-oriented programming looks like in 2019. So you can see why Simula is considered a historically important language. - -But I’m more interested in showing you the process model that was central to Simula I. That process model is still available in Simula 67, but only when you use the `Process` class and a special `Simulation` block. - -In order to show you how processes work, I’ve decided to simulate the following scenario. Imagine that there is a village full of villagers next to a river. The river has lots of fish, but between them the villagers only have one fishing rod. The villagers, who have voracious appetites, get hungry every 60 minutes or so. When they get hungry, they have to use the fishing rod to catch a fish. If a villager cannot use the fishing rod because another villager is waiting for it, then the villager queues up to use the fishing rod. If a villager has to wait more than five minutes to catch a fish, then the villager loses health. If a villager loses too much health, then that villager has starved to death. 
- -This is a somewhat strange example and I’m not sure why this is what first came to mind. But there you go. We will represent our villagers as Simula processes and see what happens over a day’s worth of simulated time in a village with four villagers. - -The full program is [available here as a Gist][6]. - -The last lines of my output look like the following. Here we are seeing what happens in the last few hours of the day: - -``` -1299.45: John is hungry and requests the fishing rod. -1299.45: John is now fishing. -1311.39: John has caught a fish. -1328.96: Betty is hungry and requests the fishing rod. -1328.96: Betty is now fishing. -1331.25: Jane is hungry and requests the fishing rod. -1340.44: Betty has caught a fish. -1340.44: Jane went hungry waiting for the rod. -1340.44: Jane starved to death waiting for the rod. -1369.21: John is hungry and requests the fishing rod. -1369.21: John is now fishing. -1379.33: John has caught a fish. -1409.59: Betty is hungry and requests the fishing rod. -1409.59: Betty is now fishing. -1419.98: Betty has caught a fish. -1427.53: John is hungry and requests the fishing rod. -1427.53: John is now fishing. -1437.52: John has caught a fish. -``` - -Poor Jane starved to death. But she lasted longer than Sam, who didn’t even make it to 7am. Betty and John sure have it good now that only two of them need the fishing rod. - -What I want you to see here is that the main, top-level part of the program does nothing but create the four villager processes and get them going. The processes manipulate the fishing rod object in the same way that we would manipulate an object today. But the main part of the program does not call any methods or modify any properties on the processes. The processes have internal state, but this internal state only gets modified by the process itself. 
- -There are still fields that get mutated in place here, so this style of programming does not directly address the problems that pure functional programming would solve. But as Krogdahl observes, “this mechanism invites the programmer of a simulation to model the underlying system as a set of processes, each describing some natural sequence of events in that system.” Rather than thinking primarily in terms of nouns or actors—objects that do things to other objects—here we are thinking of ongoing processes. The benefit is that we can hand overall control of our program off to Simula’s event notice system, which Krogdahl calls a “time manager.” So even though we are still mutating processes in place, no process makes any assumptions about the state of another process. Each process interacts with other processes only indirectly. - -It’s not obvious how this pattern could be used to build, say, a compiler or an HTTP server. (On the other hand, if you’ve ever programmed games in the Unity game engine, this should look familiar.) I also admit that even though we have a “time manager” now, this may not have been exactly what Hickey meant when he said that we need an explicit notion of time in our programs. (I think he’d want something like the superscript notation [that Ada Lovelace used][7] to distinguish between the different values a variable assumes through time.) All the same, I think it’s really interesting that right there at the beginning of object-oriented programming we can find a style of programming that is not at all like the object-oriented programming we are used to. We might take it for granted that object-oriented programming simply works one way—that a program is just a long list of the things that certain objects do to other objects in the exact order that they do them. Simula I’s process system shows that there are other approaches. 
Functional languages are probably a better thought-out alternative, but Simula I reminds us that the very notion of alternatives to modern object-oriented programming should come as no surprise. - -If you enjoyed this post, more like it come out every four weeks! Follow [@TwoBitHistory][8] on Twitter or subscribe to the [RSS feed][9] to make sure you know when a new post is out. - -Previously on TwoBitHistory… - -> Hey everyone! I sadly haven't had time to do any new writing but I've just put up an updated version of my history of RSS. This version incorporates interviews I've since done with some of the key people behind RSS like Ramanathan Guha and Dan Libby. -> -> — TwoBitHistory (@TwoBitHistory) [December 18, 2018][10] - - - --------------------------------------------------------------------------------- - -1. Jan Rune Holmevik, “The History of Simula,” accessed January 31, 2019, http://campus.hesge.ch/daehne/2004-2005/langages/simula.htm. ↩ - -2. Ole-Johan Dahl and Kristen Nygaard, “SIMULA—An ALGOL-Based Simulation Language,” Communications of the ACM 9, no. 9 (September 1966): 671, accessed January 31, 2019, http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.95.384&rep=rep1&type=pdf. ↩ - -3. Stein Krogdahl, “The Birth of Simula,” 2, accessed January 31, 2019, http://heim.ifi.uio.no/~steinkr/papers/HiNC1-webversion-simula.pdf. ↩ - -4. ibid. ↩ - -5. Ole-Johan Dahl and Kristen Nygaard, “The Development of the Simula Languages,” ACM SIGPLAN Notices 13, no. 8 (August 1978): 248, accessed January 31, 2019, https://hannemyr.com/cache/knojd_acm78.pdf. ↩ - -6. Dahl and Nygaard (1966), 676. ↩ - -7. Dahl and Nygaard (1978), 257. ↩ - -8. Krogdahl, 3. ↩ - -9. Ole-Johan Dahl, “The Birth of Object-Orientation: The Simula Languages,” 3, accessed January 31, 2019, http://www.olejohandahl.info/old/birth-of-oo.pdf. ↩ - -10. Dahl and Nygaard (1978), 265. ↩ - -11. Holmevik. ↩ - -12. Krogdahl, 4. 
↩ - - --------------------------------------------------------------------------------- - -via: https://twobithistory.org/2019/01/31/simula.html - -作者:[Sinclair Target][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://twobithistory.org -[b]: https://github.com/lujun9972 -[1]: https://www.infoq.com/presentations/Are-We-There-Yet-Rich-Hickey -[2]: /images/river.jpg -[3]: https://archive.computerhistory.org/resources/text/algol/ACM_Algol_bulletin/1061032/p39-hoare.pdf -[4]: http://web.eah-jena.de/~kleine/history/languages/Simula-CommonBaseLanguage.pdf -[5]: https://www.gnu.org/software/cim/ -[6]: https://gist.github.com/sinclairtarget/6364cd521010d28ee24dd41ab3d61a96 -[7]: https://twobithistory.org/2018/08/18/ada-lovelace-note-g.html -[8]: https://twitter.com/TwoBitHistory -[9]: https://twobithistory.org/feed.xml -[10]: https://twitter.com/TwoBitHistory/status/1075075139543449600?ref_src=twsrc%5Etfw diff --git a/sources/talk/20190204 Config management is dead- Long live Config Management Camp.md b/sources/talk/20190204 Config management is dead- Long live Config Management Camp.md deleted file mode 100644 index 679ac9033b..0000000000 --- a/sources/talk/20190204 Config management is dead- Long live Config Management Camp.md +++ /dev/null @@ -1,118 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Config management is dead: Long live Config Management Camp) -[#]: via: (https://opensource.com/article/19/2/configuration-management-camp) -[#]: author: (Matthew Broberg https://opensource.com/users/mbbroberg) - -Config management is dead: Long live Config Management Camp -====== - -CfgMgmtCamp '19 co-organizers share their take on ops, DevOps, observability, and the rise of YoloOps and YAML engineers. 
- -![](https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/cicd_continuous_delivery_deployment_gears.png?itok=kVlhiEkc) - -Everyone goes to [FOSDEM][1] in Brussels to learn from its massive collection of talk tracks, colloquially known as developer rooms, that run the gauntlet of curiosities, covering programming languages like Rust, Go, and Python, to special topics ranging from community, to legal, to privacy. After two days of nonstop activity, many FOSDEM attendees move on to Ghent, Belgium, to join hundreds for Configuration Management Camp ([CfgMgmtCamp][2]). - -Kris Buytaert and Toshaan Bharvani run the popular post-FOSDEM show centered around infrastructure management, featuring hackerspaces, training, workshops, and keynotes. It's a deeply technical exploration of the who, what, and how of building resilient infrastructure. It started in 2013 as a PuppetCamp but expanded to include more communities and tools in 2014. - -I spoke with Kris and Toshaan, who both have a healthy sense of humor, about CfgMgmtCamp's past, present, and future. Our interview has been edited for length and clarity. - -**Matthew: Your opening[keynote][3] is called "CfgMgmtCamp is dead." Is config management dead? Will it live on, or will something take its place?** - -**Kris:** We've noticed people are jumping on the hype of containers, trying to solve the same problems in a different way. But they are still managing config, only in different ways and with other tools. Over the past couple of years, we've evolved from a conference with a focus on infrastructure-as-code tooling, such as Puppet, Chef, CFEngine, Ansible, Juju, and Salt, to a more open source infrastructure automation conference in general. So, config management is definitely not dead. Infrastructure-as-code is also not dead, but it all is evolving. 
- -**Toshaan:** We see people changing tools, jumping on hype, and communities changing; however, the basic ideas and concepts remain the same. - -**Matthew: It's great to see[observability as the topic][4] of one of your keynotes. Why should those who care about configuration management also care about monitoring and observability?** - -**Kris:** While the name of the conference hasn't changed, the tools have evolved and we have expanded our horizon. Ten years ago, [Devopsdays][5] was just #devopsdays, but it evolved to focus on culture—the C of [CAMS][6] in the DevOps' core principles of Culture, Automation, Measurement, and Sharing. - -![](https://opensource.com/sites/default/files/uploads/cams.png) - -[Monitorama][7] filled the gap on monitoring and metrics (tackling the M in CAMS). Config Management Camp is about open source Automation, the A. Since they are all open source conferences, they fulfill the Sharing part, completing the CAMS concept. - -Observability sits on the line between Automation and Measurement. To go one step further, in some of my talks about open source monitoring, I describe the evolution of monitoring tools from #monitoringsucks to #monitoringlove; for lots of people (including me), the love for monitoring returned because we tied it to automation. We started to provision a service and automatically adapted the monitoring of that service to its state. Gone were the days where the monitoring tool was out of sync with reality. - -Looking at it from the other side, when you have an infrastructure or application so complex that you need observability in it, you'd better not be deploying manually; you will need some form of automation at that level of complexity. So, observability and infrastructure automation are tied together. - -**Toshaan:** Yes, while in the past we focused on configuration management, we will be looking to expand that into all types of infrastructure management. 
Last year, we played with this idea, and we were able to have a lot of cross-tool presentations. This year, we've taken this a step further by having more differentiated content. - -**Matthew: Some of my virtualization and Linux admin friends push back, saying observability is a developer's responsibility. How would you respond without just saying "DevOps?"** - -**Kris:** What you describe is what I call "Ooops Devs." This is a trend where the people who run the platform don't really care what they run; as long as port 80 is listening and the node pings, they are happy. It's equally bad as "Dev Ooops." "Ooops Devs" is where the devs rant about the ops folks because they are slow, not agile, and not responsive. But, to me, your job as an ops person or as a Linux admin is to keep a service running, and the only way to do that is to take on that task is as a team—with your colleagues who have different roles and insights, people who write code, people who design, etc. It is a shared responsibility. And hiding behind "that is someone else's responsibility," doesn't smell like collaboration going on. - -**Toshaan:** Even in the dark ages of silos, I believe a true sysadmin should have cared about observability, monitoring, and automation. I believe that the DevOps movement has made this much more widespread, and that it has become easier to get this information and expose it. On the other hand, I believe that pure operators or sysadmins have learned to be team players (or, they may have died out). I like the analogy of an army unit composed of different specialty soldiers who work together to complete a mission; we have engineers who work to deliver products or services. - -**Matthew: In a[Devopsdays Zurich talk][8], Kris offered an opinion that Americans build software for acquisition and Europeans build for resilience. 
In that light, what are the best skills for someone who wants to build meaningful infrastructure?** - -**Toshaan:** I believe still some people don't understand the complexity of code sprawl, and they believe that some new hype will solve this magically. - -**Kris:** This year, we invited [Steve Traugott][9], co-author of the 1998 USENIX paper "[Bootstrapping an Infrastructure][10]" that helped kickstart our community. So many people never read [Infrastructures.org][11], never experienced the pain of building images and image sprawl, and don't understand the evolution we went through that led us to build things the way we build them from source code. - -People should study topics such as idempotence, resilience, reproducibility, and surviving the tenth floor test. (As explained in "Bootstrapping an Infrastructure": "The test we used when designing infrastructures was 'Can I grab a random machine and throw it out the tenth-floor window without adversely impacting users for more than 10 minutes?' If the answer to this was 'yes,' then we knew we were doing things right.") But only after they understand the service they are building—the service is the absolute priority—can they begin working on things like: how can we run this, how can we make sure it keeps running, how can it fail and how can we prevent that, and if it disappears, how can we spin it up again fast, unnoticed by the end user. - -**Toshaan:** 100% uptime. - -**Kris:** The challenge we have is that lots of people don't have that experience yet. We've seen the rise of [YoloOps][12]—just spin it up once, fire, and forget—which results in security problems, stability problems, data loss, etc., and they often grasp onto the solutions in YoloOps, the easy way to do something quickly and move on. But understanding how things will eventually fail takes time, it's called experience. 
- -**Toshaan:** Well, when I was a student and manned the CentOS stand at FOSDEM, I remember a guy coming up to the stand and complaining that he couldn't do consulting because of the "fire once and forgot" policy of CentOS, and that it just worked too well. I like to call this ZombieOps, but YoloOps works also. - -**Matthew: I see you're leading the second year of YamlCamp as well. Why does a markup language need its own camp?** - -**Kris:** [YamlCamp][13] is a parody, it's a joke. Last year, Bob Walker ([@rjw1][14]) gave a talk titled "Are we all YAML engineers now?" that led to more jokes. We've had a discussion for years about rebranding CfgMgmtCamp; the problem is that people know our name, we have a large enough audience to keep going, and changing the name would mean effort spent on logos, website, DNS, etc. We won't change the name, but we joked that we could rebrand to YamlCamp, because for some weird reason, a lot of the talks are about YAML. :) - -**Matthew: Do you think systems engineers should list YAML as a skill or a language on their CV? Should companies be hiring YAML engineers, or do you have "Long live all YAML engineers" on the website in jest?** - -**Toshaan:** Well, the real question is whether people are willing to call themselves YAML engineers proudly, because we already have enough DevOps engineers. - -**Matthew: What FOSS software helps you manage the event?** - -**Toshaan:** I re-did the website in Hugo CMS because we were spending too much time maintaining the website manually. I chose Hugo, because I was learning Golang, and because it has been successfully used for other conferences and my own website. I also wanted a static website and iCalendar output, so we could use calendar tooling such as Giggity to have a good scheduling tool. - -The website now builds quite nicely, and while I still have some ideas on improvements, maintenance is now much easier. - -For the call for proposals (CFP), we now use [OpenCFP][15]. 
We want to optimize the submission, voting, selection, and extraction to be as automated as possible, while being easy and comfortable for potential speakers, reviewers, and ourselves to use. OpenCFP seems to be the tool that works; while we still have some feature requirements, I believe that, once we have some time to contribute back to OpenCFP, we'll have a fully functional and easy tool to run CFPs with. - -Last, we switched from EventBrite to Pretix because I wanted to be GDPR compliant and have the ability to run our questions, vouchers, and extra features. Pretix allows us to control registration of attendees, speakers, sponsors, and organizers and have a single overview of all the people coming to the event. - -### Wrapping up - -The beauty of Configuration Management Camp to me is that it continues to evolve with its audience. Configuration management is certainly at the heart of the work, but it's in service to resilient infrastructure. Keep your eyes open for the talk recordings to learn from the [line up of incredible speakers][16], and thank you to the team for running this (free) show! - -You can follow Kris [@KrisBuytaert][17] and Toshaan [@toshywoshy][18]. You can also see Kris' past articles [on his blog][19]. 
- --------------------------------------------------------------------------------- - -via: https://opensource.com/article/19/2/configuration-management-camp - -作者:[Matthew Broberg][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://opensource.com/users/mbbroberg -[b]: https://github.com/lujun9972 -[1]: https://fosdem.org/2019/ -[2]: https://cfgmgmtcamp.eu/ -[3]: https://cfgmgmtcamp.eu/schedule/monday/intro00/ -[4]: https://cfgmgmtcamp.eu/schedule/monday/keynote0/ -[5]: https://www.devopsdays.org/ -[6]: http://devopsdictionary.com/wiki/CAMS -[7]: http://monitorama.com/ -[8]: https://vimeo.com/272519813 -[9]: https://cfgmgmtcamp.eu/schedule/tuesday/keynote1/ -[10]: http://www.infrastructures.org/papers/bootstrap/bootstrap.html -[11]: http://www.infrastructures.org/ -[12]: https://gist.githubusercontent.com/mariozig/5025613/raw/yolo -[13]: https://twitter.com/yamlcamp -[14]: https://twitter.com/rjw1 -[15]: https://github.com/opencfp/opencfp -[16]: https://cfgmgmtcamp.eu/speaker/ -[17]: https://twitter.com/KrisBuytaert -[18]: https://twitter.com/toshywoshy -[19]: https://krisbuytaert.be/index.shtml diff --git a/sources/talk/20190206 4 steps to becoming an awesome agile developer.md b/sources/talk/20190206 4 steps to becoming an awesome agile developer.md deleted file mode 100644 index bad4025aef..0000000000 --- a/sources/talk/20190206 4 steps to becoming an awesome agile developer.md +++ /dev/null @@ -1,82 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (4 steps to becoming an awesome agile developer) -[#]: via: (https://opensource.com/article/19/2/steps-agile-developer) -[#]: author: (Daniel Oh https://opensource.com/users/daniel-oh) - -4 steps to becoming an awesome agile developer -====== -There's no magical way to do it, but these 
practices will put you well on your way to embracing agile in application development, testing, and debugging. -![](https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/metrics_lead-steps-measure.png?itok=DG7rFZPk) - -Enterprises are rushing into their DevOps journey through [agile][1] software development with cloud-native technologies such as [Linux containers][2], [Kubernetes][3], and [serverless][4]. Continuous integration helps enterprise developers reduce bugs, unexpected errors, and improve the quality of their code deployed in production. - -However, this doesn't mean all developers in DevOps automatically embrace agile for their daily work in application development, testing, and debugging. There is no magical way to do it, but the following four practical steps and best practices will put you well on your way to becoming an awesome agile developer. - -### Start with design thinking agile practices - -There are many opportunities to learn about using agile software development practices in your DevOps initiatives. Agile practices inspire people with new ideas and experiences for improving their daily work in application development with team collaboration. More importantly, those practices will help you discover the answers to questions such as: Why am I doing this? What kind of problems am I trying to solve? How do I measure the outcomes? - -A [domain-driven design][5] approach will help you start discovery sooner and easier. For example, the [Start At The End][6] practice helps you redesign your application and explore potential business outcomes—such as, what would happen if your application fails in production? You might also be interested in [Event Storming][7] for interactive and rapid discovery or [Impact Mapping][8] for graphical and strategic design as part of domain-driven design practices. 
- -### Use a predictive approach first - -In agile software development projects, enterprise developers are mainly focused on adapting to rapidly changing app development environments such as reactive runtimes, cloud-native frameworks, Linux container packaging, and the Kubernetes platform. They believe this is the best way to become an agile developer in their organization. However, this type of adaptive approach typically makes it harder for developers to understand and report what they will do in the next sprint. Developers might know the ultimate goal and, at best, the app features for a release about four months from the current sprint. - -In contrast, the predictive approach places more emphasis on analyzing known risks and planning future sprints in detail. For example, predictive developers can accurately report the functions and tasks planned for the entire development process. But it's not a magical way to make your agile projects succeed all the time because the predictive team depends totally on effective early-stage analysis. If the analysis does not work very well, it may be difficult for the project to change direction once it gets started. - -To mitigate this risk, I recommend that senior agile developers increase the predictive capabilities with a plan-driven method, and junior agile developers start with the adaptive methods for value-driven development. - -### Continuously improve code quality - -Don't hesitate to engage in [continuous integration][9] (CI) practices for improving your application before deploying code into production. To adopt modern application frameworks, such as cloud-native architecture, Linux container packaging, and hybrid cloud workloads, you have to learn about automated tools to address complex CI procedures. - -[Jenkins][10] is the standard CI tool for many organizations; it allows developers to build and test applications in many projects in an automated fashion. 
Its most important function is detecting unexpected errors during CI to prevent them from happening in production. This should increase business outcomes through better customer satisfaction. - -Automated CI enables agile developers to not only improve the quality of their code but their also application development agility through learning and using open source tools and patterns such as [behavior-driven development][11], [test-driven development][12], [automated unit testing][13], [pair programming][14], [code review][15], and [design pattern][16]. - -### Never stop exploring communities - -Never settle, even if you already have a great reputation as an agile developer. You have to continuously take on bigger challenges to make great software in an agile way. - -By participating in the very active and growing open source community, you will not only improve your skills as an agile developer, but your actions can also inspire other developers who want to learn agile practices. - -How do you get involved in specific communities? It depends on your interests and what you want to learn. It might mean presenting specific topics at conferences or local meetups, writing technical blog posts, publishing practical guidebooks, committing code, or creating pull requests to open source projects' Git repositories. It's worth exploring open source communities for agile software development, as I've found it is a great way to share your expertise, knowledge, and practices with other brilliant developers and, along the way, help each other. - -### Get started - -These practical steps can give you a shorter path to becoming an awesome agile developer. Then you can lead junior developers in your team and organization to become more flexible, valuable, and predictive using agile principles. 
- - --------------------------------------------------------------------------------- - -via: https://opensource.com/article/19/2/steps-agile-developer - -作者:[Daniel Oh][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://opensource.com/users/daniel-oh -[b]: https://github.com/lujun9972 -[1]: https://opensource.com/article/18/10/what-agile -[2]: https://opensource.com/resources/what-are-linux-containers -[3]: https://opensource.com/resources/what-is-kubernetes -[4]: https://opensource.com/article/18/11/open-source-serverless-platforms -[5]: https://en.wikipedia.org/wiki/Domain-driven_design -[6]: https://openpracticelibrary.com/practice/start-at-the-end/ -[7]: https://openpracticelibrary.com/practice/event-storming/ -[8]: https://openpracticelibrary.com/practice/impact-mapping/ -[9]: https://en.wikipedia.org/wiki/Continuous_integration -[10]: https://jenkins.io/ -[11]: https://en.wikipedia.org/wiki/Behavior-driven_development -[12]: https://en.wikipedia.org/wiki/Test-driven_development -[13]: https://en.wikipedia.org/wiki/Unit_testing -[14]: https://en.wikipedia.org/wiki/Pair_programming -[15]: https://en.wikipedia.org/wiki/Code_review -[16]: https://en.wikipedia.org/wiki/Design_pattern diff --git a/sources/talk/20190206 What blockchain and open source communities have in common.md b/sources/talk/20190206 What blockchain and open source communities have in common.md deleted file mode 100644 index bc4f9464d0..0000000000 --- a/sources/talk/20190206 What blockchain and open source communities have in common.md +++ /dev/null @@ -1,64 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (What blockchain and open source communities have in common) -[#]: via: (https://opensource.com/article/19/2/blockchain-open-source-communities) -[#]: author: 
(Gordon Haff https://opensource.com/users/ghaff) - -What blockchain and open source communities have in common -====== -Blockchain initiatives can look to open source governance for lessons on establishing trust. -![](https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/diversity_flowers_chain.jpg?itok=ns01UPOp) - -One of the characteristics of blockchains that gets a lot of attention is how they enable distributed trust. The topic of trust is a surprisingly complicated one. In fact, there's now an [entire book][1] devoted to the topic by Kevin Werbach. - -But here's what it means in a nutshell. Organizations that wish to work together, but do not fully trust one another, can establish a permissioned blockchain and invite business partners to record their transactions on a shared distributed ledger. Permissioned blockchains can trace assets when transactions are added to the blockchain. A permissioned blockchain implies a degree of trust (again, trust is complicated) among members of a consortium, but no single entity controls the storage and validation of transactions. - -The basic model is that a group of financial institutions or participants in a logistics system can jointly set up a permissioned blockchain that will validate and immutably record transactions. There's no dependence on a single entity, whether it's one of the direct participants or a third-party intermediary who set up the blockchain, to safeguard the integrity of the system. The blockchain itself does so through a variety of cryptographic mechanisms. - -Here's the rub though. It requires that competitors work together cooperatively—a relationship often called [coopetition][2]. The term dates back to the early 20th century, but it grew into widespread use when former Novell CEO Ray Noorda started using the term to describe the company's business strategy in the 1990s. 
Novell was then planning to get into the internet portal business, which required it to seek partnerships with some of the search engine providers and other companies it would also be competing against. In 1996, coopetition became the subject of a bestselling [book][3]. - -Coopetition can be especially difficult when a blockchain network initiative appears to be driven by a dominant company. And it's hard for the dominant company not to exert outsize influence over the initiative, just as a natural consequence of how big it is. For example, the IBM-Maersk joint venture has [struggled to sign up rival shipping companies][4], in part because Maersk is the world's largest carrier by capacity, a position that makes rivals wary. - -We see this same dynamic in open source communities. The original creators of a project need to not only let go; they need to put governance structures in place that give competing companies confidence that there's a level playing field. - -For example, Sarah Novotny, now head of open source strategy at Google Cloud Platform, [told me in a 2017 interview][5] about the [Kubernetes][6] project that it isn't always easy to give up control, even when people buy into doing what is best for a project. - -> Google turned Kubernetes over to the Cloud Native Computing Foundation (CNCF), which sits under the Linux Foundation umbrella. As [CNCF executive director Dan Kohn puts it][7]: "One of the things they realized very early on is that a project with a neutral home is always going to achieve a higher level of collaboration. They really wanted to find a home for it where a number of different companies could participate." -> -> Defaulting to public may not be either natural or comfortable. "Early on, my first six, eight, or 12 weeks at Google, I think half my electrons in email were spent on: 'Why is this discussion not happening on a public mailing list? Is there a reason that this is specific to GKE [Google Container Engine]? 
No, there's not a reason,'" said Novotny. - -To be sure, some grumble that open source foundations have become too common and that many are too dominated by paying corporate members. Simon Phipps, currently the president of the Open Source Initiative, gave a talk at OSCON way back in 2015 titled ["Enough Foundations Already!"][8] in which he argued that "before we start another open source foundation, let's agree that what we need protected is software freedom and not corporate politics." - -Nonetheless, while not appropriate for every project, foundations with business, legal, and technical governance are increasingly the model for open source projects that require extensive cooperation among competing companies. A [2017 analysis of GitHub data by the Linux Foundation][9] found a number of different governance models in use by the highest-velocity open source projects. Unsurprisingly, quite a few remained under the control of the company that created or acquired them. However, about a third were under the auspices of a foundation. - -Is there a lesson here for blockchain? Quite possibly. Open source projects can be sponsored by a company while still putting systems and governance in place that are welcoming to outside contributors. However, there's a great deal of history to suggest that doing so is hard because it's hard not to exert control and leverage when you can. Furthermore, even if you make a successful case for being truly open to equal participation to outsiders today, it will be hard to allay suspicions that you might not be as welcoming tomorrow. - -To the degree that we can equate blockchain consortiums with open source communities, this suggests that business blockchain initiatives should look to open source governance for lessons. Dominant players in the ecosystem need to forgo control, and they need to have conversations with partners and potential partners about what types of structures would make participating easier. 
- -Many blockchain infrastructure software projects are already under foundations such as Hyperledger. But perhaps some specific production deployments of blockchain aimed at specific industries and ecosystems will benefit from formal governance structures as well. - --------------------------------------------------------------------------------- - -via: https://opensource.com/article/19/2/blockchain-open-source-communities - -作者:[Gordon Haff][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://opensource.com/users/ghaff -[b]: https://github.com/lujun9972 -[1]: https://mitpress.mit.edu/books/blockchain-and-new-architecture-trust -[2]: https://en.wikipedia.org/wiki/Coopetition -[3]: https://en.wikipedia.org/wiki/Co-opetition_(book) -[4]: https://www.theregister.co.uk/2018/10/30/ibm_struggles_to_sign_up_shipping_carriers_to_blockchain_supply_chain_platform_reports/ -[5]: https://opensource.com/article/17/4/podcast-kubernetes-sarah-novotny -[6]: https://kubernetes.io/ -[7]: http://bitmason.blogspot.com/2017/02/podcast-cloud-native-computing.html -[8]: https://www.oreilly.com/ideas/enough-foundations-already -[9]: https://www.linuxfoundation.org/blog/2017/08/successful-open-source-projects-common/ diff --git a/sources/talk/20190214 Top 5 podcasts for Linux news and tips.md b/sources/talk/20190214 Top 5 podcasts for Linux news and tips.md deleted file mode 100644 index fb827bb39b..0000000000 --- a/sources/talk/20190214 Top 5 podcasts for Linux news and tips.md +++ /dev/null @@ -1,80 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Top 5 podcasts for Linux news and tips) -[#]: via: (https://opensource.com/article/19/2/top-linux-podcasts) -[#]: author: (Stephen Bancroft https://opensource.com/users/stevereaver) - -Top 5 podcasts for Linux 
news and tips -====== -A tried and tested podcast listener, shares his favorite Linux podcasts over the years, plus a couple of bonus picks. -![](https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/linux-penguin-penguins.png?itok=5hlVDue7) - -Like many Linux enthusiasts, I listen to a lot of podcasts. I find my daily commute is the best time to get some time to myself and catch up on the latest tech news. Over the years, I have subscribed and unsubscribed to more show feeds than I care to think about and have distilled them down to the best of the best. - -Here are my top five Linux podcasts I think you should be listening to in 2019, plus a couple of bonus picks. - - 5. [**Late Night Linux**][1]—This podcast, hosted by Joe, [Félim][2], [Graham][3], and [Will][4] from the UK, is rough, ready, and pulls no punches. [Joe Ressington][5] is always ready to tell it how it is, and Félim is always quick with his opinions. It's presented in a casual conversation format—but not one to have one with the kids around, especially with subjects they are all passionate about! - - - 4. [**Ask Noah Show**][6]—This show was forked from the Linux Action Show after it ended. Hosted by [Noah Chelliah][7], it's presented in a radio talkback style and takes live calls from listeners—it's syndicated from a local radio station in Grand Forks, North Dakota. The podcast isn't purely about Linux, but Noah takes on technical challenges and solves them with Linux and answers listeners' questions about how to achieve good technical solutions using Linux. - - - 3. [**The Ubuntu Podcast**][8]—If you want the latest about Ubuntu, you can't go past this show. In another podcast with a UK twist, hosts [Alan Pope][9] (Popey), [Mark Johnson][10], and [Martin Wimpress][11] (Wimpy) present a funny and insightful view of the open source community with news directly from Ubuntu. - - - 2. [**Linux Action News**][12]—The title says it all: it's a news show for Linux. 
This show was spawned from the popular Linux Action Show and is broadcast by the [Jupiter Broadcasting Network][13], which has many other tech-related podcasts. Hosts Chris Fisher and [Joe Ressington][5] present the show in a more formal "evening news" style, which runs around 30 minutes long. If you want to get a quick weekly update on Linux and Linux-related news, this is the show for you. - - - 1. [**Linux Unplugged**][14]—Finally, coming in at the number one spot is the granddaddy of them all, Linux Unplugged. This show gets to the core of what being in the Linux community is all about. Presented as a casual panel-style discussion by [Chris Fisher][15] and [Wes Payne][16], the podcast includes an interactive voice chatroom where listeners can connect and be heard live on the show as it broadcasts. - - - -Well, there you have it, my current shortlist of Linux podcasts. It's likely to change in the near future, but for now, I am enjoying every minute these guys put together. - -### Bonus podcasts - -Here are two bonus podcasts you might want to check out. - -**[Choose Linux][17]** is a brand-new podcast that is tantalizing because of its hosts: Joe Ressington of Linux Action News, who is a long-time Linux veteran, and [Jason Evangelho][18], a Forbes writer who recently shot to fame in the open source community with his articles showcasing his introduction to Linux and open source. Living vicariously through Jason's introduction to Linux has been and will continue to be fun. - -[**Command Line Heroes**][19] is a podcast produced by Red Hat. It has a very high production standard and has a slightly different format to the shows I have previously mentioned, anchored by a single presenter, developer, and [CodeNewbie][20] founder [Saron Yitbarek][21], who presents the latest innovations in open source. Now in its second season and released fortnightly, I highly recommend that you start from the first episode of this podcast. 
It starts with a great intro to the O/S wars of the '90s and sets the foundations for the start of Linux. - -Do you have a favorite Linux podcast that isn't on this list? Please share it in the comments. - --------------------------------------------------------------------------------- - -via: https://opensource.com/article/19/2/top-linux-podcasts - -作者:[Stephen Bancroft][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://opensource.com/users/stevereaver -[b]: https://github.com/lujun9972 -[1]: https://latenightlinux.com/ -[2]: https://twitter.com/felimwhiteley -[3]: https://twitter.com/degville -[4]: https://twitter.com/8none1 -[5]: https://twitter.com/JoeRessington -[6]: http://www.asknoahshow.com/ -[7]: https://twitter.com/kernellinux?lang=en -[8]: http://ubuntupodcast.org/ -[9]: https://twitter.com/popey -[10]: https://twitter.com/marxjohnson -[11]: https://twitter.com/m_wimpress -[12]: https://linuxactionnews.com/ -[13]: https://www.jupiterbroadcasting.com/ -[14]: https://linuxunplugged.com/ -[15]: https://twitter.com/ChrisLAS -[16]: https://twitter.com/wespayne -[17]: https://chooselinux.show -[18]: https://twitter.com/killyourfm -[19]: https://www.redhat.com/en/command-line-heroes -[20]: https://www.codenewbie.org/ -[21]: https://twitter.com/saronyitbarek diff --git a/sources/talk/20190219 How Linux testing has changed and what matters today.md b/sources/talk/20190219 How Linux testing has changed and what matters today.md deleted file mode 100644 index ad26d6dbec..0000000000 --- a/sources/talk/20190219 How Linux testing has changed and what matters today.md +++ /dev/null @@ -1,99 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (How Linux testing has changed and what matters today) -[#]: via: 
(https://opensource.com/article/19/2/phoronix-michael-larabel) -[#]: author: (Don Watkins https://opensource.com/users/don-watkins) - -How Linux testing has changed and what matters today -====== -Michael Larabel, the founder of Phoronix, shares his insights on the evolution of Linux and open hardware. -![](https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/mistake_bug_fix_find_error.png?itok=PZaz3dga) - -If you've ever wondered how your Linux computer stacks up against other Linux, Windows, and MacOS machines or searched for reviews of Linux-compatible hardware, you're probably familiar with [Phoronix][1]. Along with its website, which attracts more than 250 million visitors a year to its Linux reviews and news, the company also offers the [Phoronix Test Suite][2], an open source hardware benchmarking tool, and [OpenBenchmarking.org][3], where test result data is stored. - -According to [Michael Larabel][4], who started Phoronix in 2004, the site "is frequently cited as being the leading source for those interested in computer hardware and Linux. It offers insights regarding the development of the Linux kernel, product reviews, interviews, and news regarding free and open source software." - -I recently had the opportunity to interview Michael about Phoronix and his work. - -The questions and answers have been edited for length and clarity. - -**Don Watkins:** What inspired you to start Phoronix? - -**Michael Larabel:** When I started [Phoronix.com][5] in June 2004, it was still challenging to get a mouse or other USB peripherals working on the popular distributions of the time, like Mandrake, Yoper, MEPIS, and others. So, I set out to work on reviewing different hardware components and their compatibility with Linux. Over time, that shifted more from "does the basic device work?" to how well they perform and what features are supported or unsupported under Linux. 
- -It's been interesting to see the evolution and the importance of Linux on hardware rise. Linux was very common to LAMP/web servers, but Linux has also become synonymous with high-performance computing (HPC), Android smartphones, cloud software, autonomous vehicles, edge computing, digital signage, and related areas. While Linux hasn't quite dominated the desktop, it's doing great practically everywhere else. - -I also developed the Phoronix Test Suite, with its initial 1.0 public release in 2008, to increase the viability of testing on Linux, engage with more hardware and software vendors on best practices for testing, and just get more test cases running on Linux. At the time, there weren't any really shiny benchmarks on Linux like there were on Windows. - -**DW:** Who are your website's readers? - -**ML:** Phoronix's audience is as diverse as the content. Initially, it was quite desktop/gamer/enthusiast oriented, but as Linux's dominance has grown in HPC, cloud, embedded, etc., my testing has expanded in those areas and thus so has the readership. Readers tend to be interested in open source/Linux ecosystem advancements, performance, and a slight bent towards graphics processor and hardware driver interests. - -**DW:** How important is testing in the Linux world and how has it changed from when you started? - -**ML:** Testing has changed radically since 2004. Back then, many open source projects weren't carrying out any continuous integration (CI) or testing for regressions—both functional issues and performance problems. The hardware vendors supporting Linux were mostly trying to get things working and maintained while being less concerned about performance or scratching away at catching up to Mac, Solaris, and Windows. With time, we've seen the desktop reach close parity with (or exceed, depending upon your views) alternative operating systems. 
Most PC hardware now works out-of-the-box on Linux, most open source projects engage in some form of CI or testing, and more time and resources are afforded to advancing Linux performance. With high-frequency trading and cloud platforms relying on Linux, performance has become of utmost importance. - -Most of my testing at Phoronix.com is focused on benchmarking processors, graphics cards, storage devices, and other areas of interest to gamers and enthusiasts, but also interesting server platforms. Readers are also quite interested in testing of software components like the Linux kernel, code compilers, and filesystems. But in terms of the Phoronix Test Suite, its scope is rather limitless, with a framework in which new tests can be easily added and automated. There are currently more than 1,000 different profiles/suites, and new ones are routinely added—from machine learning tests to traditional benchmarks. - -**DW:** How important is open source hardware? Where do you see it going? - -**ML:** Open hardware is of increasing importance, especially in light of all the security vulnerabilities and disclosures in recent years. Facebook's work on the [Open Compute Project][6] can be commended, as can Google leveraging [Coreboot][7] in its Chromebook devices, and [Raptor Computing Systems][8]' successful, high-performance, open source POWER9 desktops/workstations/servers. [Intel][9] potentially open sourcing its firmware support package this year is also incredibly tantalizing and will hopefully spur more efforts in this space. - -Outside of that, open source hardware has had a really tough time cracking the consumer space due to the sheer amount of capital necessary and the complexities of designing a modern chip, etc., not to mention competing with the established hardware vendors' marketing budgets and other resources. 
So, while I would love for 100% open source hardware to dominate—or even compete in features and performance with proprietary hardware—in most segments, that is sadly unlikely to happen, especially with open hardware generally being much more expensive due to economies of scale. - -Software efforts like [OpenBMC][10], Coreboot/[Libreboot][11], and [LinuxBoot][12] are opening up hardware much more. Those efforts at liberating hardware have proven successful and will hopefully continue to be endorsed by more organizations. - -As for [OSHWA][13], I certainly applaud their efforts and the enthusiasm they bring to open source hardware. Certainly, for niche and smaller-scale devices, open source hardware can be a great fit. It will certainly be interesting to see what comes about with OSHWA and some of its partners like Lulzbot, Adafruit, and System76. - -**DW:** Can people install Phoronix Test Suite on their own computers? - -**ML:** The Phoronix Test Suite benchmarking software is open source under the GPL and can be downloaded from [Phoronix-Test-Suite.com][2] and [GitHub][14]. The benchmarking software works on not only Linux systems but also MacOS, Solaris, BSD, and Windows 10/Windows Server. The Phoronix Test Suite works on x86/x86_64, ARM/AArch64, POWER, RISC-V, and other architectures. - -**DW:** How does [OpenBenchmarking.org][15] work with the Phoronix Test Suite? - -**ML:** OpenBenchmarking.org is, in essence, the "cloud" component to the Phoronix Test Suite. It stores test profiles/test suites in a package manager-like fashion, allows users to upload their own benchmarking results, and offers related functionality around our benchmarking software. - -OpenBenchmarking.org is seamlessly integrated into the Phoronix Test Suite, but from the web interface, it is also where anyone can see the public benchmark results, inspect the open source test profiles to understand their methodology, research hardware and software data, and use similar functionality. 
- -Another component developed as part of the Phoronix Test Suite is [Phoromatic][16], which effectively allows anyone to deploy their own OpenBenchmarking-like environment within their own private intranet/LAN. This allows organizations to archive their benchmark results locally (and privately), orchestrate benchmarks automatically against groups of systems, manage the benchmark systems, and develop new test cases. - -**DW:** How can people stay up to date on Phoronix? - -**ML:** You can follow [me][17], [Phoronix][18], [Phoronix Test Suite][19], and [OpenBenchMarking.org][20] on Twitter. - --------------------------------------------------------------------------------- - -via: https://opensource.com/article/19/2/phoronix-michael-larabel - -作者:[Don Watkins][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://opensource.com/users/don-watkins -[b]: https://github.com/lujun9972 -[1]: https://www.phoronix.com/ -[2]: https://www.phoronix-test-suite.com/ -[3]: https://openbenchmarking.org/ -[4]: https://www.michaellarabel.com/ -[5]: http://Phoronix.com -[6]: https://www.opencompute.org/ -[7]: https://www.coreboot.org/ -[8]: https://www.raptorcs.com/ -[9]: https://www.phoronix.com/scan.php?page=news_item&px=Intel-Open-Source-FSP-Likely -[10]: https://en.wikipedia.org/wiki/OpenBMC -[11]: https://libreboot.org/ -[12]: https://linuxboot.org/ -[13]: https://www.oshwa.org/ -[14]: https://github.com/phoronix-test-suite/ -[15]: http://OpenBenchmarking.org -[16]: http://www.phoronix-test-suite.com/index.php?k=phoromatic -[17]: https://twitter.com/michaellarabel -[18]: https://twitter.com/phoronix -[19]: https://twitter.com/Phoromatic -[20]: https://twitter.com/OpenBenchmark diff --git a/sources/talk/20190219 How our non-profit works openly to make education accessible.md b/sources/talk/20190219 How our non-profit works openly 
to make education accessible.md deleted file mode 100644 index eee670610c..0000000000 --- a/sources/talk/20190219 How our non-profit works openly to make education accessible.md +++ /dev/null @@ -1,136 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (How our non-profit works openly to make education accessible) -[#]: via: (https://opensource.com/open-organization/19/2/building-curriculahub) -[#]: author: (Tanner Johnson https://opensource.com/users/johnsontanner3) - -How our non-profit works openly to make education accessible -====== -To build an open access education hub, our team practiced the same open methods we teach our students. -![](https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/OSDC_Education_2_OpenAccess_1040x584_12268077_0614MM.png?itok=xb96iaHe) - -I'm lucky to work with a team of impressive students at Duke University who are leaders in their classrooms and beyond. As members of [CSbyUs][1], a non-profit and student-run organization based at Duke, we connect university students to middle school students, mostly from [title I schools][2] across North Carolina's Research Triangle Park. Our mission is to fuel future change agents from under-resourced learning environments by fostering critical technology skills for thriving in the digital age. - -The CSbyUs Tech R&D team (TRD for short) recently set an ambitious goal to build and deploy a powerful web application over the course of one fall semester. Our team of six knew we had to do something about our workflow to ship a product by winter break. In our middle school classrooms, we teach our learners to use agile methodologies and design thinking to create mobile applications. On the TRD team, we realized we needed to practice what we preach in those classrooms to ship a quality product by semester's end. 
- -This is the story of how and why we utilized the principles we teach our students in order to deploy technology that will scale our mission and make our teaching resources open and accessible. - -### Setting the scene - -For the past two years, CSbyUs has operated "on the ground," connecting Duke undergraduates to Durham middle schools via after-school programming. After teaching and evaluating several iterations of our unique, student-centered mobile app development curriculum, we saw promising results. Our middle schoolers were creating functional mobile apps, connecting to their mentors, and leaving the class more confident in their computer science skills. Naturally, we wondered how to expand our programming. - -We knew we should take our own advice and lean into web-based technologies to share our work, but we weren't immediately sure what problem we needed to solve. Ultimately, we decided to create a web app that serves as a centralized hub for open source and open access digital education curricula. "CurriculaHub" (name inspired by GitHub) would be the defining pillar of CSbyUs's new website, where educators could share and adapt resources. - -But the vision and implementation didn't happen overnight. - -Given our sense of urgency and the potential of "CurriculaHub," we wanted to start this project with a well defined plan. The stakes were (and are) high, so planning, albeit occasionally tedious, was critical to our success. Like the curriculum we teach, we scaffolded our workflow process with design thinking and agile methodology, two critical 21st century frameworks we often fail to practice in higher ed. - -What follows is a step-wise explanation of our design thinking process, starting from inspiration and ending in a shipped prototype. - -``` -This is the story of how and why we utilized the principles we teach our students in order to deploy technology that will scale our mission and make our teaching resources open and accessible. 
-``` - -### Our Process - -#### **Step 1: Pre-Work** - -In order to understand the why to our what, you have to know who our team is. - -The members of this team are busy. All of us contribute to CSbyUs beyond our TRD-related responsibilities. As an organization with lofty goals beyond creating a web-based platform, we have to reconcile our "on the ground" commitments (i.e., curriculum curation, research and evaluation, mentorship training and practice, presentations at conferences, etc.) with our "in the cloud" technological goals. - -In addition to balancing time across our organization, we have to be flexible in the ways we communicate. As a remote member of the team, I'm writing this post from Spain, but the rest of our team is based in North Carolina, adding collaboration challenges. - -Before diving into development (or even problem identification), we knew we had to set some clear expectations for how we'd operate as a team. We took a note from our curriculum team's book and started with some [rules of engagement][3]. This is actually [a well-documented approach][4] to setting up a team's [social contract][5] used by teams across the tech space. During a summer internship at IBM, I remember pre-project meetings where my manager and team spent more than an hour clarifying principles of interaction. Whenever we faced uncertainty in our team operations, we'd pull out the rules of engagement and clear things up almost immediately. (An aside: I've found this strategy to be wildly effective not only in my teams, but in all relationships). - -Considering the remote nature of our team, one of our favorite tools is Slack. We use it for almost everything. We can't have sticky-note brainstorms, so we create Slack brainstorm threads. In fact, that's exactly what we did to generate our rules of engagement. 
One [open source principle we take to heart is transparency][6]; Slack allows us to archive and openly share our thought processes and decision-making steps with the rest of our team. - -#### **Step 2: Empathy Research** - -We're all here for unique reasons, but we find a common intersection: the desire to broaden equity in access to quality digital era education. - -Each member of our team has been lucky enough to study at Duke. We know how it feels to have limitless opportunities and the support of talented peers and renowned professors. But we're mindful that this isn't normal. Across the country and beyond, these opportunities are few and far between. Where they do exist, they're confined within the guarded walls of higher institutes of learning or come with a lofty price tag. - -While our team members' common desire to broaden access is clear, we work hard to root our decisions in research. So our team begins each semester [reviewing][7] [research][8] that justifies our existence. TRD works with CRD (curriculum research and development) and TT (teaching team), our two other CSbyUs sub-teams, to discuss current trends in digital education access, their systemic roots, and novel approaches to broaden access and make materials relevant to learners. We not only perform research collaboratively at the beginning of the semester but also implement weekly stand-up research meetings with the sub-teams. During these, CRD often presents new findings we've gleaned from interviewing current teachers and digging into the current state of access in our local community. They are our constant source of data-driven, empathy-fueling research. - -Through this type of empathy-based research, we have found that educators interested in student-centered teaching and digital era education lack a centralized space for proven and adaptable curricula and lesson plans. 
The bureaucracy and rigid structures that shape classroom learning in the United States makes reshaping curricula around the personal needs of students daunting and seemingly impossible. As students, educators, and technologists, we wondered how we might unleash the creativity and agency of others by sharing our own resources and creating an online ecosystem of support. - -#### **Step 3: Defining the Problem** - -We wanted to avoid [scope creep][9] caused by a poorly defined mission and vision (something that happens too often in some organizations). We needed structures to define our goals and maintain clarity in scope. Before imagining our application features, we knew we'd have to start with defining our north star. We would generate a clear problem statement to which we could refer throughout development. - -Before imagining our application features, we knew we'd have to start with defining our north star. - -This is common practice for us. Before committing to new programming, new partnerships, or new changes, the CSbyUs team always refers back to our mission and vision and asks, "Does this make sense?" (in fact, we post our mission and vision to the top of every meeting minutes document). If it fits and we have capacity to pursue it, we go for it. And if we don't, then we don't. In the case of a "no," we are always sure to document what and why because, as engineers know, [detailed logs are almost always a good decision][10]. TRD gleaned that big-picture wisdom and implemented a group-defined problem statement to guide our sub-team mission and future development decisions. - -To formulate a single, succinct problem statement, we each began by posting our own takes on the problem. Then, during one of our weekly [30-minute-no-more-no-less stand-up meetings][11], we identified commonalities and differences, ultimately [merging all our ideas into one][12]. 
Boiled down, we identified that there exist massive barriers for educators, parents, and students to share, modify, and discuss open source and accessible curricula. And of course, our mission would be to break down those barriers with user-centered technology. This "north star" lives as a highly visible document in our Google Drive, which has influenced our feature prioritization and future directions. - -#### **Step 4: Ideating a Solution** - -With our problem defined and our rules of engagement established, we were ready to imagine a solution. - -We believe that effective structures can ensure meritocracy and community. Sometimes, certain personalities dominate team decision-making and leave little space for collaborative input. To avoid that pitfall and maximize our equality of voice, we tend to use "offline" individual brainstorms and merge collective ideas online. It's the same process we used to create our rules of engagement and problem statement. In the case of ideating a solution, we started with "offline" brainstorms of three [S.M.A.R.T. goals][13]. Those goals would be ones we could achieve as a software development team (specifically because the CRD and TT teams offer different skill sets) and address our problem statement. Finally, we wrote these goals in a meeting minutes document, clustering common goals and ultimately identifying themes that describe our application features. In the end, we identified three: support, feedback, and open source curricula. - -From here, we divided ourselves into sub-teams, repeating the goal-setting process with those teams—but in a way that was specific to our features. And if it's not obvious by now, we realized a web-based platform would be the most optimal and scalable solution for supporting students, educators, and parents by providing a hub for sharing and adapting proven curricula. - -To work efficiently, we needed to be adaptive, reinforcing structures that worked and eliminating those that didn't. 
For example, we put a lot of effort in crafting meeting agendas. We strive to include only those subjects we must discuss in-person and table everything else for offline discussions on Slack or individually organized calls. We practice this in real time, too. During our regular meetings on Google Hangouts, if someone brings up a topic that isn't highly relevant or urgent, the current stand-up lead (a role that rotates weekly) "parking lots" it until the end of the meeting. If we have space at the end, we pull from the parking lot, and if not, we reserve that discussion for a Slack thread. - -This prioritization structure has led to massive gains in meeting efficiency and a focus on progress updates, shared technical hurdle discussions, collective decision-making, and assigning actionable tasks (the next-steps a person has committed to taking, documented with their name attached for everyone to view). - -#### **Step 5: Prototyping** - -This is where the fun starts. - -Our team was only able to unite new people with highly varied experience through the power of open principles and methodologies. - -Given our requirements—like an interactive user experience, the ability to collaborate on blogs and curricula, and the ability to receive feedback from our users—we began identifying the best technologies. Ultimately, we decided to build our web app with a ReactJS frontend and a Ruby on Rails backend. We chose these due to the extensive documentation and active community for both, and the well-maintained libraries that bridge the relationship between the two (e.g., react-on-rails). Since we chose Rails for our backend, it was obvious from the start that we'd work within a Model-View-Controller framework. - -Most of us didn't have previous experience with web development, neither on the frontend nor the backend. So, getting up and running with either technology independently presented a steep learning curve, and gluing the two together only steepened it. 
To centralize our work, we use an open-access GitHub repository. Given our relatively novice experience in web development, our success hinged on extremely efficient and open collaborations. - -And to explain that, we need to revisit the idea of structures. Some of ours include peer code reviews—where we can exchange best-practices and reusable solutions, maintaining up-to-date tech and user documentation so we can look back and understand design decisions—and (my personal favorite) our questions bot on Slack, which gently reminds us to post and answer questions in a separate Slack #questions channel. - -We've also dabbled with other strategies, like instructional videos for generating basic React components and rendering them in Rails Views. I tried this and in my first video, [I covered a basic introduction to our repository structure][14] and best practices for generating React components. While this proved useful, our team has since realized the wealth of online resources that document various implementations of these technologies robustly. Also, we simply haven't had enough time (but we might revisit them in the future—stay tuned). - -We're also excited about our cloud-based implementation. We use Heroku to host our application and manage data storage. In next iterations, we plan to both expand upon our current features and configure a continuous iteration/continuous development pipeline using services like Jenkins integrated with GitHub. - -#### **Step 6: Testing** - -Since we've [just deployed][1], we are now in a testing stage. Our goals are to collect user feedback across our feature domains and our application experience as a whole, especially as they interact with our specific audiences. Given our original constraints (namely, time and people power), this iteration is the first of many to come. 
For example, future iterations will allow for individual users to register accounts and post external curricula directly on our site without going through the extra steps of email. We want to scale and maximize our efficiency, and that's part of the recipe we'll deploy in future iterations. As for user testing: We collect user feedback via our contact form, via informal testing within our team, and via structured focus groups. [We welcome your constructive feedback and collaboration][15]. - -Our team was only able to unite new people with highly varied experience through the power of open principles and methodologies. Luckily enough, each one I described in this post is adaptable to virtually every team. - -Regardless of whether you work—on a software development team, in a classroom, or, heck, [even in your family][16]—principles like transparency and community are almost always the best foundation for a successful organization. - - --------------------------------------------------------------------------------- - -via: https://opensource.com/open-organization/19/2/building-curriculahub - -作者:[Tanner Johnson][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://opensource.com/users/johnsontanner3 -[b]: https://github.com/lujun9972 -[1]: http://csbyus.org -[2]: https://www2.ed.gov/programs/titleiparta/index.html -[3]: https://docs.google.com/document/d/1tqV6B6Uk-QB7Psj1rX9tfCyW3E64_v6xDlhRZ-L2rq0/edit -[4]: https://www.atlassian.com/team-playbook/plays/rules-of-engagement -[5]: https://openpracticelibrary.com/practice/social-contract/ -[6]: https://opensource.com/open-organization/resources/open-org-definition -[7]: https://services.google.com/fh/files/misc/images-of-computer-science-report.pdf -[8]: https://drive.google.com/file/d/1_iK0ZRAXVwGX9owtjUUjNz3_2kbyYZ79/view?usp=sharing -[9]: 
https://www.pmi.org/learning/library/top-five-causes-scope-creep-6675 -[10]: https://www.codeproject.com/Articles/42354/The-Art-of-Logging#what -[11]: https://opensource.com/open-organization/16/2/6-steps-running-perfect-30-minute-meeting -[12]: https://docs.google.com/document/d/1wdPRvFhMKPCrwOG2CGp7kP4rKOXrJKI77CgjMfaaXnk/edit?usp=sharing -[13]: https://www.projectmanager.com/blog/how-to-create-smart-goals -[14]: https://www.youtube.com/watch?v=52kvV0plW1E -[15]: http://csbyus.org/ -[16]: https://opensource.com/open-organization/15/11/what-our-families-teach-us-about-organizational-life diff --git a/sources/talk/20190220 Do Linux distributions still matter with containers.md b/sources/talk/20190220 Do Linux distributions still matter with containers.md deleted file mode 100644 index c1c7886d0d..0000000000 --- a/sources/talk/20190220 Do Linux distributions still matter with containers.md +++ /dev/null @@ -1,87 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Do Linux distributions still matter with containers?) -[#]: via: (https://opensource.com/article/19/2/linux-distributions-still-matter-containers) -[#]: author: (Scott McCarty https://opensource.com/users/fatherlinux) - -Do Linux distributions still matter with containers? -====== -There are two major trends in container builds: using a base image and building from scratch. Each has engineering tradeoffs. -![](https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/cube_innovation_process_block_container.png?itok=vkPYmSRQ) - -Some people say Linux distributions no longer matter with containers. Alternative approaches, like distroless and scratch containers, seem to be all the rage. It appears we are considering and making technology decisions based more on fashion sense and immediate emotional gratification than thinking through the secondary effects of our choices. 
We should be asking questions like: How will these choices affect maintenance six months down the road? What are the engineering tradeoffs? How does this paradigm shift affect our build systems at scale? - -It's frustrating to watch. If we forget that engineering is a zero-sum game with measurable tradeoffs—advantages and disadvantages, with costs and benefits of different approaches— we do ourselves a disservice, we do our employers a disservice, and we do our colleagues who will eventually maintain our code a disservice. Finally, we do all of the maintainers ([hail the maintainers!][1]) a disservice by not appreciating the work they do. - -### Understanding the problem - -To understand the problem, we have to investigate why we started using Linux distributions in the first place. I would group the reasons into two major buckets: kernels and other packages. Compiling kernels is actually fairly easy. Slackware and Gentoo (I still have a soft spot in my heart) taught us that. - -On the other hand, the tremendous amount of development and runtime software that needs to be packaged for a usable Linux system can be daunting. Furthermore, the only way you can ensure that millions of permutations of packages can be installed and work together is by using the old paradigm: compile it and ship it together as a thing (i.e., a Linux distribution). So, why do Linux distributions compile kernels and all the packages together? Simple: to make sure things work together. - -First, let's talk about kernels. The kernel is special. Booting a Linux system without a compiled kernel is a bit of a challenge. It's the core of a Linux operating system, and it's the first thing we rely on when a system boots. Kernels have a lot of different configuration options when they're being compiled that can have a tremendous effect on how hardware and software run on one. 
A secondary problem in this bucket is that system software, like compilers, C libraries, and interpreters, must be tuned for the options you built into the kernel. Gentoo taught us this in a visceral way, which turned everyone into a miniature distribution maintainer. - -Embarrassingly (because I have worked with containers for the last five years), I must admit that I have compiled kernels quite recently. I had to get nested KVM working on RHEL 7 so that I could run [OpenShift on OpenStack][2] virtual machines, in a KVM virtual machine on my laptop, as well as [our Container Development Kit (CDK)][3]. #justsayin Suffice to say, I fired RHEL7 up on a brand new 4.X kernel at the time. Like any good sysadmin, I was a little worried that I missed some important configuration options and patches. And, of course, I had missed some things. Sleep mode stopped working right, my docking station stopped working right, and there were numerous other small, random errors. But it did work well enough for a live demo of OpenShift on OpenStack, in a single KVM virtual machine on my laptop. Come on, that's kinda' fun, right? But I digress… - -Now, let's talk about all the other packages. While the kernel and associated system software can be tricky to compile, the much, much bigger problem from a workload perspective is compiling thousands and thousands of packages to give us a useable Linux system. Each package requires subject matter expertise. Some pieces of software require running only three commands: **./configure** , **make** , and **make install**. Others require a lot of subject matter expertise ranging from adding users and configuring specific defaults in **etc** to running post-install scripts and adding systemd unit files. The set of skills necessary for the thousands of different pieces of software you might use is daunting for any single person. 
But, if you want a usable system with the ability to try new software whenever you want, you have to learn how to compile and install the new software before you can even begin to learn to use it. That's Linux without a Linux distribution. That's the engineering problem you are agreeing to when you forgo a Linux distribution. - -The point is that you have to build everything together to ensure it works together with any sane level of reliability, and it takes a ton of knowledge to build a usable cohort of packages. This is more knowledge than any single developer or sysadmin is ever going to reasonably learn and retain. Every problem I described applies to your [container host][4] (kernel and system software) and [container image][5] (system software and all other packages)—notice the overlap; there are compilers, C libraries, interpreters, and JVMs in the container image, too. - -### The solution - -You already know this, but Linux distributions are the solution. Stop reading and send your nearest package maintainer (again, hail the maintainers!) an e-card (wait, did I just give my age away?). Seriously though, these people do a ton of work, and it's really underappreciated. Kubernetes, Istio, Prometheus, and Knative: I am looking at you. Your time is coming too, when you will be in maintenance mode, overused, and underappreciated. I will be writing this same article again, probably about Kubernetes, in about seven to 10 years. - -### First principles with container builds - -There are tradeoffs to building from scratch and building from base images. - -#### Building from base images - -Building from base images has the advantage that most build operations are nothing more than a package install or update. It relies on a ton of work done by package maintainers in a Linux distribution. 
It also has the advantage that a patching event six months—or even 10 years—from now (with RHEL) is an operations/systems administrator event (yum update), not a developer event (that requires picking through code to figure out why some function argument no longer works). - -Let's double-click on that a bit. Application code relies on a lot of libraries ranging from JSON munging libraries to object-relational mappers. Unlike the Linux kernel and Glibc, these types of libraries change with very little regard to breaking API compatibility. That means that three years from now your patching event likely becomes a code-changing event, not a yum update event. Got it, let that sink in. Developers, you are getting paged at 2 AM if the security team can't find a firewall hack to block the exploit. - -Building from a base image is not perfect; there are disadvantages, like the size of all the dependencies that get dragged in. This will almost always make your container images larger than building from scratch. Another disadvantage is you will not always have access to the latest upstream code. This can be frustrating for developers, especially when you just want to get something out the door, but not as frustrating as being paged to look at a library you haven't thought about in three years that the upstream maintainers have been changing the whole time. - -If you are a web developer and rolling your eyes at me, I have one word for you: DevOps. That means you are carrying a pager, my friend. - -#### Building from scratch - -Scratch builds have the advantage of being really small. When you don't rely on a Linux distribution in the container, you have a lot of control, which means you can customize everything for your needs. This is a best-of-breed model, and it's valid in certain use cases. Another advantage is you have access to the latest packages. You don't have to wait for a Linux distro to update anything. 
You are in control, so you choose when to spend the engineering work to incorporate new software. - -Remember, there is a cost to controlling everything. Often, updating to new libraries with new features drags in unwanted API changes, which means fixing incompatibilities in code (in other words, [shaving yaks][6]). Shaving yaks at 2 AM when the application doesn't work is not fun. Luckily, with containers, you can roll back and shave the yaks the next business day, but it will still eat into your time for delivering new value to the business, new features to your applications. Welcome to the life of a sysadmin. - -OK, that said, there are times that building from scratch makes sense. I will completely concede that statically compiled Golang programs and C programs are two decent candidates for scratch/distroless builds. With these types of programs, every container build is a compile event. You still have to worry about API breakage three years from now, but if you are a Golang shop, you should have the skillset to fix things over time. - -### Conclusion - -Basically, Linux distributions do a ton of work to save you time—on a regular Linux system or with containers. The knowledge that maintainers have is tremendous and leveraged so much without really being appreciated. The adoption of containers has made the problem even worse because it's even further abstracted. - -With container hosts, a Linux distribution offers you access to a wide hardware ecosystem, ranging from tiny ARM systems, to giant 128 CPU x86 boxes, to cloud-provider VMs. They offer working container engines and container runtimes out of the box, so you can just fire up your containers and let somebody else worry about making things work. - -For container images, Linux distributions offer you easy access to a ton of software for your projects. 
Even when you build from scratch, you will likely look at how a package maintainer built and shipped things—a good artist is a good thief—so, don't undervalue this work. - -So, thank you to all of the maintainers in Fedora, RHEL (Frantisek, you are my hero), Debian, Gentoo, and every other Linux distribution. I appreciate the work you do, even though I am a "container guy." - --------------------------------------------------------------------------------- - -via: https://opensource.com/article/19/2/linux-distributions-still-matter-containers - -作者:[Scott McCarty][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://opensource.com/users/fatherlinux -[b]: https://github.com/lujun9972 -[1]: https://aeon.co/essays/innovation-is-overvalued-maintenance-often-matters-more -[2]: https://blog.openshift.com/openshift-on-openstack-delivering-applications-better-together/ -[3]: https://developers.redhat.com/blog/2018/02/13/red-hat-cdk-nested-kvm/ -[4]: https://developers.redhat.com/blog/2018/02/22/container-terminology-practical-introduction/#h.8tyd9p17othl -[5]: https://developers.redhat.com/blog/2018/02/22/container-terminology-practical-introduction/#h.dqlu6589ootw -[6]: https://en.wiktionary.org/wiki/yak_shaving diff --git a/sources/talk/20190225 Teaching scientists how to share code.md b/sources/talk/20190225 Teaching scientists how to share code.md deleted file mode 100644 index 541cce2719..0000000000 --- a/sources/talk/20190225 Teaching scientists how to share code.md +++ /dev/null @@ -1,72 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( jiangyaomin) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Teaching scientists how to share code) -[#]: via: (https://opensource.com/article/19/2/open-science-git) -[#]: author: (Jon Tennant https://opensource.com/users/jon-tennant) - -Teaching 
scientists how to share code -====== -This course teaches them how to set up a GitHub project, index their project in Zenodo, and integrate Git into an RStudio workflow. -![](https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/rh_003588_01_rd3os.combacktoschoolserieshe_rh_051x_0.png?itok=gIzbmxuI) - -Would it surprise you to learn that most of the world's scholarly research is not owned by the people who funded it or who created it? Rather it's owned by private corporations and locked up in proprietary systems, leading to [problems][1] around sharing, reuse, and reproducibility. - -The open science movement is challenging this system, aiming to give researchers control, ownership, and freedom over their work. The [Open Science MOOC][2] (massively open online community) is a mission-driven project launched in 2018 to kick-start an open scientific revolution and foster more partnerships between open source software and open science. - -The Open Science MOOC is a peer-to-peer community of practice, based around sharing knowledge and ideas, learning new skills, and using these things to develop as individuals so research communities can grow as part of a wider cultural shift towards openness. - -### The curriculum - -The Open Science MOOC is divided into 10 core modules, from the principles of open science to becoming an open science advocate. - -The first module, [Open Research Software and Open Source][3], was released in late 2018. It includes three main tasks, all designed to help make research workflows more efficient and more open for collaboration: - -#### 1\. Setting up your first GitHub project - -GitHub is a powerful project management tool, both for coders and non-coders. This task teaches how to create a community around the platform, select an appropriate license, and write good documentation (including README files, contributing guidelines, and codes of conduct) to foster open collaboration and a welcoming community. 
- -#### 2\. Indexing your project in Zenodo - -[Zenodo][4] is an open science platform that seamlessly integrates with GitHub to help make projects more permanent, reusable, and citable. This task explains how webhooks between Zenodo and GitHub allow new versions of projects to become permanently archived as they progress. This is critical for helping researchers get a [DOI][5] for their work so they can receive full credit for all aspects of a project. As citations are still a primary form of "academic capital," this is essential for researchers. - -#### 3\. Integrating Git into an RStudio workflow - -This task is about giving research a mega-boost through greater collaborative efficiency and reproducibility. Git enables version control in all forms of text-based content, including data analysis and writing papers. Each time you save your work during the development process, Git saves time-stamped copies. This saves the hassle of trying to "roll back" projects if you delete a file or text by mistake, and eliminates horrific file-naming conventions. (For example, does FINAL_Revised_2.2_supervisor_edits_ver1.7_scream.txt look familiar?) Getting Git to interface with RStudio is the painful part, but this task goes through it, step by step, to ease the stress. - -The third task also gives students the ability to interact directly with the MOOC by submitting pull requests to demonstrate their skills. This also adds their name to an online list of open source champions (aka "open sourcerers"). - -The MOOC's inherently interactive style is much more valuable than listening to someone talk at you, either on or off screen, like with many traditional online courses or educational programs. Each task is backed up by expert-gathered knowledge, so students get a rigorous, dual-learning experience. - -### Empowering researchers - -The Open Science MOOC strives to be as open as possible—this means we walk the walk and talk the talk. 
We are built upon a solid values-based foundation of freedom and equitable access to research. We see this route towards widespread adoption of best scientific practices as an essential part of the research process. - -Everything we produce is openly developed and openly licensed for maximum engagement, sharing, and reuse. An open source workflow underpins our development. All of this happens openly around channels such as [Slack][6] and [GitHub][7] and helps to make the community much more coherent. - -If we can instill the value of open source into modern research, this would empower current and future generations of researchers to think more about fundamental freedoms around knowledge production. We think that is something worth working towards as a community. - -The Open Science MOOC combines the best elements of the open education, open science, and open source worlds. If you're ready to join, [sign up for the full course][3], which is, of course, free. - --------------------------------------------------------------------------------- - -via: https://opensource.com/article/19/2/open-science-git - -作者:[Jon Tennant][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/jiangyaomin) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://opensource.com/users/jon-tennant -[b]: https://github.com/lujun9972 -[1]: https://www.theguardian.com/science/political-science/2018/jun/29/elsevier-are-corrupting-open-science-in-europe -[2]: https://opensciencemooc.eu/ -[3]: https://eliademy.com/catalog/oer/module-5-open-research-software-and-open-source.html -[4]: https://zenodo.org/ -[5]: https://en.wikipedia.org/wiki/Digital_object_identifier -[6]: https://osmooc.herokuapp.com/ -[7]: https://open-science-mooc-invite.herokuapp.com/ diff --git a/sources/talk/20190301 What-s happening in the OpenStack community.md b/sources/talk/20190301 What-s happening in the OpenStack community.md 
deleted file mode 100644 index 28e3bf2fd3..0000000000 --- a/sources/talk/20190301 What-s happening in the OpenStack community.md +++ /dev/null @@ -1,73 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (What's happening in the OpenStack community?) -[#]: via: (https://opensource.com/article/19/3/whats-happening-openstack) -[#]: author: (Jonathan Bryce https://opensource.com/users/jonathan-bryce) - -What's happening in the OpenStack community? -====== - -In many ways, 2018 was a transformative year for the OpenStack Foundation. - -![](https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/travel-mountain-cloud.png?itok=ZKsJD_vb) - -Since 2010, the OpenStack community has been building open source software to run cloud computing infrastructure. Initially, the focus was public and private clouds, but open infrastructure has been pulled into many new important use cases like telecoms, 5G, and manufacturing IoT. - -As OpenStack software matured and grew in scope to support new technologies like bare metal provisioning and container infrastructure, the community widened its thinking to embrace users who deploy and run the software in addition to the developers who build the software. Questions like, "What problems are users trying to solve?" "Which technologies are users trying to integrate?" and "What are the gaps?" began to drive the community's thinking and decision making. - -In response to those questions, the OSF reorganized its approach and created a new "open infrastructure" framework focused on use cases, including edge, container infrastructure, CI/CD, and private and hybrid cloud. And, for the first time, the OSF is hosting open source projects outside of the OpenStack project. - -Following are three highlights from the [OSF 2018 Annual Report][1]; I encourage you to read the entire report for more detailed information about what's new. 
- -### Pilot projects - -On the heels of launching [Kata Containers][2] in December 2017, the OSF launched three pilot projects in 2018—[Zuul][3], [StarlingX][4], and [Airship][5]—that help further our goals of taking our technology into additional relevant markets. Each project follows the tenets we consider key to the success of true open source, [the Four Opens][6]: open design, open collaboration, open development, and open source. While these efforts are still new, they have been extremely valuable in helping us learn how we should expand the scope of the OSF, as well as showing others the kind of approach we will take. - -While the OpenStack project remained at the core of the team's focus, pilot projects are helping expand usage of open infrastructure across markets and already benefiting the OpenStack community. This has attracted dozens of new developers to the open infrastructure community, which will ultimately benefit the OpenStack community and users. - -There is direct benefit from these contributors working upstream in OpenStack, such as through StarlingX, as well as indirect benefit from the relationships we've built with the Kubernetes community through the Kata Containers project. Airship is similarly bridging the gaps between the Kubernetes and OpenStack ecosystems. This shows users how the technologies work together. A rise in contributions to Zuul has provided the engine for OpenStack CI and keeps our development running smoothly. - -### Containers collaboration - -In addition to investing in new pilot projects, we continued efforts to work with key adjacent projects in 2018, and we made particularly good progress with Kubernetes. OSF staffer Chris Hoge helps lead the cloud provider special interest group, where he has helped standardize how Kubernetes deployments expect to run on top of various infrastructure. 
This has clarified OpenStack's place in the Kubernetes ecosystem and led to valuable integration points, like having OpenStack as part of the Kubernetes release testing process. - -Additionally, OpenStack Magnum was certified as a Kubernetes installer by the CNCF. Through the Kata Containers community, we have deepened these relationships into additional areas within the container ecosystem resulting in a number of companies getting involved for the first time. - -### Evolving events - -We knew heading into 2018 that the environment around our events was changing and we needed to respond. During the year, we held two successful project team gatherings (PTGs) in Dublin and Denver, reaching capacity for both events while also including new projects and OpenStack operators. We held OpenStack Summits in Vancouver and Berlin, both experiencing increases in attendance and project diversity since Sydney in 2017, with each Summit including more than 30 open source projects. Recognizing this broader audience and the OSF's evolving strategy, the OpenStack Summit was renamed the [Open Infrastructure Summit][7], beginning with the Denver event coming up in April. - -In 2018, we boosted investment in China, onboarding a China Community Manager based in Shanghai and hosting a strategy day in Beijing with 30+ attendees from Gold and Platinum Members in China. This effort will continue in 2019 as we host our first Summit in China: the [Open Infrastructure Summit Shanghai][8] in November. - -We also worked with the community in 2018 to define a new model for events to maximize participation while saving on travel and expenses for the individuals and companies who are increasingly stretched across multiple open source communities. We arrived at a plan that we will implement and iterate on in 2019 where we will collocate PTGs as standalone events adjacent to our Open Infrastructure Summits. 
- -### Looking ahead - -We've seen impressive progress, but the biggest accomplishment might be in establishing a framework for the future of the foundation itself. In 2018, we advanced the open infrastructure mission by establishing OSF as an effective place to collaborate for CI/CD, container infrastructure, and edge computing, in addition to the traditional public and private cloud use cases. The open infrastructure approach opened a lot of doors in 2018, from the initial release of software from each pilot project, to live 5G demos, to engagement with hyperscale public cloud providers. - -Ultimately, our value comes from the effectiveness of our communities and the software they produce. As 2019 unfolds, our community is excited to apply learnings from 2018 to the benefit of developers, users, and the commercial ecosystem across all our projects. - --------------------------------------------------------------------------------- - -via: https://opensource.com/article/19/3/whats-happening-openstack - -作者:[Jonathan Bryce][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://opensource.com/users/jonathan-bryce -[b]: https://github.com/lujun9972 -[1]: https://www.openstack.org/foundation/2018-openstack-foundation-annual-report -[2]: https://katacontainers.io/ -[3]: https://zuul-ci.org/ -[4]: https://www.starlingx.io/ -[5]: https://www.airshipit.org/ -[6]: https://www.openstack.org/four-opens/ -[7]: https://www.openstack.org/summit/denver-2019/ -[8]: https://www.openstack.org/summit/shanghai-2019 diff --git a/sources/talk/20190306 How to pack an IT travel kit.md b/sources/talk/20190306 How to pack an IT travel kit.md deleted file mode 100644 index b05ee460b7..0000000000 --- a/sources/talk/20190306 How to pack an IT travel kit.md +++ /dev/null @@ -1,100 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) 
-[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (How to pack an IT travel kit) -[#]: via: (https://opensource.com/article/19/3/it-toolkit-remote) -[#]: author: (Peter Cheer https://opensource.com/users/petercheer) - -How to pack an IT travel kit -====== -Before you travel, make sure you're ready for challenges in hardware, infrastructure, and software. -![](https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/tools_sysadmin_cloud.png?itok=sUciG0Cn) - -I've had several opportunities to do IT work in less-developed and remote areas, where internet coverage and technology access aren't at the high level we have in our first-world cities. Many people heading off to undeveloped areas ask me for advice on preparing for the local technology landscape. Since conditions vary greatly around this big world, it's impossible to give specific advice for most areas, but I do have some general suggestions based on my experience that may help you. - -Also, before you leave home, do as much research as you can about the general IT and telecom environment where you are traveling so you're a little better prepared for what you may encounter there. - -### Planning for the local hardware and infrastructure - - * Even in many cities, internet connections tend to be expensive, slow, and not reliable for large downloads. Don't forget that internet coverage, speeds, and cost in cities are unlikely to be matched in more remote areas. - - - * The electricity supply may be unreliable with inconsistent voltage. If you are taking your computer, bring some surge protection—although in my experience, the electricity voltage is more likely to drop than to spike. - - - * It is always useful to have a small selection of hand tools, such as screwdrivers and needle-nose pliers, for repairing computer hardware. 
A lack of spare parts can limit opportunities for much beyond basic troubleshooting, although stripping usable components from dead computers can be worthwhile. - - - -### Planning for the software you'll find - - * You can assume that most of the computer systems you'll find will be some incarnation of Microsoft Windows. You can expect that many will not be officially licensed, not be getting updates nor security patches, and are infected by multiple viruses and other malware. - - - * You can also expect that most application software will be proprietary and much of it will be unlicensed and lag behind the latest release versions. These conditions are depressing for open source enthusiasts, but this is the world as it is, rather than the world we would like it to be. - - - * It is wise to view any Windows system you do not control as potentially infected with viruses and malware. It's good practice to reserve a USB thumb drive for files you'll use with these Windows systems; this means that if (or more likely when) that thumb drive becomes infected, you can just reformat it at no cost. - - - * Bring copies of free antivirus software such as [AVG][1] and [Avast][2], including recent virus definition files for them, as well as virus removal and repair tools such as [Sophos][3] and [Hirens Boot CD][4]. - - - * Trying to keep software current on machines that have no or infrequent access to the internet is a challenge. This is particularly true with web browsers, which tend to go through rapid release cycles. My preferred web browser is Mozilla Firefox and having a copy of the latest release is useful. - - - * Bring repair discs for a selection of recent Microsoft operating systems, and make sure that includes service packs for Windows and Microsoft Office. - - - -### Planning for the software you'll bring - -There's no better way to convey the advantages of open source software than by showing it to people. Here are some recommendations along that line. 
- - * When gathering software to take with you, make sure you get the full offline installation option. Often, the most prominently displayed download links on websites are stubs that require internet access to download the components. They won't work if you're in an area with poor (or no) internet service. - - - * Also, make sure to get the 32-bit and 64-bit versions of the software. While 32-bit machines are becoming less common, you may encounter them and it's best to be prepared. - - - * Having a [bootable version of Linux][5] is vital for two reasons. First, it can be used to rescue data from a seriously damaged Windows machine. Second, it's an easy way to show off Linux without installing it on someone's machine. [Linux Mint][6] is my favorite distro for this purpose, because the graphical interface (GUI) is similar enough to Windows to appear non-threatening and it includes a good range of application software. - - - * Bring the widest selection of open source applications you can—you can't count on being able to download something from the internet. - - - * When possible, bring your open source software library as portable applications that will run without installing them. One of the many ways to mess up those Windows machines is to install and uninstall a lot of software, and using portable apps gets around this problem. Many open source applications, including Libre Office, GIMP, Blender, and Inkscape, have portable app versions for Windows. - - - * It's smart to bring a supply of blank disks so you can give away copies of your open source software stash on media that is a bit more secure than a USB thumb drive. - - - * Don't forget to bring programs and resources related to projects you will be working on. (For example, most of my overseas work involves tuition, mentoring, and skills transfer, so I usually add a selection of open source software tools for creating learning resources.) 
- - - -### Your turn - -There are many variables and surprises when doing IT work in undeveloped areas. If you have suggestions—for programs I've missed or tips that I didn't cover—please share them in the comments. - --------------------------------------------------------------------------------- - -via: https://opensource.com/article/19/3/it-toolkit-remote - -作者:[Peter Cheer][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://opensource.com/users/petercheer -[b]: https://github.com/lujun9972 -[1]: https://www.avg.com/en-gb/free-antivirus-download -[2]: https://www.avast.com/en-gb/free-antivirus-download -[3]: https://www.sophos.com/en-us/products/free-tools/virus-removal-tool.aspx -[4]: https://www.hiren.info/ -[5]: https://opensource.com/article/18/7/getting-started-etcherio -[6]: https://linuxmint.com/ diff --git a/sources/talk/20190307 Small Scale Scrum vs. Large Scale Scrum.md b/sources/talk/20190307 Small Scale Scrum vs. Large Scale Scrum.md deleted file mode 100644 index 7da83306a5..0000000000 --- a/sources/talk/20190307 Small Scale Scrum vs. Large Scale Scrum.md +++ /dev/null @@ -1,106 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Small Scale Scrum vs. Large Scale Scrum) -[#]: via: (https://opensource.com/article/19/3/small-scale-scrum-vs-large-scale-scrum) -[#]: author: (Agnieszka Gancarczyk https://opensource.com/users/agagancarczyk) - -Small Scale Scrum vs. Large Scale Scrum -====== -We surveyed individual members of small and large scrum teams. Here are some key findings. 
-![](https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/BUSINESS_crowdvsopen.png?itok=AFjno_8v) - -Following the publication of the [Small Scale Scrum framework][1], we wanted to collect feedback on how teams in our target demographic (consultants, open source developers, and students) work and what they value. With this first opportunity to inspect, adapt, and help shape the next stage of Small Scale Scrum, we decided to create a survey to capture some data points and begin to validate some of our assumptions and hypotheses. - -**[[Download the Introduction to Small Scale Scrum guide]][2]** - -Our reasons for using the survey were multifold, but chief among them were the global distribution of teams, the small local data sample available in our office, access to customers, and the industry’s utilization of surveys (e.g., the [Stack Overflow Developer Survey 2018][3], [HackerRank 2018 Developer Skills Report][4], and [GitLab 2018 Global Developer Report][5]). - -The scrum’s iterative process was used to facilitate the creation of the survey shown below: - -![](https://opensource.com/sites/default/files/uploads/survey_process.png) - -[The survey][6], which we invite you to complete, consisted of 59 questions and was distributed at a local college ([Waterford Institute of Technology][7]) and to Red Hat's consultancy and engineering teams. Our initial data was gathered from the responses of 54 individuals spread across small and large scrum teams, who were asked about their experiences with agile within their teams. - -Here are the main results and initial findings of the survey: - - * A full 96% of survey participants practice a form of agile, work in distributed teams, think scrum principles help them reduce development complexity, and believe agile contributes to the success of their projects. 
- - * Only 8% of survey participants belong to small (one- to three-person) teams, and 10 out of 51 describe their typical project as short-lived (three months or less). - - * The majority of survey participants were software engineers, but quality engineers (QE), project managers (PM), product owners (PO), and scrum masters were also represented. - - * Scrum master, PO, and team member are typical roles in projects. - - * Nearly half of survey respondents work on, or are assigned to, more than one project at the same time. - - * Almost half of projects are customer/value-generating vs. improvement/not directly value-generating or unclassified. - - * Almost half of survey participants said that their work is clarified sometimes or most of the time and estimated before development with extensions available sometimes or most of the time. They said asking for clarification of work items is the team’s responsibility. - - * Almost half of survey respondents said they write tests for their code, and they adhere to best coding practices, document their code, and get their code reviewed before merging. - - * Almost all survey participants introduce bugs to the codebase, which are prioritized by them, the team, PM, PO, team lead, or the scrum master. - - * Participants ask for help and mentoring when a task is complex. They also take on additional roles on their projects when needed, including business analyst, PM, QE, and architect, and they sometimes find changing roles difficult. - - * When changing roles on a daily basis, individuals feel they lose one to two hours on average, but they still complete their work on time most of the time. - - * Most survey participants use scrum (65%), followed by hybrid (18%) and Kanban (12%). This is consistent with results of [VersionOne’s State of Agile Report][8]. 
- - * The daily standup, sprint, sprint planning and estimating, backlog grooming, and sprint retrospective are among the top scrum ceremonies and principles followed, and team members do preparation work before meetings. - - * The majority of sprints (62%) are three weeks long, followed by two-week sprints (26%), one-week sprints (6%), and four-week sprints (4%). Two percent of participants are not using sprints due to strict release and update timings, with all activities organized and planned around those dates. - - * Teams use [planning poker][9] to estimate (storypoint) user stories. User stories contain acceptance criteria. - - * Teams create and use a [Definition of Done][10] mainly in respect to features and determining completion of user stories. - - * The majority of teams don’t have or use a [Definition of Ready][11] to ensure that user stories are actionable, testable, and clear. - - * Unit, integration, functional, automated, performance/load, and acceptance tests are commonly used. - - * Overall collaboration between team members is considered high, and team members use various communication channels. - - * The majority of survey participants spend more than four hours weekly in meetings, including face-to-face meetings, web conferences, and email communication. - - * The majority of customers are considered large, and half of them understand and follow scrum principles. - - * Customers respect “no deadlines” most of the time and sometimes help create user stories and participate in sprint planning, sprint review and demonstration, sprint retrospective, and backlog review and refinement. - - * Only 27% of survey participants know their customers have a high level of satisfaction with the adoption of agile, while the majority (58%) don’t know this information at all. - - - - -These survey results will inform the next stage of our data-gathering exercise. 
We will apply Small Scale Scrum to real-world projects, and the guidance obtained from the survey will help us gather key data points as we move toward version 2.0 of Small Scale Scrum. If you want to help, take our [survey][6]. If you have a project to which you'd like to apply Small Scale Scrum, please get in touch. - -[Download the Introduction to Small Scale Scrum guide][2] - - --------------------------------------------------------------------------------- - -via: https://opensource.com/article/19/3/small-scale-scrum-vs-large-scale-scrum - -作者:[Agnieszka Gancarczyk][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://opensource.com/users/agagancarczyk -[b]: https://github.com/lujun9972 -[1]: https://opensource.com/article/19/2/small-scale-scrum-framework -[2]: https://opensource.com/downloads/small-scale-scrum -[3]: https://insights.stackoverflow.com/survey/2018/ -[4]: https://research.hackerrank.com/developer-skills/2018/ -[5]: https://about.gitlab.com/developer-survey/2018/ -[6]: https://docs.google.com/forms/d/e/1FAIpQLScAXf52KMEiEzS68OOIsjLtwZJto_XT7A3b9aB0RhasnE_dEw/viewform?c=0&w=1 -[7]: https://www.wit.ie/ -[8]: https://explore.versionone.com/state-of-agile/versionone-12th-annual-state-of-agile-report -[9]: https://en.wikipedia.org/wiki/Planning_poker -[10]: https://www.scruminc.com/definition-of-done/ -[11]: https://www.scruminc.com/definition-of-ready/ diff --git a/sources/talk/20190312 When the web grew up- A browser story.md b/sources/talk/20190312 When the web grew up- A browser story.md deleted file mode 100644 index 6a168939ad..0000000000 --- a/sources/talk/20190312 When the web grew up- A browser story.md +++ /dev/null @@ -1,65 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (When the web grew up: A browser 
story) -[#]: via: (https://opensource.com/article/19/3/when-web-grew) -[#]: author: (Mike Bursell https://opensource.com/users/mikecamel) - -When the web grew up: A browser story -====== -A personal story of when the internet came of age. - -![](https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/OSDC_Internet_Sign.png?itok=5MFGKs14) - -Recently, I [shared how][1] upon leaving university in 1994 with a degree in English literature and theology, I somehow managed to land a job running a web server in a world where people didn't really know what a web server was yet. And by "in a world," I don't just mean within the organisation in which I worked, but the world in general. The web was new—really new—and people were still trying to get their heads around it. - -That's not to suggest that the place where I was working—an academic publisher—particularly "got it" either. This was a world in which a large percentage of the people visiting their website were still running 28k8 modems. I remember my excitement in getting a 33k6 modem. At least we were past the days of asymmetric upload/download speeds,1 where 1200/300 seemed like an eminently sensible bandwidth description. This meant that the high-design, high-colour, high-resolution documents created by the print people (with whom I shared a floor) were completely impossible on the web. I wouldn't allow anything bigger than a 40k GIF on the front page of the website, and that was pushing it for many of our visitors. Anything larger than 60k or so would be explicitly linked as a standalone image from a thumbnail on the referring page. - -To say that the marketing department didn't like this was an understatement. Even worse was the question of layout. "Browsers decide how to lay out documents," I explained, time after time, "you can use headers or paragraphs, but how documents appear on the page isn't defined by the document, but by the renderer!" They wanted control. 
They wanted different coloured backgrounds. After a while, they got that. I went to what I believe was the first W3C meeting at which the idea of Cascading Style Sheets (CSS) was discussed. And argued vehemently against them. The suggestion that document writers should control layout was anathema.2 It took some while for CSS to be adopted, and in the meantime, those who cared about such issues adopted the security trainwreck that was Portable Document Format (PDF). - -How documents were rendered wasn't the only issue. Being a publisher of actual physical books, the whole point of having a web presence, as far as the marketing department was concerned, was to allow customers—or potential customers—to know not only what a book was about, but also how much it was going to cost them to buy. This, however, presented a problem. You see, the internet—in which I include the rapidly growing World Wide Web—was an open, free-for-all libertarian sort of place where nobody was interested in money; in fact, where talk of money was to be shunned and avoided. - -I took the mainstream "Netizen" view that there was no place for pricing information online. My boss—and, indeed, pretty much everybody else in the organisation—took a contrary view. They felt that customers should be able to see how much books would cost them. They also felt that my bank manager would like to see how much money was coming into my bank account on a monthly basis, which might be significantly reduced if I didn't come round to their view. - -Luckily, by the time I'd climbed down from my high horse and got over myself a bit—probably only a few weeks after I'd started digging my heels in—the web had changed, and there were other people putting pricing information up about their products. These newcomers were generally looked down upon by the old schoolers who'd been running web servers since the early days,3 but it was clear which way the wind was blowing. 
This didn't mean that the battle was won for our website, however. As an academic publisher, we shared an academic IP name ("ac.uk") with the University. The University was less than convinced that publishing pricing information was appropriate until some senior folks at the publisher pointed out that Princeton University Press was doing it, and wouldn't we look a bit silly if…? - -The fun didn't stop there, either. A few months into my tenure as webmaster ("webmaster@…"), we started to see a worrying trend, as did lots of other websites. Certain visitors were single-handedly bringing our webserver to its knees. These visitors were running a new web browser: Netscape. Netscape was badly behaved. Netscape was multi-threaded. - -Why was this an issue? Well, before Netscape, all web browsers had been single-threaded. They would open one connection at a time, so even if you had, say five GIFs on a page,4 they would request the HTML base file, parse that, then download the first GIF, complete that, then the second, complete that, and so on. In fact, they often did the GIFs in the wrong order, which made for very odd page loading, but still, that was the general idea. The rude people at Netscape decided that they could open multiple connections to the webserver at a time to request all the GIFs at the same time, for example! And why was this a problem? Well, the problem was that most webservers were single-threaded. They weren't designed to have multiple connections open at any one time. Certainly, the HTTP server that we ran (MacHTTP) was single-threaded. Even though we had paid for it (it was originally shareware), the version we had couldn't cope with multiple requests at a time. - -The debate raged across the internet. Who did these Netscape people think they were, changing how the world worked? How it was supposed to work? The world settled into different camps, and as with all technical arguments, heated words were exchanged on both sides. 
The problem was that not only was Netscape multi-threaded, it was also just better than the alternatives. Lots of web server code maintainers, MacHTTP author Chuck Shotton among them, sat down and did some serious coding to produce multi-threaded beta versions of their existing code. Everyone moved almost immediately to the beta versions, they got stable, and in the end, single-threaded browsers either adapted and became multi-threaded themselves, or just went the way of all outmoded products and died a quiet death.6 - -This, for me, is when the web really grew up. It wasn't prices on webpages nor designers being able to define what you'd see on a page,8 but rather when browsers became easier to use and when the network effect of thousands of viewers moving to many millions tipped the balance in favour of the consumer, not the producer. There were more steps in my journey—which I'll save for another time—but from around this point, my employers started looking at our monthly, then weekly, then daily logs, and realising that this was actually going to be something big and that they'd better start paying some real attention. - -1\. How did they come back, again? - -2\. It may not surprise you to discover that I'm still happiest at the command line. - -3\. About six months before. - -4\. Reckless, true, but it was beginning to happen.5 - -5\. Oh, and no—it was GIFs or BMP. JPEG was still a bright idea that hadn't yet taken off. - -6\. It's never actually quiet: there are always a few diehard enthusiasts who insist that their preferred solution is technically superior and bemoan the fact that the rest of the internet has gone to the devil.7 - -7\. I'm not one to talk: I still use Lynx from time to time. - -8\. Creating major and ongoing problems for those with different accessibility needs, I would point out. 
- --------------------------------------------------------------------------------- - -via: https://opensource.com/article/19/3/when-web-grew - -作者:[Mike Bursell][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://opensource.com/users/mikecamel -[b]: https://github.com/lujun9972 -[1]: https://opensource.com/article/18/11/how-web-was-won diff --git a/sources/talk/20190313 Why is no one signing their emails.md b/sources/talk/20190313 Why is no one signing their emails.md deleted file mode 100644 index b2b862951a..0000000000 --- a/sources/talk/20190313 Why is no one signing their emails.md +++ /dev/null @@ -1,104 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Why is no one signing their emails?) -[#]: via: (https://arp242.net/weblog/signing-emails.html) -[#]: author: (Martin Tournoij https://arp242.net/) - -Why is no one signing their emails? -====== - - -I received this email a while ago: - -> Queensland University of Technology sent you an Amazon.com Gift Card! -> -> You’ve received an Amazon.com gift card! You’ll need the claim code below to place your order. -> -> Happy shopping! - -Queensland University of Technology? Why would they send me anything? Where is that even? Australia right? That’s the other side of the world! Looks like spam! - -It did look pretty good for spam, so I took a second look. And a very close third look, and then I decided it wasn’t spam. - -I was still confused why they sent me this. A week later I remembered: half a year prior I had done an interview regarding my participation on Stack Overflow for someone’s paper; she was studying somewhere in Australia – presumably the university of Queensland. No one had ever mentioned anything about a reward or Amazon gift card so I wasn’t expecting it. 
It’s a nice bonus though. - -Here’s the thing: I’ve spent several years professionally developing email systems; I administered email servers; I read all the relevant RFCs. While there are certainly people who are more knowledgeable, I know more about email than the vast majority of the population. And I still had to take a careful look to verify the email wasn’t a phishing attempt. - -And I’m not even a target; I’m just this guy, you know? [Ask John Podesta what it is to be targeted][1]: - -> SecureWorks concluded Fancy Bear had sent Podesta an email on March 19, 2016, that had the appearance of a Google security alert, but actually contained a misleading link—a strategy known as spear-phishing. [..] The email was initially sent to the IT department as it was suspected of being a fake but was described as “legitimate” in an e-mail sent by a department employee, who later said he meant to write “illegitimate”. - -Yikes! If I was even remotely high-profile I’d be crazy paranoid about all emails I get. - -It seems to me that there is a fairly easy solution to verify the author of an email: sign it with a digital signature; PGP is probably the best existing solution right now. I don’t even care about encryption here, just signing to prevent phishing. - -PGP has a well-deserved reputation for being hard, but that’s only for certain scenarios. A lot of the problems/difficulties stem from trying to accommodate the “random person A emails random person B” use case, but this isn’t really what I care about here. “Large company with millions of users sends thousands of emails daily” is a very different use case. - -Much of the key exchange/web-of-trust dilemma can be bypassed by shipping email clients with keys for large organisations (PayPal, Google, etc.) baked in, like browsers do with some certificates. Even just publishing your key on your website (or, if you’re a bank, in local branches etc.) is already a lot better than not signing anything at all. 
Right now there seems to be a catch-22 scenario: clients don’t implement better support as very few people are using PGP, while very few companies bother signing their emails with PGP because so few people can benefit from it. - -On the end-user side, things are also not very hard; we’re just concerned with validating signatures, nothing more. For this purpose PGP isn’t hard. It’s like verifying your Linux distro’s package system: all of them sign their packages (usually with PGP) and they get verified on installation, but as an end-user I never see it unless something goes wrong. - -There are many aspects of PGP that are hard to set up and manage, but verifying signatures isn’t one of them. The user-visible part of this is very limited. Remember, no one is expected to sign their own emails: just verify that the signature is correct (which the software will do). Conceptually, it’s not that different from verifying a handwritten signature. - -DKIM and SPF already exist and are useful, but limited. All both do is verify that an email which claims to be from `amazon.com` is really from `amazon.com`. If I send an email from `mail.amazon-account-security.com` or `amazonn.com` then it just verifies that it was sent from that domain, not that it was sent from the organisation Amazon. - -What I am proposing is subtly different. In my (utopian) future every serious organisation will sign their email with PGP (just like every serious organisation uses https). Then every time I get an email which claims to be from Amazon I can see it’s either not signed, or not signed by a key I know. If adoption is broad enough we can start showing warnings such as “this email wasn’t signed, do you want to trust it?” and “this signature isn’t recognized, yikes!” - -There’s also S/MIME, which has better client support and which works more or less the same as HTTPS: you get a certificate from the Certificate Authority Mafia, sign your email with it, and presto. 
The downside of this is that anyone can sign their emails with a valid key, which isn’t necessarily telling you much (just because haxx0r.ru has a certificate doesn’t mean it’s trustworthy). - -Is it perfect? No. I understand stuff like key exchange is hard and that baking in keys isn’t perfect. Is it better? Hell yes. Would probably have avoided Podesta and the entire Democratic Party a lot of trouble. Here’s a “[sophisticated new phishing campaign][2]” targeted at PayPal users. How “sophisticated”? Well, by not having glaring stupid spelling errors, duplicating the PayPal layout in emails, duplicating the PayPal login screen, a few forms, and getting an SSL certificate. Truly, the pinnacle of Computer Science. - -Okay sure, they spent some effort on it; but any nincompoop can do it; if this passes for “sophisticated phishing” where “it’s easy to see how users could be fooled” then the bar is pretty low. - -I can’t recall receiving a single email from any organisation that is signed (much less encrypted). Banks, financial services, utilities, immigration services, governments, tax services, voting registration, Facebook, Twitter, a zillion websites … all happily sent me emails hoping I wouldn’t consider them spam and hoping I wouldn’t confuse a phishing email for one of theirs. - -Interesting experiment: send invoices for, say, a utility bill for a local provider. Just copy the layout from the last utility bill you received. I’ll bet you’ll make more money than freelancing on UpWork if you do it right. - -I’ve been intending to write this post for years, but never quite did because “surely not everyone is stupid?” I’m not a crypto expert, so perhaps I’m missing something here, but I wouldn’t know what. Let me know if I am. - -In the meanwhile PayPal is attempting to solve the problem by publishing [articles which advise you to check for spelling errors][3]. Okay, it’s good advice, but do we really want this to be the barrier between an attacker and your money? 
Or Russian hacking groups and your emails? Anyone can sign any email with any key, but “unknown signature” warnings strike me as a lot better UX than “carefully look for spelling errors or misleading domain names”. - -The way forward is to make it straight-forward to implement signing in apps and then just do it as a developer, whether asked or not; just as you set up https whether you’re asked or not. I’ll write a follow-up with more technical details later, assuming no one pokes holes in this article :-) - -#### Response to some feedback - -Some response to some feedback that I couldn’t be bothered to integrate in the article’s prose: - - * “You can’t trust webmail with crypto!” -If you use webmail then you’re already trusting the email provider with everything. What’s so bad with trusting them to verify a signature, too? - -We’re not communicating state secrets over encrypted email here; we’re just verifying the signature on “PayPal sent you a message, click here to view it”-kind of emails. - - * “Isn’t this ignoring the massive problem that is key management?” -Yes, it’s a hard problem; but that doesn’t mean it can’t be done. I already mentioned some possible solutions in the article. - - - - -**Footnotes** - - 1. We could make something better; PGP contains a lot of cruft. But for now PGP is “good enough”. 
- - - - - --------------------------------------------------------------------------------- - -via: https://arp242.net/weblog/signing-emails.html - -作者:[Martin Tournoij][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://arp242.net/ -[b]: https://github.com/lujun9972 -[1]: https://en.wikipedia.org/wiki/Podesta_emails#Data_theft -[2]: https://www.eset.com/us/about/newsroom/corporate-blog/paypal-users-targeted-in-sophisticated-new-phishing-campaign/ -[3]: https://www.paypal.com/cs/smarthelp/article/how-to-spot-fake,-spoof,-or-phishing-emails-faq2340 diff --git a/sources/talk/20190314 Why feedback, not metrics, is critical to DevOps.md b/sources/talk/20190314 Why feedback, not metrics, is critical to DevOps.md deleted file mode 100644 index b2a79226ed..0000000000 --- a/sources/talk/20190314 Why feedback, not metrics, is critical to DevOps.md +++ /dev/null @@ -1,84 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Why feedback, not metrics, is critical to DevOps) -[#]: via: (https://opensource.com/article/19/3/devops-feedback-not-metrics) -[#]: author: (Ranjith Varakantam (Red Hat) https://opensource.com/users/ranjith) - -Why feedback, not metrics, is critical to DevOps -====== - -Metrics can tell you some things, but not the most important things about how your products and teams are doing. - -![CICD with gears][1] - -Most managers and agile coaches depend on metrics over feedback from their teams, users, and even customers. In fact, quite a few use feedback and metrics synonymously, where they present feedback from teams or customers as a bunch of numbers or a graphical representation of those numbers. This is not only unfortunate, but it can be misleading as it presents only part of the story and not the entire truth. 
- -When it comes to two critical factors—how we manage or guide our teams and how we operate and influence the product that our teams are developing—few exceptional leaders and teams get it right. For one thing, it has become very easy to get your hands on data and metrics. Furthermore, it's still hard to get real feedback from teams and users. It requires significant investments and energy, and unless everyone understands the critical need for it, getting and giving feedback tends to be a low priority and keeps getting pushed to the back burner. - -### How to manage and guide teams - -With the acceptance of agile, a lot of teams have put a ridiculously high value on metrics, such as velocity, burndown charts, cumulative flow diagram (CFD), etc., instead of the value delivered by the team in each iteration or deployment. The focus is on the delivery or output produced without a clear understanding of how this relates to personal performance or implications for the project, product, or service. - -A few managers and agile coaches even abuse the true spirit of agile by misusing metrics to chastise or even penalize their teams. Instead of creating an environment of empowerment, they are slipping back into the command-and-control method where metrics are used to bully teams into submission. - -In our group, the best managers have weekly one-on-one meetings with every team member. These meetings not only give them a real pulse on team morale but also a profound understanding of the project and the decisions being made to move it forward. This weekly feedback loop also helps the team members communicate technical, functional, and even personal issues better. As a result, the team is much more cohesive in understanding the overall project needs and able to make decisions promptly. 
- -These leaders also skip levels—reaching out to team members two or three levels below them—and have frequent conversations with other group members who interact with their teams on a regular basis. These actions give the managers a holistic picture, which they couldn't get if they relied on feedback from one manager or lead, and help them identify any blind spots the leads and managers may have. - -These one-on-one meetings effectively transform a manager into a coach who has a close understanding of every team member. Like a good coach, these managers both give and receive feedback from the team members regarding the product, decision-making transparency, places where the team feels management is lagging, and areas that are being ignored. This empowers the teams by giving them a voice, not once in a while in an annual meeting or an annual survey, but every week. This is the level where DevOps teams should be in order to deliver their commitments successfully. - -This demands significant investments of time and energy, but the results more than justify it. The alternative is to rely on metrics and annual reviews and surveys, which has failed miserably. Unless we begin valuing feedback over metrics, we will keep seeing the metrics we want to see but failed projects and miserable team morale. - -### Influencing projects and product development - -We see similar behavior on the project or product side, with too few conversations with the users and developers and too much focus on metrics. Let's take the example of a piece of software that was released to the community or market, and the primary success metric is the number of downloads or installs. This can be deceiving for several reasons: - - 1. This product was packaged into another piece of software that users installed; even though the users are not even aware of your product's existence or purpose, it is still counted as a win and something the user needs. - - 2. 
The marketing team spent a huge budget promoting the product—and even offered an incentive to developers to download it. The _incentive_ drives the downloads, not user need or desire, but the metric is still considered a measure of success. - - 3. Software updates are counted as downloads, even when they are involuntary updates pushed rather than initiated by the user. This keeps bumping up the number, even though the user might have used it once, a year ago, for a specific task. - - - - -In these cases, the user automatically becomes a metric that's used to report how well the product is doing, just based on the fact it was downloaded and it's accepting updates, regardless of whether the user likes or uses the software. Instead, we should be focusing on actual usage of the product and the feedback these users have to offer us, rather than stopping short at the download numbers. - -The same holds true for SaaS products—instead of counting the number of signups, we should look at how often users use the product or service. Signups by themselves have little meaning, especially to the DevOps team where the focus is on getting constant feedback and striving for continuous improvements. - -### Gathering feedback - -So, why do we rely on metrics so much? My guess is they are easy to collect, and the marketing team is more interested in getting the product into the users' hands than evaluating how it is faring. Unless the engineering team invests quite a bit of time in collecting feedback with tracing, which captures how often the program is executed and which components are used most often, it can be difficult to collect feedback. - -A big advantage of working in an open source community is that we first release the piece of software into a community where we can get feedback. Most open source enthusiasts take the time to log issues and bugs based on their experience with the product. 
If we can supplement this data with tracing, the team has an accurate record of how the product is used. - -Open as many channels of communication as possible–chat, email, Twitter, etc.—and allow users to choose their feedback channel. - -A few DevOps teams have integrated blue-green deployments, A/B testing, and canary releases to shorten the feedback loop. Setting up these frameworks is not a trivial matter and calls for a huge upfront investment and constant updates to make them seamlessly work. But once everything is set up and data begins to flow, the team can act upon real feedback based on real user interactions with every new bit of software released. - -Most agile practitioners and lean movement activists push for a build-deploy-measure-learn cycle, and for this to happen, we need to collect feedback in addition to metrics. It might seem expensive and time consuming in the short term, but in the long run, it is a foolproof way of learning. - -### Proof that feedback pays off - -Whether it pertains to people or projects, it pays to rely on first-hand feedback rather than metrics, which are seldom interpreted in impartial ways. We have ample proof of this in other industries, where companies such as Zappos and the Virgin Group have done wonders for their business simply by listening to their customers. There is no reason we cannot follow suit, especially those of us working in open source communities. - -Feedback is the only effective way we can uncover our blind spots. Metrics are not of much help in this regard, as we can't find out what's wrong when we are dealing with unknowns. Blind spots can create serious gaps between reality and what we think we know. Feedback not only encourages continuous improvement, whether it's on a personal or a product level, but the simple act of listening and acting on it increases trust and loyalty. 
- - --------------------------------------------------------------------------------- - -via: https://opensource.com/article/19/3/devops-feedback-not-metrics - -作者:[Ranjith Varakantam (Red Hat)][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://opensource.com/users/ranjith -[b]: https://github.com/lujun9972 -[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/cicd_continuous_delivery_deployment_gears.png?itok=kVlhiEkc (CICD with gears) diff --git a/sources/talk/20190319 6 steps to stop ethical debt in AI product development.md b/sources/talk/20190319 6 steps to stop ethical debt in AI product development.md deleted file mode 100644 index 9bfe810a15..0000000000 --- a/sources/talk/20190319 6 steps to stop ethical debt in AI product development.md +++ /dev/null @@ -1,148 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (6 steps to stop ethical debt in AI product development) -[#]: via: (https://opensource.com/article/19/3/ethical-debt-ai-product-development) -[#]: author: (Lauren Maffeo https://opensource.com/users/lmaffeo) - -6 steps to stop ethical debt in AI product development -====== - -Machine bias in artificial intelligence is a known and unavoidable problem—but it is not unmanageable. - -![old school calculator][1] - -It's official: artificial intelligence (AI) isn't the unbiased genius we want it to be. - -Alphabet (Google's parent company) used its latest annual report [to warn][2] that ethical concerns about its products might hurt future revenue. Entrepreneur Joy Buolamwini established the [Safe Face Pledge][3] to prevent abuse of facial analysis technology. - -And years after St. 
George's Hospital Medical School in London was found to have used AI that inadvertently [screened out qualified female candidates][4], Amazon scrapped a recruiting tool last fall after machine learning (ML) specialists found it [doing the same thing][5]. - -We've learned the hard way that technologies built with AI are biased like people. Left unchecked, the datasets used to train such products can pose [life-or-death consequences][6] for their end users. - -For example, imagine a self-driving car that can't recognize commands from people with certain accents. If the dataset used to train the technology powering that car isn't exposed to enough voice variations and inflections, it risks not recognizing all its users as fully human. - -Here's the good news: Machine bias in AI is unavoidable—but it is _not_ unmanageable. Just like product and development teams work to reduce technical debt, you can [reduce the risk of ethical debt][7] as well. - -Here are six steps that your technical team can start taking today: - -### 1\. Document your priorities upfront - -Reducing ethical debt within your product will require you to answer two key questions in the product specification phase: - - * Which methods of fairness will you use? - * How will you prioritize them? - - - -If your team is building a product based on ML, it's not enough to reactively fix bugs or pull products from shelves. Instead, answer these questions [in your tech spec][8] so that they're included from the start of your product lifecycle. - -### 2\. Train your data under fairness constraints - -This step is tough because when you try to control or eliminate both direct and indirect bias, you'll find yourself in a Catch-22. - -If you train exclusively on non-sensitive attributes, you eliminate direct discrimination but introduce or reinforce indirect bias. - -However, if you train separate classifiers for each sensitive feature, you reintroduce direct discrimination. 
- -Another challenge is that detection can occur only after you've trained your model. When this occurs, the only recourse is to scrap the model and retrain it from scratch. - -To reduce these risks, don't just measure average strengths of acceptance and rejection across sensitive groups. Instead, use limits to determine what is or isn't included in the model you're training. When you do this, discrimination tests are expressed as restrictions and limitations on the learning process. - -### 3\. Monitor your datasets throughout the product lifecycle - -Developers build training sets based on data they hope the model will encounter. But many don't monitor the data their creations receive from the real world. - -ML products are unique in that they're continuously taking in data. New data allows the algorithms powering these products to keep refining their results. - -But such products often encounter data in deployment that differs from what they were trained on in production. It's also not uncommon for the algorithm to be updated without the model itself being revalidated. - -This risk will decrease if you appoint someone to monitor the source, history, and context of the data in your algorithm. This person should conduct continuous audits to find unacceptable behavior. - -Bias should be reduced as much as possible while maintaining an acceptable level of accuracy, as defined in the product specification. If unacceptable biases or behaviors are detected, the model should be rolled back to an earlier state prior to the first time you saw bias. - -### 4\. Use tagged training data - -We live in a world with trillions of images and videos at our fingertips, but most neural networks can't use this data for one reason: Most of it isn't tagged. - -Tagging refers to which classes are present in an image and their locations. When you tag an image, you share which classes are present and where they're located. 
- -This sounds simple—until you realize how much work it would take to draw shapes around every single person in a photo of a crowd or a box around every single person on a highway. - -Even if you succeeded, you might rush your tagging and draw your shapes sloppily, leading to a poorly trained neural network. - -The good news is that more products are coming to market so they can decrease the time and cost of tagging. - -As one example, [Brain Builder][9] is a data annotation product from Neurala that uses open source frameworks like TensorFlow and Caffe. Its goal is to help users [manage and annotate their training data][10]. It also aims to bring diverse class examples to datasets—another key step in data training. - -### 5\. Use diverse class examples - -Training data needs positive and negative examples of classes. If you want specific classes of objects, you need negative examples as well. This (hopefully) mimics the data that the algorithm will encounter in the wild. - -Consider the example of “homes” within a dataset. If the algorithm contains only images of homes in North America, it won't know to recognize homes in Japan, Morocco, or other international locations. Its concept of a “home” is thus limited. - -Neurala warns, "Most AI applications require thousands of images to be tagged, and since data tagging cost is proportional to the time spent tagging, this step alone often costs tens to hundreds of thousands of dollars per project." - -Luckily, 2018 saw a strong increase in the number of open source AI datasets. Synced has a helpful [roundup of 10 datasets][11]—from multi-label images to semantic parsing—that were open sourced last year. If you're looking for datasets by industry, GitHub [has a longer list][12]. - -### 6\. Focus on the subject, not the context - -Tech leaders in monitoring ML datasets should aim to understand how the algorithm classifies data. 
That's because AI sometimes focuses on irrelevant attributes that are shared by several targets in the training set. - -Let's start by looking at the biased training set below. Wolves were tagged standing in snow, but the model wasn't shown images of dogs. So, when dogs were introduced, the model started tagging them as wolves because both animals were standing in snow. In this case, the AI put too much emphasis on the context (a snowy backdrop). - -![Wolves in snow][13] - -Source: [Gartner][14] (full research available for clients) - -By contrast, here is a training set from Brain Builder that is focused on the subject dogs. When monitoring your own training set, make sure the AI is giving more weight to the subject of each image. If you saw an image classifier state that one of the dogs below is a wolf, you would need to know which aspects of the input led to this misclassification. This is a sign to check your training set and confirm that the data is accurate. - -![Dogs training set][15] - -Source: [Brain Builder][16] - -Reducing ethical debt isn't just the “right thing to do”—it also reduces _technical_ debt. Since programmatic bias is so tough to detect, working to reduce it, from the start of your lifecycle, will save you the need to retrain models from scratch. - -This isn't an easy or perfect job; tech teams will have to make tradeoffs between fairness and accuracy. But this is the essence of product management: compromises based on what's best for the product and its end users. - -Strategy is the soul of all strong products. If your team includes measures of fairness and algorithmic priorities from the start, you'll sail ahead of your competition. 
- -* * * - -_Lauren Maffeo will present_ [_Erase Unconscious Bias From Your AI Datasets_][17] _at[DrupalCon][18] in Seattle, April 8-12, 2019._ - -* * * - --------------------------------------------------------------------------------- - -via: https://opensource.com/article/19/3/ethical-debt-ai-product-development - -作者:[Lauren Maffeo][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://opensource.com/users/lmaffeo -[b]: https://github.com/lujun9972 -[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/math_money_financial_calculator_colors.jpg?itok=_yEVTST1 (old school calculator) -[2]: https://www.yahoo.com/news/google-warns-rise-ai-may-181710642.html?soc_src=mail&soc_trk=ma -[3]: https://www.safefacepledge.org/ -[4]: https://futurism.com/ai-bias-black-box -[5]: https://uk.reuters.com/article/us-amazon-com-jobs-automation-insight/amazon-scraps-secret-ai-recruiting-tool-that-showed-bias-against-women-idUKKCN1MK08G -[6]: https://opensource.com/article/18/10/open-source-classifiers-ai-algorithms -[7]: https://thenewstack.io/tech-ethics-new-years-resolution-dont-build-software-you-will-regret/ -[8]: https://eng.lyft.com/awesome-tech-specs-86eea8e45bb9 -[9]: https://www.neurala.com/tech -[10]: https://www.roboticsbusinessreview.com/ai/brain-builder-neurala-video-annotation/ -[11]: https://medium.com/syncedreview/2018-in-review-10-open-sourced-ai-datasets-696b3b49801f -[12]: https://github.com/awesomedata/awesome-public-datasets -[13]: https://opensource.com/sites/default/files/uploads/wolves_in_snow.png (Wolves in snow) -[14]: https://www.gartner.com/doc/3889586/control-bias-eliminate-blind-spots -[15]: https://opensource.com/sites/default/files/uploads/ai_ml_canine_recognition.png (Dogs training set) -[16]: https://www.neurala.com/ -[17]: 
https://events.drupal.org/seattle2019/sessions/erase-unconscious-bias-your-ai-datasets -[18]: https://events.drupal.org/seattle2019 diff --git a/sources/talk/20190319 Hello World Marketing (or, How I Find Good, Boring Software).md b/sources/talk/20190319 Hello World Marketing (or, How I Find Good, Boring Software).md deleted file mode 100644 index 02f7e3fcac..0000000000 --- a/sources/talk/20190319 Hello World Marketing (or, How I Find Good, Boring Software).md +++ /dev/null @@ -1,100 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Hello World Marketing (or, How I Find Good, Boring Software)) -[#]: via: (https://theartofmachinery.com/2019/03/19/hello_world_marketing.html) -[#]: author: (Simon Arneaud https://theartofmachinery.com) - -Hello World Marketing (or, How I Find Good, Boring Software) -====== - -Back in 2001 Joel Spolsky wrote his classic essay [“Good Software Takes Ten Years. Get Used To it”][1]. Nothing much has changed since then: software is still taking around a decade of development to get good, and the industry is still getting used to that fact. Unfortunately, the industry has investors who want to see hockey stick growth rates on software that’s a year old or less. The result is an antipattern I like to call “Hello World Marketing”. Once you start to notice it, you see it everywhere, and it’s a huge red flag when choosing software tools. - - -Of course, by “Hello World”, I’m referring to the programmer’s traditional first program: the one that just displays the message “Hello World”. The aim isn’t to make a useful program; it’s to make a minimal starting point. - -Hello World Marketing is about doing the same thing, but pretending that it’s useful. You’re supposed to be distracted into admiring how neatly a tool solves trivial problems, and forget about features you’ll need in real applications. 
HWM emphasises what can be done in the first five minutes, and downplays what you might need after several months. HWMed software is optimised for looking good in demos, and sounding exciting in blog posts and presentations. - -For a good example, see Nemil Dalal’s [great series of articles about the early marketing for MongoDB][2]. Notice the heavy use of hackathons, and that a lot of the marketing was about how “SQL looks like COBOL”. Now, I can criticise SQL, too, but if `SELECT` and `WHERE` are serious problems for an application, there are already hundreds of solutions like [SQLAlchemy][3] and [LINQ][4] — solutions that don’t compromise on more advanced features of traditional databases. On the other hand, if you were wondering about those advanced features, you could read vomity-worthy, hand-wavey pieces like “[Living in the post-transactional database future][5]”. - -### How I Find Good, Boring Software - -Obviously, one way to avoid HWM is to stick to software that’s much more than ten years old, and has a good reputation. But sometimes that’s not possible because the tools for a problem only came out during the last decade. Also, sometimes newer tools really do bring new benefits. - -However, it’s much harder to rely on reputation for newer software because “good reputation” often just means “popular”, which often just means “current fad”. Thankfully, there’s a simple and effective trick to avoid being dazzled by hype: just look elsewhere. Instead of looking at the marketing for the core features, look at the things that are usually forgotten. Here are the kinds of things I look at: - -#### Backups and Disaster Recovery - -Backup support is both super important and regularly an afterthought. - -The minimum viable product is full data dump/import functionality, but longer term it’s nice to have things like incremental backups. 
Some vendors will try to tell you to just copy the data files from disk, but this isn’t guaranteed to give you a consistent snapshot if the software is running live. - -There’s no point backing up data if you can’t restore it, and restoration is the difficult part. Yet many people never test the restoration (until they actually need it). About five years ago I was working with a team that had started using a new, cutting-edge, big-data database. The database looked pretty good, but I suggested we do an end-to-end test of the backup support. We loaded a cluster with one of the multi-terabyte datasets we had, did a backup, wiped the data in the cluster and then tried to restore it. Turns out we were the first people to actually try to restore a dataset of that size — the backup “worked”, but the restoration caused the cluster to crash and burn. We filed a bug report with the original database developers and they fixed it. - -Backup processes that work on small test datasets but fail on large production datasets is a recurring theme. I always recommend testing on production-sized datasets, and testing again as production data grows. - -For batch jobs, a related concept is restartability. If you’re copying large amounts of data from one place to another, and the job gets interrupted in the middle, what happens? Can you keep going from the middle? Alternatively, can you safely retry by starting from the beginning? - -#### Configuration - -A lot of HWMed software can only be configured using a GUI or web UI because that’s what’s obvious and looks good in demos and docs. For one thing, this usually means there’s no good way to back up or restore the configuration. So if a team of people use a shared instance over a year or so, forget about trying to restore it if (or when) it breaks. It’s also much more work to keep multiple deployments consistent (e.g., for dev, testing and prod environments) using separate GUIs. In practice, it just doesn’t happen. 
- -I prefer a well-commented config file for software I deploy, if nothing else because it can be checked into source control, and I know I can reproduce the deployment using nothing but what’s checked into source control. If something is configured using a UI, I look for a config export/import function. Even then, that feature is often an afterthought, and often incomplete, so it’s worth testing if it’s possible to deploy the software without ever needing to manually tweak something in the UI. - -There seems to be a recent trend for software to be configured using a REST API instead. Honestly, this is the worst of both config files and GUI-based config, and most of the time people end up using [hacky ways to put the config into a file instead][6]. - -#### Upgrades - -Life would be much easier if everything were static; software upgrade support makes everything more complicated. It’s also not usually shown in demos, so the first upgrade often ends the honeymoon with shiny, new software. - -For HA distributed systems, you’ll need support for graceful shutdown and a certain amount of forward _and_ backwards compatibility (because you’ll have multiple versions running during upgrades). It’s a common mistake to forget about downgrade support. - -Distributed systems are simpler when components have independent replicas that don’t communicate with each other. Anything with clustering (or, worse, consensus algorithms) is often extra tricky to upgrade, and worth testing. - -Things that support horizontal scaling don’t necessarily support rescaling without downtime. This is especially true whenever sharding is involved because live resharding isn’t trivial. - -Here’s a story from a certain popular container app platform. Demos showed how easy it was to launch an app on the platform, and then showed how easy it was to scale it to multiple replicas. 
What they didn’t show was the upgrade process: When you pushed a new version of your app, the first thing the platform did was _shut down all running instances of it_. Then it would upload the code to a build server and start building it — meaning downtime for however long the build took, plus the time needed to roll out the new version (if it worked). This problem has been fixed in newer releases of the platform. - -#### Security - -Even if software has no built-in access control, all-or-nothing access control is easy to implement (e.g., using a reverse proxy with HTTP basic auth). The harder problem is fine-grained access control. Sometimes you don’t care, but in some environments it makes a big difference to what features you can even use. - -Some immature software has a quick-and-dirty implementation of user-based access control, typically with a GUI for user management. For everything except the core business tool, this isn’t very useful. For human users, every project I’ve worked on has either been with a small team that just shared a single username/password, or with a large team that wanted integration with OpenID Connect, or LDAP, or whatever centralised single-sign-on (SSO) system was used by the organisation. No one wants to manually manage credentials for every tool, every time someone joins or leaves. Similarly, credentials for applications or other non-human users are better generated using an automatable approach — like a config file or API. - -Immature implementations of access control are often missing anything like user groups, but managing permissions at the user level is a time waster. Some SSO integrations only integrate users, not groups, which is a “so close yet so far” when it comes to avoiding permissions busywork. - -#### Others - -I talked about ignoring the hype, but there’s one good signal you can get from the marketing: whether the software is branded as “enterprise” software. 
Enterprise software is normally bought by someone other than the end user, so it’s usually pleasant to buy but horrible to use. The only exceptions I know of are enterprise versions of normal consumer software, and enterprise software that the buyer will also have to use. Be warned: even if a company sells enterprise software alongside consumer software, there’s no guarantee that they’re just different versions of the same product. Often they’ll be developed by separate teams with different priorities. - -A lot of the stuff in this post can be checked just by skimming through the documentation. If a tool stores data, but the documentation doesn’t mention backups, there probably isn’t any backup support. Even if there is and it’s just not documented, that’s not exactly a good sign either. So, sure, documentation quality is worth evaluating by itself. On the other hand, sometimes the documentation is better than the product, so I never trust a tool until I’ve actually tried it out. - -When I first saw Python, I knew that it was a terrible programming language because of the way it used whitespace indentation. Yeah, that was stupid. Later on I learned that 1) the syntax wasn’t a big deal, especially when I’m already indenting C-like languages in the same way, and 2) a lot of practical problems can be solved just by gluing libraries together with a few dozen lines of Python, and that was really useful. We often have strong opinions about syntax that are just prejudice. Syntax can matter, but it’s less important than how the tool integrates with the rest of the system. - -### Weighing Pros and Cons - -You never need to do deep analysis to detect the most overhyped products. Just check a few of these things and they’ll fail spectacularly. - -Even with software that looks solid, I still like to do more tests before entrusting a serious project with it. That’s not because I’m looking for excuses to nitpick and use my favourite tool instead. 
New tools often really do bring new benefits. But it’s much better to understand the pros and cons of new software, and to use it because the pros outweigh the cons, not because of how slick the Hello World demo is. - --------------------------------------------------------------------------------- - -via: https://theartofmachinery.com/2019/03/19/hello_world_marketing.html - -作者:[Simon Arneaud][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://theartofmachinery.com -[b]: https://github.com/lujun9972 -[1]: https://www.joelonsoftware.com/2001/07/21/good-software-takes-ten-years-get-used-to-it/ -[2]: https://www.nemil.com/mongo/ -[3]: https://www.sqlalchemy.org/ -[4]: https://msdn.microsoft.com/en-us/library/bb308959.aspx -[5]: https://www.mongodb.com/post/36151042528/post-transactional-future -[6]: /2017/07/15/use_terraform_with_vault.html diff --git a/sources/talk/20190320 Managing changes in open source projects.md b/sources/talk/20190320 Managing changes in open source projects.md deleted file mode 100644 index efcd5257b5..0000000000 --- a/sources/talk/20190320 Managing changes in open source projects.md +++ /dev/null @@ -1,97 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Managing changes in open source projects) -[#]: via: (https://opensource.com/article/19/3/managing-changes-open-source-projects) -[#]: author: (Ben Cotton (Red Hat, Community Moderator) https://opensource.com/users/bcotton) - -Managing changes in open source projects -====== - -Here's how to create a visible change process to support the community around an open source project. - -![scrabble letters: "time for change"][1] - -Why bother having a process for proposing changes to your open source project? 
Why not just let people do what they're doing and merge the features when they're ready? Well, you can certainly do that if you're the only person on the project. Or maybe if it's just you and a few friends. - -But if the project is large, you might need to coordinate how some of the changes land. Or, at the very least, let people know a change is coming so they can adjust if it affects the parts they work on. A visible change process is also helpful to the community. It allows them to give feedback that can improve your idea. And if nothing else, it lets people know what's coming so that they can get excited, and maybe get you a little bit of coverage on Opensource.com or the like. Basically, it's "here's what I'm going to do" instead of "here's what I did," and it might save you some headaches as you scramble to QA right before your release. - -So let's say I've convinced you that having a change process is a good idea. How do you build one? - -**[Watch my talk on this topic]** - - -### Right-size your change process - -Before we start talking about what a change process looks like, I want to make it very clear that this is not a one-size-fits-all situation. The smaller your project is—mainly in the number of contributors—the less process you'll probably need. As [Richard Hackman says][2], the number of communication channels in a team goes up exponentially with the number of people on the team. In community-driven projects, this becomes even more complicated as people come and go, and even your long-time contributors might not be checking in every day. So the change process consolidates those communication channels into a single area where people can quickly check to see if they care and then get back to whatever it is they do. - -At one end of the scale, there's the command-line Twitter client I maintain. 
The change process there is, "I pick something I want to work on, probably make a Git branch for it but I might forget that, merge it, and tag a release when I'm out of stuff that I can/want to do." At the other end is Fedora. Fedora isn't really a single project; it's a program of related projects that mostly move in the same direction. More than 200 people a week touch Fedora in a technical sense: spec file maintenance, build submission, etc. This doesn't even include the untold number of people who are working on the upstreams. And these upstreams all have their own release schedules and their own processes for how features land and when. Nobody can keep up with everything on their own, so the change process brings important changes to light. - -### Decide who needs to review changes - -One of the first things you need to consider when putting together a change process for your community is: "who needs to review changes?" This isn't necessarily approving the changes; we'll come to that shortly. But are there people who should take a look early in the process? Maybe your release engineering or infrastructure teams need to review them to make sure they don't require changes to build infrastructure. Maybe you have a legal review process to make sure licenses are in order. Or maybe you just have a change wrangler who looks to make sure all the required information is included. Or you may choose to do none of these and have change proposals go directly to the community. - -But this brings up the next step. Do you want full community feedback or only a select group to provide feedback? My preference, and what we do in Fedora, is to publish changes to the community before they're approved. But the structure of your community may fit a model where some approval body signs off on the change before it is sent to the community as an announcement. 
- -### Determine who approves changes - -Even if you lack any sort of organizational structure, someone ends up approving changes. This should reflect the norms and values of your community. The simplest form of approval is the person who proposed the change implements it. Easy peasy! In loosely organized communities, that might work. Fully democratic communities might put it to a community-wide vote. If a certain number or proportion of members votes in favor, the change is approved. Other communities may give that power to an individual or group. They could be responsible for the entire project or certain subsections. - -In Fedora, change approval is the role of the Fedora Engineering Steering Committee (FESCo). This is a nine-person body elected by community members. This gives the community the ability to remove members who are not acting in the best interests of the project but also enables relatively quick decisions without large overhead. - -In much of this article, I am simply presenting information, but I'm going to take a moment to be opinionated. For any project with a significant contributor base, a model where a small body makes approval decisions is the right approach. A pure democracy can be pretty messy. People who may have no familiarity with the technical ramifications of a change will be able to cast a binding vote. And that process is subject to "brigading," where someone brings along a large group of otherwise-uninterested people to support their position. Think about what it might look like if someone proposed changing the default text editor. Would the decision process be rational? - -### Plan how to enforce changes - -The other advantage of having a defined approval body is it can mediate conflicts between changes. What happens if two proposed changes conflict? Or if a change turns out to have a negative impact? 
Someone needs to have the authority to say "this isn't going in after all" or make sure conflicting changes are brought into agreement. Your QA team and processes will be a part of this, and maybe they're the ones who will make the final call. - -It's relatively straightforward to come up with a plan if a change doesn't work as expected or is incomplete by the deadline. If you require a contingency plan as part of the change process, then you implement that plan. The harder part is: what happens if someone makes a change that doesn't go through your change process? Here's a secret your friendly project manager doesn't want you to know: you can't force people to go through your process, particularly in community projects. - -So if something sneaks in and you don't discover it until you have a release candidate, you have a couple of options: you can let it in, or you can get someone to forcibly remove it. In either case, you'll have someone who is very unhappy. Either the person who made the change, because you kicked their work out, or the people who had to deal with the breakage it caused. (If it snuck in without anyone noticing, then it's probably not that big of a deal.) - -The answer, in either case, is going to be social pressure to follow the process. Processes are sometimes painful to follow, but a well-designed and well-maintained process will give more benefit than it costs. In this case, the benefit may be identifying breakages sooner or giving other developers a chance to take advantage of new features that are offered. And it can help prevent slips in the release schedule or hero effort from your QA team. - -### Implement your change process - -So we've thought about the life of a change proposal in your project. Throw in some deadlines that make sense for your release cadence, and you can now come up with the policy—but how do you implement it? - -First, you'll want to identify the required information for a change proposal. 
At a minimum, I'd suggest the following. You may have more requirements depending on the specifics of what your community is making and how it operates. - - * Name and summary - * Benefit to the project - * Scope - * Owner - * Test plan - * Dependencies and impacts - * Contingency plan - - - -You'll also want one or several change wranglers. These aren't gatekeepers so much as shepherds. They may not have the ability to approve or reject change proposals, but they are responsible for moving the proposals through the process. They check the proposal for completeness, submit it to the appropriate bodies, make appropriate announcements, etc. You can have people wrangle their own changes, but this can be a specialized task and will generally benefit from a dedicated person who does this regularly, instead of making community members do it less frequently. - -And you'll need some tooling to manage these changes. This could be a wiki page, a kanban board, a ticket tracker, something else, or a combination of these. But basically, you want to be able to track their state and provide some easy reporting on the status of changes. This makes it easier to know what is complete, what is at risk, and what needs to be deferred to a later release. You can use whatever works best for you, but in general, you'll want to minimize copy-and-pasting and maximize scriptability. - -### Remember to iterate - -Your change process may seem perfect. Then people will start using it. You'll discover edge cases you didn't consider. You'll find that the community hates a certain part of it. Decisions that were once valid will become invalid over time as technology and society change. In Fedora, our Features process revealed itself to be ambiguous and burdensome, so it was refined into the [Changes][3] process we use today. Even though the Changes process is much better than its predecessor, we still adjust it here and there to make sure it's best meeting the needs of the community. 
- -When designing your process, make sure it fits the size and values of your community. Consider who gets a voice and who gets a vote in approving changes. Come up with a plan for how you'll handle incomplete changes and other exceptions. Decide who will guide the changes through the process and how they'll be tracked. And once you design your change policy, write it down in a place that's easy for your community to find so that they can follow it. But most of all, remember that the process is here to serve the community; the community is not here to serve the process. - --------------------------------------------------------------------------------- - -via: https://opensource.com/article/19/3/managing-changes-open-source-projects - -作者:[Ben Cotton (Red Hat, Community Moderator)][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://opensource.com/users/bcotton -[b]: https://github.com/lujun9972 -[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/change_words_scrabble_letters.jpg?itok=mbRFmPJ1 (scrabble letters: "time for change") -[2]: https://hbr.org/2009/05/why-teams-dont-work -[3]: https://fedoraproject.org/wiki/Changes/Policy diff --git a/sources/talk/20190323 How to transition into a Developer Relations career.md b/sources/talk/20190323 How to transition into a Developer Relations career.md deleted file mode 100644 index 40de0edf21..0000000000 --- a/sources/talk/20190323 How to transition into a Developer Relations career.md +++ /dev/null @@ -1,74 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (How to transition into a Developer Relations career) -[#]: via: (https://opensource.com/article/19/3/developer-relations-career) -[#]: author: (Mary Thengvall 
https://opensource.com/users/marygrace-0) - -How to transition into a Developer Relations career -====== - -Combine your love for open source software with your love for the community in a way that allows you to invest your time in both. - -![][1] - -Let's say you've found an open source project you really love and you want to do more than just contribute. Or you love coding, but you don't want to spend the rest of your life interacting more with your computer than you do with people. How do you combine your love for open source software with your love for the community in a way that allows you to invest your time in both? - -### Developer Relations: A symbiotic relationship - -Enter community management, or as it's more commonly called in the tech industry, Developer Relations (DevRel for short). The goal of DevRel is, at its core, to empower developers. From writing content and creating documentation to supporting local meetups and bubbling up developer feedback internally, everything that a Developer Relations professional does on a day-to-day basis is for the benefit of the community. That's not to say that it doesn't benefit the company as well! After all, as Developer Relations professionals understand, if the community succeeds, so will the company. It's the best kind of symbiotic relationship! - -These hybrid roles have been around since shortly after the open source and free software movements started, but the Developer Relations industry—and the Developer Advocate role, in particular—have exploded over the past few years. So what is Developer Relations exactly? Let's start by defining "community" so that we're all on the same page: - -> **Community:** A group of people who not only share common principles, but also develop and share practices that help individuals in the group thrive. 
- -This could be a group of people who have gathered around an open source project, a particular topic such as email, or who are all in a similar job function—the DevOps community, for instance. - -As I mentioned, the role of a DevRel team is to empower the community by building up, encouraging, and amplifying the voice of the community members. While this will look slightly different at every company, depending on its goals, priorities, and direction, there are a few themes that are consistent throughout the industry. - - 1. **Listen:** Before making any plans or goals, take the time to listen. - * _Listen to your company stakeholders:_ What do they expect of your team? What do they think you should be responsible for? What metrics are they accustomed to? And what business needs do they care most about? - * _Listen to your customer community:_ What are customers' biggest pain points with your product? Where do they struggle with onboarding? Where does the documentation fail them? - * _Listen to your product's technical audience:_ What problems are they trying to solve? What could be done to make their work life easier? Where do they get their content? What technological advances are they most excited about? - - - 2. **Gather information** -Based on these answers, you can start making your plan. Find the overlapping areas where you can make your product a better fit for the larger technical audience and also make it easier for your customers to use. Figure out what content you can provide that not only answers your community's questions but also solves problems for your company's stakeholders. Learn about the areas where your co-workers struggle and see where your strengths can supplement those needs. - - - 3. **Make connections** -Above all, community managers are responsible for making connections within the community as well as between community members and coworkers. 
These connections, or "DevRel qualified leads," are what ultimately shows the business value of a community manager's work. By making connections between community members Marie and Bob, who are both interested in the latest developments in Python, or between Marie and your coworker Phil, who's responsible for developer-focused content on your website, you're making your community a valuable source of information for everyone around you. - - - -By getting to know your technical community, you become an expert on what customer needs your product can meet. With great power comes great responsibility. As the expert, you are now responsible for advocating internally for those needs, and you have the potential to make a big difference for your community. - -### Getting started - -So now what? If you're still with me, congratulations! You might just be a good fit for a Community Manager or Developer Advocate role. I'd encourage you to take community work for a test drive and see if you like the pace and the work. There's a lot of context switching and moving around between tasks, which can be a bit of an adjustment for some folks. - -Volunteer to write a blog post for your marketing team (or for [Opensource.com][2]) or help out at an upcoming conference. Apply to speak at a local meetup or offer to advise on a few technical support cases. Get to know your community members on a deeper level. - -Above all, Community Managers are 100% driven by a passion for building technical communities and bringing people together. If that resonates with you, it may be time for a career change! - -I love talking to professionals that help others grow through community and Developer Relations practices. Don't hesitate to [reach out to me][3] if you have any questions or send me a [DM on Twitter][4]. 
- --------------------------------------------------------------------------------- - -via: https://opensource.com/article/19/3/developer-relations-career - -作者:[Mary Thengvall][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://opensource.com/users/marygrace-0 -[b]: https://github.com/lujun9972 -[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/resume_career_document_general.png?itok=JEaFL2XI -[2]: https://opensource.com/how-submit-article -[3]: https://www.marythengvall.com/about -[4]: http://twitter.com/mary_grace diff --git a/sources/talk/20190328 Continuous response- The essential process we-re ignoring in DevOps.md b/sources/talk/20190328 Continuous response- The essential process we-re ignoring in DevOps.md deleted file mode 100644 index f0f0190742..0000000000 --- a/sources/talk/20190328 Continuous response- The essential process we-re ignoring in DevOps.md +++ /dev/null @@ -1,115 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Continuous response: The essential process we're ignoring in DevOps) -[#]: via: (https://opensource.com/article/19/3/continuous-response-devops) -[#]: author: (Randy Bias https://opensource.com/users/randybias) - -Continuous response: The essential process we're ignoring in DevOps -====== -You probably practice CI and CD, but if you aren't thinking about -continuous response, you aren't really doing DevOps. -![CICD with gears][1] - -Continuous response (CR) is an overlooked link in the DevOps process chain. The two other major links—[continuous integration (CI) and continuous delivery (CD)][2]—are well understood, but CR is not. 
Yet, CR is the essential element of follow-through required to make customers happy and fulfill the promise of greater speed and agility. At the heart of the DevOps movement is the need for greater velocity and agility to bring businesses into our new digital age. CR plays a pivotal role in enabling this. - -### Defining CR - -We need a crisp definition of CR to move forward with breaking it down. To put it into context, let's revisit the definitions of continuous integration (CI) and continuous delivery (CD). Here are Gartner's definitions when I wrote this them down in 2017: - -> [Continuous integration][3] is the practice of integrating, building, testing, and delivering functional software on a scheduled, repeatable, and automated basis. -> -> Continuous delivery is a software engineering approach where teams keep producing valuable software in short cycles while ensuring that the software can be reliably released at any time. - -I propose the following definition for CR: - -> Continuous response is a practice where developers and operators instrument, measure, observe, and manage their deployed software looking for changes in performance, resiliency, end-user behavior, and security posture and take corrective actions as necessary. - -We can argue about whether these definitions are 100% correct. They are good enough for our purposes, which is framing the definition of CR in rough context so we can understand it is really just the last link in the chain of a holistic cycle. - -![The holistic DevOps cycle][4] - -What is this multi-colored ring, you ask? It's the famous [OODA Loop][5]. Before continuing, let's touch on what the OODA Loop is and why it's relevant to DevOps. We'll keep it brief though, as there is already a long history between the OODA Loop and DevOps. - -#### A brief aside: The OODA Loop - -At the heart of core DevOps thinking is using the OODA Loop to create a proactive process for evolving and responding to changing environments. 
A quick [web search][6] makes it easy to learn the long history between the OODA Loop and DevOps, but if you want the deep dive, I highly recommend [The Tao of Boyd: How to Master the OODA Loop][7]. - -Here is the "evolved OODA Loop" presented by John Boyd: - -![OODA Loop][8] - -The most important thing to understand about the OODA Loop is that it's a cognitive process for adapting to and handling changing circumstances. - -The second most important thing to understand about the OODA Loop is, since it is a thought process that is meant to evolve, it depends on driving feedback back into the earlier parts of the cycle as you iterate. - -As you can see in the diagram above, CI, CD, and CR are all their own isolated OODA Loops within the overall DevOps OODA Loop. The key here is that each OODA Loop is an evolving thought process for how test, release, and success are measured. Simply put, those who can execute on the OODA Loop fastest will win. - -Put differently, DevOps wants to drive speed (executing the OODA Loop faster) combined with agility (taking feedback and using it to constantly adjust the OODA Loop). This is why CR is a vital piece of the DevOps process. We must drive production feedback into the DevOps maturation process. The DevOps notion of Culture, Automation, Measurement, and Sharing ([CAMS][9]) partially but inadequately captures this, whereas CR provides a much cleaner continuation of CI/CD in my mind. - -### Breaking CR down - -CR has more depth and breadth than CI or CD. This is natural, given that what we're categorizing is the post-deployment process by which our software is taking a variety of actions from autonomic responses to analytics of customer experience. I think, when it's broken down, there are three key buckets that CR components fall into. Each of these three areas forms a complete OODA Loop; however, the level of automation throughout the OODA Loop varies significantly. 
- -The following table will help clarify the three areas of CR: - -CR Type | Purpose | Examples ----|---|--- -Real-time | Autonomics for availability and resiliency | Auto-scaling, auto-healing, developer-in-the-loop automated responses to real-time failures, automated root-cause analysis -Analytic | Feature/fix pipeline | A/B testing, service response times, customer interaction models -Predictive | History-based planning | Capacity planning, hardware failure prediction models, cost-basis analysis - -_Real-time CR_ is probably the best understood of the three. This kind of CR is where our software has been instrumented for known issues and can take an immediate, automated response (autonomics). Examples of known issues include responding to high or low demand (e.g., elastic auto-scaling), responding to expected infrastructure resource failures (e.g., auto-healing), and responding to expected distributed application failures (e.g., circuit breaker pattern). In the future, we will see machine learning (ML) and similar technologies applied to automated root-cause analysis and event correlation, which will then provide a path towards "no ops" or "zero ops" operational models. - -_Analytic CR_ is still the most manual of the CR processes. This kind of CR is focused primarily on observing end-user experience and providing feedback to the product development cycle to add features or fix existing functionality. Examples of this include traditional A/B website testing, measuring page-load times or service-response times, post-mortems of service failures, and so on. - -_Predictive CR_ , due to the resurgence of AI and ML, is one of the innovation areas in CR. It uses historical data to predict future needs. ML techniques are allowing this area to become more fully automated. 
Examples include automated and predictive capacity planning (primarily for the infrastructure layer), automated cost-basis analysis of service delivery, and real-time reallocation of infrastructure resources to resolve capacity and hardware failure issues before they impact the end-user experience. - -### Diving deeper on CR - -CR, like CI or CD, is a DevOps process supported by a set of underlying tools. CI and CD are not Jenkins, unit tests, or automated deployments alone. They are a process flow. Similarly, CR is a process flow that begins with the delivery of new code via CD, which open source tools like [Spinnaker][10] give us. CR is not monitoring, machine learning, or auto-scaling, but a diverse set of processes that occur after code deployment, supported by a variety of tools. CR is also different in two specific ways. - -First, it is different because, by its nature, it is broader. The general software development lifecycle (SDLC) process means that most [CI/CD processes][11] are similar. However, code running in production differs from app to app or service to service. This means that CR differs as well. - -Second, CR is different because it is nascent. Like CI and CD before it, the process and tools existed before they had a name. Over time, CI/CD became more normalized and easier to scope. CR is new, hence there is lots of room to discuss what's in or out. I welcome your comments in this regard and hope you will run with these ideas. - -### CR: Closing the loop on DevOps - -DevOps arose because of the need for greater service delivery velocity and agility. Essentially, DevOps is an extension of agile software development practices to an operational mindset. It's a direct response to the flexibility and automation possibilities that cloud computing affords. However, much of the thinking on DevOps to date has focused on deploying the code to production and ends there. But our jobs don't end there. 
As professionals, we must also make certain our code is behaving as expected, we are learning as it runs in production, and we are taking that learning back into the product development process. - -This is where CR lives and breathes. DevOps without CR is the same as saying there is no OODA Loop around the DevOps process itself. It's like saying that operators' and developers' jobs end with the code being deployed. We all know this isn't true. Customer experience is the ultimate measurement of our success. Can people use the software or service without hiccups or undue friction? If not, we need to fix it. CR is the final link in the DevOps chain that enables delivering the truest customer experience. - -If you aren't thinking about continuous response, you aren't doing DevOps. Share your thoughts on CR, and tell me what you think about the concept and the definition. - -* * * - -_This article is based on[The Essential DevOps Process We're Ignoring: Continuous Response][12], which originally appeared on the Cloudscaling blog under a [CC BY 4.0][13] license and is republished with permission._ - --------------------------------------------------------------------------------- - -via: https://opensource.com/article/19/3/continuous-response-devops - -作者:[Randy Bias][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://opensource.com/users/randybias -[b]: https://github.com/lujun9972 -[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/cicd_continuous_delivery_deployment_gears.png?itok=kVlhiEkc (CICD with gears) -[2]: https://opensource.com/article/18/8/what-cicd -[3]: https://www.gartner.com/doc/3187420/guidance-framework-continuous-integration-continuous -[4]: https://opensource.com/sites/default/files/uploads/holistic-devops-cycle-smaller.jpeg (The holistic DevOps cycle) -[5]: 
https://en.wikipedia.org/wiki/OODA_loop -[6]: https://www.google.com/search?q=site%3Ablog.b3k.us+ooda+loop&rlz=1C5CHFA_enUS730US730&oq=site%3Ablog.b3k.us+ooda+loop&aqs=chrome..69i57j69i58.8660j0j4&sourceid=chrome&ie=UTF-8#q=devops+ooda+loop&* -[7]: http://www.artofmanliness.com/2014/09/15/ooda-loop/ -[8]: https://opensource.com/sites/default/files/uploads/ooda-loop-2-1.jpg (OODA Loop) -[9]: https://itrevolution.com/devops-culture-part-1/ -[10]: https://www.spinnaker.io -[11]: https://opensource.com/article/18/12/cicd-tools-sysadmins -[12]: http://cloudscaling.com/blog/devops/the-essential-devops-process-were-ignoring-continuous-response/ -[13]: https://creativecommons.org/licenses/by/4.0/ diff --git a/sources/talk/20190328 Why do organizations have open secrets.md b/sources/talk/20190328 Why do organizations have open secrets.md deleted file mode 100644 index 8a4c8c0017..0000000000 --- a/sources/talk/20190328 Why do organizations have open secrets.md +++ /dev/null @@ -1,77 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Why do organizations have open secrets?) -[#]: via: (https://opensource.com/open-organization/19/3/open-secrets-bystander-effect) -[#]: author: (Laura Hilliger https://opensource.com/users/laurahilliger/users/maryjo) - -Why do organizations have open secrets? -====== -Everyone sees something, but no one says anything—that's the bystander -effect. And it's damaging your organizational culture. -![][1] - -[The five characteristics of an open organization][2] must work together to ensure healthy and happy communities inside our organizations. Even the most transparent teams, departments, and organizations require equal doses of additional open principles—like inclusivity and collaboration—to avoid dysfunction. - -The "open secrets" phenomenon illustrates the limitations of transparency when unaccompanied by additional open values. 
[A recent article in Harvard Business Review][3] explored the way certain organizational issues—widely apparent but seemingly impossible to solve—lead to discomfort in the workforce. Authors Insiya Hussain and Subra Tangirala performed a number of studies, and found that the more people in an organization who knew about a particular "secret," be it a software bug or a personnel issue, the less likely any one person would be to report the issue or otherwise _do_ something about it. - -Hussain and Tangirala explain that so-called "open secrets" are the result of a [bystander effect][4], which comes into play when people think, "Well, if _everyone_ knows, surely _I_ don't need to be the one to point it out." The authors mention several causes of this behavior, but let's take a closer look at why open secrets might be circulating in your organization—with an eye on what an open leader might do to [create a safe space for whistleblowing][5]. - -### 1\. Fear - -People don't want to complain about a known problem only to have their complaint be the one that initiates the quality assurance, integrity, or redress process. What if new information emerges that makes their report irrelevant? What if they are simply _wrong_? - -At the root of all bystander behavior is fear—fear of repercussions, fear of losing reputation or face, or fear that the very thing you've stood up against turns out to be a non-issue for everyone else. Going on record as "the one who reported" carries with it a reputational risk that is very intimidating. - -The first step to ensuring that your colleagues report malicious behavior, code, or _whatever_ needs reporting is to create a fear-free workplace. We're inundated with the idea that making a mistake is bad or wrong. We're taught that we have to "protect" our reputations. However, the qualities of a good and moral character are _always_ subjective. 
- -_Tip for leaders_ : Reward courage and strength every time you see it, regardless of whether you deem it "necessary." For example, if in a meeting where everyone except one person agrees on something, spend time on that person's concerns. Be patient and kind in helping that person change their mind, and be open minded about that person being able to change yours. Brains work in different ways; never forget that one person might have a perspective that changes the lay of the land. - -### 2\. Policies - -Usually, complaint procedures and policies are designed to ensure fairness towards all parties involved in the complaint. Discouraging false reporting and ensuring such fairness in situations like these is certainly a good idea. But policies might actually deter people from standing up—because a victim might be discouraged from reporting an experience if the formal policy for reporting doesn't make them feel protected. Standing up to someone in a position of power and saying "Your behavior is horrid, and I'm not going to take it" isn't easy for anyone, but it's particularly difficult for marginalized groups. - -The "open secrets" phenomenon illustrates the limitations of transparency when unaccompanied by additional open values. - -To ensure fairness to all parties, we need to adjust for victims. As part of making the decision to file a report, a victim will be dealing with a variety of internal fears. They'll wonder what might happen to their self-worth if they're put in a situation where they have to talk to someone about their experience. They'll wonder if they'll be treated differently if they're the one who stands up, and how that will affect their future working environments and relationships. Especially in a situation involving an open secret, asking a victim to be strong is asking them to have to trust that numerous other people will back them up. This fear shouldn't be part of their workplace experience; it's just not fair. 
- -Remember that if one feels responsible for a problem (e.g., "Crap, that's _my code_ that's bringing down the whole server!"), then that person might feel fear at pointing out the mistake. _The important thing is dealing with the situation, not finding someone to blame._ Policies that make people feel personally protected—no matter what the situation—are absolutely integral to ensuring the organization deals with open secrets. - -_Tip for leaders_ : Make sure your team's or organization's policy regarding complaints makes anonymous reporting possible. Asking a victim to "go on record" puts them in the position of having to defend their perspective. If they feel they're the victim of harassment, they're feeling as if they are harassed _and_ being asked to defend their experience. This means they're doing double the work of the perpetrator, who only has to defend themselves. - -### 3\. Marginalization - -Women, LGBTQ people, racial minorities, people with physical disabilities, people who are neuro-atypical, and other marginalized groups often find themselves in positions that them feel routinely dismissed, disempowered, disrespected—and generally dissed. These feelings are valid (and shouldn't be too surprising to anyone who has spent some time looking at issues of diversity and inclusion). Our emotional safety matters, and we tend to be quite protective of it—even if it means letting open secrets go unaddressed. - -Marginalized groups have enough worries weighing on them, even when they're _not_ running the risk of damaging their relationships with others at work. Being seen and respected in both an organization and society more broadly is difficult enough _without_ drawing potentially negative attention. - -Policies that make people feel personally protected—no matter what the situation—are absolutely integral to ensuring the organization deals with open secrets. 
- -Luckily, in recent years attitudes towards marginalized groups have become visible, and we as a society have begun to talk about our experiences as "outliers." We've also come to realize that marginalized groups aren't actually "outliers" at all; we can thank the colorful, beautiful internet for that. - -_Tip for leaders_ : Diversity and inclusion plays a role in dispelling open secrets. Make sure your diversity and inclusion practices and policies truly encourage a diverse workplace. - -### Model the behavior - -The best way to create a safe workplace and give people the ability to call attention to pervasive problems found within it is to _model the behaviors that you want other people to display_. Dysfunction occurs in cultures that don't pay attention to and value the principles upon which they are built. In order to discourage bystander behavior, transparent, inclusive, adaptable and collaborative communities must create policies that support calling attention to open secrets and then empathetically dealing with whatever the issue may be. 
- --------------------------------------------------------------------------------- - -via: https://opensource.com/open-organization/19/3/open-secrets-bystander-effect - -作者:[Laura Hilliger][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://opensource.com/users/laurahilliger/users/maryjo -[b]: https://github.com/lujun9972 -[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/OSDC_secret_ingredient_520x292.png?itok=QbKzJq-N -[2]: https://opensource.com/open-organization/resources/open-org-definition -[3]: https://hbr.org/2019/01/why-open-secrets-exist-in-organizations -[4]: https://www.psychologytoday.com/us/basics/bystander-effect -[5]: https://opensource.com/open-organization/19/2/open-leaders-whistleblowers diff --git a/sources/talk/20190331 Codecademy vs. The BBC Micro.md b/sources/talk/20190331 Codecademy vs. The BBC Micro.md deleted file mode 100644 index a85aadad46..0000000000 --- a/sources/talk/20190331 Codecademy vs. The BBC Micro.md +++ /dev/null @@ -1,145 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Codecademy vs. The BBC Micro) -[#]: via: (https://twobithistory.org/2019/03/31/bbc-micro.html) -[#]: author: (Two-Bit History https://twobithistory.org) - -Codecademy vs. The BBC Micro -====== - -In the late 1970s, the computer, which for decades had been a mysterious, hulking machine that only did the bidding of corporate overlords, suddenly became something the average person could buy and take home. An enthusiastic minority saw how great this was and rushed to get a computer of their own. For many more people, the arrival of the microcomputer triggered helpless anxiety about the future. 
An ad from a magazine at the time promised that a home computer would “give your child an unfair advantage in school.” It showed a boy in a smart blazer and tie eagerly raising his hand to answer a question, while behind him his dim-witted classmates look on sullenly. The ad and others like it implied that the world was changing quickly and, if you did not immediately learn how to use one of these intimidating new devices, you and your family would be left behind. - -In the UK, this anxiety metastasized into concern at the highest levels of government about the competitiveness of the nation. The 1970s had been, on the whole, an underwhelming decade for Great Britain. Both inflation and unemployment had been high. Meanwhile, a series of strikes put London through blackout after blackout. A government report from 1979 fretted that a failure to keep up with trends in computing technology would “add another factor to our poor industrial performance.”[1][1] The country already seemed to be behind in the computing arena—all the great computer companies were American, while integrated circuits were being assembled in Japan and Taiwan. - -In an audacious move, the BBC, a public service broadcaster funded by the government, decided that it would solve Britain’s national competitiveness problems by helping Britons everywhere overcome their aversion to computers. It launched the _Computer Literacy Project_, a multi-pronged educational effort that involved several TV series, a few books, a network of support groups, and a specially built microcomputer known as the BBC Micro. The project was so successful that, by 1983, an editor for BYTE Magazine wrote, “compared to the US, proportionally more of Britain’s population is interested in microcomputers.”[2][2] The editor marveled that there were more people at the Fifth Personal Computer World Show in the UK than had been to that year’s West Coast Computer Faire. 
Over a sixth of Great Britain watched an episode in the first series produced for the _Computer Literacy Project_ and 1.5 million BBC Micros were ultimately sold.[3][3] - -[An archive][4] containing every TV series produced and all the materials published for the _Computer Literacy Project_ was put on the web last year. I’ve had a huge amount of fun watching the TV series and trying to imagine what it would have been like to learn about computing in the early 1980s. But what’s turned out to be more interesting is how computing was _taught_. Today, we still worry about technology leaving people behind. Wealthy tech entrepreneurs and governments spend lots of money trying to teach kids “to code.” We have websites like Codecademy that make use of new technologies to teach coding interactively. One would assume that this approach is more effective than a goofy ’80s TV series. But is it? - -### The Computer Literacy Project - -The microcomputer revolution began in 1975 with the release of [the Altair 8800][5]. Only two years later, the Apple II, TRS-80, and Commodore PET had all been released. Sales of the new computers exploded. In 1978, the BBC explored the dramatic societal changes these new machines were sure to bring in a documentary called “Now the Chips Are Down.” - -The documentary was alarming. Within the first five minutes, the narrator explains that microelectronics will “totally revolutionize our way of life.” As eerie synthesizer music plays, and green pulses of electricity dance around a magnified microprocessor on screen, the narrator argues that the new chips are why “Japan is abandoning its ship building, and why our children will grow up without jobs to go to.” The documentary goes on to explore how robots are being used to automate car assembly and how the European watch industry has lost out to digital watch manufacturers in the United States. 
It castigates the British government for not doing more to prepare the country for a future of mass unemployment. - -The documentary was supposedly shown to the British Cabinet.[4][6] Several government agencies, including the Department of Industry and the Manpower Services Commission, became interested in trying to raise awareness about computers among the British public. The Manpower Services Commission provided funds for a team from the BBC’s education division to travel to Japan, the United States, and other countries on a fact-finding trip. This research team produced a report that cataloged the ways in which microelectronics would indeed mean major changes for industrial manufacturing, labor relations, and office work. In late 1979, it was decided that the BBC should make a ten-part TV series that would help regular Britons “learn how to use and control computers and not feel dominated by them.”[5][7] The project eventually became a multimedia endeavor similar to the _Adult Literacy Project_, an earlier BBC undertaking involving both a TV series and supplemental courses that helped two million people improve their reading. - -The producers behind the _Computer Literacy Project_ were keen for the TV series to feature “hands-on” examples that viewers could try on their own if they had a microcomputer at home. These examples would have to be in BASIC, since that was the language (really the entire shell) used on almost all microcomputers. But the producers faced a thorny problem: Microcomputer manufacturers all had their own dialects of BASIC, so no matter which dialect they picked, they would inevitably alienate some large fraction of their audience. The only real solution was to create a new BASIC—BBC BASIC—and a microcomputer to go along with it. Members of the British public would be able to buy the new microcomputer and follow along without worrying about differences in software or hardware. 
- -The TV producers and presenters at the BBC were not capable of building a microcomputer on their own. So they put together a specification for the computer they had in mind and invited British microcomputer companies to propose a new machine that met the requirements. The specification called for a relatively powerful computer because the BBC producers felt that the machine should be able to run real, useful applications. Technical consultants for the _Computer Literacy Project_ also suggested that, if it had to be a BASIC dialect that was going to be taught to the entire nation, then it had better be a good one. (They may not have phrased it exactly that way, but I bet that’s what they were thinking.) BBC BASIC would make up for some of BASIC’s usual shortcomings by allowing for recursion and local variables.[6][8] - -The BBC eventually decided that a Cambridge-based company called Acorn Computers would make the BBC Micro. In choosing Acorn, the BBC passed over a proposal from Clive Sinclair, who ran a company called Sinclair Research. Sinclair Research had brought mass-market microcomputing to the UK in 1980 with the Sinclair ZX80. Sinclair’s new computer, the ZX81, was cheap but not powerful enough for the BBC’s purposes. Acorn’s new prototype computer, known internally as the Proton, would be more expensive but more powerful and expandable. The BBC was impressed. The Proton was never marketed or sold as the Proton because it was instead released in December 1981 as the BBC Micro, also affectionately called “The Beeb.” You could get a 16k version for £235 and a 32k version for £335. - -In 1980, Acorn was an underdog in the British computing industry. But the BBC Micro helped establish the company’s legacy. Today, the world’s most popular microprocessor instruction set is the ARM architecture. 
“ARM” now stands for “Advanced RISC Machine,” but originally it stood for “Acorn RISC Machine.” ARM Holdings, the company behind the architecture, was spun out from Acorn in 1990. - -![Picture of the BBC Micro.][9] _A bad picture of a BBC Micro, taken by me at the Computer History Museum -in Mountain View, California._ - -### The Computer Programme - -A dozen different TV series were eventually produced as part of the _Computer Literacy Project_, but the first of them was a ten-part series known as _The Computer Programme_. The series was broadcast over ten weeks at the beginning of 1982. A million people watched each week-night broadcast of the show; a quarter million watched the reruns on Sunday and Monday afternoon. - -The show was hosted by two presenters, Chris Serle and Ian McNaught-Davis. Serle plays the neophyte while McNaught-Davis, who had professional experience programming mainframe computers, plays the expert. This was an inspired setup. It made for [awkward transitions][10]—Serle often goes directly from a conversation with McNaught-Davis to a bit of walk-and-talk narration delivered to the camera, and you can’t help but wonder whether McNaught-Davis is still standing there out of frame or what. But it meant that Serle could voice the concerns that the audience would surely have. He can look intimidated by a screenful of BASIC and can ask questions like, “What do all these dollar signs mean?” At several points during the show, Serle and McNaught-Davis sit down in front of a computer and essentially pair program, with McNaught-Davis providing hints here and there while Serle tries to figure it out. It would have been much less relatable if the show had been presented by a single, all-knowing narrator. - -The show also made an effort to demonstrate the many practical applications of computing in the lives of regular people. By the early 1980s, the home computer had already begun to be associated with young boys and video games. 
The producers behind _The Computer Programme_ sought to avoid interviewing “impressively competent youngsters,” as that was likely “to increase the anxieties of older viewers,” a demographic that the show was trying to attract to computing.[7][11] In the first episode of the series, Gill Nevill, the show’s “on location” reporter, interviews a woman that has bought a Commodore PET to help manage her sweet shop. The woman (her name is Phyllis) looks to be 60-something years old, yet she has no trouble using the computer to do her accounting and has even started using her PET to do computer work for other businesses, which sounds like the beginning of a promising freelance career. Phyllis says that she wouldn’t mind if the computer work grew to replace her sweet shop business since she enjoys the computer work more. This interview could instead have been an interview with a teenager about how he had modified _Breakout_ to be faster and more challenging. But that would have been encouraging to almost nobody. On the other hand, if Phyllis, of all people, can use a computer, then surely you can too. - -While the show features lots of BASIC programming, what it really wants to teach its audience is how computing works in general. The show explains these general principles with analogies. In the second episode, there is an extended discussion of the Jacquard loom, which accomplishes two things. First, it illustrates that computers are not based only on magical technology invented yesterday—some of the foundational principles of computing go back two hundred years and are about as simple as the idea that you can punch holes in card to control a weaving machine. Second, the interlacing of warp and weft threads is used to demonstrate how a binary choice (does the weft thread go above or below the warp thread?) is enough, when repeated over and over, to produce enormous variation. This segues, of course, into a discussion of how information can be stored using binary digits. 
- -Later in the show there is a section about a steam organ that plays music encoded in a long, segmented roll of punched card. This time the analogy is used to explain subroutines in BASIC. Serle and McNaught-Davis lay out the whole roll of punched card on the floor in the studio, then point out the segments where it looks like a refrain is being repeated. McNaught-Davis explains that a subroutine is what you would get if you cut out those repeated segments of card and somehow added an instruction to go back to the original segment that played the refrain for the first time. This is a brilliant explanation and probably one that stuck around in people’s minds for a long time afterward. - -I’ve picked out only a few examples, but I think in general the show excels at demystifying computers by explaining the principles that computers rely on to function. The show could instead have focused on teaching BASIC, but it did not. This, it turns out, was very much a conscious choice. In a retrospective written in 1983, John Radcliffe, the executive producer of the _Computer Literacy Project_, wrote the following: - -> If computers were going to be as important as we believed, some genuine understanding of this new subject would be important for everyone, almost as important perhaps as the capacity to read and write. Early ideas, both here and in America, had concentrated on programming as the main route to computer literacy. However, as our thinking progressed, although we recognized the value of “hands-on” experience on personal micros, we began to place less emphasis on programming and more on wider understanding, on relating micros to larger machines, encouraging people to gain experience with a range of applications programs and high-level languages, and relating these to experience in the real world of industry and commerce…. Our belief was that once people had grasped these principles, at their simplest, they would be able to move further forward into the subject. 
- -Later, Radcliffe writes, in a similar vein: - -> There had been much debate about the main explanatory thrust of the series. One school of thought had argued that it was particularly important for the programmes to give advice on the practical details of learning to use a micro. But we had concluded that if the series was to have any sustained educational value, it had to be a way into the real world of computing, through an explanation of computing principles. This would need to be achieved by a combination of studio demonstration on micros, explanation of principles by analogy, and illustration on film of real-life examples of practical applications. Not only micros, but mini computers and mainframes would be shown. - -I love this, particularly the part about mini-computers and mainframes. The producers behind _The Computer Programme_ aimed to help Britons get situated: Where had computing been, and where was it going? What can computers do now, and what might they do in the future? Learning some BASIC was part of answering those questions, but knowing BASIC alone was not seen as enough to make someone computer literate. - -### Computer Literacy Today - -If you google “learn to code,” the first result you see is a link to Codecademy’s website. If there is a modern equivalent to the _Computer Literacy Project_, something with the same reach and similar aims, then it is Codecademy. - -“Learn to code” is Codecademy’s tagline. I don’t think I’m the first person to point this out—in fact, I probably read this somewhere and I’m now ripping it off—but there’s something revealing about using the word “code” instead of “program.” It suggests that the important thing you are learning is how to decode the code, how to look at a screen’s worth of Python and not have your eyes glaze over. I can understand why to the average person this seems like the main hurdle to becoming a professional programmer. 
Professional programmers spend all day looking at computer monitors covered in gobbledygook, so, if I want to become a professional programmer, I better make sure I can decipher the gobbledygook. But dealing with syntax is not the most challenging part of being a programmer, and it quickly becomes almost irrelevant in the face of much bigger obstacles. Also, armed only with knowledge of a programming language’s syntax, you may be able to _read_ code but you won’t be able to _write_ code to solve a novel problem. - -I recently went through Codecademy’s “Code Foundations” course, which is the course that the site recommends you take if you are interested in programming (as opposed to web development or data science) and have never done any programming before. There are a few lessons in there about the history of computer science, but they are perfunctory and poorly researched. (Thank heavens for [this noble internet vigilante][12], who pointed out a particularly egregious error.) The main focus of the course is teaching you about the common structural elements of programming languages: variables, functions, control flow, loops. In other words, the course focuses on what you would need to know to start seeing patterns in the gobbledygook. - -To be fair to Codecademy, they offer other courses that look meatier. But even courses such as their “Computer Science Path” course focus almost exclusively on programming and concepts that can be represented in programs. One might argue that this is the whole point—Codecademy’s main feature is that it gives you little interactive programming lessons with automated feedback. There also just isn’t enough room to cover more because there is only so much you can stuff into somebody’s brain in a little automated lesson. 
But the producers at the BBC tasked with kicking off the _Computer Literacy Project_ also had this problem; they recognized that they were limited by their medium and that “the amount of learning that would take place as a result of the television programmes themselves would be limited.”[8][13] With similar constraints on the volume of information they could convey, they chose to emphasize general principles over learning BASIC. Couldn’t Codecademy replace a lesson or two with an interactive visualization of a Jacquard loom weaving together warp and weft threads? - -I’m banging the drum for “general principles” loudly now, so let me just explain what I think they are and why they are important. There’s a book by J. Clark Scott about computers called _But How Do It Know?_ The title comes from the anecdote that opens the book. A salesman is explaining to a group of people that a thermos can keep hot food hot and cold food cold. A member of the audience, astounded by this new invention, asks, “But how do it know?” The joke of course is that the thermos is not perceiving the temperature of the food and then making a decision—the thermos is just constructed so that cold food inevitably stays cold and hot food inevitably stays hot. People anthropomorphize computers in the same way, believing that computers are digital brains that somehow “choose” to do one thing or another based on the code they are fed. But learning a few things about how computers work, even at a rudimentary level, takes the homunculus out of the machine. That’s why the Jacquard loom is such a good go-to illustration. It may at first seem like an incredible device. It reads punch cards and somehow “knows” to weave the right pattern! The reality is mundane: Each row of holes corresponds to a thread, and where there is a hole in that row the corresponding thread gets lifted. 
Understanding this may not help you do anything new with computers, but it will give you the confidence that you are not dealing with something magical. We should impart this sense of confidence to beginners as soon as we can. - -Alas, it’s possible that the real problem is that nobody wants to learn about the Jacquard loom. Judging by how Codecademy emphasizes the professional applications of what it teaches, many people probably start using Codecademy because they believe it will help them “level up” their careers. They believe, not unreasonably, that the primary challenge will be understanding the gobbledygook, so they want to “learn to code.” And they want to do it as quickly as possible, in the hour or two they have each night between dinner and collapsing into bed. Codecademy, which after all is a business, gives these people what they are looking for—not some roundabout explanation involving a machine invented in the 18th century. - -The _Computer Literacy Project_, on the other hand, is what a bunch of producers and civil servants at the BBC thought would be the best way to educate the nation about computing. I admit that it is a bit elitist to suggest we should laud this group of people for teaching the masses what they were incapable of seeking out on their own. But I can’t help but think they got it right. Lots of people first learned about computing using a BBC Micro, and many of these people went on to become successful software developers or game designers. [As I’ve written before][14], I suspect learning about computing at a time when computers were relatively simple was a huge advantage. But perhaps another advantage these people had is shows like _The Computer Programme_, which strove to teach not just programming but also how and why computers can run programs at all. 
After watching _The Computer Programme_, you may not understand all the gobbledygook on a computer screen, but you don’t really need to because you know that, whatever the “code” looks like, the computer is always doing the same basic thing. After a course or two on Codecademy, you understand some flavors of gobbledygook, but to you a computer is just a magical machine that somehow turns gobbledygook into running software. That isn’t computer literacy. - -_If you enjoyed this post, more like it come out every four weeks! Follow [@TwoBitHistory][15] on Twitter or subscribe to the [RSS feed][16] to make sure you know when a new post is out._ - -_Previously on TwoBitHistory…_ - -> FINALLY some new damn content, amirite? -> -> Wanted to write an article about how Simula bought us object-oriented programming. It did that, but early Simula also flirted with a different vision for how OOP would work. Wrote about that instead! -> -> — TwoBitHistory (@TwoBitHistory) [February 1, 2019][17] - - 1. Robert Albury and David Allen, Microelectronics, report (1979). [↩︎][18] - - 2. Gregg Williams, “Microcomputing, British Style”, Byte Magazine, 40, January 1983, accessed on March 31, 2019, . [↩︎][19] - - 3. John Radcliffe, “Toward Computer Literacy,” Computer Literacy Project Achive, 42, accessed March 31, 2019, [https://computer-literacy-project.pilots.bbcconnectedstudio.co.uk/media/Towards Computer Literacy.pdf][20]. [↩︎][21] - - 4. David Allen, “About the Computer Literacy Project,” Computer Literacy Project Archive, accessed March 31, 2019, . [↩︎][22] - - 5. ibid. [↩︎][23] - - 6. Williams, 51. [↩︎][24] - - 7. Radcliffe, 11. [↩︎][25] - - 8. Radcliffe, 5. 
[↩︎][26] - - - - --------------------------------------------------------------------------------- - -via: https://twobithistory.org/2019/03/31/bbc-micro.html - -作者:[Two-Bit History][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://twobithistory.org -[b]: https://github.com/lujun9972 -[1]: tmp.05mfBL4kP8#fn:1 -[2]: tmp.05mfBL4kP8#fn:2 -[3]: tmp.05mfBL4kP8#fn:3 -[4]: https://computer-literacy-project.pilots.bbcconnectedstudio.co.uk/ -[5]: https://twobithistory.org/2018/07/22/dawn-of-the-microcomputer.html -[6]: tmp.05mfBL4kP8#fn:4 -[7]: tmp.05mfBL4kP8#fn:5 -[8]: tmp.05mfBL4kP8#fn:6 -[9]: https://twobithistory.org/images/beeb.jpg -[10]: https://twitter.com/TwoBitHistory/status/1112372000742404098 -[11]: tmp.05mfBL4kP8#fn:7 -[12]: https://twitter.com/TwoBitHistory/status/1111305774939234304 -[13]: tmp.05mfBL4kP8#fn:8 -[14]: https://twobithistory.org/2018/09/02/learning-basic.html -[15]: https://twitter.com/TwoBitHistory -[16]: https://twobithistory.org/feed.xml -[17]: https://twitter.com/TwoBitHistory/status/1091148050221944832?ref_src=twsrc%5Etfw -[18]: tmp.05mfBL4kP8#fnref:1 -[19]: tmp.05mfBL4kP8#fnref:2 -[20]: https://computer-literacy-project.pilots.bbcconnectedstudio.co.uk/media/Towards%20Computer%20Literacy.pdf -[21]: tmp.05mfBL4kP8#fnref:3 -[22]: tmp.05mfBL4kP8#fnref:4 -[23]: tmp.05mfBL4kP8#fnref:5 -[24]: tmp.05mfBL4kP8#fnref:6 -[25]: tmp.05mfBL4kP8#fnref:7 -[26]: tmp.05mfBL4kP8#fnref:8 diff --git a/sources/talk/20190401 How Kubeflow is evolving without ksonnet.md b/sources/talk/20190401 How Kubeflow is evolving without ksonnet.md deleted file mode 100644 index bad5611f3d..0000000000 --- a/sources/talk/20190401 How Kubeflow is evolving without ksonnet.md +++ /dev/null @@ -1,62 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: 
(How Kubeflow is evolving without ksonnet) -[#]: via: (https://opensource.com/article/19/4/kubeflow-evolution) -[#]: author: (Jonathan Gershater (Red Hat) https://opensource.com/users/jgershat/users/jgershat) - -How Kubeflow is evolving without ksonnet -====== -There are big differences in how open source communities handle change compared to closed source vendors. -![Chat bubbles][1] - -Many software projects depend on modules that are run as separate open source projects. When one of those modules loses support (as is inevitable), the community around the main project must determine how to proceed. - -This situation is happening right now in the [Kubeflow][2] community. Kubeflow is an evolving open source platform for developing, orchestrating, deploying, and running scalable and portable machine learning workloads on [Kubernetes][3]. Recently, the primary supporter of the Kubeflow component [ksonnet][4] announced that it would [no longer support][5] the software. - -When a piece of software loses support, the decision-making process (and the outcome) differs greatly depending on whether the software is open source or closed source. - -### A cellphone analogy - -To illustrate the differences in how an open source community and a closed source/single software vendor proceed when a component loses support, let's use an example from hardware design. - -Suppose you buy cellphone Model A and it stops working. When you try to get it repaired, you discover the manufacturer is out of business and no longer offering support. Since the cellphone's design is proprietary and closed, no other manufacturers can support it. - -Now, suppose you buy cellphone Model B, it stops working, and its manufacturer is also out of business and no longer offering support. However, Model B's design is open, and another company is in business manufacturing, repairing and upgrading Model B cellphones. 
- -This illustrates one difference between software written using closed and open source principles. If the vendor of a closed source software solution goes out of business, support disappears with the vendor, unless the vendor sells the software's design and intellectual property. But, if the vendor of an open source solution goes out of business, there is no intellectual property to sell. By the principles of open source, the source code is available for anyone to use and modify, under license, so another vendor can continue to maintain the software. - -### How Kubeflow is evolving without ksonnet - -The ramification of ksonnet's backers' decision to cease development illustrates Kubeflow's open and collaborative design process. Kubeflow's designers have several options, such as replacing ksonnet, adopting and developing ksonnet, etc. Because Kubeflow is an open source project, all options are discussed in the open on the Kubeflow mailing list. Some of the community's suggestions include: - -> * Should we look at projects that are CNCF/Apache projects e.g. [helm][6] -> * I would opt for back to the basics. KISS. How about plain old jsonnet + kubectl + makefile/scripts ? Thats how e.g. the coreos [prometheus operator][7] does it. It would also lower the entry barrier (no new tooling) and let vendors of k8s (gke, openshift, etc) easily build on top of that. -> * I vote for using a simple, _programmatic_ context, be it manual jsonnet + kubectl, or simple Python scripts + Python K8s client, or any tool be can build on top of these. -> - - -The members of the mailing list are discussing and debating alternatives to ksonnet and will arrive at a decision to continue development. What I love about the open source way of adapting is that it's done communally. Unlike closed source software, which is often designed by one vendor, the organizations that are members of an open source project can collaboratively steer the project in the direction they best see fit. 
As Kubeflow evolves, it will benefit from an open, collaborative decision-making framework. - --------------------------------------------------------------------------------- - -via: https://opensource.com/article/19/4/kubeflow-evolution - -作者:[Jonathan Gershater (Red Hat)][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://opensource.com/users/jgershat/users/jgershat -[b]: https://github.com/lujun9972 -[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/talk_chat_communication_team.png?itok=CYfZ_gE7 (Chat bubbles) -[2]: https://www.kubeflow.org/ -[3]: https://github.com/kubernetes -[4]: https://ksonnet.io/ -[5]: https://blogs.vmware.com/cloudnative/2019/02/05/welcoming-heptio-open-source-projects-to-vmware/ -[6]: https://landscape.cncf.io -[7]: https://github.com/coreos/prometheus-operator/tree/master/contrib/kube-prometheus diff --git a/sources/talk/20190402 Making computer science curricula as adaptable as our code.md b/sources/talk/20190402 Making computer science curricula as adaptable as our code.md deleted file mode 100644 index 150034a20b..0000000000 --- a/sources/talk/20190402 Making computer science curricula as adaptable as our code.md +++ /dev/null @@ -1,72 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Making computer science curricula as adaptable as our code) -[#]: via: (https://opensource.com/open-organization/19/4/adaptable-curricula-computer-science) -[#]: author: (Amarachi Achonu https://opensource.com/users/amarach1/users/johnsontanner3) - -Making computer science curricula as adaptable as our code -====== -No two computer science students are alike—so teachers need curricula -that are open and adaptable. 
-![][1] - -Educators in elementary computer science face a lack of adaptable curricula. Calls for more modifiable, non-rigid curricula are therefore enticing—assuming that such curricula could benefit teachers by increasing their ability to mold resources for individual classrooms and, ultimately, produce better teaching experiences and learning outcomes. - -Our team at [CSbyUs][2] noticed this scarcity, and we've created an open source web platform to facilitate more flexible, adaptable, and tested curricula for computer science educators. The mission of the CSbyUs team has always been utilizing open source technology to improve pedagogy in computer science, which includes increasing support for teachers. Therefore, this project primarily seeks to use open source principles—and the benefits inherent in them—to expand the possibilities of modern curriculum-making and support teachers by increasing access to more adaptable curricula. - -### Rigid, monotonous, mundane - -Why is the lack of adaptable curricula a problem for computer science education? Rigid curricula dominates most classrooms today, primarily through monotonous and routinely distributed lesson plans. Many of these plans are developed without the capacity for dynamic use and application to different classroom atmospheres. In contrast, an _adaptable_ curriculum is one that would _account_ for dynamic and changing classroom environments. - -An adaptable curriculum means freedom and more options for educators. This is especially important in elementary-level classrooms, where instructors are introducing students to computer science for the first time, and in classrooms with higher populations of groups typically underrepresented in the field of computer science. Here especially, it's advantageous for instructors to have access to curricula that explicitly consider diverse classroom landscapes and grants the freedom necessary to adapt to specific student populations. 
- -### Making it adaptable - -This kind of adaptability is certainly at work at CSbyUs. Hayley Barton—a member of both the organization's curriculum-making team and its teaching team, and a senior at Duke University majoring in Economics and minoring in Computer Science and Spanish—recently demonstrated the benefits of adaptable curricula during an engagement in the field. Reflecting on her teaching experiences, Barton describes a major reason why curriculum adaptation is necessary in computer science classrooms. "We are seeing the range of students that we work with," she says, "and trying to make the curriculum something that can be tailored to different students." - -An adaptable curriculum means freedom and more options for educators. - -A more adaptable curriculum is necessary for truly challenging students, Barton continues. - -The need for change became most evident to Barton when working students to make their own preliminary apps. Barton collaborated with students who appeared to be at different levels of focus and attention. On the one hand, a group of more advanced students took well to the style of a demonstrative curriculum and remained attentive and engaged to the task. On the other hand, another group of students seemed to have more trouble focusing in the classroom or even being motivated to engage with topics of computer science skills. Witnessing this difference among students, it became important that curriculum would need to be adaptable in multiple ways to be able to engage more students at their level. - -"We want to challenge every student without making it too challenging for any individual student," Barton says. "Thinking about those things definitely feeds into how I'm thinking about the curriculum in terms of making it accessible for all the students." - -As a curriculum-maker, she subsequently uses experiences like this to make changes to the original curriculum. 
- -"If those other students have one-on-one time themselves, they could be doing even more amazing things with their apps," says Barton. - -Taking this advice, Barton would potentially incorporate into the curriculum more emphasis on cultivating students' sense of ownership in computer science, since this is important to their focus and productivity. For this, students may be afforded that sense of one-on-one time. The result will affect the next round of teachers who use the curriculum. - -For these changes to be effective, the onus is on teachers to notice the dynamics of the classroom. In the future, curriculum adaptation may depend on paying particular attention to and identifying these subtle differences of style of curriculum. Identifying and commenting about these subtleties allows the possibility of applying a different strategy, and these are the changes that are applied to the curriculum. - -Curriculum adaptation should be iterative, as it involves learning from experience, returning to the drawing board, making changes, and finally, utilizing the curriculum again. - -"We've gone through a lot of stages of development," Barton says. "The goal is to have this kind of back and forth, where the curriculum is something that's been tested, where we've used our feedback, and also used other research that we've done, to make it something that's actually impactful." - -Hayley's "back and forth" process is an iterative process of curriculum-making. Between utilizing curricula and modifying curricula, instructors like Hayley can take a once-rigid curriculum and mold it to any degree that the user sees fit—again and again. This iterative process depends on tests performed first in the classroom, and it depends on the teacher's rationale and reflection on how curricula uniquely pans out for them. - -Adaptability of curriculum is the most important principle on which the CSbyUs platform is built. 
Much like Hayley's process of curriculum-making, curriculum adaptation should be _iterative_ , as it involves learning from experience, returning to the drawing board, making changes, and finally, utilizing the curriculum again. Once launched, the CSbyUS website will document this iterative process. - -The open-focused pedagogy behind the CSByUs platform, then, brings to life the flexibility inherent in the process of curriculum adaptation. First, it invites and collects the valuable first-hand perspectives of real educators working with real curricula to produce real learning. Next, it capitalizes on an iterative processes of development—one familiar to open source programmers—to enable modifications to curriculum (and the documentation of those modifications). Finally, it transforms the way teachers encounter curricula by helping them make selections from different versions of both modified curriculum and "the original." Our platform's open source strategy is crucial to cultivating a hub of flexible curricula for educators. - -Open source practices can be a key difference in making rigid curricula more moldable for educators. Furthermore, since this approach effectively melds open source technologies with open-focused pedagogy, open pedagogy can potentially provide flexibility for educators teaching various curriculum across disciplines. 
- --------------------------------------------------------------------------------- - -via: https://opensource.com/open-organization/19/4/adaptable-curricula-computer-science - -作者:[Amarachi Achonu][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://opensource.com/users/amarach1/users/johnsontanner3 -[b]: https://github.com/lujun9972 -[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/rh_003588_01_rd3os.combacktoschoolserieshe_rh_051x_0.png?itok=gIzbmxuI -[2]: https://csbyus.herokuapp.com/ diff --git a/sources/talk/20190405 D as a C Replacement.md b/sources/talk/20190405 D as a C Replacement.md deleted file mode 100644 index 36b60bb278..0000000000 --- a/sources/talk/20190405 D as a C Replacement.md +++ /dev/null @@ -1,247 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (D as a C Replacement) -[#]: via: (https://theartofmachinery.com/2019/04/05/d_as_c_replacement.html) -[#]: author: (Simon Arneaud https://theartofmachinery.com) - -D as a C Replacement -====== - -Sircmpwn (the main developer behind the [Sway Wayland compositor][1]) recently wrote a blog post about how he thinks [Rust is not a good C replacement][2]. I don’t know if he’d like the [D programming language][3] either, but it’s become a C replacement for me. - -### My C to D Story - -My story is like a lot of systems programmers’ stories. At one time, C was my go-to language for most programming. One day I realised that most of my C programs kept reimplementing things from C++: dynamic arrays, better strings, polymorphic classes, etc. So I tried using C++ instead, and at first I loved it. RAII and classes and generics made programming fun again. 
Even better was the promise that if I read all these books on C++, and learned to master things like template metaprogramming, I’d become an almighty god of systems programming and my code would be amazing. But learning more eventually had the opposite effect: (in hindsight) my code actually got worse, and I fell out of love. I remember reading Scott Meyer’s Effective C++ and realising it was really more about _ineffective_ C++ — and that most of my C++ code until then was broken. Let’s face it: C might be fiddly to use, but it has a kind of elegance, and “elegant” is rarely a word you hear when C++ is involved. - -Apparently, a lot of ex-C C++ programmers end up going back to C. In my case, I discovered D. It’s also not perfect, but I use it because it feels to me a lot more like the `C += 1` that C++ was meant to be. Here’s an example that’s very superficial, but I think is representative. Take this simple C program: - -``` -#include - -int main() -{ - printf("1 + 1 = %d!\n", 1 + 1); - return 0; -} -``` - -Here’s a version using the C++ standard library: - -``` -#include - -int main() -{ - std::cout << "1 + 1 = " << 1 + 1 << "!" << std::endl; - return 0; -} -``` - -Here’s an idiomatic D version: - -``` -import std.stdio; - -void main() -{ - writef("1 + 1 = %d!\n", 1 + 1); -} -``` - -As I said, it’s a superficial example, but I think it shows a general difference in philosophy between C++ and D. (If I wanted to make the difference even clearer, I’d use an example that needed `iomanip` in C++.) - -Update: Unlike in C, [D’s format strings can work with custom types][4]. Stefan Rohe has also pointed out that [D supports compile-time checking of format strings][5] using its metaprogramming features — unlike C which does it through built-in compiler special casing that can’t be used with custom code. - -This [article about C++ member function pointers][6] happens to also be a good explanation of the origins of D. 
It’s a good read if you’re a programming language nerd like me, but here’s my TL;DR for everyone else: C++ member function pointers are supposed to feel like a low-level feature (like normal function pointers are), but the complexity and diversity of implementations means they’re really high level. The complexity of the implementations is because of the subtleties of the rules about what you can do with them. The author explains the implementations from several C++ compilers, including what’s “easily [his] favorite implementation”: the elegantly simple Digital Mars C++ implementation. (“Why doesn’t everyone else do it this way?”) The DMC compiler was written by Walter Bright, who invented D. - -D has classes and templates and other core features of C++, but designed by someone who has spent a heck of a lot of time thinking about the C++ spec and how things could be simpler. Walter once said that his experiences implementing C++ templates made him consider not including them in D at all, until he realised they didn’t need to be so complex. - -Here’s a quick tour of D from the point of view of incrementally improving C. - -### `-betterC` - -D compilers support a `-betterC` switch that disables [the D runtime][7] and all high-level features that depend on it. The example C code above can be translated directly into betterC: - -``` -import core.stdc.stdio; - -extern(C): - -int main() -{ - printf("1 + 1 = %d!\n", 1 + 1); - return 0; -} - -$ dmd -betterC example.d -$ ./example -1 + 1 = 2! -``` - -The resulting binary looks a lot like the equivalent C binary. In fact, if you rewrote a C library in betterC, it could still link to code that had been compiled against the C version, and work without changes. Walter Bright wrote a good article walking through all [the changes needed to convert a real C program to betterC][8]. - -You don’t actually need the `-betterC` switch just to write C-like code in D. 
It’s only needed in special cases where you simply can’t have the D runtime. But let me point out some of my favourite D features that still work with `-betterC`. - -#### `static assert()` - -This allows verifying some assumption at compile time. - -``` -static assert(kNumInducers < 16); -``` - -Systems code often makes assumptions about alignment or structure size or other things. With `static assert`, it’s possible to not only document these assumptions, but trigger a compilation error if someone breaks them by adding a struct member or something. - -#### Slices - -Typical C code is full of pointer/length pairs, and it’s a common bug for them to go out of sync. Slices are a simple and super-useful abstraction for a range of memory defined by a pointer and length. Instead of code like this: - -``` -buffer_p += offset; -buffer_len -= offset; // Got to update both -``` - -You can use this much-less-bug-prone alternative: - -``` -buffer = buffer[offset..$]; -``` - -A slice is nothing but a pointer/length pair with first-class syntactic support. - -Update: [Walter Bright has written more about pointer/length pair problem in C][9]. - -#### Compile Time Function Evaluation (CTFE) - -[Many functions can be evaluated at compile time.][10] - -``` -long factorial(int n) pure -{ - assert (n >= 0 && n <= 20); - long ret = 1; - foreach (j; 2..n+1) ret *= j; - return ret; -} - -// Statically allocated array -// Size is calculated at compile time -Permutation[factorial(kNumThings)] permutation_table; -``` - -#### `scope` Guards - -Code in one part of a function is often coupled to cleanup code in a later part. Failing to match this code up correctly is another common source of bugs (especially when multiple control flow paths are involved). 
D’s scope guards make it simple to get this stuff right: - -``` -p = malloc(128); -// free() will be called when the current scope exits -scope (exit) free(p); -// Put whatever if statements, or loops, or early returns you like here -``` - -You can even have multiple scope guards in a scope, or have nested scopes. The cleanup routines will be called when needed, in the right order. - -D also supports RAII using struct destructors. - -#### `const` and `immutable` - -It’s a popular myth that `const` in C and C++ is useful for compiler optimisations. Walter Bright has complained that every time he thought of a new `const`-based optimisation for C++, he eventually discovered it didn’t work in real code. So he made some changes to `const` semantics for D, and added `immutable`. You can read more in the [D `const` FAQ][11]. - -#### `pure` - -Functional purity can be enforced. I’ve written about [some of the benefits of the `pure` keyword before][12]. - -#### `@safe` - -SafeD is a subset of D that forbids risky language features like pointer typecasts and inline assembly. Code marked `@safe` is enforced by the compiler to not use these features, so that risky code can be limited to the small percentage of the application that needs it. You can [read more about SafeD in this article][13]. - -#### Metaprogramming - -Like I hinted earlier, metaprogramming has got a bad reputation among some C++ programmers. But [D has the advantage of making metaprogramming less interesting][14], so D programmers tend to just do it when it’s useful, and not as a fun puzzle. - -D has great support for [compile-time reflection][15]. In most cases, compile-time reflection can solve the same problems as run-time reflection, but with compile-time guarantees. Compile-time reflection can also be used to implement run-time reflection where it’s truly needed. - -Need the names of an enumerated type as an array? 
- -``` -enum State -{ - stopped, - starting, - running, - stopping, -} - -string[] state_names = [__traits(allMembers, State)]; -``` - -Thanks to D’s metaprogramming, the standard library has many nice, type-safe tools, like this [compile-time checked bit flag enum][16]. - -I’ve written more about [using metaprogramming in `-betterC` code][17]. - -#### No Preprocessor - -Okay, this a non-feature as a feature, but D has no equivalent to C’s preprocessor. All its sane use-cases are replaced with native language features, like [manifest constants][18] and [templates][19]. That includes proper [modules][20] support, which means D can break free of the limitations of that old `#include` hack. - -### Normal D - -C-like D code can be written and compiled as normal D code without the `-betterC` switch. The difference is that normal D code is linked to the D runtime, which supports higher-level features, the most obvious ones being garbage collection and object-oriented classes. Some people have confused the D runtime with something like the Java virtual machine, so I once wrote [an article explaining exactly what it is][7] (spoiler: it’s like the C and C++ runtimes, but with more features). - -Even with the runtime, compiled D is not much different from compiled C++. Sometimes I like to write throwaway code to, say, experiment with a new Linux system call or something. I used to think the best language for that is plain old C, but now I always use D. - -D doesn’t natively support `#include`ing C code, but for nice APIs that don’t have a lot of preprocessor craziness (like most of Linux) I usually just write [ad-hoc bindings][21]. Many popular C libraries have maintained D bindings, which can be found in the [Dub registry][22], or in [the Derelict project][23], or in the newer [BindBC project][24]. There are also tools for automated bindings, including the awesome [dpp tool][25] that brings `#include` support directly to D code. 
- -Update: This post has got a lot of attention from people who’ve never heard of D before. If you’re interested in learning D, I recommend - - * [The DLang Tour][26] for a quick dive into the language - * [Ali Çehreli’s Programming in D book][27] if you prefer something in-depth - * [The D forum Learn group][28] or [IRC channel][29] to get answers to your questions - - - --------------------------------------------------------------------------------- - -via: https://theartofmachinery.com/2019/04/05/d_as_c_replacement.html - -作者:[Simon Arneaud][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://theartofmachinery.com -[b]: https://github.com/lujun9972 -[1]: https://swaywm.org/ -[2]: https://drewdevault.com/2019/03/25/Rust-is-not-a-good-C-replacement.html -[3]: https://dlang.org -[4]: https://wiki.dlang.org/Defining_custom_print_format_specifiers -[5]: https://dlang.org/phobos/std_format.html#format -[6]: https://www.codeproject.com/Articles/7150/Member-Function-Pointers-and-the-Fastest-Possible -[7]: /2017/06/04/what_is_the_d_runtime.html -[8]: https://dlang.org/blog/2018/06/11/dasbetterc-converting-make-c-to-d/ -[9]: https://www.digitalmars.com/articles/b44.html -[10]: https://dlang.org/spec/function.html#interpretation -[11]: https://dlang.org/articles/const-faq.html -[12]: /2016/03/28/dirtying_pure_functions_can_be_useful.html -[13]: https://dlang.org/blog/2016/09/28/how-to-write-trusted-code-in-d/ -[14]: https://epi.github.io/2017/03/18/less_fun.html -[15]: https://dlang.org/spec/traits.html -[16]: https://dlang.org/phobos/std_typecons.html#BitFlags -[17]: /2018/08/13/inheritance_and_polymorphism_2.html -[18]: https://dlang.org/spec/enum.html#manifest_constants -[19]: https://tour.dlang.org/tour/en/basics/templates -[20]: https://ddili.org/ders/d.en/modules.html -[21]: https://wiki.dlang.org/Bind_D_to_C 
-[22]: https://code.dlang.org/ -[23]: https://github.com/DerelictOrg -[24]: https://github.com/BindBC -[25]: https://github.com/atilaneves/dpp -[26]: https://tour.dlang.org/ -[27]: https://ddili.org/ders/d.en/index.html -[28]: https://forum.dlang.org/group/learn -[29]: irc://irc.freenode.net/d diff --git a/sources/talk/20190410 Anti-lasers could give us perfect antennas, greater data capacity.md b/sources/talk/20190410 Anti-lasers could give us perfect antennas, greater data capacity.md deleted file mode 100644 index 2d2f4d5c05..0000000000 --- a/sources/talk/20190410 Anti-lasers could give us perfect antennas, greater data capacity.md +++ /dev/null @@ -1,69 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Anti-lasers could give us perfect antennas, greater data capacity) -[#]: via: (https://www.networkworld.com/article/3386879/anti-lasers-could-give-us-perfect-antennas-greater-data-capacity.html#tk.rss_all) -[#]: author: (Patrick Nelson https://www.networkworld.com/author/Patrick-Nelson/) - -Anti-lasers could give us perfect antennas, greater data capacity -====== -Anti-lasers get close to providing a 100% efficient signal channel for data, say engineers. -![Guirong Hao / Valery Brozhinsky / Getty Images][1] - -Playing laser light backwards could adjust data transmission signals so that they perfectly match receiving antennas. The fine-tuning of signals like this, not achieved with such detail before, could create more capacity for ever-increasing data demand. - -"Imagine, for example, that you could adjust a cell phone signal exactly the right way, so that it is perfectly absorbed by the antenna in your phone," says Stefan Rotter of the Institute for Theoretical Physics of Technische Universität Wien (TU Wien) in a [press release][2]. - -Rotter is talking about “Random Anti-Laser,” a project he has been a part of. 
The idea behind it is that if one could time-reverse a laser, then the laser (right now considered the best light source ever built) becomes the best available light absorber. Perfect absorption of a signal wave would mean that all of the data-carrying energy is absorbed by the receiving device, thus it becomes 100% efficient. - -**[ Related:[What is 5G wireless? How it will change networking as we know it?][3] ]** - -“The easiest way to think about this process is in terms of a movie showing a conventional laser sending out laser light, which is played backwards,” the TU Wein article says. The anti-laser is the exact opposite of the laser — instead of sending specific colors perfectly when energy is applied, it receives specific colors perfectly. - -Perfect absorption of a signal wave would mean that all of the data-carrying energy is absorbed by the receiving device, thus it becomes 100% efficient. - -Counter-intuitively, it’s the random scattering of light in all directions that’s behind the engineering. However, the Vienna, Austria, university group performs precise calculations on those scattering, splitting signals. That lets the researchers harness the light. - -### How the anti-laser technology works - -The microwave-based, experimental device the researchers have built in the lab to prove the idea doesn’t just potentially apply to cell phones; wireless internet of things (IoT) devices would also get more data throughput. How it works: The device consists of an antenna-containing chamber encompassed by cylinders, all arranged haphazardly, the researchers explain. The cylinders distribute an elaborate, arbitrary wave pattern “similar to [throwing] stones in a puddle of water, at which water waves are deflected.” - -Measurements then take place to identify exactly how the signals return. 
The team involved, which also includes collaborators from the University of Nice, France, then “characterize the random structure and calculate the wave front that is completely swallowed by the central antenna at the right absorption strength.” Ninety-nine point eight percent is absorbed, making it remarkably and virtually perfect. Data throughput, range, and other variables thus improve. - -**[[Take this mobile device management course from PluralSight and learn how to secure devices in your company without degrading the user experience.][4] ]** - -Achieving perfect antennas has been pretty much only theoretically possible for engineers to date. Reflected energy (RF back into the transmitter from antenna inefficiencies) has always been an issue in general. Reflections from surfaces, too, have been always been a problem. - -“Think about a mobile phone signal that is reflected several times before it reaches your cell phone,” Rotter says. It’s not easy to get the tuning right — as the antennas’ physical locations move, reflected surfaces become different. - -### Scattering lasers - -Scattering, similar to that used in this project, is becoming more important in communications overall. “Waves that are being scattered in a complex way are really all around us,” the group says. - -An example is random-lasers (which the group’s anti-laser is based on) that unlike traditional lasers, do not use reflective surfaces but trap scattered light and then “emit a very complicated, system-specific laser field when supplied with energy.” The anti-random-laser developed by Rotter and his group simply reverses that in time: - -“Instead of a light source that emits a specific wave depending on its random inner structure, it is also possible to build the perfect absorber.” The anti-random-laser. - -Join the Network World communities on [Facebook][5] and [LinkedIn][6] to comment on topics that are top of mind. 
- --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3386879/anti-lasers-could-give-us-perfect-antennas-greater-data-capacity.html#tk.rss_all - -作者:[Patrick Nelson][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Patrick-Nelson/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2019/03/data_cubes_transformation_conversion_by_guirong_hao_gettyimages-1062387214_plus_abstract_binary_by_valerybrozhinsky_gettyimages-865457032_3x2_2400x1600-100790211-large.jpg -[2]: https://www.tuwien.ac.at/en/news/news_detail/article/126574/ -[3]: https://www.networkworld.com/article/3203489/lan-wan/what-is-5g-wireless-networking-benefits-standards-availability-versus-lte.html -[4]: https://pluralsight.pxf.io/c/321564/424552/7490?u=https%3A%2F%2Fwww.pluralsight.com%2Fcourses%2Fmobile-device-management-big-picture -[5]: https://www.facebook.com/NetworkWorld/ -[6]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190415 Nyansa-s Voyance expands to the IoT.md b/sources/talk/20190415 Nyansa-s Voyance expands to the IoT.md deleted file mode 100644 index e893c86d53..0000000000 --- a/sources/talk/20190415 Nyansa-s Voyance expands to the IoT.md +++ /dev/null @@ -1,75 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Nyansa’s Voyance expands to the IoT) -[#]: via: (https://www.networkworld.com/article/3388301/nyansa-s-voyance-expands-to-the-iot.html#tk.rss_all) -[#]: author: (Jon Gold https://www.networkworld.com/author/Jon-Gold/) - -Nyansa’s Voyance expands to the IoT -====== - -![Brandon Mowinkel \(CC0\)][1] - -Nyansa announced today that their flagship Voyance product can now apply its AI-based 
secret sauce to [IoT][2] devices, over and above the networking equipment and IT endpoints it could already manage. - -Voyance – a network management product that leverages AI to automate the discovery of devices on the network and identify unusual behavior – has been around for two years now, and Nyansa says that it’s being used to observe a total of 25 million client devices operating across roughly 200 customer networks. - -**More on IoT:** - - * [Most powerful Internet of Things companies][3] - * [10 Hot IoT startups to watch][4] - * [The 6 ways to make money in IoT][5] - * [][6] [Blockchain, service-centric networking key to IoT success][7] - * [Getting grounded in IoT networking and security][8] - * [Building IoT-ready networks must become a priority][9] - * [What is the Industrial IoT? [And why the stakes are so high]][10] - - - -It’s a software-only product (available either via public SaaS or private cloud) that works by scanning a customer’s network and identifying every device attached to it, then establishing a behavioral baseline that will let it flag suspicious actions (e.g., sending a lot more data than other devices of its kind, connecting to unusual servers) and even perform automated root-cause analysis of network issues. - -The process doesn’t happen instantaneously, particularly the creation of the baseline, but it’s designed to be minimally invasive to existing network management frameworks and easy to implement. - -Nyansa said that the medical field has been one of the key targets for the newly IoT-enabled iteration of Voyance, and one early customer – Baptist Health, a Florida-based healthcare company that runs four hospitals and several other clinics and practices – said that Voyance IoT has offered a new level of visibility into the business’ complex array of connected diagnostic and treatment machines. 
- -“In the past we didn’t have the ability to identify security concerns in this way, related to rogue devices on the enterprise network, and now we’re able to do that,” said CISO Thad Phillips. - -While spiraling network complexity isn’t an issue confined to the IoT, there’s a strong argument that the number and variety of devices connected to an IoT-enabled network represent a new challenge to network management, particularly in light of the fact that many such devices aren’t particularly secure. - -“They’re not manufactured by networking vendors or security vendors, so for a performance standpoint, they have a lot of quirks … and on the security side, that’s sort of a big problem there as well,” said Anand Srinivas, Nyansa’s co-founder and CTO. - -Enabling the Voyance platform to identify and manage IoT devices along with traditional endpoints seems to be mostly a matter of adding new device signatures to the system, but Enterprise Management Associates research director Shamus McGillicuddy said that, while the system’s designed for automation and ease of use, AIOps products like Voyance do need to be managed to make sure that they’re functioning correctly. - -“Anything based on machine learning is going to take a while to make sure it understands your environment and you might have to retrain it,” he said. “There’s always going to be more and more things connecting to IP networks, and it’s just going to be a question of building up a database.” - -Voyance IoT is available now. Pricing starts at $16,000 per year, and goes up with the number of total devices managed. (Current Voyance users can manage up to 100 IoT devices at no additional cost.) - -Join the Network World communities on [Facebook][11] and [LinkedIn][12] to comment on topics that are top of mind. 
- --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3388301/nyansa-s-voyance-expands-to-the-iot.html#tk.rss_all - -作者:[Jon Gold][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Jon-Gold/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2019/02/geometric_architecture_ceiling_structure_lines_connections_networks_perspective_by_brandon_mowinkel_cc0_via_unsplash_2400x1600-100788530-large.jpg -[2]: https://www.networkworld.com/article/3207535/what-is-iot-how-the-internet-of-things-works.html -[3]: https://www.networkworld.com/article/2287045/internet-of-things/wireless-153629-10-most-powerful-internet-of-things-companies.html -[4]: https://www.networkworld.com/article/3270961/internet-of-things/10-hot-iot-startups-to-watch.html -[5]: https://www.networkworld.com/article/3279346/internet-of-things/the-6-ways-to-make-money-in-iot.html -[6]: https://www.networkworld.com/article/3280225/internet-of-things/what-is-digital-twin-technology-and-why-it-matters.html -[7]: https://www.networkworld.com/article/3276313/internet-of-things/blockchain-service-centric-networking-key-to-iot-success.html -[8]: https://www.networkworld.com/article/3269736/internet-of-things/getting-grounded-in-iot-networking-and-security.html -[9]: https://www.networkworld.com/article/3276304/internet-of-things/building-iot-ready-networks-must-become-a-priority.html -[10]: https://www.networkworld.com/article/3243928/internet-of-things/what-is-the-industrial-iot-and-why-the-stakes-are-so-high.html -[11]: https://www.facebook.com/NetworkWorld/ -[12]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190416 Two tools to help visualize and simplify your data-driven operations.md 
b/sources/talk/20190416 Two tools to help visualize and simplify your data-driven operations.md deleted file mode 100644 index 8a44c56ca7..0000000000 --- a/sources/talk/20190416 Two tools to help visualize and simplify your data-driven operations.md +++ /dev/null @@ -1,59 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Two tools to help visualize and simplify your data-driven operations) -[#]: via: (https://www.networkworld.com/article/3389756/two-tools-to-help-visualize-and-simplify-your-data-driven-operations.html#tk.rss_all) -[#]: author: (Kent McNeil, Vice President of Software, Ciena Blue Planet ) - -Two tools to help visualize and simplify your data-driven operations -====== -Amidst the rising complexity of networks, and influx of data, service providers are striving to keep operational complexity under control. Blue Planet’s Kent McNeil explains how they can turn this challenge into a huge opportunity, and in fact reduce operational effort by exploiting state-of-the-art graph database visualization and delta-based federation technologies. -![danleap][1] - -**Build the picture: Visualize your data** - -The Internet of Things (IoT), 5G, smart technology, virtual reality – all these applications guarantee one thing for communications service providers (CSPs): more data. As networks become increasingly overwhelmed by mounds of data, CSPs are on the hunt for ways to make the most of the intelligence collected and are looking for ways to monetize their services, provide more customizable offerings, and enhance their network performance. - -Customer analytics has gone some way towards fulfilling this need for greater insights, but with the rise in the volume and variety of consumer and IoT applications, the influx of data will increase at a phenomenal rate. The data includes not only customer-related data, but also device and network data, adding complexity to the picture. 
CSPs must harness this information to understand the relationships between any two things, to understand the connections within their data and to ultimately, leverage it for a better customer experience. - -**See the upward graphical trend with graph databases** - -Traditional relational databases certainly have their use, but graph databases offer a novel perspective. The visual representation between the component parts enables CSPs to understand and analyze the characteristics, as well as to act in a timely manner when confronted with any discrepancies. - -Graph databases can help CSPs tackle this new challenge, ensuring the data is not just stored, but also processed and analyzed. It enables complex network questions to be asked and answered, ensuring that CSPs are not sidelined as “dumb pipes” in the IoT movement. - -The use of graph databases has started to become more mainstream, as businesses see the benefits. IBM conducted a generic industry study, entitled “The State of Graph Databases Worldwide”, which found that people are moving to graph databases for speed, performance enhancement of applications, and streamlined operations. Ways in which businesses are using, or are planning to use, graph technology is highest for network and IT operations, followed by master data management. Performance is a key factor for CSPs, as is personalization, which enables support for more tailored service offerings. - -Another advantage of graph databases for CSPs is that of unravelling the complexity of network inventory in a clear, visualized picture – this capability gives CSPs a competitive advantage as speed and performance become increasingly paramount. This need for speed and reliability will increase tenfold as IoT continues its impressive global ramp-up. Operational complexity also grows as the influx of generated data produced by IoT will further challenge the scalability of existing operational environments. 
Graph databases can help CSPs tackle this new challenge, ensuring the data is not just stored, but also processed and analyzed. It enables complex network questions to be asked and answered, ensuring that CSPs are not sidelined as “dumb pipes” in the IoT movement. - -**Change the tide of data with delta-based federation** - -New data, updated data, corrected data, deleted data – all needs to be managed, in line with regulations, and instantaneously. But this capability does not exist in the reality of many CSP’s Operational Support Systems (OSS). Many still battle with updating data and relying on full uploads of network inventory in order to perform key service fulfillment and assurance tasks. This method is time-intensive and risky due to potential conflicts and inaccuracies. With data being accessed from a variety of systems, CSPs must have a way to effectively hone in on only what is required. - -Integrating network data into one simplified system limits the impact on the legacy OSS systems. This allows each OSS to continue its specific role, yet to feed data into a single interface, hence enabling teams to see the complete picture and gain efficiencies while launching new services or pinpointing and resolving service and network issues. - -A delta-based federation model ensures that an accurate picture is presented, and only essential changes are conducted reliably and quickly. This simplified method filters the delta changes, reducing the time involved in updating, and minimizing the system load and risks. A validation process takes place to catch any errors or issues with the data, so CSPs can apply checks and retain control over modifications. Integrating network data into one simplified system limits the impact on the legacy OSS systems. 
This allows each OSS to continue its specific role, yet to feed data into a single interface, hence enabling teams to see the complete picture and gain efficiencies while launching new services or pinpointing and resolving service and network issues. - -**Ride the wave** - -25 billion connected things are predicted by Gartner on a global scale by 2021 and CSPs are already struggling with the current levels of data, which Gartner estimates at 14.2 billion in 2019. Over the last decade, CSPs have faced significant rises in the levels of data consumed as demand for new services and higher bandwidth applications has taken off. This data wave is set to continue and CSPs have two important tools at their disposal helping them ride the wave. Firstly, CSPs have specialist, legacy OSS already in place which they can leverage as a basis for integrating data and implementing optimized systems. Secondly, they can utilize new technologies in database inventory management: graph databases and delta-based federation. The advantages of effectively integrating network data, visualizing it, and creating a clear map of the inter-connections, enable CSPs to make critical decisions more quickly and accurately, resulting in most optimized and informed service operations. 
- -[Watch this video to learn more about Blue Planet][2] - --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3389756/two-tools-to-help-visualize-and-simplify-your-data-driven-operations.html#tk.rss_all - -作者:[Kent McNeil, Vice President of Software, Ciena Blue Planet][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2019/04/istock-165721901-100793858-large.jpg -[2]: https://www.blueplanet.com/resources/IT-plus-network-now-a-powerhouse-combination.html?utm_campaign=X1058319&utm_source=NWW&utm_term=BPVideo&utm_medium=sponsoredpost4 diff --git a/sources/talk/20190416 What SDN is and where it-s going.md b/sources/talk/20190416 What SDN is and where it-s going.md deleted file mode 100644 index 381c227b65..0000000000 --- a/sources/talk/20190416 What SDN is and where it-s going.md +++ /dev/null @@ -1,146 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (What SDN is and where it’s going) -[#]: via: (https://www.networkworld.com/article/3209131/what-sdn-is-and-where-its-going.html#tk.rss_all) -[#]: author: (Michael Cooney https://www.networkworld.com/author/Michael-Cooney/) - -What SDN is and where it’s going -====== -Software-defined networking (SDN) established a foothold in cloud computing, intent-based networking, and network security, with Cisco, VMware, Juniper and others leading the charge. 
-![seedkin / Getty Images][1] - -Hardware reigned supreme in the networking world until the emergence of software-defined networking (SDN), a category of technologies that separate the network control plane from the forwarding plane to enable more automated provisioning and policy-based management of network resources. - -SDN's origins can be traced to a research collaboration between Stanford University and the University of California at Berkeley that ultimately yielded the [OpenFlow][2] protocol in the 2008 timeframe. - -**[Learn more about the[difference between SDN and NFV][3]. Get regularly scheduled insights by [signing up for Network World newsletters][4]]** - -OpenFlow is only one of the first SDN canons, but it's a key component because it started the networking software revolution. OpenFlow defined a programmable network protocol that could help manage and direct traffic among routers and switches no matter which vendor made the underlying router or switch. - -In the years since its inception, SDN has evolved into a reputable networking technology offered by key vendors including Cisco, VMware, Juniper, Pluribus and Big Switch. The Open Networking Foundation develops myriad open-source SDN technologies as well. - -"Datacenter SDN no longer attracts breathless hype and fevered expectations, but the market is growing healthily, and its prospects remain robust," wrote Brad Casemore, IDC research vice president, data center networks, in a recent report, [_Worldwide Datacenter Software-Defined Networking Forecast, 2018–2022_][5]*. "*Datacenter modernization, driven by the relentless pursuit of digital transformation and characterized by the adoption of cloudlike infrastructure, will help to maintain growth, as will opportunities to extend datacenter SDN overlays and fabrics to multicloud application environments." - -SDN will be increasingly perceived as a form of established, conventional networking, Casemore said. 
- -IDC estimates that the worldwide data center SDN market will be worth more than $12 billion in 2022, recording a CAGR of 18.5% during the 2017–2022 period. The market generated revenue of nearly $5.15 billion in 2017, up more than 32.2% from 2016. - -In 2017, the physical network represented the largest segment of the worldwide datacenter SDN market, accounting for revenue of nearly $2.2 billion, or about 42% of the overall total revenue. In 2022, however, the physical network is expected to claim about $3.65 billion in revenue, slightly less than the $3.68 billion attributable to network virtualization overlays/SDN controller software but more than the $3.18 billion for SDN applications. - -“We're now at a point where SDN is better understood, where its use cases and value propositions are familiar to most datacenter network buyers and where a growing number of enterprises are finding that SDN offerings offer practical benefits,” Casemore said. “With SDN growth and the shift toward software-based network automation, the network is regaining lost ground and moving into better alignment with a wave of new application workloads that are driving meaningful business outcomes.” - -### **What is SDN? ** - -The idea of programmability is the basis for the most precise definition of what SDN is: technology that separates the control plane management of network devices from the underlying data plane that forwards network traffic. - -IDC broadens that definition of SDN by stating: “Datacenter SDN architectures feature software-defined overlays or controllers that are abstracted from the underlying network hardware, offering intent-or policy-based management of the network as a whole. 
This results in a datacenter network that is better aligned with the needs of application workloads through automated (thereby faster) provisioning, programmatic network management, pervasive application-oriented visibility, and where needed, direct integration with cloud orchestration platforms.” - -The driving ideas behind the development of SDN are myriad. For example, it promises to reduce the complexity of statically defined networks; make automating network functions much easier; and allow for simpler provisioning and management of networked resources, everywhere from the data center to the campus or wide area network. - -Separating the control and data planes is the most common way to think of what SDN is, but it is much more than that, said Mike Capuano, chief marketing officer for [Pluribus][6]. - -“At its heart SDN has a centralized or distributed intelligent entity that has an entire view of the network, that can make routing and switching decisions based on that view,” Capuano said. “Typically, network routers and switches only know about their neighboring network gear. But with a properly configured SDN environment, that central entity can control everything, from easily changing policies to simplifying configuration and automation across the enterprise.” - -### How does SDN support edge computing, IoT and remote access? - -A variety of networking trends have played into the central idea of SDN. Distributing computing power to remote sites, moving data center functions to the [edge][7], adopting cloud computing, and supporting [Internet of Things][8] environments – each of these efforts can be made easier and more cost efficient via a properly configured SDN environment. - -Typically in an SDN environment, customers can see all of their devices and TCP flows, which means they can slice up the network from the data or management plane to support a variety of applications and configurations, Capuano said. 
So users can more easily segment an IoT application from the production world if they want, for example. - -Some SDN controllers have the smarts to see that the network is getting congested and, in response, pump up bandwidth or processing to make sure remote and edge components don’t suffer latency. - -SDN technologies also help in distributed locations that have few IT personnel on site, such as an enterprise branch office or service provider central office, said Michael Bushong, vice president of enterprise and cloud marketing at Juniper Networks. - -“Naturally these places require remote and centralized delivery of connectivity, visibility and security. SDN solutions that centralize and abstract control and automate workflows across many places in the network, and their devices, improve operational reliability, speed and experience,” Bushong said. - -### **How does SDN support intent-based networking?** - -Intent-based networking ([IBN][9]) has a variety of components, but basically is about giving network administrators the ability to define what they want the network to do, and having an automated network management platform create the desired state and enforce policies to ensure what the business wants happens. - -“If a key tenet of SDN is abstracted control over a fleet of infrastructure, then the provisioning paradigm and dynamic control to regulate infrastructure state is necessarily higher level,” Bushong said. 
“Policy is closer to declarative intent, moving away from the minutia of individual device details and imperative and reactive commands.” - -IDC says that intent-based networking “represents an evolution of SDN to achieve even greater degrees of operational simplicity, automated intelligence, and closed-loop functionality.” - -For that reason, IBN represents a notable milestone on the journey toward autonomous infrastructure that includes a self-driving network, which will function much like the self-driving car, producing desired outcomes based on what network operators and their organizations wish to accomplish, Casemore stated. - -“While the self-driving car has been designed to deliver passengers safely to their destination with minimal human intervention, the self-driving network, as part of autonomous datacenter infrastructure, eventually will achieve similar outcomes in areas such as network provisioning, management, and troubleshooting — delivering applications and data, dynamically creating and altering network paths, and providing security enforcement with minimal need for operator intervention,” Casemore stated. - -While IBN technologies are relatively young, Gartner says by 2020, more than 1,000 large enterprises will use intent-based networking systems in production, up from less than 15 in the second quarter of 2018. - -### **How does SDN help customers with security?** - -SDN enables a variety of security benefits. A customer can split up a network connection between an end user and the data center and have different security settings for the various types of network traffic. A network could have one public-facing, low security network that does not touch any sensitive information. Another segment could have much more fine-grained remote access control with software-based [firewall][10] and encryption policies on it, which allow sensitive data to traverse over it. 
- -“For example, if a customer has an IoT group it doesn’t feel is all that mature with regards to security, via the SDN controller you can segment that group off away from the critical high-value corporate traffic,” Capuano stated. “SDN users can roll out security policies across the network from the data center to the edge and if you do all of this on top of white boxes, deployments can be 30 – 60 percent cheaper than traditional gear.” - -The ability to look at a set of workloads and see if they match a given security policy is a key benefit of SDN, especially as data is distributed, said Thomas Scheibe, vice president of product management for Cisco’s Nexus and ACI product lines. - -"The ability to deploy a whitelist security model like we do with ACI [Application Centric Infrastructure] that lets only specific entities access explicit resources across your network fabric is another key security element SDN enables," Scheibe said. - -A growing number of SDN platforms now support [microsegmentation][11], according to Casemore. - -“In fact, micro-segmentation has developed as a notable use case for SDN. As SDN platforms are extended to support multicloud environments, they will be used to mitigate the inherent complexity of establishing and maintaining consistent network and security policies across hybrid IT landscapes,” Casemore said. - -### **What is SDN’s role in cloud computing?** - -SDN’s role in the move toward [private cloud][12] and [hybrid cloud][13] adoption seems a natural. In fact, big SDN players such as Cisco, Juniper and VMware have all made moves to tie together enterprise data center and cloud worlds. - -Cisco's ACI Anywhere package would, for example, let policies configured through Cisco's SDN APIC (Application Policy Infrastructure Controller) use native APIs offered by a public-cloud provider to orchestrate changes within both the private and public cloud environments, Cisco said. 
- -“As organizations look to scale their hybrid cloud environments, it will be critical to leverage solutions that help improve productivity and processes,” said [Bob Laliberte][14], a senior analyst with Enterprise Strategy Group, in a recent [Network World article][15]. “The ability to leverage the same solution, like Cisco’s ACI, in your own private-cloud environment as well as across multiple public clouds will enable organizations to successfully scale their cloud environments.” - -Growth of public and private clouds and enterprises' embrace of distributed multicloud application environments will have an ongoing and significant impact on data center SDN, representing both a challenge and an opportunity for vendors, said IDC’s Casemore. - -“Agility is a key attribute of digital transformation, and enterprises will adopt architectures, infrastructures, and technologies that provide for agile deployment, provisioning, and ongoing operational management. In a datacenter networking context, the imperative of digital transformation drives adoption of extensive network automation, including SDN,” Casemore said. - -### Where does SD-WAN fit in? - -The software-defined wide area network ([SD-WAN][16]) is a natural application of SDN that extends the technology over a WAN. While the SDN architecture is typically the underpinning in a data center or campus, SD-WAN takes it a step further. - -At its most basic, SD-WAN lets companies aggregate a variety of network connections – including MPLS, 4G LTE and DSL – into a branch or network edge location and have a software management platform that can turn up new sites, prioritize traffic and set security policies. - -SD-WAN's driving principle is to simplify the way big companies turn up new links to branch offices, better manage the way those links are utilized – for data, voice or video – and potentially save money in the process. 
- -[SD-WAN][17] lets networks route traffic based on centrally managed roles and rules, no matter what the entry and exit points of the traffic are, and with full security. For example, if a user in a branch office is working in Office365, SD-WAN can route their traffic directly to the closest cloud data center for that app, improving network responsiveness for the user and lowering bandwidth costs for the business. - -"SD-WAN has been a promised technology for years, but in 2019 it will be a major driver in how networks are built and re-built," Anand Oswal, senior vice president of engineering in Cisco’s Enterprise Networking Business, said a Network World [article][18] earlier this year. - -It's a profoundly hot market with tons of players including [Cisco][19], VMware, Silver Peak, Riverbed, Aryaka, Fortinet, Nokia and Versa. - -IDC says the SD-WAN infrastructure market will hit $4.5 billion by 2022, growing at a more than 40% yearly clip between now and then. - --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3209131/what-sdn-is-and-where-its-going.html#tk.rss_all - -作者:[Michael Cooney][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Michael-Cooney/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2019/04/what-is-sdn_2_where-is-it-going_arrows_fork-in-the-road-100793314-large.jpg -[2]: https://www.networkworld.com/article/2202144/data-center-faq-what-is-openflow-and-why-is-it-needed.html -[3]: https://www.networkworld.com/article/3206709/lan-wan/what-s-the-difference-between-sdn-and-nfv.html -[4]: https://www.networkworld.com/newsletters/signup.html -[5]: https://www.idc.com/getdoc.jsp?containerId=US43862418 -[6]: 
https://www.networkworld.com/article/3192318/pluribus-recharges-expands-software-defined-network-platform.html -[7]: https://www.networkworld.com/article/3224893/what-is-edge-computing-and-how-it-s-changing-the-network.html -[8]: https://www.networkworld.com/article/3207535/what-is-iot-how-the-internet-of-things-works.html -[9]: https://www.networkworld.com/article/3202699/what-is-intent-based-networking.html -[10]: https://www.networkworld.com/article/3230457/what-is-a-firewall-perimeter-stateful-inspection-next-generation.html -[11]: https://www.networkworld.com/article/3247672/what-is-microsegmentation-how-getting-granular-improves-network-security.html -[12]: https://www.networkworld.com/article/2159885/cloud-computing-gartner-5-things-a-private-cloud-is-not.html -[13]: https://www.networkworld.com/article/3233132/what-is-hybrid-cloud-computing.html -[14]: https://www.linkedin.com/in/boblaliberte90/ -[15]: https://www.networkworld.com/article/3336075/cisco-serves-up-flexible-data-center-options.html -[16]: https://www.networkworld.com/article/3031279/sd-wan-what-it-is-and-why-you-ll-use-it-one-day.html -[17]: https://www.networkworld.com/article/3031279/sd-wan/sd-wan-what-it-is-and-why-you-ll-use-it-one-day.html -[18]: https://www.networkworld.com/article/3332027/cisco-touts-5-technologies-that-will-change-networking-in-2019.html -[19]: https://www.networkworld.com/article/3322937/what-will-be-hot-for-cisco-in-2019.html diff --git a/sources/talk/20190417 Clearing up confusion between edge and cloud.md b/sources/talk/20190417 Clearing up confusion between edge and cloud.md deleted file mode 100644 index 722051e8a7..0000000000 --- a/sources/talk/20190417 Clearing up confusion between edge and cloud.md +++ /dev/null @@ -1,69 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Clearing up confusion between edge and cloud) -[#]: via: 
(https://www.networkworld.com/article/3389364/clearing-up-confusion-between-edge-and-cloud.html#tk.rss_all) -[#]: author: (Anne Taylor https://www.networkworld.com/author/Anne-Taylor/) - -Clearing up confusion between edge and cloud -====== -The benefits of edge computing are not just hype; however, that doesn’t mean you should throw cloud computing initiatives to the wind. -![iStock][1] - -Edge computing and cloud computing are sometimes discussed as if they’re mutually exclusive approaches to network infrastructure. While they may function in different ways, utilizing one does not preclude the use of the other. - -Indeed, [Futurum Research][2] found that, among companies that have deployed edge projects, only 15% intend to separate these efforts from their cloud computing initiatives — largely for security or compartmentalization reasons. - -So then, what’s the difference, and how do edge and cloud work together? - -**Location, location, location** - -Moving data and processing to the cloud, as opposed to on-premises data centers, has enabled the business to move faster, more efficiently, less expensively — and in many cases, more securely. - -Yet cloud computing is not without challenges, particularly: - - * Users will abandon a graphics-heavy website if it doesn’t load quickly. So, imagine the lag for compute-heavy processing associated artificial intelligence or machine learning functions. - - * The strength of network connectivity is crucial for large data sets. As enterprises increasingly generate data, particularly with the adoption of Internet of Things (IoT), traditional cloud connections will be insufficient. - - - - -To make up for the lack of speed and connectivity with cloud, processing for mission-critical applications will need to occur closer to the data source. Maybe that’s a robot on the factory floor, digital signage at a retail store, or an MRI machine in a hospital. 
That’s edge computing, which reduces the distance the data must travel and thereby boosts the performance and reliability of applications and services. - -**One doesn’t supersede the other** - -That said, the benefits gained by edge computing don’t negate the need for cloud. In many cases, IT will now become a decision-maker in terms of best usage for each. For example, edge might make sense for devices running processing-power-hungry apps such as IoT, artificial intelligence, and machine learning. And cloud will work for apps where time isn’t necessarily of the essence, like inventory or big-data projects. - -> “By being able to triage the types of data processing on the edge versus that heading to the cloud, we can keep both systems running smoothly – keeping our customers and employees safe and happy,” [writes Daniel Newman][3], principal analyst for Futurum Research. - -And in reality, edge will require cloud. “To enable digital transformation, you have to build out the edge computing side and connect it with the cloud,” [Tony Antoun][4], senior vice president of edge and digital at GE Digital, told _Automation World_. “It’s a journey from the edge to the cloud and back, and the cycle keeps continuing. You need both to enrich and boost the business and take advantage of different points within this virtual lifecycle.” - -**Ensuring resiliency of cloud and edge** - -Both edge and cloud computing require careful consideration to the underlying processing power. Connectivity and availability, no matter the application, are always critical measures. - -But especially for the edge, it will be important to have a resilient architecture. Companies should focus on ensuring security, redundancy, connectivity, and remote management capabilities. - -Discover how your edge and cloud computing environments can coexist at [APC.com][5]. 
- --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3389364/clearing-up-confusion-between-edge-and-cloud.html#tk.rss_all - -作者:[Anne Taylor][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Anne-Taylor/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2019/04/istock-612507606-100793995-large.jpg -[2]: https://futurumresearch.com/edge-computing-from-edge-to-enterprise/ -[3]: https://futurumresearch.com/edge-computing-data-centers/ -[4]: https://www.automationworld.com/article/technologies/cloud-computing/its-not-edge-vs-cloud-its-both -[5]: https://www.apc.com/us/en/solutions/business-solutions/edge-computing.jsp diff --git a/sources/talk/20190417 Startup MemVerge combines DRAM and Optane into massive memory pool.md b/sources/talk/20190417 Startup MemVerge combines DRAM and Optane into massive memory pool.md deleted file mode 100644 index 71ddf70826..0000000000 --- a/sources/talk/20190417 Startup MemVerge combines DRAM and Optane into massive memory pool.md +++ /dev/null @@ -1,58 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Startup MemVerge combines DRAM and Optane into massive memory pool) -[#]: via: (https://www.networkworld.com/article/3389358/startup-memverge-combines-dram-and-optane-into-massive-memory-pool.html#tk.rss_all) -[#]: author: (Andy Patrizio https://www.networkworld.com/author/Andy-Patrizio/) - -Startup MemVerge combines DRAM and Optane into massive memory pool -====== -MemVerge bridges two technologies that are already a bridge. 
-![monsitj / Getty Images][1] - -A startup called MemVerge has announced software to combine regular DRAM with Intel’s Optane DIMM persistent memory into a single clustered storage pool and without requiring any changes to applications. - -MemVerge has been working with Intel in developing this new hardware platform for close to two years. It offers what it calls a Memory-Converged Infrastructure (MCI) to allow existing apps to use Optane DC persistent memory. It's architected to integrate seamlessly with existing applications. - -**[ Read also:[Mass data fragmentation requires a storage rethink][2] ]** - -Optane memory is designed to sit between high-speed memory and [solid-state drives][3] (SSDs) and acts as a cache for the SSD, since it has speed comparable to DRAM but SSD persistence. With Intel’s new Xeon Scalable processors, this can make up to 4.5TB of memory available to a processor. - -Optane runs in one of two modes: Memory Mode and App Direct Mode. In Memory Mode, the Optane memory functions like regular memory and is not persistent. In App Direct Mode, it functions as the SSD cache but apps don’t natively support it. They need to be tweaked to function properly in Optane memory. - -As it was explained to me, apps aren’t designed for persistent storage because the data is already in memory on powerup rather than having to load it from storage. So, the app has to know memory doesn’t go away and that it does not need to shuffle data back and forth between storage and memory. Therefore, apps natively don’t work in persistent memory. - -### Why didn't Intel think of this? - -All of which really begs a question I can’t get answered, at least not immediately: Why didn’t Intel think of this when it created Optane in the first place? - -MemVerge has what it calls Distributed Memory Objects (DMO) hypervisor technology to provide a logical convergence layer to run data-intensive workloads at memory speed with guaranteed data consistency across multiple systems. 
This allows Optane memory to process and derive insights from the enormous amounts of data in real time. - -That’s because MemVerge’s technology makes random access as fast as sequential access. Normally, random access is slower than sequential because of all the jumping around with random access vs. reading one sequential file. But MemVerge can handle many small files as fast as it handles one large file. - -MemVerge itself is actually software, with a single API for both DRAM and Optane. It’s also available via a hyperconverged server appliance that comes with 2 Cascade Lake processors, up to 512 GB DRAM, 6TB of Optane memory, and 360TB of NVMe physical storage capacity. - -However, all of this is still vapor. MemVerge doesn’t expect to ship a beta product until at least June. - -Join the Network World communities on [Facebook][4] and [LinkedIn][5] to comment on topics that are top of mind. - --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3389358/startup-memverge-combines-dram-and-optane-into-massive-memory-pool.html#tk.rss_all - -作者:[Andy Patrizio][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Andy-Patrizio/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2019/02/big_data_center_server_racks_storage_binary_analytics_by_monsitj_gettyimages-951389152_3x2-100787358-large.jpg -[2]: https://www.networkworld.com/article/3323580/mass-data-fragmentation-requires-a-storage-rethink.html -[3]: https://www.networkworld.com/article/3326058/what-is-an-ssd.html -[4]: https://www.facebook.com/NetworkWorld/ -[5]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190417 Want to the know future of IoT- Ask the developers.md b/sources/talk/20190417 Want to the 
know future of IoT- Ask the developers.md deleted file mode 100644 index 4f96f34b2b..0000000000 --- a/sources/talk/20190417 Want to the know future of IoT- Ask the developers.md +++ /dev/null @@ -1,119 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Want to the know future of IoT? Ask the developers!) -[#]: via: (https://www.networkworld.com/article/3389877/want-to-the-know-future-of-iot-ask-the-developers.html#tk.rss_all) -[#]: author: (Fredric Paul https://www.networkworld.com/author/Fredric-Paul/) - -Want to the know future of IoT? Ask the developers! -====== -A new survey of IoT developers reveals that connectivity, performance, and standards are growing areas of concern as IoT projects hit production. -![Avgust01 / Getty Images][1] - -It may be a cliché that software developers rule the world, but if you want to know the future of an important technology, it pays to look at what the developers are doing. With that in mind, there are some real, on-the-ground insights for the entire internet of things (IoT) community to be gained in a new [survey of more than 1,700 IoT developers][2] (pdf) conducted by the [Eclipse Foundation][3]. - -### IoT connectivity concerns - -Perhaps not surprisingly, security topped the list of concerns, easily outpacing other IoT worries. But that's where things begin to get interesting. More than a fifth (21%) of IoT developers cited connectivity as a challenge, followed by data collection and analysis (19%), performance (18%), privacy (18%), and standards (16%). - -Connectivity rose to second place after being the number three IoT concern for developers last year. Worries over security and data collection and analysis, meanwhile, actually declined slightly year over year. (Concerns over performance, privacy, and standards also increased significantly from last year.) 
- -**[ Learn more:[Download a PDF bundle of five essential articles about IoT in the enterprise][4] ]** - -“If you look at the list of developers’ top concerns with IoT in the survey,” said [Mike Milinkovich][5], executive director of the Eclipse Foundation via email, “I think connectivity, performance, and standards stand out — those are speaking to the fact that the IoT projects are getting real, that they’re getting out of sandboxes and into production.” - -“With connectivity in IoT,” Milinkovich continued, “everything seems straightforward until you have a sensor in a corner somewhere — narrowband or broadband — and physical constraints make it hard to connect." - -He also cited a proliferation of incompatible technologies that is driving developer concerns over connectivity. - -![][6] - -### IoT standards and interoperability - -Milinkovich also addressed one of [my personal IoT bugaboos: interoperability][7]. “Standards is a proxy for interoperability” among products from different vendors, he explained, which is an “elusive goal” in industrial IoT (IIoT). - -**[[Learn Java from beginning concepts to advanced design patterns in this comprehensive 12-part course!][8] ]** - -“IIoT is about breaking down the proprietary silos and re-tooling the infrastructure that’s been in our factories and logistics for many years using OSS standards and implementations — standard sets of protocols as opposed to vendor-specific protocols,” he said. - -That becomes a big issue when you’re deploying applications in the field and different manufacturers are using different protocols or non-standard extensions to existing protocols and the machines can’t talk to each other. - -**[ Also read:[Interoperability is the key to IoT success][7] ]** - -“This ties back to the requirement of not just having open standards, but more robust implementations of those standards in open source stacks,” Milinkovich said. 
“To keep maturing, the market needs not just standards, but out-of-the-box interoperability between devices.” - -“Performance is another production-grade concern,” he said. “When you’re in development, you think you know the bottlenecks, but then you discover the real-world issues when you push to production.” - -### Cloudy developments for IoT - -The survey also revealed that in some ways, IoT is very much aligned with the larger technology community. For example, IoT use of public and hybrid cloud architectures continues to grow. Amazon Web Services (AWS) (34%), Microsoft Azure (23%), and Google Cloud Platform (20%) are the leading IoT cloud providers, just as they are throughout the industry. If anything, AWS’ lead may be smaller in the IoT space than it is in other areas, though reliable cloud-provider market share figures are notoriously hard to come by. - -But Milinkovich sees industrial IoT as “a massive opportunity for hybrid cloud” because many industrial IoT users are very concerned about minimizing latency with their factory data, what he calls “their gold.” He sees factories moving towards hybrid cloud environments, leveraging “modern infrastructure technology like Kubernetes, and building around open protocols like HTTP and MQTT while getting rid of the older proprietary protocols.” - -### How IoT development is different - -In some ways, the IoT development world doesn’t seem much different than wider software development. For example, the top IoT programming languages mirror [the popularity of those languages][9] over all, with C and Java ruling the roost. (C led the way on constrained devices, while Java was the top choice for gateway and edge nodes, as well as the IoT cloud.) - -![][10] - -But Milinkovich noted that when developing for embedded or constrained devices, the programmer’s interface to a device could be through any number of esoteric hardware connectors. 
- -“You’re doing development using emulators and simulators, and it’s an inherently different and more complex interaction between your dev environment and the target for your application,” he said. “Sometimes hardware and software are developed in tandem, which makes it even more complicated.” - -For example, he explained, building an IoT solution may bring in web developers working on front ends using JavaScript and Angular, while backend cloud developers control cloud infrastructure and embedded developers focus on building software to run on constrained devices. - -No wonder IoT developers have so many things to worry about. - -**More about IoT:** - - * [What is the IoT? How the internet of things works][11] - * [What is edge computing and how it’s changing the network][12] - * [Most powerful Internet of Things companies][13] - * [10 Hot IoT startups to watch][14] - * [The 6 ways to make money in IoT][15] - * [What is digital twin technology? [and why it matters]][16] - * [Blockchain, service-centric networking key to IoT success][17] - * [Getting grounded in IoT networking and security][4] - * [Building IoT-ready networks must become a priority][18] - * [What is the Industrial IoT? [And why the stakes are so high]][19] - - - -Join the Network World communities on [Facebook][20] and [LinkedIn][21] to comment on topics that are top of mind. 
- --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3389877/want-to-the-know-future-of-iot-ask-the-developers.html#tk.rss_all - -作者:[Fredric Paul][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Fredric-Paul/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2019/02/iot_internet_of_things_mobile_connections_by_avgust01_gettyimages-1055659210_2400x1600-100788447-large.jpg -[2]: https://drive.google.com/file/d/17WEobD5Etfw5JnoKC1g4IME_XCtPNGGc/view -[3]: https://www.eclipse.org/ -[4]: https://www.networkworld.com/article/3269736/internet-of-things/getting-grounded-in-iot-networking-and-security.html -[5]: https://blogs.eclipse.org/post/mike-milinkovich/measuring-industrial-iot%E2%80%99s-evolution -[6]: https://images.idgesg.net/images/article/2019/04/top-developer-concerns-2019-eclipse-foundation-100793974-large.jpg -[7]: https://www.networkworld.com/article/3204529/interoperability-is-the-key-to-iot-success.html -[8]: https://pluralsight.pxf.io/c/321564/424552/7490?u=https%3A%2F%2Fwww.pluralsight.com%2Fpaths%2Fjava -[9]: https://blog.newrelic.com/technology/popular-programming-languages-2018/ -[10]: https://images.idgesg.net/images/article/2019/04/top-iot-programming-languages-eclipse-foundation-100793973-large.jpg -[11]: https://www.networkworld.com/article/3207535/internet-of-things/what-is-the-iot-how-the-internet-of-things-works.html -[12]: https://www.networkworld.com/article/3224893/internet-of-things/what-is-edge-computing-and-how-it-s-changing-the-network.html -[13]: https://www.networkworld.com/article/2287045/internet-of-things/wireless-153629-10-most-powerful-internet-of-things-companies.html -[14]: 
https://www.networkworld.com/article/3270961/internet-of-things/10-hot-iot-startups-to-watch.html -[15]: https://www.networkworld.com/article/3279346/internet-of-things/the-6-ways-to-make-money-in-iot.html -[16]: https://www.networkworld.com/article/3280225/internet-of-things/what-is-digital-twin-technology-and-why-it-matters.html -[17]: https://www.networkworld.com/article/3276313/internet-of-things/blockchain-service-centric-networking-key-to-iot-success.html -[18]: https://www.networkworld.com/article/3276304/internet-of-things/building-iot-ready-networks-must-become-a-priority.html -[19]: https://www.networkworld.com/article/3243928/internet-of-things/what-is-the-industrial-iot-and-why-the-stakes-are-so-high.html -[20]: https://www.facebook.com/NetworkWorld/ -[21]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190418 -Fiber-in-air- 5G network research gets funding.md b/sources/talk/20190418 -Fiber-in-air- 5G network research gets funding.md deleted file mode 100644 index 4a82248cde..0000000000 --- a/sources/talk/20190418 -Fiber-in-air- 5G network research gets funding.md +++ /dev/null @@ -1,64 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: ('Fiber-in-air' 5G network research gets funding) -[#]: via: (https://www.networkworld.com/article/3389881/extreme-5g-network-research-gets-funding.html#tk.rss_all) -[#]: author: (Patrick Nelson https://www.networkworld.com/author/Patrick-Nelson/) - -'Fiber-in-air' 5G network research gets funding -====== -A consortium of tech companies and universities plan to aggressively investigate the exploitation of D-Band to develop a new variant of 5G infrastructure. -![Peshkova / Getty Images][1] - -Wireless transmission at data rates of around 45gbps could one day be commonplace, some engineers say. “Fiber-in-air” is how the latest variant of 5G infrastructure is being described. 
To get there, a Britain-funded consortium of chip makers, universities, and others intend to aggressively investigate the exploitation of D-Band. That part of the radio spectrum is at 151-174.8 GHz in millimeter wavelengths (mm-wave) and hasn’t been used before. - -The researchers intend to do it by riffing on a now roughly 70-year-old gun-like electron-sending device that can trace its roots back through the annals of radio history: The Traveling Wave Tube, or TWT, an electron gun-magnet-combo that was used in the development of television and still brings space images back to Earth. - -**[ Also read:[The time of 5G is almost here][2] ]** - -D-Band, the spectrum the researchers want to use, has the advantage that it’s wide, so theoretically it should be good for fast, copious data rates. The problem with it though, and the reason it hasn’t thus far been used, is that it’s subject to monkey-wrenching from atmospheric conditions such as rain, explains IQE, a semiconductor wafer and materials producer involved in the project, in a [press release][3]. The team says attenuation is fixable, though. Their solution is the now-aging TWTs. - -The group, which includes BT, Filtronic, Glasgow University, Intel, Nokia Bell Labs, Optocap, and Teledyne e2v, has secured funding of the equivalent of $1.12 million USD from the U.K.’s [Engineering and Physical Sciences Research Council (EPSRC)][4]. That’s the principal public funding body for engineering science research there. - -### Tapping the power of TWTs - -The DLINK system, as the team calls it, will use a high-power vacuum TWT with a special, newly developed tunneling diode and a modulator. Two bands of 10 GHz, each will deliver the throughput, [explains Lancaster University on its website][5]. The tubes are, in fact, special amplifiers that produce 10 Watts. That’s 10 times what an equivalent solid-state solution would likely produce at the same spot in the band, they say. 
Energy is basically sent from the electron beam to an electric field generated by the input signal. - -Despite TWTs being around for eons, “no D-band TWTs are available in the market.” The development of one is key to these fiber-in-air speeds, the researchers say. - -They will include “unprecedented data rate and transmission distance,” IQE writes. - -The TWT device, although used extensively in space wireless communications since its invention in the 1950s, is overlooked as a significant contributor to global communications systems, say a group of French researchers working separately from this project, who recently argue that TWTs should be given more recognition. - -TWT’s are “the unsung heroes of space exploration,” the Aix-Marseille Université researchers say in [an article on publisher Springer’s website][6]. Springer is promoting the group's 2019-published [paper][7] in the European Physical Journal H in which they delve into the history of the simple electron gun and magnet device. - -“Its role in the history of wireless communications and in the space conquest is significant, but largely ignored,” they write in their paper. - -They will be pleased to hear it maybe isn’t going away anytime soon. - -Join the Network World communities on [Facebook][8] and [LinkedIn][9] to comment on topics that are top of mind. 
- --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3389881/extreme-5g-network-research-gets-funding.html#tk.rss_all - -作者:[Patrick Nelson][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Patrick-Nelson/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2019/02/abstract_data_coding_matrix_structure_network_connections_by_peshkova_gettyimages-897683944_2400x1600-100788487-large.jpg -[2]: https://www.networkworld.com/article/3354477/mobile-world-congress-the-time-of-5g-is-almost-here.html -[3]: https://www.iqep.com/media/2019/03/iqe-partners-in-key-wireless-communications-project-for-5g-infrastructure-(1)/ -[4]: https://epsrc.ukri.org/ -[5]: http://wp.lancs.ac.uk/dlink/ -[6]: https://www.springer.com/gp/about-springer/media/research-news/all-english-research-news/traveling-wave-tubes--the-unsung-heroes-of-space-exploration/16578434 -[7]: https://link.springer.com/article/10.1140%2Fepjh%2Fe2018-90023-1 -[8]: https://www.facebook.com/NetworkWorld/ -[9]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190423 Open architecture and open source - The new wave for SD-WAN.md b/sources/talk/20190423 Open architecture and open source - The new wave for SD-WAN.md deleted file mode 100644 index 8be9e14ada..0000000000 --- a/sources/talk/20190423 Open architecture and open source - The new wave for SD-WAN.md +++ /dev/null @@ -1,186 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Open architecture and open source – The new wave for SD-WAN?) 
-[#]: via: (https://www.networkworld.com/article/3390151/open-architecture-and-open-source-the-new-wave-for-sd-wan.html#tk.rss_all) -[#]: author: (Matt Conran https://www.networkworld.com/author/Matt-Conran/) - -Open architecture and open source – The new wave for SD-WAN? -====== -As networking continues to evolve, you certainly don't want to break out a forklift every time new technologies are introduced. Open architecture would allow you to replace the components of a system, and give you more flexibility to control your own networking destiny. -![opensource.com \(CC BY-SA 2.0\)][1] - -I recently shared my thoughts about the [role of open source in networking][2]. I discussed two significant technological changes that we have witnessed. I call them waves, and these waves will redefine how we think about networking and security. - -The first wave signifies that networking is moving to the software so that it can run on commodity off-the-shelf hardware. The second wave is the use of open source technologies, thereby removing the barriers to entry for new product innovation and rapid market access. This is especially supported in the SD-WAN market rush. - -Seemingly, we are beginning to see less investment in hardware unless there is a specific segment that needs to be resolved. But generally, software-based platforms are preferred as they bring many advantages. It is evident that there has been a technology shift. We have moved networking from hardware to software and this shift has positive effects for users, enterprises and service providers. - -**[ Don’t miss[customer reviews of top remote access tools][3] and see [the most powerful IoT companies][4] . | Get daily insights by [signing up for Network World newsletters][5]. ]** - -### Performance (hardware vs software) - -There has always been a misconception that the hardware-based platforms are faster due to the hardware acceleration that exists in the network interface controller (NIC). 
However, this is a mistaken belief. Nowadays, software platforms can reach similar performance levels as compared to hardware-based platforms. - -Initially, people viewed hardware as a performance-based vehicle but today this does not hold true anymore. Even the bigger vendors are switching to software-based platforms. We are beginning to see this almost everywhere in networking. - -### SD-WAN and open source - -SD-WAN really took off quite rapidly due to the availability of open source. It enabled the vendors to leverage all the available open source components and then create their solution on top. By and large, SD-WAN vendors used the open source as the foundation of their solution and then added additional proprietary code over the baseline. - -However, even when using various open source components, there is still a lot of work left for these vendors to make it to a complete SD-WAN solution, even for reaching a baseline of centrally managed routers with flexible network architecture control, not to talk about complete feature set of SD-WAN. - -The result of the work done by these vendors is still closed products so the fact they are using open source components in their products is merely a time-to-market advantage but not a big benefit to the end users (enterprises) or service providers launching hosted services with these products. They are still limited in flexibility and vendor diversity is only achieved through a multi-vendor strategy which in practice means launching multiple silo services each based on a different SD-WAN vendor without real selection of the technologies that make each of the SD-WAN services they launch. 
- -I recently came across a company called [Flexiwan][6], their goal is to fundamentally change this limitation of SD-WAN by offering a full open source solution that, as they say, “includes integration points in the core of the system that allow for 3rd party logic to be integrated in an efficient way.” They call this an open architecture, which, in practical terms, means a service provider or enterprise can integrate his own application logic into the core of the SD-WAN router…or select best of breed sub-technologies or applications instead of having these dictated by the vendor himself. I believe there is the possibility of another wave of SD-WAN with a fully open source and open architecture to SD-WAN. - -This type of architecture brings many benefits to users, enterprises and service providers, especially when compared to the typical lock-in of bigger vendors, such as Cisco and VMware. - -With an open source open architecture, it’s easier to control the versions and extend more flexibility by using the software offered by different providers. It offers the ability to switch providers, not to mention being easier to install and upgrade the versions. - -### SD-WAN, open source and open architecture - -An SD-WAN solution that is an open source with open architecture provides a modular and decomposed type of SD-WAN. This enables the selection of elements to provide a solution. - -For example, enterprises and service providers can select the best-of-breed technologies from independent vendors, such as deep packet inspection (DPI), security, wide area network (WAN) optimization, session border controller (SBC), VoIP and other traffic specific optimization logic. - -Some SD-WAN vendors define an open architecture in such a way that they just have a set of APIs, for example, northbound APIs, to enable one to build management or do service chaining. 
This is one approach to an open architecture but in fact, it’s pretty limited since it does not bring the full benefits that an open architecture should offer. - -### Open source and the fear of hacking - -However, when I think about an open source and open architecture for SD-WAN, the first thing that comes to mind is bad actors. What about the code? If it’s an open source, the bad actor can find vulnerabilities, right? - -The community is a powerful force and will fix any vulnerability. Also with open source, the vendor, who is the one responsible for the open source component will fix the vulnerability much faster than a closed solution, where you are unaware of the vulnerability until a fix is released. - -### The SD-WAN evolution - -Before we go any further, let’s examine the history of SD-WAN and its origins, how we used to connect from the wide area network (WAN) to other branches via private or public links. - -SD-WAN offers the ability to connect your organization to a WAN. This could be connecting to the Internet or other branches, to optimally deliver applications with a good user-experience. Essentially, SD-WAN allows the organizations to design the architecture of their network dynamically by means of software. - -### In the beginning, there was IPSec - -It started with IPSec. Around two decades back, in 1995, the popular design was that of mesh architecture. As a result, we had a lot of point-to-point connections. Firstly, mesh architectures with IPSec VPNs are tiresome to manage as there is a potential for 100s of virtual private network (VPN) configurations. - -Authentically, IPSec started with two fundamental goals. The first was the tunneling protocol that would allow organizations to connect the users or other networks to their private network. This enabled the enterprises to connect to networks that they did not have a direct route to. 
- -The second goal of IPSec was to encrypt packets at the network layer and therefore securing the data in motion. Let’s face it: at that time, IPSec was terrible for complicated multi-site interconnectivity and high availability designs. If left to its defaults, IPSec is best suited for static designs. - -This was the reason why we had to step in the next era where additional functionality was added to IPSec. For example, IPSec had issues in supporting routing protocols using multicast. To overcome this, IPSec over generic routing encapsulation (GRE) was introduced. - -### The next era of SD-WAN - -During the journey to 2008, one could argue that the next era of WAN connectivity was when additional features were added to IPSec. At this time IPSec became known as a “Swiss army knife.” It could do many things but not anything really well. - -Back then, you could create multiple links, but it failed to select the traffic over these links other than by using simple routing. You needed to add a routing protocol. For advanced agile architectures, IPSec had to be incorporated with other higher-level protocols. - -Features were then added based on measuring the quality. Link quality features were added to analyze any delay, drops and to select alternative links for your applications. We began to add tunnels, multi-links and to select the traffic based on the priority, not just based on the routing. - -The most common way to the tunnel was to have IPSec over GRE. You have the GRE tunnel that enables you to send any protocol end-to-end by using IPSec for the encryption. All this functionality was added to achieve and create dynamic tunnels over IPSec and to optimize the IPSec tunnels. - -This was a move in the right direction, but it was still complex. It was not centrally managed and was error-prone with complex configurations that were unable to manage large deployments. IPSec had far too many limitations, so in the mid-2000s early SD-WAN vendors started cropping up. 
Some of these vendors enabled the enterprises to aggregate many separate digital subscriber lines (DSL) links into one faster logical link. At the same time, others added time stamps and/or sequence numbers to packets to improve the network performance and security when running over best effort (internet) links. - -International WAN connectivity was a popular focus since the cost delta between the Internet and private multiprotocol label switching (MPLS) was 10x+ different. Primarily, enterprises wanted the network performance and security of MPLS without having to pay a premium for it. - -Most of these solutions sat in-front or behind a traditional router from companies like Cisco. Evidently, just like WAN Optimization vendors, these were additional boxes/solutions that enterprises added to their networks. - -### The next era of SD-WAN, circa 2012 - -It was somewhere in 2012 that we started to see the big rush to the SD-WAN market. Vendors such as Velocloud, Viptela and a lot of the other big players in the SD-WAN market kicked off with the objective of claiming some of the early SD-WAN success and going after the edge router market with a full feature router replacement and management simplicity. - -Open source networking software and other open source components for managing the traffic enabled these early SD-WAN vendors to lay a foundation where a lot of the code base was open source. They would then “glue” it together and add their own additional features. - -Around this time, Intel was driving data plane development kit (DPDK) and advanced encryption standard (AES) instruction set, which enabled that software to run on commodity hardware. The SD-WAN solutions were delivered as closed solutions where each solution used its own set of features. The features and technologies chosen for each vendor were different and not compatible with each other. - -### The recent era of SD-WAN, circa 2017 - -A tipping point in 2017 was the gold rush for SD-WAN deployments. 
Everyone wanted to have SD-WAN as fast as possible. - -The SD-WAN market has taken off, as seen by 50 vendors with competing, proprietary solutions and market growth curves with a CAGR of 100%. There is a trend of big vendors like Cisco, Vmware and Oracle acquiring startups to compete in this new market. - -As a result, Cisco, which is the traditional enterprise market leader in WAN routing solutions felt threatened since its IWAN solution, which had been around since 2008, was too complex (a 900-page configuration and operations manual). Besides, its simple solution based on the Meraki acquisition was not feature-rich enough for the large enterprises. - -With their acquisition of Viptela, Cisco currently has a 13% of the market share, and for the first time in decades, it is not the market leader. The large cloud vendors, such as Google and Facebook are utilizing their own technology for routing within their very large private networks. - -At some point between 2012 and 2017, we witnessed the service providers adopting SD-WAN. This introduced the onset and movement of managed SD-WAN services. As a result, the service providers wanted to have SD-WAN on the menu for their customers. But there were many limitations in the SD-WAN market, as it was offered as a closed-box solution, giving the service providers limited control. - -At this point surfaced an expectation of change, as service providers and enterprises looked for more control. Customers can get better functionality from a multi-vendor approach than from a single vendor. - -### Don’t forget DIY SD-WAN - -Up to 60% of service providers and enterprises within the USA are now looking at DIY SD-WAN. A DIY SD-WAN solution is not where the many pieces of open source are taken and caste into something. The utmost focus is on the solution that can be self-managed but buy from a vendor. 
- -Today, the majority of the market is looking for managed solutions and the upper section that has the expertise wants to be equipped with more control options. - -### SD-WAN vendors attempting everything - -There is a trend that some vendors try to do everything with SD-WAN. As a result, whether you are an enterprise or a service provider, you are locked into a solution that is dictated by the SD-WAN vendor. - -The SD-WAN vendors have made the supplier choice or developed what they think is relevant. Evidently, some vendors are using stacks and software development kits (SDKs) that they purchased, for example, for deep packet inspection (DPI). - -Ultimately, you are locked into a specific solution that the vendor has chosen for you. If you are a service provider, you might disapprove of this limitation and if you are an enterprise with specific expertise, you might want to zoom in for more control. - -### All-in-one security vendors - -Many SD-WAN vendors promote themselves as security companies. But would you prefer to buy a security solution from an SD-WAN vendor or from an experienced vendor, such as Checkpoint? - -Both: enterprise and service providers want to have a choice, but with an integrated black box security solution, you don't have a choice. The more you integrate and throw into the same box, the stronger the vendor lock-in is and the weaker the flexibility. - -Essentially, with this approach, you are going for the lowest common denominator instead of the highest. Ideally, the technology of the services that you deploy on your network requires expertise. One vendor cannot be an expert in everything. - -An open architecture lies in a place for experts in different areas to join together and add their own specialist functionality. - -### Encrypted traffic - -As a matter of fact, what is not encrypted today will be encrypted tomorrow. 
The vendor of the application can perform intelligent things that the SD-WAN vendor cannot because they control both sides. Hence, if you can put something inside the SD-WAN edge device, they can make smart decisions even if the traffic is encrypted. - -But in the case of traditional SD-WANs, there needs to be a corporation with a content provider. However, with an open architecture, you can integrate anything, and nothing prevents the integration. A lot of traffic is encrypted and it's harder to manage encrypted traffic. However, an open architecture would allow the content providers to manage the traffic more effectively. - -### 2019 and beyond: what is an open architecture? - -Cloud providers and enterprises have discovered that 90% of the user experience and security problems arise due to the network: between where the cloud provider resides and where the end-user consumes the application. - -Therefore, both cloud providers and large enterprise with digital strategies are focusing on building their solutions based on open source stacks. Having a viable open source SD-WAN solution is the next step in the SD-WAN evolution, where it moves to involve the community in the solution. This is similar to what happens with containers and tools. - -Now, since we’re in 2019, are we going to witness a new era of SD-WAN? Are we moving to the open architecture with an open source SD-WAN solution? An open architecture should be the core of the SD-WAN infrastructure, where additional technologies are integrated inside the SD-WAN solution and not only complementary VNFs. There is an interface and native APIs that allow you to integrate logic into the router. This way, the router will be able to intercept and act according to the traffic. - -So, if I’m a service provider and have my own application, I would want to write logic that would be able to communicate with my application. 
Without an open architecture, the service providers can’t really offer differentiation and change the way SD-WAN makes decisions and interacts with the traffic of their applications. - -There is a list of various technologies that you need to be an expert in to be able to integrate. And each one of these technologies can be a company, for example, DPI, VoIP optimization, and network monitoring to name a few. An open architecture will allow you to pick and choose these various elements as per your requirements. - -Networking is going through a lot of changes and it will continue to evolve with the passage of time. As a result, you wouldn’t want something that forces you to break out a forklift each time new technologies are introduced. Primarily, open architecture allows you to replace the components of the system and add code or elements that handle specific traffic and applications. - -### Open source - -Open source gives you more flexibility to control your own destiny. It offers the ability to select your own services that you want to be applied to your system. It provides security in the sense that if something happens to the vendor or there is a vulnerability in the system, you know that you are backed by the community that can fix such misadventures. - -From the perspective of the business model, it makes a more flexible and cost-effective system. Besides, with open source, the total cost of ownership will also be lower. - -**This article is published as part of the IDG Contributor Network.[Want to Join?][7]** - -Join the Network World communities on [Facebook][8] and [LinkedIn][9] to comment on topics that are top of mind. 
- --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3390151/open-architecture-and-open-source-the-new-wave-for-sd-wan.html#tk.rss_all - -作者:[Matt Conran][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Matt-Conran/ -[b]: https://github.com/lujun9972 -[1]: https://images.techhive.com/images/article/2017/03/6554314981_7f95641814_o-100714680-large.jpg -[2]: https://www.networkworld.com/article/3338143/the-role-of-open-source-in-networking.html -[3]: https://www.networkworld.com/article/3262145/lan-wan/customer-reviews-top-remote-access-tools.html#nww-fsb -[4]: https://www.networkworld.com/article/2287045/internet-of-things/wireless-153629-10-most-powerful-internet-of-things-companies.html#nww-fsb -[5]: https://www.networkworld.com/newsletters/signup.html#nww-fsb -[6]: https://flexiwan.com/sd-wan-open-source/ -[7]: /contributor-network/signup.html -[8]: https://www.facebook.com/NetworkWorld/ -[9]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190424 How data storage will shift to blockchain.md b/sources/talk/20190424 How data storage will shift to blockchain.md deleted file mode 100644 index b31653e0f7..0000000000 --- a/sources/talk/20190424 How data storage will shift to blockchain.md +++ /dev/null @@ -1,70 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (How data storage will shift to blockchain) -[#]: via: (https://www.networkworld.com/article/3390722/how-data-storage-will-shift-to-blockchain.html#tk.rss_all) -[#]: author: (Patrick Nelson https://www.networkworld.com/author/Patrick-Nelson/) - -How data storage will shift to blockchain -====== -Move over cloud and traditional in-house enterprise 
data center storage, distributed storage based on blockchain may be arriving imminently. -![Cybrain / Getty Images][1] - -If you thought cloud storage was digging in its heels to become the go-to method for storing data, and at the same time grabbing share from own-server, in-house storage, you may be interested to hear that some think both are on the way out. Instead organizations will use blockchain-based storage. - -Decentralized blockchain-based file storage will be more secure, will make it harder to lose data, and will be cheaper than anything seen before, say organizations actively promoting the slant on encrypted, distributed technology. - -**[ Read also:[Why blockchain (might be) coming to an IoT implementation near you][2] ]** - -### Storing transactional data in a blockchain - -China company [FileStorm][3], which describes itself in marketing materials as the first [Interplanetary File Storage][4] (IPFS) platform on blockchain, says the key to making it all work is to only store the transactional data in blockchain. The actual data files, such as large video files, are distributed in IPFS. - -IPFS is a distributed, peer-to-peer file storage protocol. File parts come from multiple computers all at the same time, supposedly making the storage hardy. FileStorm adds blockchain on top of it for a form of transactional indexing. - -“Blockchain is designed to store transactions forever, and the data can never be altered, thus a trustworthy system is created,” says Raymond Fu, founder of FileStorm and chief product officer of MOAC, the underlying blockchain system used, in a video on the FileStorm website. - -“The blocks are used to store only small transactional data,” he says. You can’t store the large files on it. Those are distributed. Decentralized data storage platforms are needed for added decentralized blockchain, he says. - -YottaChain, another blockchain storage start-up project is coming at the whole thing from a slightly different angle. 
It claims its non-IPFS system is more secure partly because it performs deduplication after encryption. - -“Data is 10,000 times more secure than [traditional] centralized storage,” it says on its [website][5]. Deduplication eliminates duplicated or redundant data. - -### Disrupting data storage - -“Blockchain will disrupt data storage,” [says BlockApps separately][6]. The blockchain backend platform provider says advantages to this new generation of storage include that decentralizing data provides more security and privacy. That's due in part because it's harder to hack than traditional centralized storage. That the files are spread piecemeal among nodes, conceivably all over the world, makes it impossible for even the participating node to view the contents of the complete file, it says. - -Sharding, which is the term for the breaking apart and node-spreading of the actual data, is secured through keys. Markets can award token coins for mining, and coins can be spent to gain storage. Excess storage can even be sold. And cryptocurrencies have been started to “incentivize usage and to create a market for buying and selling decentralized storage,” BlockApps explains. - -The final parts of this new storage mix are that lost files are minimized because data can be duplicated simply — the data sets, for example, can be stored multiple times for error correction — and costs are reduced due to efficiencies. - -Square Tech (Shenzhen) Co., which makes blockchain file storage nodes, says in its marketing materials that it intends to build service centers globally to monitor its nodes in real time. Interestingly, another area the company has gotten involved in is the internet of things (IoT), and [it says][7] it wants “to unite the technical resources, capital, and human resources of the IoT industry and blockchain.” Perhaps we end up with a form of the internet of storage things? 
- -“The entire cloud computing industry will be disrupted by blockchain technology in just a few short years,” says BlockApps. Dropbox and Amazon “may even become overpriced and obsolete if they do not find ways to integrate with the advances.” - -Join the Network World communities on [Facebook][8] and [LinkedIn][9] to comment on topics that are top of mind. - --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3390722/how-data-storage-will-shift-to-blockchain.html#tk.rss_all - -作者:[Patrick Nelson][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Patrick-Nelson/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2019/02/chains_binary_data_blockchain_security_by_cybrain_gettyimages-926677890_2400x1600-100788435-large.jpg -[2]: https://www.networkworld.com/article/3386881/why-blockchain-might-be-coming-to-an-iot-implementation-near-you.html -[3]: http://filestorm.net/ -[4]: https://ipfs.io/ -[5]: https://www.yottachain.io/ -[6]: https://blockapps.net/blockchain-disrupt-data-storage/ -[7]: http://www.sikuaikeji.com/ -[8]: https://www.facebook.com/NetworkWorld/ -[9]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190424 No, drone delivery still isn-t ready for prime time.md b/sources/talk/20190424 No, drone delivery still isn-t ready for prime time.md deleted file mode 100644 index c948f458ce..0000000000 --- a/sources/talk/20190424 No, drone delivery still isn-t ready for prime time.md +++ /dev/null @@ -1,91 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (No, drone delivery still isn’t ready for prime time) -[#]: via: 
(https://www.networkworld.com/article/3390677/drone-delivery-not-ready-for-prime-time.html#tk.rss_all) -[#]: author: (Fredric Paul https://www.networkworld.com/author/Fredric-Paul/) - -No, drone delivery still isn’t ready for prime time -====== -Despite incremental progress and limited regulatory approval in the U.S. and Australia, drone delivery still isn’t a viable option in the vast majority of use cases. -![Sorry imKirk \(CC0\)][1] - -April has a been a big month for drone delivery. First, [Alphabet’s Wing Aviation drones got approval from Australia’s Civil Aviation Safety Authority (CASA)][2], for public deliveries in the country, and this week [Wing earned Air Carrier Certification from the U.S. Federal Aviation Administration][3]. These two regulatory wins got lot of people got very excited. Finally, the conventional wisdom exulted, drone delivery is actually becoming a reality. - -Not so fast. - -### Drone delivery still in pilot/testing mode - -Despite some small-scale successes and the first signs of regulatory acceptance, drone delivery remains firmly in the carefully controlled pilot/test phase (and yes, I know drones don’t carry pilots). - -**[ Also read:[Coffee drone delivery: Ideas this bad could kill the internet of things][4] ]** - -For example, despite getting FAA approval to begin commercial deliveries, Wing is still working up beginning delivery trials to test the technology and gather information in Virginia later this year. - -But what about that public approval from CASA for deliveries outside Canberra? That’s [a real business][5] now, right? - -Well, yes and no. - -On the “yes” side, the Aussie approval reportedly came after 18 months of tests, 70,000 flights, and more than 3,000 actual deliveries of products from local coffee shops and pharmacies. So, at least some people somewhere in the world are actually getting stuff dropped at their doors by drones. 
- -In the “no” column, however, goes the fact that the approval covers only about 100 suburban homes, though more are planned to be added “in the coming weeks and months.” More importantly, the approval includes strict limits on when and where the drones can go. No crossing main roads, no nighttime deliveries, and prohibitions to stay away from people. And drone-eligible customers need a safety briefing! - -### Safety briefings required for customers - -That still sounds like a small-scale test, not a full-scale commercial roll-out. And while I think drone-safety classes are probably a good idea – and the testing period apparently passed without any injuries – even the perceived _need_ for them is not be a great advertisement for rapid expansion of drone deliveries. - -Ironically, though, a bigger issue than protecting people from the drones, perhaps, is protecting the drones from people. Instructions to stay 2 to 5 meters away from folks will help, but as I’ve previously addressed, these things are already seen as attractive nuisances and vandalism targets. Further raising the stakes, many local residents were said to be annoyed at the noise created by the drones. Now imagine those contraptions buzzing right by you all loaded down with steaming hot coffee or delicious ice cream. - -And even with all those caveats, no one is talking about the key factors in making drone deliveries a viable business: How much will those deliveries cost and who will pay? For a while, the desire to explore the opportunity will drive investment, but that won’t last forever. If drone deliveries aren’t cost effective for businesses, they won’t spread very far. - -From the customer perspective, most drone delivery tests are not yet charging for the service. If and when they start carrying fees as well as purchases, the novelty factor will likely entice many shoppers to pony up to get their items airlifted directly to their homes. But that also won’t last. 
Drone delivery will have to demonstrate that it’s better — faster, cheaper, or more reliable — than the existing alternatives to find its niche. - -### Drone deliveries are fast, commercial roll-out will be slow - -Long term, I have no doubt that drone delivery will eventually become an accepted part of the transportation infrastructure. I don’t necessarily buy into Wing’s prediction of an AU $40 million drone delivery market in Australia coming to pass anytime soon, but real commercial operations seem inevitable. - -It’s just going to be more limited than many proponents claim, and it’s likely to take a lot longer than expected to become mainstream. For example, despite ongoing testing, [Amazon has already missed Jeff Bezos’ 2018 deadline to begin commercial drone deliveries][6], and we haven’t heard much about [Walmart’s drone delivery plans][7] lately. And while tests by a number of companies continue in locations ranging from Iceland and Finland to the U.K. and the U.S. have created a lot of interest, they have not yet translated into widespread availability. - -Apart from the issue of how much consumers really want their stuff delivered by an armada of drones (a [2016 U.S. Post Office study][8] found that 44% of respondents liked the idea, while 34% didn’t — and 37% worried that drone deliveries might not be safe), a lot has to happen before that vision becomes reality. - -At a minimum, successful implementations of full-scale commercial drone delivery will require better planning, better-thought-out business cases, more rugged and efficient drone technology, and significant advances in flight control and autonomous flight. Like drone deliveries themselves, all that stuff is coming; it just hasn’t arrived yet. 
- -**More about drones and the internet of things:** - - * [Drone defense -- powered by IoT -- is now a thing][9] - * [Ideas this bad could kill the Internet of Things][4] - * [10 reasons Amazon's drone delivery plan still won't fly][10] - * [Amazon’s successful drone delivery test doesn’t really prove anything][11] - - - -Join the Network World communities on [Facebook][12] and [LinkedIn][13] to comment on topics that are top of mind. - --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3390677/drone-delivery-not-ready-for-prime-time.html#tk.rss_all - -作者:[Fredric Paul][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Fredric-Paul/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2018/07/drone_mountains_by_sorry_imkirk_cc0_via_unsplash_1200x800-100763763-large.jpg -[2]: https://medium.com/wing-aviation/wing-launches-commercial-air-delivery-service-in-canberra-5da134312474 -[3]: https://medium.com/wing-aviation/wing-becomes-first-certified-air-carrier-for-drones-in-the-us-43401883f20b -[4]: https://www.networkworld.com/article/3301277/ideas-this-bad-could-kill-the-internet-of-things.html -[5]: https://wing.com/australia/canberra -[6]: https://www.businessinsider.com/jeff-bezos-predicted-amazon-would-be-making-drone-deliveries-by-2018-2018-12?r=US&IR=T -[7]: https://www.networkworld.com/article/2999828/walmart-delivery-drone-plans.html -[8]: https://www.uspsoig.gov/sites/default/files/document-library-files/2016/RARC_WP-17-001.pdf -[9]: https://www.networkworld.com/article/3309413/drone-defense-powered-by-iot-is-now-a-thing.html -[10]: https://www.networkworld.com/article/2900317/10-reasons-amazons-drone-delivery-plan-still-wont-fly.html -[11]: 
https://www.networkworld.com/article/3185478/amazons-successful-drone-delivery-test-doesnt-really-prove-anything.html -[12]: https://www.facebook.com/NetworkWorld/ -[13]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190426 Profiling D-s Garbage Collection with Bpftrace.md b/sources/talk/20190426 Profiling D-s Garbage Collection with Bpftrace.md deleted file mode 100644 index ce0a408ae6..0000000000 --- a/sources/talk/20190426 Profiling D-s Garbage Collection with Bpftrace.md +++ /dev/null @@ -1,412 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Profiling D's Garbage Collection with Bpftrace) -[#]: via: (https://theartofmachinery.com/2019/04/26/bpftrace_d_gc.html) -[#]: author: (Simon Arneaud https://theartofmachinery.com) - -Profiling D's Garbage Collection with Bpftrace -====== - -Recently I’ve been playing around with using [`bpftrace`][1] to trace and profile D’s garbage collector. Here are some examples of the cool stuff that’s possible. - -### What is `bpftrace`? - -It’s a high-level debugging tool based on Linux’s eBPF. “eBPF” stands for “extended Berkely packet filter”, but that’s just a historical name and doesn’t mean much today. It’s really a virtual machine (like the [JVM][2]) that sits inside the Linux kernel and runs code in a special eBPF instruction set similar to normal machine code. Users are expected to write short programs in high-level languages (including C and others) that get compiled to eBPF and loaded into the kernel on the fly to do interesting things. - -As you might guess, eBPF is powerful for instrumenting a running kernel, but it also supports instrumenting user-space programs. - -### What you need - -First you need a Linux kernel. Sorry BSD, Mac OS and Windows users. (But some of you can use [DTrace][3].) - -Also, not just any Linux kernel will work. 
This stuff is relatively new, so you’ll need a modern kernel with BPF-related features enabled. You might need to use the newest (or even testing) version of a distro. Here’s how to check if your kernel meets the requirements: - -``` -$ uname -r -4.19.27-gentoo-r1sub -$ # 4.9+ recommended by bpftrace -$ zgrep CONFIG_UPROBES /proc/config.gz -CONFIG_UPROBES=y -$ # Also need -$ # CONFIG_BPF=y -$ # CONFIG_BPF_SYSCALL=y -$ # CONFIG_BPF_JIT=y -$ # CONFIG_HAVE_EBPF_JIT=y -$ # CONFIG_BPF_EVENTS=y -``` - -Of course, [you also need to install the `bpftrace` tool itself][4]. - -### `bpftrace` D “Hello World” - -Here’s a quick test you can do to make sure you’ve got everything working. First, let’s make a Hello World D binary: - -``` -$ pwd -/tmp/ -$ cat hello.d -import std.stdio; - -void main() -{ - writeln("Hello World"); -} -$ dmd hello.d -$ ./hello -Hello World -$ -``` - -Now let’s `bpftrace` it. `bpftrace` uses a high-level language that’s obviously inspired by AWK. I’ll explain enough to understand the post, but you can also check out the [`bpftrace` reference guide][5] and [one-liner tutorial][6]. The minimum you need to know is that a bpftrace program is a list of `event:name /filter predicate/ { program(); code(); }` blocks that define code snippets to be run on events. - -This time I’m only using Linux uprobes, which trigger on functions in user-space programs. The syntax is `uprobe:/path/to/binary:functionName`. One gotcha is that D “[mangles][7]” (encodes) function names before inserting them into the binary. If we want to trigger on the D code’s `main()` function, we need to use the mangled name: `_Dmain`. (By the way, `nm program | grep ' _D.*functionName'` is one quick trick for finding mangled names.) - -Run this `bpftrace` invocation in a terminal as root user: - -``` -# bpftrace -e 'uprobe:/tmp/hello:_Dmain { printf("D Hello World run with process ID %d\n", pid); }' -Attaching 1 probe... 
-``` - -While this is running, it’ll print a message every time the D Hello World program is executed by any user in any terminal. Press `Ctrl+C` to quit. - -All `bpftrace` code can be run directly from the command line like in the example above. But to make things easier to read from now on, I’ll make neatly formatted scripts. - -### Tracing some real code - -I’m using [D-Scanner][8], the D code analyser, as an example of a simple but non-trivial D workload. One nice thing about `bpftrace` and uprobes is that no modification of the program is needed. I’m just using a normal build of the `dscanner` tool, and using the [D runtime source code][9] as a codebase to analyse. - -Before using `bpftrace`, let’s try using [the profiling that’s built into the D GC implementation itself][10]: - -``` -$ dscanner --DRT-gcopt=profile:1 --etags -... - Number of collections: 85 - Total GC prep time: 0 milliseconds - Total mark time: 17 milliseconds - Total sweep time: 6 milliseconds - Total page recovery time: 3 milliseconds - Max Pause Time: 1 milliseconds - Grand total GC time: 28 milliseconds -GC summary: 35 MB, 85 GC 28 ms, Pauses 17 ms < 1 ms -``` - -(If you can make a custom build, you can also use [the D runtime GC API to get stats][11].) - -There’s one more gotcha when using `bpftrace` on `dscanner` to trace GC functions: the binary file we specify for the uprobe needs to be the binary file that actually contains the GC functions. That could be the D binary itself, or it could be a shared D runtime library. Try running `ldd /path/to/d_program` to list any linked shared libraries, and if the output contains `druntime`, use that full path when specifying uprobes. My `dscanner` binary doesn’t link to a shared D runtime, so I just use the full path to `dscanner`. (Running `which dscanner` gives `/usr/local/bin/dscanner` for me.) - -Anyway, all the GC functions live in a `gc` module, so their mangled names start with `_D2gc`. 
Here’s a `bpftrace` invocation that tallies GC function calls. For convenience, it also includes a uretprobe to automatically exit when `main()` returns. The output is sorted to make it a little easier to read. - -``` -# cat dumpgcfuncs.bt -uprobe:/usr/local/bin/dscanner:_D2gc* -{ - @[probe] = count(); -} - -uretprobe:/usr/local/bin/dscanner:_Dmain -{ - exit(); -} -# bpftrace dumpgcfuncs.bt | sort - -@[uprobe:/usr/local/bin/dscanner:_D2gc4impl12conservativeQw14ConservativeGC10freeNoSyncMFNbNiPvZv]: 31 -@[uprobe:/usr/local/bin/dscanner:_D2gc4impl12conservativeQw14ConservativeGC10initializeFKCQCd11gcinterface2GCZv]: 1 -@[uprobe:/usr/local/bin/dscanner:_D2gc4impl12conservativeQw14ConservativeGC11queryNoSyncMFNbPvZS4core6memory8BlkInfo_]: 44041 -@[uprobe:/usr/local/bin/dscanner:_D2gc4impl12conservativeQw14ConservativeGC11removeRangeMFNbNiPvZv]: 2 -@[uprobe:/usr/local/bin/dscanner:_D2gc4impl12conservativeQw14ConservativeGC12extendNoSyncMFNbPvmmxC8TypeInfoZm]: 251946 -@[uprobe:/usr/local/bin/dscanner:_D2gc4impl12conservativeQw14ConservativeGC14collectNoStackMFNbZv]: 1 -@[uprobe:/usr/local/bin/dscanner:_D2gc4impl12conservativeQw14ConservativeGC18fullCollectNoStackMFNbZv]: 1 -@[uprobe:/usr/local/bin/dscanner:_D2gc4impl12conservativeQw14ConservativeGC4freeMFNbNiPvZv]: 31 -@[uprobe:/usr/local/bin/dscanner:_D2gc4impl12conservativeQw14ConservativeGC5queryMFNbPvZS4core6memory8BlkInfo_]: 47704 -@[uprobe:/usr/local/bin/dscanner:_D2gc4impl12conservativeQw14ConservativeGC6__ctorMFZCQBzQBzQBxQCiQBn]: 1 -@[uprobe:/usr/local/bin/dscanner:_D2gc4impl12conservativeQw14ConservativeGC6callocMFNbmkxC8TypeInfoZPv]: 80 -@[uprobe:/usr/local/bin/dscanner:_D2gc4impl12conservativeQw14ConservativeGC6extendMFNbPvmmxC8TypeInfoZm]: 251946 -@[uprobe:/usr/local/bin/dscanner:_D2gc4impl12conservativeQw14ConservativeGC6mallocMFNbmkxC8TypeInfoZPv]: 12423 -@[uprobe:/usr/local/bin/dscanner:_D2gc4impl12conservativeQw14ConservativeGC6qallocMFNbmkxC8TypeInfoZS4core6memory8BlkInfo_]: 948995 
-@[uprobe:/usr/local/bin/dscanner:_D2gc4impl12conservativeQw14ConservativeGC7getAttrMFNbPvZ2goFNbPSQClQClQCjQCu3GcxQBbZk]: 5615 -@[uprobe:/usr/local/bin/dscanner:_D2gc4impl12conservativeQw14ConservativeGC7getAttrMFNbPvZk]: 5615 -@[uprobe:/usr/local/bin/dscanner:_D2gc4impl12conservativeQw14ConservativeGC8addRangeMFNbNiPvmxC8TypeInfoZv]: 2 -@[uprobe:/usr/local/bin/dscanner:_D2gc4impl12conservativeQw14ConservativeGC__T9runLockedS_DQCeQCeQCcQCnQBs10freeNoSyncMFNbNiPvZvS_DQDsQDsQDqQEb8freeTimelS_DQErQErQEpQFa8numFreeslTQCdZQEbMFNbNiKQCrZv]: 31 -@[uprobe:/usr/local/bin/dscanner:_D2gc4impl12conservativeQw14ConservativeGC__T9runLockedS_DQCeQCeQCcQCnQBs11queryNoSyncMFNbPvZS4core6memory8BlkInfo_S_DQEmQEmQEkQEv9otherTimelS_DQFmQFmQFkQFv9numOtherslTQDaZQExMFNbKQDmZQDn]: 44041 -@[uprobe:/usr/local/bin/dscanner:_D2gc4impl12conservativeQw14ConservativeGC__T9runLockedS_DQCeQCeQCcQCnQBs12extendNoSyncMFNbPvmmxC8TypeInfoZmS_DQEfQEfQEdQEo10extendTimelS_DQFhQFhQFfQFq10numExtendslTQCwTmTmTxQDaZQFdMFNbKQDrKmKmKxQDvZm]: 251946 -@[uprobe:/usr/local/bin/dscanner:_D2gc4impl12conservativeQw14ConservativeGC__T9runLockedS_DQCeQCeQCcQCnQBs12mallocNoSyncMFNbmkKmxC8TypeInfoZPvS_DQEgQEgQEeQEp10mallocTimelS_DQFiQFiQFgQFr10numMallocslTmTkTmTxQCzZQFcMFNbKmKkKmKxQDsZQDl]: 961498 -@[uprobe:/usr/local/bin/dscanner:_D2gc4impl12conservativeQw14ConservativeGC__T9runLockedS_DQCeQCeQCcQCnQBs18fullCollectNoStackMFNbZ2goFNbPSQEaQEaQDyQEj3GcxZmTQvZQDfMFNbKQBgZm]: 1 -@[uprobe:/usr/local/bin/dscanner:_D2gc4impl12conservativeQw14ConservativeGC__T9runLockedS_DQCeQCeQCcQCnQBs7getAttrMFNbPvZ2goFNbPSQDqQDqQDoQDz3GcxQBbZkS_DQEoQEoQEmQEx9otherTimelS_DQFoQFoQFmQFx9numOtherslTQCyTQDlZQFdMFNbKQDoKQEbZk]: 5615 -@[uprobe:/usr/local/bin/dscanner:_D2gc4impl12conservativeQw15LargeObjectPool10allocPagesMFNbmZm]: 5597 -@[uprobe:/usr/local/bin/dscanner:_D2gc4impl12conservativeQw15LargeObjectPool13updateOffsetsMFNbmZv]: 10745 
-@[uprobe:/usr/local/bin/dscanner:_D2gc4impl12conservativeQw15LargeObjectPool7getInfoMFNbPvZS4core6memory8BlkInfo_]: 3844 -@[uprobe:/usr/local/bin/dscanner:_D2gc4impl12conservativeQw15SmallObjectPool7getInfoMFNbPvZS4core6memory8BlkInfo_]: 40197 -@[uprobe:/usr/local/bin/dscanner:_D2gc4impl12conservativeQw15SmallObjectPool9allocPageMFNbhZPSQChQChQCfQCq4List]: 15022 -@[uprobe:/usr/local/bin/dscanner:_D2gc4impl12conservativeQw3Gcx10smallAllocMFNbhKmkZ8tryAllocMFNbZb]: 955967 -@[uprobe:/usr/local/bin/dscanner:_D2gc4impl12conservativeQw3Gcx10smallAllocMFNbhKmkZPv]: 955912 -@[uprobe:/usr/local/bin/dscanner:_D2gc4impl12conservativeQw3Gcx11ToScanStack4growMFNbZv]: 1 -@[uprobe:/usr/local/bin/dscanner:_D2gc4impl12conservativeQw3Gcx11fullcollectMFNbbZm]: 85 -@[uprobe:/usr/local/bin/dscanner:_D2gc4impl12conservativeQw3Gcx11removeRangeMFNbNiPvZv]: 1 -@[uprobe:/usr/local/bin/dscanner:_D2gc4impl12conservativeQw3Gcx23updateCollectThresholdsMFNbZv]: 84 -@[uprobe:/usr/local/bin/dscanner:_D2gc4impl12conservativeQw3Gcx4markMFNbNlPvQcZv]: 253 -@[uprobe:/usr/local/bin/dscanner:_D2gc4impl12conservativeQw3Gcx5sweepMFNbZm]: 84 -@[uprobe:/usr/local/bin/dscanner:_D2gc4impl12conservativeQw3Gcx7markAllMFNbbZ14__foreachbody3MFNbKSQCm11gcinterface5RangeZi]: 85 -@[uprobe:/usr/local/bin/dscanner:_D2gc4impl12conservativeQw3Gcx7markAllMFNbbZv]: 85 -@[uprobe:/usr/local/bin/dscanner:_D2gc4impl12conservativeQw3Gcx7newPoolMFNbmbZPSQBtQBtQBrQCc4Pool]: 6 -@[uprobe:/usr/local/bin/dscanner:_D2gc4impl12conservativeQw3Gcx7recoverMFNbZm]: 84 -@[uprobe:/usr/local/bin/dscanner:_D2gc4impl12conservativeQw3Gcx8addRangeMFNbNiPvQcxC8TypeInfoZv]: 2 -@[uprobe:/usr/local/bin/dscanner:_D2gc4impl12conservativeQw3Gcx8bigAllocMFNbmKmkxC8TypeInfoZ15tryAllocNewPoolMFNbZb]: 5 -@[uprobe:/usr/local/bin/dscanner:_D2gc4impl12conservativeQw3Gcx8bigAllocMFNbmKmkxC8TypeInfoZ8tryAllocMFNbZb]: 5616 -@[uprobe:/usr/local/bin/dscanner:_D2gc4impl12conservativeQw3Gcx8bigAllocMFNbmKmkxC8TypeInfoZPv]: 5586 
-@[uprobe:/usr/local/bin/dscanner:_D2gc4impl12conservativeQw3Gcx8isMarkedMFNbNlPvZi]: 635 -@[uprobe:/usr/local/bin/dscanner:_D2gc4impl12conservativeQw3Gcx9allocPageMFNbhZPSQBuQBuQBsQCd4List]: 15024 -@[uprobe:/usr/local/bin/dscanner:_D2gc4impl12conservativeQw4Pool10initializeMFNbmbZv]: 6 -@[uprobe:/usr/local/bin/dscanner:_D2gc4impl12conservativeQw4Pool12freePageBitsMFNbmKxG4mZv]: 16439 -@[uprobe:/usr/local/bin/dscanner:_D2gc4impl5protoQo7ProtoGC4termMFZv]: 1 -@[uprobe:/usr/local/bin/dscanner:_D2gc4impl5protoQo7ProtoGC6qallocMFNbmkxC8TypeInfoZS4core6memory8BlkInfo_]: 1 -@[uprobe:/usr/local/bin/dscanner:_D2gc4impl5protoQo7ProtoGC8addRangeMFNbNiPvmxC8TypeInfoZv]: 1 -@[uprobe:/usr/local/bin/dscanner:_D2gc4impl6manualQp8ManualGC10initializeFKCQBp11gcinterface2GCZv]: 1 -@[uprobe:/usr/local/bin/dscanner:_D2gc9pooltable__T9PoolTableTSQBc4impl12conservativeQBy4PoolZQBr6insertMFNbNiPQBxZb]: 6 -@[uprobe:/usr/local/bin/dscanner:_D2gc9pooltable__T9PoolTableTSQBc4impl12conservativeQBy4PoolZQBr8findPoolMFNaNbNiPvZPQCe]: 302268 -@[uprobe:/usr/local/bin/dscanner:_D2gc9pooltable__T9PoolTableTSQBc4impl12conservativeQBy4PoolZQBr8minimizeMFNaNbNjZAPQCd]: 30 -Attaching 231 probes... -``` - -All these functions are in [`src/gc/`][12], and most of the interesting ones here are in [`src/gc/impl/conservative/`][13]. There are 85 calls to `_D2gc4impl12conservativeQw3Gcx11fullcollectMFNbbZm`, which [`ddemangle`][14] translates to `nothrow ulong gc.impl.conservative.gc.Gcx.fullcollect(bool)`. That matches up with the report from `--DRT-gcopt=profile:1`. - -The heart of the `bpftrace` program is `@[probe] = count();`. `@` prefixes a global variable, in this case a variable with an empty name (allowed by `bpftrace`). We’re using the variable as a map (like an associative array in D), and indexing it with `probe`, a built-in variable containing the name of the uprobe that was triggered. The tally is kept using the magic `count()` function. 
- -### Garbage collection timings - -How about something more interesting, like generating a profile of collection timings? This time, to get more data, I won’t make `bpftrace` exit as soon as the `dscanner` exits. I’ll keep it running and run `dscanner` 100 times before quitting `bpftrace` with `Ctrl+C`: - -``` -# cat gcprofile.bt -uprobe:/usr/local/bin/dscanner:_D2gc4impl12conservativeQw3Gcx11fullcollectMFNbbZm -{ - @t = nsecs; -} - -uretprobe:/usr/local/bin/dscanner:_D2gc4impl12conservativeQw3Gcx11fullcollectMFNbbZm / @t / -{ - @gc_times = hist(nsecs - @t); -} -# bpftrace gcprofile.bt -Attaching 2 probes... -^C - -@gc_times: -[64K, 128K) 138 |@ | -[128K, 256K) 1367 |@@@@@@@@@@ | -[256K, 512K) 6687 |@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@| -[512K, 1M) 7 | | -[1M, 2M) 301 |@@ | -``` - -Et voila! A log-scale histogram of the `nsecs` timestamp difference between entering and exiting `fullcollect()`. The times are in nanoseconds, so we see that most collections are taking less than half a millisecond, but we have tail cases that take 1-2ms. - -### Function arguments - -`bpftrace` provides `arg0`, `arg1`, `arg2`, etc. built-in variables for accessing the arguments to a traced function. There are a couple of complications with using them with D code, however. - -The first is that (at the binary level) `dmd` makes `extern(D)` functions (i.e., normal D functions) take arguments in the reverse order of `extern(C)` functions (that `bpftrace` is expecting). Suppose you have a simple three-argument function. If it’s using the C calling convention, `bpftrace` will recognise the first argument as `arg0`. If it’s using the D calling convention, however, it’ll be picked up as `arg2`. - -``` -extern(C) void cfunc(int arg0, int arg1, int arg2) -{ - // ... -} - -// (extern(D) is the default) -extern(D) void dfunc(int arg2, int arg1, int arg0) -{ - // ... 
-} -``` - -If you look at [the D ABI spec][15], you’ll notice that (just like in C++) there can be a couple of hidden arguments if the function is more complex. If `dfunc` above returned a large struct, there can be an extra hidden argument for implementing [copy elision][16], which means the first argument would actually be `arg3`, and `arg0` would be the hidden argument. If `dfunc` were also a member function, it would have a hidden `this` argument, which would bump up the first argument to `arg4`. - -To get the hang of this, you might need to experiment with tracing function calls with known arguments. - -### Allocation sizes - -Let’s get a histogram of the memory allocation request sizes. Looking at the list of GC functions traced earlier, and comparing it with the GC source code, it looks like we need to trace these functions and grab the `size` argument: - -``` -class ConservativeGC : GC -{ - // ... - void *malloc(size_t size, uint bits, const TypeInfo ti) nothrow; - void *calloc(size_t size, uint bits, const TypeInfo ti) nothrow; - BlkInfo qalloc( size_t size, uint bits, const TypeInfo ti) nothrow; - // ... -} -``` - -As class member functions, they have a hidden `this` argument as well. The last one, `qalloc()`, returns a struct, so it also has a hidden argument for copy elision. So `size` is `arg3` for the first two functions, and `arg4` for `qalloc()`. Time to run a trace: - -``` -# cat allocsizeprofile.bt -uprobe:/usr/local/bin/dscanner:_D2gc4impl12conservativeQw14ConservativeGC6mallocMFNbmkxC8TypeInfoZPv, -uprobe:/usr/local/bin/dscanner:_D2gc4impl12conservativeQw14ConservativeGC6callocMFNbmkxC8TypeInfoZPv -{ - @ = hist(arg3); -} - -uprobe:/usr/local/bin/dscanner:_D2gc4impl12conservativeQw14ConservativeGC6qallocMFNbmkxC8TypeInfoZS4core6memory8BlkInfo_ -{ - @ = hist(arg4); -} - -uretprobe:/usr/local/bin/dscanner:_Dmain -{ - exit(); -} -# bpftrace allocsizeprofile.bt -Attaching 4 probes... 
-@: -[2, 4) 2489 | | -[4, 8) 9324 |@ | -[8, 16) 46527 |@@@@@ | -[16, 32) 206324 |@@@@@@@@@@@@@@@@@@@@@@@ | -[32, 64) 448020 |@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@| -[64, 128) 147053 |@@@@@@@@@@@@@@@@@ | -[128, 256) 88072 |@@@@@@@@@@ | -[256, 512) 2519 | | -[512, 1K) 1830 | | -[1K, 2K) 3749 | | -[2K, 4K) 1668 | | -[4K, 8K) 256 | | -[8K, 16K) 2533 | | -[16K, 32K) 312 | | -[32K, 64K) 239 | | -[64K, 128K) 209 | | -[128K, 256K) 164 | | -[256K, 512K) 124 | | -[512K, 1M) 48 | | -[1M, 2M) 30 | | -[2M, 4M) 7 | | -[4M, 8M) 1 | | -[8M, 16M) 2 | | -``` - -So, we have a lot of small allocations, with a very long tail of larger allocations. Remember, size is on a log scale, so that long tail represents a very skewed distribution. - -### Small allocation hotspots - -Now for something more complex. Suppose we’re profiling our code and looking for low-hanging fruit for reducing the number of memory allocations. Code that makes a lot of small allocations tends to be a good candidate for this kind of refactoring. `bpftrace` lets us grab stack traces, which can be used to see what part of the main program caused an allocation. - -As of writing, there’s one little complication because of a limitation of `bpftrace`’s stack trace handling: it can only show meaningful function symbol names (as opposed to raw memory addresses) if `bpftrace` quits while the target program is still running. There’s [an open bug report for improving this behaviour][17], but in the meantime I just made sure `dscanner` took a long time, and that I shut down `bpftrace` first. - -Here’s how to grab the top three stack traces that lead to small (<16B) memory allocations with `qalloc()`: - -``` -# cat smallallocs.bt -uprobe:/usr/local/bin/dscanner:_D2gc4impl12conservativeQw14ConservativeGC6qallocMFNbmkxC8TypeInfoZS4core6memory8BlkInfo_ -{ - if (arg4 < 16) - { - @[ustack] = count(); - } -} - -END -{ - print(@, 3); - clear(@); -} -# bpftrace smallallocs.bt -Attaching 2 probes... 
-^C@[ - _D2gc4impl12conservativeQw14ConservativeGC6qallocMFNbmkxC8TypeInfoZS4core6memory8BlkInfo_+0 - _D2rt8lifetime12__arrayAllocFNaNbmxC8TypeInfoxQlZS4core6memory8BlkInfo_+236 - _d_arraysetlengthT+248 - _D8dscanner8analysis25label_var_same_name_check17LabelVarNameCheck9pushScopeMFZv+29 - _D8dscanner8analysis25label_var_same_name_check17LabelVarNameCheck9__mixin175visitMFxC6dparse3ast6ModuleZv+21 - _D8dscanner8analysis3run7analyzeFAyaxC6dparse3ast6ModulexSQCeQBy6config20StaticAnalysisConfigKS7dsymbol11modulecache11ModuleCacheAxS3std12experimental5lexer__T14TokenStructureThVQFpa305_0a20202020737472696e6720636f6d6d656e743b0a20202020737472696e6720747261696c696e67436f6d6d656e743b0a0a20202020696e74206f70436d702873697a655f7420692920636f6e73742070757265206e6f7468726f77204073616665207b0a202020202020202069662028696e646578203c2069292072657475726e202d313b0a202020202020202069662028696e646578203e2069292072657475726e20313b0a202020202020202072657475726e20303b0a202020207d0a0a20202020696e74206f70436d702872656620636f6e737420747970656f66287468697329206f746865722920636f6e73742070757265206e6f7468726f77204073616665207b0a202020202020202072657475726e206f70436d70286f746865722e696e646578293b0a202020207d0aZQYobZCQZv9container6rbtree__T12RedBlackTreeTSQBGiQBGd4base7MessageVQBFza62_20612e6c696e65203c20622e6c696e65207c7c2028612e6c696e65203d3d20622e6c696e6520262620612e636f6c756d6e203c20622e636f6c756d6e2920Vbi1ZQGt+11343 - _D8dscanner8analysis3run7analyzeFAAyaxSQBlQBf6config20StaticAnalysisConfigQBoKS6dparse5lexer11StringCacheKS7dsymbol11modulecache11ModuleCachebZb+337 - _Dmain+3618 - _D2rt6dmain211_d_run_mainUiPPaPUAAaZiZ6runAllMFZ9__lambda1MFZv+40 - _D2rt6dmain211_d_run_mainUiPPaPUAAaZiZ7tryExecMFMDFZvZv+32 - _D2rt6dmain211_d_run_mainUiPPaPUAAaZiZ6runAllMFZv+139 - _D2rt6dmain211_d_run_mainUiPPaPUAAaZiZ7tryExecMFMDFZvZv+32 - _d_run_main+463 - main+16 - __libc_start_main+235 - 0x41fd89415541f689 -]: 450 -@[ - 
_D2gc4impl12conservativeQw14ConservativeGC6qallocMFNbmkxC8TypeInfoZS4core6memory8BlkInfo_+0 - _D2rt8lifetime12__arrayAllocFNaNbmxC8TypeInfoxQlZS4core6memory8BlkInfo_+236 - _d_arrayappendcTX+1944 - _D8dscanner8analysis10unmodified16UnmodifiedFinder9pushScopeMFZv+61 - _D8dscanner8analysis10unmodified16UnmodifiedFinder5visitMFxC6dparse3ast6ModuleZv+21 - _D8dscanner8analysis3run7analyzeFAyaxC6dparse3ast6ModulexSQCeQBy6config20StaticAnalysisConfigKS7dsymbol11modulecache11ModuleCacheAxS3std12experimental5lexer__T14TokenStructureThVQFpa305_0a20202020737472696e6720636f6d6d656e743b0a20202020737472696e6720747261696c696e67436f6d6d656e743b0a0a20202020696e74206f70436d702873697a655f7420692920636f6e73742070757265206e6f7468726f77204073616665207b0a202020202020202069662028696e646578203c2069292072657475726e202d313b0a202020202020202069662028696e646578203e2069292072657475726e20313b0a202020202020202072657475726e20303b0a202020207d0a0a20202020696e74206f70436d702872656620636f6e737420747970656f66287468697329206f746865722920636f6e73742070757265206e6f7468726f77204073616665207b0a202020202020202072657475726e206f70436d70286f746865722e696e646578293b0a202020207d0aZQYobZCQZv9container6rbtree__T12RedBlackTreeTSQBGiQBGd4base7MessageVQBFza62_20612e6c696e65203c20622e6c696e65207c7c2028612e6c696e65203d3d20622e6c696e6520262620612e636f6c756d6e203c20622e636f6c756d6e2920Vbi1ZQGt+11343 - _D8dscanner8analysis3run7analyzeFAAyaxSQBlQBf6config20StaticAnalysisConfigQBoKS6dparse5lexer11StringCacheKS7dsymbol11modulecache11ModuleCachebZb+337 - _Dmain+3618 - _D2rt6dmain211_d_run_mainUiPPaPUAAaZiZ6runAllMFZ9__lambda1MFZv+40 - _D2rt6dmain211_d_run_mainUiPPaPUAAaZiZ7tryExecMFMDFZvZv+32 - _D2rt6dmain211_d_run_mainUiPPaPUAAaZiZ6runAllMFZv+139 - _D2rt6dmain211_d_run_mainUiPPaPUAAaZiZ7tryExecMFMDFZvZv+32 - _d_run_main+463 - main+16 - __libc_start_main+235 - 0x41fd89415541f689 -]: 450 -@[ - _D2gc4impl12conservativeQw14ConservativeGC6qallocMFNbmkxC8TypeInfoZS4core6memory8BlkInfo_+0 - 
_D2rt8lifetime12__arrayAllocFNaNbmxC8TypeInfoxQlZS4core6memory8BlkInfo_+236 - _d_arrayappendcTX+1944 - _D8dscanner8analysis3run7analyzeFAyaxC6dparse3ast6ModulexSQCeQBy6config20StaticAnalysisConfigKS7dsymbol11modulecache11ModuleCacheAxS3std12experimental5lexer__T14TokenStructureThVQFpa305_0a20202020737472696e6720636f6d6d656e743b0a20202020737472696e6720747261696c696e67436f6d6d656e743b0a0a20202020696e74206f70436d702873697a655f7420692920636f6e73742070757265206e6f7468726f77204073616665207b0a202020202020202069662028696e646578203c2069292072657475726e202d313b0a202020202020202069662028696e646578203e2069292072657475726e20313b0a202020202020202072657475726e20303b0a202020207d0a0a20202020696e74206f70436d702872656620636f6e737420747970656f66287468697329206f746865722920636f6e73742070757265206e6f7468726f77204073616665207b0a202020202020202072657475726e206f70436d70286f746865722e696e646578293b0a202020207d0aZQYobZCQZv9container6rbtree__T12RedBlackTreeTSQBGiQBGd4base7MessageVQBFza62_20612e6c696e65203c20622e6c696e65207c7c2028612e6c696e65203d3d20622e6c696e6520262620612e636f6c756d6e203c20622e636f6c756d6e2920Vbi1ZQGt+680 - _D8dscanner8analysis3run7analyzeFAAyaxSQBlQBf6config20StaticAnalysisConfigQBoKS6dparse5lexer11StringCacheKS7dsymbol11modulecache11ModuleCachebZb+337 - _Dmain+3618 - _D2rt6dmain211_d_run_mainUiPPaPUAAaZiZ6runAllMFZ9__lambda1MFZv+40 - _D2rt6dmain211_d_run_mainUiPPaPUAAaZiZ7tryExecMFMDFZvZv+32 - _D2rt6dmain211_d_run_mainUiPPaPUAAaZiZ6runAllMFZv+139 - _D2rt6dmain211_d_run_mainUiPPaPUAAaZiZ7tryExecMFMDFZvZv+32 - _d_run_main+463 - main+16 - __libc_start_main+235 - 0x41fd89415541f689 -]: 450 -``` - -It looks like a lot of the small allocations are due to a red-black tree in `ModuleCache`. - -### What’s next? - -I think these examples already show that `bpftrace` is a pretty powerful tool. There’s a lot more that can done, and I highly recommended reading [Brendan Gregg’s eBPF tutorials][18]. - -I used uprobes to trace arbitrary functions in the D runtime. 
The pro of this is the freedom to do anything, but the cons are that I had to refer to the D runtime source code and manually deal with the D ABI. There’s also no guarantee that a script I write today will work with future versions of the runtime. Linux also supports making well-defined tracepoints in user code using a feature called [USDT][19]. That should let D code export stable tracepoints that can be used without worrying about the D ABI. I might do more experiments in future. - --------------------------------------------------------------------------------- - -via: https://theartofmachinery.com/2019/04/26/bpftrace_d_gc.html - -作者:[Simon Arneaud][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://theartofmachinery.com -[b]: https://github.com/lujun9972 -[1]: https://github.com/iovisor/bpftrace -[2]: https://en.wikipedia.org/wiki/Java_virtual_machine -[3]: http://dtrace.org/blogs/about/ -[4]: https://github.com/iovisor/bpftrace/blob/master/INSTALL.md -[5]: https://github.com/iovisor/bpftrace/blob/master/docs/reference_guide.md -[6]: https://github.com/iovisor/bpftrace/blob/master/docs/tutorial_one_liners.md -[7]: https://dlang.org/spec/abi.html#name_mangling -[8]: https://github.com/dlang-community/D-Scanner -[9]: https://github.com/dlang/druntime/ -[10]: https://dlang.org/spec/garbage.html#gc_config -[11]: https://dlang.org/phobos/core_memory.html#.GC.stats -[12]: https://github.com/dlang/druntime/tree/v2.081.1/src/gc -[13]: https://github.com/dlang/druntime/tree/v2.081.1/src/gc/impl/conservative -[14]: https://github.com/dlang/tools -[15]: https://dlang.org/spec/abi.html#parameters -[16]: https://en.wikipedia.org/wiki/Copy_elision -[17]: https://github.com/iovisor/bpftrace/issues/246 -[18]: http://www.brendangregg.com/blog/2019-01-01/learn-ebpf-tracing.html -[19]: https://lwn.net/Articles/753601/ diff 
--git a/sources/talk/20190430 Measuring the edge- Finding success with edge deployments.md b/sources/talk/20190430 Measuring the edge- Finding success with edge deployments.md deleted file mode 100644 index abc1d7dd0c..0000000000 --- a/sources/talk/20190430 Measuring the edge- Finding success with edge deployments.md +++ /dev/null @@ -1,73 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Measuring the edge: Finding success with edge deployments) -[#]: via: (https://www.networkworld.com/article/3391570/measuring-the-edge-finding-success-with-edge-deployments.html#tk.rss_all) -[#]: author: (Anne Taylor https://www.networkworld.com/author/Anne-Taylor/) - -Measuring the edge: Finding success with edge deployments -====== -To make the most of edge computing investments, it’s important to first understand objectives and expectations. -![iStock][1] - -Edge computing deployments are well underway as companies seek to better process the wealth of data being generated, for example, by Internet of Things (IoT) devices. - -So, what are the results? Plus, how can you ensure success with your own edge projects? - -**Measurements of success** - -The [use cases for edge computing deployments][2] vary widely, as do the business drivers and, ultimately, the benefits. - -Whether they’re seeking improved network or application performance, real-time data analytics, a better customer experience, or other efficiencies, enterprises are accomplishing their goals. Based on two surveys — one by [_Automation World_][3] and another by [Futurum Research][4] — respondents have reported: - - * Decreased network downtime - * Increased productivity - * Increased profitability/reduced costs - * Improved business processes - - - -Basically, success metrics can be bucketed into two categories: direct value propositions and cost reductions. 
In the former, companies are seeking results that measure revenue generation — such as improved digital experiences with customers and users. In the latter, metrics that prove the value of digitizing processes — like speed, quality, and efficacy — will demonstrate success with edge deployments. - -**Goalposts for success with edge** - -Edge computing deployments are underway. But before diving in, understand what’s driving these projects. - -According to the Futurum Research, 29% of respondents are currently investing in edge infrastructure, and 62% expect to adopt within the year. For these companies, the driving force has been the business, which expects operational efficiencies from these investments. Beyond that, there’s an expectation down the road to better align with IoT strategy. - -That being the case, it’s worth considering your business case before diving into edge. Ask: Are you seeking innovation and revenue generation amid digital transformation efforts? Or is your company looking for a low-risk, “test the waters” type of investment in edge? - -Next up, what type of edge project makes sense for your environment? Edge data centers fall into three categories: local devices for a specific purpose (e.g., an appliance for security systems or a gateway for cloud-to-premises storage); small local data centers (typically one to 10 racks for storage and processing); and regional data centers (10+ racks for large office spaces). - -Then, think about these best practices before talking with vendors: - - * Management: Especially for unmanned edge data centers, remote management is critical. You’ll need predictive alerts and a service contract that enables IT to be just as resilient as a regular data center. - * Security:Much of today’s conversation has been around data security. That starts with physical protection. Too many data breaches — including theft and employee error — are caused by physical access to IT assets. 
- * Standardization: There is no need to reinvent the wheel when it comes to edge data center deployments. Using standard components makes it easier for the internal IT team to deploy, manage, and maintain. - - - -Finally, consider the ecosystem. The end-to-end nature of edge requires not just technology integration, but also that all third parties work well together. A good ecosystem connects customers, partners, and vendors. - -Get further information to kickstart your edge computing environment at [APC.com][5]. - --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3391570/measuring-the-edge-finding-success-with-edge-deployments.html#tk.rss_all - -作者:[Anne Taylor][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Anne-Taylor/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2019/04/istock-912928582-100795093-large.jpg -[2]: https://www.networkworld.com/article/3391016/edge-computing-is-in-most-industries-future.html -[3]: https://www.automationworld.com/article/technologies/cloud-computing/its-not-edge-vs-cloud-its-both -[4]: https://futurumresearch.com/edge-computing-from-edge-to-enterprise/ -[5]: https://www.apc.com/us/en/solutions/business-solutions/edge-computing.jsp diff --git a/sources/talk/20190501 Yet another killer cloud quarter puts pressure on data centers.md b/sources/talk/20190501 Yet another killer cloud quarter puts pressure on data centers.md deleted file mode 100644 index d65fe448b4..0000000000 --- a/sources/talk/20190501 Yet another killer cloud quarter puts pressure on data centers.md +++ /dev/null @@ -1,92 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Yet another killer 
cloud quarter puts pressure on data centers) -[#]: via: (https://www.networkworld.com/article/3391465/another-strong-cloud-computing-quarter-puts-pressure-on-data-centers.html#tk.rss_all) -[#]: author: (Fredric Paul https://www.networkworld.com/author/Fredric-Paul/) - -Yet another killer cloud quarter puts pressure on data centers -====== -Continued strong growth from Amazon Web Services, Microsoft Azure, and Google Cloud Platform signals even more enterprises are moving to the cloud. -![Getty Images][1] - -You’d almost think I’d get tired of [writing this story over and over and over][2]… but the ongoing growth of cloud computing is too big a trend to ignore. - -Critically, the impressive growth numbers of the three leading cloud infrastructure providers—Amazon Web Services (AWS), Microsoft Azure, and Google Cloud Platform—doesn’t occur in a vacuum. It’s not just about new workloads being run in the cloud; it’s also about more and more enterprises moving existing workloads to the cloud from on-premises data centers. - -**[ Also read:[Is the cloud already killing the enterprise data center?][3] ]** - -To put these trends in perspective, let’s take a look at the results for all three vendors. - -### AWS keeps on trucking - -AWS remains by far the dominant player in the cloud infrastructure market, with a massive [$7.7 billion in quarterly sales][4] (an annual run rate of a whopping $30.8 billion). Even more remarkable, somehow AWS continues to grow revenue by almost 42% year over year. Oh, and that kind of growth is not just unique _this_ quarter; the unit has topped 40% revenue growth _every_ quarter since the beginning of 2017. (To be fair, the first quarter of 2018 saw an amazing 49% revenue growth.) - -And unlike many fast-growing tech companies, that incredible expansion isn’t being fueled by equally impressive losses. AWS earned a healthy $2.2 billion operating profit in the quarter, up 59% from the same period last year. One reason? 
[The company told analysts][5] it made big data center investments in 2016 and 2017, so it hasn’t had to do so more recently (it expects to boost spending on data centers later this year). The company [reportedly][6] described AWS revenue growth as “lumpy,” but it seems to me that the numbers merely vary between huge and even bigger. - -### Microsoft Azure grows even faster than AWS - -Sure, 41% growth is good, but [Microsoft’s quarterly Azure revenue][7] almost doubled that, jumping 73% year over year (fairly consistent with the previous—also stellar—quarter), helping the company exceed estimates for both sales and revenue and sparking a brief shining moment of a $1 trillion valuation for the company. Microsoft doesn’t break out Azure’s sales and revenue, but [the commercial cloud business, which includes Azure as well as other cloud businesses, grew 41% in the quarter to $9.6 billion][8]. - -It’s impossible to tell exactly how big Azure is, but it appears to be growing faster than AWS, though off a much smaller base. While some analysts reportedly say Azure is growing faster than AWS was at a similar stage in its development, that may not bear much significance because the addressable cloud market is now far larger than it used to be. - -According to [the New York Times][9], like AWS, Microsoft is also now reaping the benefits of heavy investments in new data centers around the world. And the Times credits Microsoft with “particular success” in [hybrid cloud installations][10], helping ease concerns among some slow-to-change enterprise about full-scale cloud adoption. - -**[ Also read:[Why hybrid cloud will turn out to be a transition strategy][11] ]** - -### Can Google Cloud Platform keep up? - -Even as the [overall quarterly numbers for Alphabet][12]—Google’s parent company—didn’t meet analysts’ revenue expectations (which sent the stock tumbling), Google Cloud Platform seems to have continued its strong growth. 
Alphabet doesn’t break out its cloud unit, but sales in Alphabet’s “Other Revenue” category—which includes cloud computing along with hardware—jumped 25% compared to the same period last year, hitting $5.4 billion. - -More telling, perhaps, Alphabet Chief Financial Officer Ruth Porat [reportedly][13] told analysts that "Google Cloud Platform remains one of the fastest growing businesses in Alphabet." [Porat also mentioned][14] that hiring in the cloud unit was so aggressive that it drove a 20% jump in Alphabet’s operating expenses! - -### Companies keep going cloud - -But the raw numbers tell only part of the story. All that growth means existing customers are spending more, but also that ever-increasing numbers of enterprises are abandoning their hassle and expense of running their data centers in favor of buying what they need from the cloud. - -**[ Also read:[Large enterprises abandon data centers for the cloud][15] ]** - -The New York Times quotes Amy Hood, Microsoft’s chief financial officer, explaining that, “You don’t really get revenue growth unless you have a usage growth, so this is customers deploying and using Azure.” And the Times notes that Microsoft has signed big deals with companies such as [Walgreens Boots Alliance][16] that combined Azure with other Microsoft cloud-based services. - -This growth is true in existing markets, and also includes new markets. For example, AWS just opened new regions in [Indonesia][17] and [Hong Kong][18]. - -**[ Now read:[After virtualization and cloud, what's left on premises?][19] ]** - -Join the Network World communities on [Facebook][20] and [LinkedIn][21] to comment on topics that are top of mind. 
- --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3391465/another-strong-cloud-computing-quarter-puts-pressure-on-data-centers.html#tk.rss_all - -作者:[Fredric Paul][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Fredric-Paul/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2019/02/cloud_comput_connect_blue-100787048-large.jpg -[2]: https://www.networkworld.com/article/3292935/cloud-computing-just-had-another-kick-ass-quarter.html -[3]: https://www.networkworld.com/article/3268384/is-the-cloud-already-killing-the-enterprise-data-center.html -[4]: https://www.latimes.com/business/la-fi-amazon-earnings-cloud-computing-aws-20190425-story.html -[5]: https://www.businessinsider.com/amazon-q1-2019-earnings-aws-advertising-retail-prime-2019-4 -[6]: https://www.computerweekly.com/news/252462310/Amazon-cautions-against-reading-too-much-into-slowdown-in-AWS-revenue-growth-rate -[7]: https://www.microsoft.com/en-us/Investor/earnings/FY-2019-Q3/press-release-webcast -[8]: https://www.cnbc.com/2019/04/24/microsoft-q3-2019-earnings.html -[9]: https://www.nytimes.com/2019/04/24/technology/microsoft-earnings.html -[10]: https://www.networkworld.com/article/3268448/what-is-hybrid-cloud-really-and-whats-the-best-strategy.html -[11]: https://www.networkworld.com/article/3238466/why-hybrid-cloud-will-turn-out-to-be-a-transition-strategy.html -[12]: https://abc.xyz/investor/static/pdf/2019Q1_alphabet_earnings_release.pdf?cache=8ac2b86 -[13]: https://www.forbes.com/sites/jilliandonfro/2019/04/29/google-alphabet-q1-earnings-2019/#52f5c8c733be -[14]: https://www.youtube.com/watch?v=31_KHdse_0Y -[15]: 
https://www.networkworld.com/article/3240587/large-enterprises-abandon-data-centers-for-the-cloud.html -[16]: https://www.walgreensbootsalliance.com -[17]: https://www.businesswire.com/news/home/20190403005931/en/AWS-Open-New-Region-Indonesia -[18]: https://www.apnews.com/Business%20Wire/57eaf4cb603e46e6b05b634d9751699b -[19]: https://www.networkworld.com/article/3232626/virtualization/extreme-virtualization-impact-on-enterprises.html -[20]: https://www.facebook.com/NetworkWorld/ -[21]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190502 Revolutionary data compression technique could slash compute costs.md b/sources/talk/20190502 Revolutionary data compression technique could slash compute costs.md deleted file mode 100644 index 00316946f6..0000000000 --- a/sources/talk/20190502 Revolutionary data compression technique could slash compute costs.md +++ /dev/null @@ -1,65 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Revolutionary data compression technique could slash compute costs) -[#]: via: (https://www.networkworld.com/article/3392716/revolutionary-data-compression-technique-could-slash-compute-costs.html#tk.rss_all) -[#]: author: (Patrick Nelson https://www.networkworld.com/author/Patrick-Nelson/) - -Revolutionary data compression technique could slash compute costs -====== -A new form of data compression, called Zippads, will create faster computer programs that could drastically lower the costs of computing. -![Kevin Stanchfield \(CC BY 2.0\)][1] - -There’s a major problem with today’s money-saving memory compression used for storing more data in less space. The issue is that computers store and run memory in predetermined blocks, yet many modern programs function and play out in variable chunks. - -The way it’s currently done is actually highly inefficient. 
That’s because the compressed programs, which use objects rather than evenly configured slabs of data, don’t match the space used to store and run them, explain scientists working on a revolutionary new compression system called Zippads. - -The answer, they say—and something that if it works would drastically reduce those inefficiencies, speed things up, and importantly, reduce compute costs—is to compress the varied objects and not the cache lines, as is the case now. Cache lines are fixed-size blocks of memory that are transferred to memory cache. - -**[ Read also:[How to deal with backup when you switch to hyperconverged infrastructure][2] ]** - -“Objects, not cache lines, are the natural unit of compression,” writes Po-An Tsai and Daniel Sanchez in their MIT Computer Science and Artificial Intelligence Laboratory (CSAIL) [paper][3] (pdf). - -They say object-based programs — of the kind used now everyday, such as Python — should be compressed based on their programmed object size, not on some fixed value created by traditional or even state-of-the art cached methods. - -The alternative, too, isn’t to recklessly abandon object-oriented programming just because it’s inefficient at using compression. One must adapt compression to that now common object-using code. - -The scientists claim their new system can increase the compression ratio 1.63 times and improve performance by 17%. It’s the “first compressed memory hierarchy designed for object-based applications,” they say. - -### The benefits of compression - -Compression is a favored technique for making computers more efficient. The main advantage over simply adding more memory is that costs are lowered significantly—you don’t need to add increasing physical main memory hardware because you’re cramming more data into existing. - -However, to date, hardware memory compression has been best suited to more old-school large blocks of data, not the “random, fine-grained memory accesses,” the team explains. 
It’s not great at accessing small pieces of data, such as words, for example. - -### How the Zippads compression system works - -In Zippads, as the new system is called, stored object hierarchical levels (called “pads”) are located on-chip and are directly accessed. The different levels (pads) have changing speed grades, with newly referenced objects being placed in the fastest pad. As a pad fills up, it begins the process of evicting older, not-so-active objects and ultimately recycles the unused code that is taking up desirable fast space and isn’t being used. Cleverly, at the fast level, the code parts aren’t even compressed, but as they prove their non-usefulness they get kicked down to compressed, slow-to-access, lower-importance pads—and are brought back up as necessary. - -Zippads would “see computers that can run much faster or can run many more apps at the same speeds,” an[ MIT News][4] article says. “Each application consumes less memory, it runs faster, so a device can support more applications within its allotted memory.” Bandwidth is freed up, in other words. - -“All computer systems would benefit from this,” Sanchez, a professor of computer science and electrical engineering, says in the article. “Programs become faster because they stop being bottlenecked by memory bandwidth.” - -Join the Network World communities on [Facebook][5] and [LinkedIn][6] to comment on topics that are top of mind. 
- --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3392716/revolutionary-data-compression-technique-could-slash-compute-costs.html#tk.rss_all - -作者:[Patrick Nelson][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Patrick-Nelson/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2019/02/memory-100787327-large.jpg -[2]: https://www.networkworld.com/article/3389396/how-to-deal-with-backup-when-you-switch-to-hyperconverged-infrastructure.html -[3]: http://people.csail.mit.edu/poantsai/papers/2019.zippads.asplos.pdf -[4]: http://news.mit.edu/2019/hardware-data-compression-0416 -[5]: https://www.facebook.com/NetworkWorld/ -[6]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190507 SD-WAN is Critical for IoT.md b/sources/talk/20190507 SD-WAN is Critical for IoT.md deleted file mode 100644 index a5867d5afd..0000000000 --- a/sources/talk/20190507 SD-WAN is Critical for IoT.md +++ /dev/null @@ -1,74 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (SD-WAN is Critical for IoT) -[#]: via: (https://www.networkworld.com/article/3393445/sd-wan-is-critical-for-iot.html#tk.rss_all) -[#]: author: (Rami Rammaha https://www.networkworld.com/author/Rami-Rammaha/) - -SD-WAN is Critical for IoT -====== - -![istock][1] - -The Internet of Things (IoT) is everywhere and its use is growing fast. IoT is used by local governments to build smart cities. It’s used to build smart businesses. And, consumers are benefitting as it’s built into smart homes and smart cars. Industry analyst first estimates that over 20 billion IoT devices will be connected by 2020. 
That’s a 2.5x increase from the more than 8 billion connected devices in 2017*. - -Manufacturing companies have the highest IoT spend to date of industries while the health care market is experiencing the highest IoT growth. By 2020, 50 percent of IoT spending will be driven by manufacturing, transportation and logistics, and utilities. - -IoT growth is being fueled by the promise of analytical data insights that will ultimately yield greater efficiencies and enhanced customer satisfaction. The top use cases driving IoT growth are self-optimizing production, predictive maintenance and automated inventory management. - -From a high-level view, the IoT architecture includes sensors that collect and transmit data (i.e. temperature, speed, humidity, video feed, pressure, IR, proximity, etc.) from “things” like cars, trucks, machines, etc. that are connected over the internet. Data collected is then analyzed, translating raw data into actionable information. Businesses can then act on this information. And at more advanced levels, machine learning and AI algorithms learn and adapt to this information and automatically respond at a system level. - -IDC estimates that by 2025, over 75 billion IoT devices* will be connected. By that time, nearly a quarter of the world’s projected 163 zettabytes* (163 trillion gigabytes) of data will have been created in real-time, and the vast majority of that data will have been created by IoT devices. This massive amount of data will drive an exponential increase in traffic on the network infrastructure requiring massive scalability. Also, this increasing amount of data will require tremendous processing power to mine it and transform it into actionable intelligence. In parallel, security risks will continue to increase as there will be many more potential entry points onto the network. Lastly, management of the overall infrastructure will require better orchestration of policies as well as the means to streamline on-going operations. 
- -### **How does SD-WAN enable IoT business initiatives?** - -There are three key elements that an [SD-WAN][2] platform must include: - - 1. **Visibility** : Real-time visibility into the network is key. It takes the guesswork out of rapid problem resolution, enabling organizations to run more efficiently by accelerating troubleshooting and applying preventive measures. Furthermore, a CIO is able to pull metrics and see bandwidth consumed by any IoT application. - 2. **Security** : IoT traffic must be isolated from other application traffic. IT must prevent – or at least reduce – the possible attack surface that may be exposed to IoT device traffic. Also, the network must continue delivering other application traffic in the event of a melt down on a WAN link caused by a DDoS attack. - 3. **Agility** : With the increased number of connected devices, applications and users, a comprehensive, intelligent and centralized orchestration approach that continuously adapts to deliver the best experience to the business and users is critical to success. - - - -### Key Silver Peak EdgeConnect SD-WAN capabilities for IoT - -1\. Silver Peak has an [embedded real-time visibility engine][3] allowing IT to gain complete observability into the performance attributes of the network and applications in real-time. The [EdgeConnect][4] SD-WAN appliances deployed in branch offices send information to the centralized [Unity Orchestrator™][5]. Orchestrator collects the data and presents it in a comprehensive management dashboard via customizable widgets. These widgets provide a wealth of operational data including a health heatmap for every SD-WAN appliance deployed, flow counts, active tunnels, logical topologies, top talkers, alarms, bandwidth consumed by each application and location, latency and jitter and much more. 
Furthermore, the platform maintains weeks’ worth of data with context allowing IT to playback and see what has transpired at a specific time and location, similar to a DVR. - -![Click to read Solution Brief][6] - -2\. The second set of key capabilities center around security and end-to-end zone-based segmentation. An IoT traffic zone may be created on the LAN or branch side. IoT traffic is then mapped all the way across the WAN to the data center or cloud where the data will be processed. Zone-based segmentation is accomplished in a simplified and automated way within the Orchestrator GUI. In cases where further traffic inspection is required, IT can simply service chain to another security service. There are several key benefits realized by this approach. IT can easily and quickly apply segmentation policies; segmentation mitigates the attack surface; and IT can save on additional security investments. - -![***Click to read Solution Brief ***][7] - -3\. EdgeConnect employs machine learning at the global level where internet sensors and third-party sensors feed into the cloud portal software. The software tracks the geolocation of all IP addresses and IP reputation, distributing signals down to the Unity Orchestrator running in each individual customer’s enterprise. In turn, it is speaking to the edge devices sitting in the branch offices. There, distributed learning is done by looking at the first packet, making an inference based on the first packet what the application is. So, if seeing that 100 times now, every time packets come from that particular IP address and turns out to be an IoT, we can make an inference that IP belongs to IoT application. In parallel, we’re using a mix of traditional techniques to validate the identification of the application. All this combined with other multi-level intelligence enables simple and automated policy orchestration across a large number of devices and applications. 
- -![***Click to read Solution Brief ***][8] - -SD-WAN plays a foundational role as businesses continue to embrace IoT, but choosing the right SD-WAN platform is even more critical to ensuring businesses are ultimately able to fully optimize their operations. - -* Source: [IDC][9] - --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3393445/sd-wan-is-critical-for-iot.html#tk.rss_all - -作者:[Rami Rammaha][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Rami-Rammaha/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2019/05/istock-1019172426-100795551-large.jpg -[2]: https://www.silver-peak.com/sd-wan/sd-wan-explained -[3]: https://www.silver-peak.com/resource-center/simplify-sd-wan-operations-greater-visibility -[4]: https://www.silver-peak.com/products/unity-edge-connect -[5]: https://www.silver-peak.com/products/unity-orchestrator -[6]: https://images.idgesg.net/images/article/2019/05/1_simplify-100795554-large.jpg -[7]: https://images.idgesg.net/images/article/2019/05/2_centralize-100795555-large.jpg -[8]: https://images.idgesg.net/images/article/2019/05/3_increase-100795558-large.jpg -[9]: https://www.information-age.com/data-forecast-grow-10-fold-2025-123465538/ diff --git a/sources/talk/20190507 Some IT pros say they have too much data.md b/sources/talk/20190507 Some IT pros say they have too much data.md deleted file mode 100644 index a645ae52e9..0000000000 --- a/sources/talk/20190507 Some IT pros say they have too much data.md +++ /dev/null @@ -1,66 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Some IT pros say they have too much data) -[#]: via: 
(https://www.networkworld.com/article/3393205/some-it-pros-say-they-have-too-much-data.html#tk.rss_all) -[#]: author: (Andy Patrizio https://www.networkworld.com/author/Andy-Patrizio/) - -Some IT pros say they have too much data -====== -IT professionals have too many data sources to count, and they spend a huge amount of time wrestling that data into usable condition, a survey from Ivanti finds. -![Getty Images][1] - -A new survey has found that a growing number of IT professionals have too many data sources to even count, and they are spending more and more time just wrestling that data into usable condition. - -Ivanti, an IT asset management firm, [surveyed 400 IT professionals on their data situation][2] and found IT faces numerous challenges when it comes to siloes, data, and implementation. The key takeaway is data overload is starting to overwhelm IT managers and data lakes are turning into data oceans. - -**[ Read also:[Understanding mass data fragmentation][3] | Get daily insights [Sign up for Network World newsletters][4] ]** - -Among the findings from Ivanti's survey: - - * Fifteen percent of IT professionals say they have too many data sources to count, and 37% of professionals said they have about 11-25 different sources for data. - * More than half of IT professionals (51%) report they have to work with their data for days, weeks or more before it's actionable. - * Only 10% of respondents said the data they receive is actionable within minutes. - * One in three respondents said they have the resources to act on their data, but more than half (52%) said they only sometimes have the resources. - - - -“It’s clear from the results of this survey that IT professionals are in need of a more unified approach when working across organizational departments and resulting silos,” said Duane Newman, vice president of product management at Ivanti, in a statement. 
- -### The problem with siloed data - -The survey found siloed data represents a number of problems and challenges. Three key priorities suffer the most: automation (46%), user productivity and troubleshooting (42%), and customer experience (41%). The survey also found onboarding/offboarding suffers the least (20%) due to siloes, so apparently HR and IT are getting things right. - -In terms of what they want from real-time insight, about 70% of IT professionals said their security status was the top priority over other issues. Respondents were least interested in real-time insights around warranty data. - -### Data lake method a recipe for disaster - -I’ve been immersed in this subject for other publications for some time now. Too many companies are hoovering up data for the sake of collecting it with little clue as to what they will do with it later. One thing you have to say about data warehouses, the schema on write at least forces you to think about what you are collecting and how you might use it because you have to store it away in a usable form. - -The new data lake method is schema on read, meaning you filter/clean it when you read it into an application, and that’s just a recipe for disaster. If you are looking at data collected a month or a year ago, do you even know what it all is? Now you have to apply schema to data and may not even remember collecting it. - -Too many people think more data is good when it isn’t. You just drown in it. When you reach a point of having too many data sources to count, you’ve gone too far and are not going to get insight. You’re going to get overwhelmed. Collect data you know you can use. Otherwise you are wasting petabytes of disk space. - -Join the Network World communities on [Facebook][5] and [LinkedIn][6] to comment on topics that are top of mind. 
- --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3393205/some-it-pros-say-they-have-too-much-data.html#tk.rss_all - -作者:[Andy Patrizio][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Andy-Patrizio/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2018/03/database_data-center_futuristic-technology-100752012-large.jpg -[2]: https://www.ivanti.com/blog/survey-it-professionals-data-sources -[3]: https://www.networkworld.com/article/3262145/lan-wan/customer-reviews-top-remote-access-tools.html#nww-fsb -[4]: https://www.networkworld.com/newsletters/signup.html#nww-fsb -[5]: https://www.facebook.com/NetworkWorld/ -[6]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190509 When it comes to uptime, not all cloud providers are created equal.md b/sources/talk/20190509 When it comes to uptime, not all cloud providers are created equal.md deleted file mode 100644 index 8f9db8a94b..0000000000 --- a/sources/talk/20190509 When it comes to uptime, not all cloud providers are created equal.md +++ /dev/null @@ -1,83 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (When it comes to uptime, not all cloud providers are created equal) -[#]: via: (https://www.networkworld.com/article/3394341/when-it-comes-to-uptime-not-all-cloud-providers-are-created-equal.html#tk.rss_all) -[#]: author: (Zeus Kerravala https://www.networkworld.com/author/Zeus-Kerravala/) - -When it comes to uptime, not all cloud providers are created equal -====== -Cloud uptime is critical today, but vendor-provided data can be confusing. Here's an analysis of how AWS, Google Cloud and Microsoft Azure compare. 
-![Getty Images][1] - -The cloud is not just important; it's mission-critical for many companies. More and more IT and business leaders I talk to look at public cloud as a core component of their digital transformation strategies — using it as part of their hybrid cloud or public cloud implementation. - -That raises the bar on cloud reliability, as a cloud outage means important services are not available to the business. If this is a business-critical service, the company may not be able to operate while that key service is offline. - -Because of the growing importance of the cloud, it’s critical that buyers have visibility into the reliability number for the cloud providers. The challenge is the cloud providers don't disclose the disruptions in a consistent manner. In fact, some are confusing to the point where it’s difficult to glean any kind of meaningful conclusion. - -**[ RELATED:[What IT pros need to know about Azure Stack][2] and [Which cloud performs better, AWS, Azure or Google?][3] | Get regularly scheduled insights: [Sign up for Network World newsletters][4] ]** - -### Reported cloud outage times don't always reflect actual downtime - -Microsoft Azure and Google Cloud Platform (GCP) both typically provide information on date and time, but only high-level data on the services affected and sparse information on regional impact. The problem with that is it’s difficult to get a sense of overall reliability. For instance, if Azure reports a one-hour outage that impacts five services in three regions, the website might show just a single hour. In actuality, that’s 15 hours of total downtime. - -Between Azure, GCP and Amazon Web Services (AWS), [Azure is the most obscure][5], as it provides the least amount of detail. [GCP does a better job of providing detail][6] at the service level but tends to be obscure with regional information. Sometimes it’s very clear as to what services are unavailable, and other times it’s not. 
- -[AWS has the most granular reporting][7], as it shows every service in every region. If an incident occurs that impacts three services, all three of those services would light up red. If those were unavailable for one hour, AWS would record three hours of downtime. - -Another inconsistency between the cloud providers is the amount of historical downtime data that is available. At one time, all three of the cloud vendors provided a one-year view into outages. GCP and AWS still do this, but Azure moved to only a [90-day view][5] sometime over the past year. - -### Azure has significantly higher downtime than GCP and AWS - -The next obvious question is who has the most downtime? To answer that, I worked with a third-party firm that has continually collected downtime information directly from the vendor websites. I have personally reviewed the information and can validate its accuracy. Based on the vendors own reported numbers, from the beginning of 2018 through May 3, 2019, AWS leads the pack with only 338 hours of downtime, followed by GCP closely at 361. Microsoft Azure has a whopping total of 1,934 hours of self-reported downtime. - -![][8] - -A few points on these numbers. First, this is an aggregation of the self-reported data from the vendors' websites, which isn’t the “true” number, as regional information or service granularity is sometimes obscured. If a service is unavailable for an hour and it’s reported for an hour on the website but it spanned five regions, correctly five hours should have been used. But for this calculation, we used only one hour because that is what was self-reported. - -Because of this, the numbers are most favorable to Microsoft because they provide the least amount of regional information. The numbers are least favorable to AWS because they provide the most granularity. Also, I believe AWS has the most services in most regions, so they have more opportunities for an outage. 
- -We had considered normalizing the data, but that would require a significant amount of work to destruct the downtime on a per service per region basis. I may choose to do that in the future, but for now, the vendor-reported view is a good indicator of relative performance. - -Another important point is that only infrastructure as a service (IaaS) services were used to calculate downtime. If Google Street View or Bing Maps went down, most businesses would not care, so it would have been unfair to roll those number in. - -### SLAs do not correlate to reliability - -Given the importance of cloud services today, I would like to see every cloud provider post a 12-month running total of downtime somewhere on their website so customers can do an “apples to apples” comparison. This obviously isn’t the only factor used in determining which cloud provider to use, but it is one of the more critical ones. - -Also, buyers should be aware that there is a big difference between service-level agreements (SLAs) and downtime. A cloud operator can promise anything they want, even provide a 100% SLA, but that just means they need to reimburse the business when a service isn’t available. Most IT leaders I have talked to say the few bucks they get back when a service is out is a mere fraction of what the outage actually cost them. - -### Measure twice and cut once to minimize business disruption - -If you’re reading this and you’re researching cloud services, it’s important to not just make the easy decision of buying for convenience. Many companies look at Azure because Microsoft gives away Azure credits as part of the Enterprise Agreement (EA). I’ve interviewed several companies that took the path of least resistance, but they wound up disappointed with availability and then switched to AWS or GCP later, which can have a disruptive effect. 
- -I’m certainly not saying to not buy Microsoft Azure, but it is important to do your homework to understand the historical performance of the services you’re considering in the regions you need them. The information on the vendor websites may not tell the full picture, so it’s important to do the necessary due diligence to ensure you understand what you’re buying before you buy it. - -Join the Network World communities on [Facebook][9] and [LinkedIn][10] to comment on topics that are top of mind. - --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3394341/when-it-comes-to-uptime-not-all-cloud-providers-are-created-equal.html#tk.rss_all - -作者:[Zeus Kerravala][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Zeus-Kerravala/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2019/02/cloud_comput_connect_blue-100787048-large.jpg -[2]: https://www.networkworld.com/article/3208029/azure-stack-microsoft-s-private-cloud-platform-and-what-it-pros-need-to-know-about-it -[3]: https://www.networkworld.com/article/3319776/the-network-matters-for-public-cloud-performance.html -[4]: https://www.networkworld.com/newsletters/signup.html -[5]: https://azure.microsoft.com/en-us/status/history/ -[6]: https://status.cloud.google.com/incident/appengine/19008 -[7]: https://status.aws.amazon.com/ -[8]: https://images.idgesg.net/images/article/2019/05/public-cloud-downtime-100795948-large.jpg -[9]: https://www.facebook.com/NetworkWorld/ -[10]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190513 Top auto makers rely on cloud providers for IoT.md b/sources/talk/20190513 Top auto makers rely on cloud providers for IoT.md deleted file mode 100644 index 
5adf5f65a7..0000000000 --- a/sources/talk/20190513 Top auto makers rely on cloud providers for IoT.md +++ /dev/null @@ -1,53 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Top auto makers rely on cloud providers for IoT) -[#]: via: (https://www.networkworld.com/article/3395137/top-auto-makers-rely-on-cloud-providers-for-iot.html) -[#]: author: (Jon Gold https://www.networkworld.com/author/Jon-Gold/) - -Top auto makers rely on cloud providers for IoT -====== - -For the companies looking to implement the biggest and most complex [IoT][1] setups in the world, the idea of pairing up with [AWS][2], [Google Cloud][3] or [Azure][4] seems to be one whose time has come. Within the last two months, BMW and Volkswagen have both announced large-scale deals with Microsoft and Amazon, respectively, to help operate their extensive network of operational technology. - -According to Alfonso Velosa, vice president and analyst at Gartner, part of the impetus behind those two deals is that the automotive sector fits in very well with the architecture of the public cloud. Public clouds are great at collecting and processing data from a diverse array of different sources, whether they’re in-vehicle sensors, dealerships, mechanics, production lines or anything else. - -**[ RELATED:[What hybrid cloud means in practice][5]. | Get regularly scheduled insights by [signing up for Network World newsletters][6]. ]** - -“What they’re trying to do is create a broader ecosystem. They think they can leverage the capabilities from these folks,” Velosa said. - -### Cloud providers as IoT partners - -The idea is automated analytics for service and reliability data, manufacturing and a host of other operational functions. 
And while the full realization of that type of service is still very much a work in progress, it has clear-cut advantages for big companies – a skilled partner handling tricky implementation work, built-in capability for sophisticated analytics and security, and, of course, the ability to scale up in a big way. - -Hence, the structure of the biggest public clouds has upside for many large-scale IoT deployments, not just the ones taking place in the auto industry. The cloud giants have vast infrastructures, with multiple points of presence all over the world. - -To continue reading this article register now - -[Get Free Access][7] - -[Learn More][8] Existing Users [Sign In][7] - --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3395137/top-auto-makers-rely-on-cloud-providers-for-iot.html - -作者:[Jon Gold][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Jon-Gold/ -[b]: https://github.com/lujun9972 -[1]: https://www.networkworld.com/article/3207535/what-is-iot-how-the-internet-of-things-works.html -[2]: https://www.networkworld.com/article/3324043/aws-does-hybrid-cloud-with-on-prem-hardware-vmware-help.html -[3]: https://www.networkworld.com/article/3388218/cisco-google-reenergize-multicloudhybrid-cloud-joint-development.html -[4]: https://www.networkworld.com/article/3385078/microsoft-introduces-azure-stack-for-hci.html -[5]: https://www.networkworld.com/article/3249495/what-hybrid-cloud-mean-practice -[6]: https://www.networkworld.com/newsletters/signup.html -[7]: javascript:// -[8]: /learn-about-insider/ diff --git a/sources/talk/20190514 Mobility and SD-WAN, Part 1- SD-WAN with 4G LTE is a Reality.md b/sources/talk/20190514 Mobility and SD-WAN, Part 1- SD-WAN with 4G LTE is a Reality.md deleted file mode 
100644 index 1ecd68fa41..0000000000 --- a/sources/talk/20190514 Mobility and SD-WAN, Part 1- SD-WAN with 4G LTE is a Reality.md +++ /dev/null @@ -1,64 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Mobility and SD-WAN, Part 1: SD-WAN with 4G LTE is a Reality) -[#]: via: (https://www.networkworld.com/article/3394866/mobility-and-sd-wan-part-1-sd-wan-with-4g-lte-is-a-reality.html) -[#]: author: (Francisca Segovia ) - -Mobility and SD-WAN, Part 1: SD-WAN with 4G LTE is a Reality -====== - -![istock][1] - -Without a doubt, 5G — the fifth generation of mobile wireless technology — is the hottest topic in wireless circles today. You can’t throw a stone without hitting 5G news. While telecommunications providers are in a heated competition to roll out 5G, it’s important to reflect on current 4G LTE (Long Term Evolution) business solutions as a preview of what we have learned and what’s possible. - -This is part one of a two-part blog series that will explore the [SD-WAN][2] journey through the evolution of these wireless technologies. - -### **Mobile SD-WAN is a reality** - -4G LTE commercialization continues to expand. According to [the GSM (Groupe Spéciale Mobile) Association][3], 710 operators have rolled out 4G LTE in 217 countries, reaching 83 percent of the world’s population. The evolution of 4G is transforming the mobile industry and is setting the stage for the advent of 5G. - -Mobile connectivity is increasingly integrated with SD-WAN, along with MPLS and broadband WAN services today. 4G LTE represents a very attractive transport alternative, as a backup or even an active member of the WAN transport mix to connect users to critical business applications. And in some cases, 4G LTE might be the only choice in locations where fixed lines aren’t available or reachable. 
Furthermore, an SD-WAN can optimize 4G LTE connectivity and bring new levels of performance and availability to mobile-based business use cases by selecting the best path available across several 4G LTE connections. - -### **Increasing application performance and availability with 4G LTE** - -Silver Peak has partnered with [BEC Technologies][4] to create a joint solution that enables customers to incorporate one or more low-cost 4G LTE services into any [Unity EdgeConnect™][5] SD-WAN edge platform deployment. All the capabilities of the EdgeConnect platform are supported across LTE links including packet-based link bonding, dynamic path control, path conditioning along with the optional [Unity Boost™ WAN Optimization][6] performance pack. This ensures always-consistent, always-available application performance even in the event of an outage or degraded service. - -EdgeConnect also incorporates sophisticated NAT traversal technology that eliminates the requirement for provisioning the LTE service with extra-cost static IP addresses. The Silver Peak [Unity Orchestrator™][7] management software enables the prioritization of LTE bandwidth usage based on branch and application requirements – active-active or backup-only. This solution is ideal in retail point-of-sale and other deployment use cases where always-available WAN connectivity is critical for the business. - -### **Automated SD-WAN enables innovative services** - -An example of an innovative mobile SD-WAN service is [swyMed’s DOT Telemedicine Backpack][8] powered by the EdgeConnect [Ultra Small][9] hardware platform. This integrated telemedicine solution enables first responders to connect to doctors and communicate patient vital statistics and real-time video anywhere, any time, greatly improving and expediting care for emergency patients. 
Using a lifesaving backpack provisioned with two LTE services from different carriers, EdgeConnect continuously monitors the underlying 4G LTE services for packet loss, latency and jitter. In the case of transport failure or brownout, EdgeConnect automatically initiates a sub-second failover so that voice, video and data connections continue without interruption over the remaining active 4G service. By bonding the two LTE links together with the EdgeConnect SD-WAN, swyMed can achieve an aggregate signal quality in excess of 90 percent, bringing mobile telemedicine to areas that would have been impossible in the past due to poor signal strength. - -To learn more about SD-WAN and the unique advantages that SD-WAN provides to enterprises across all industries, visit the [SD-WAN Explained][2] page on our website. - -### **Prepare for the 5G future** - -In summary, the adoption of 4G LTE is a reality. Service providers are taking advantage of the distinct benefits of SD-WAN to offer managed SD-WAN services that leverage 4G LTE. - -As the race for the 5G gains momentum, service providers are sure to look for ways to drive new revenue streams to capitalize on their initial investments. Stay tuned for part 2 of this 2-blog series where I will discuss how SD-WAN is one of the technologies that can help service providers to transition from 4G to 5G and enable the monetization of a new wave of managed 5G services. 
- --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3394866/mobility-and-sd-wan-part-1-sd-wan-with-4g-lte-is-a-reality.html - -作者:[Francisca Segovia][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2019/05/istock-952414660-100796279-large.jpg -[2]: https://www.silver-peak.com/sd-wan/sd-wan-explained -[3]: https://www.gsma.com/futurenetworks/resources/all-ip-statistics/ -[4]: https://www.silver-peak.com/resource-center/edgeconnect-4glte-solution-bec-technologies -[5]: https://www.silver-peak.com/products/unity-edge-connect -[6]: https://www.silver-peak.com/products/unity-boost -[7]: https://www.silver-peak.com/products/unity-orchestrator -[8]: https://www.silver-peak.com/resource-center/mobile-telemedicine-helps-save-lives-streaming-real-time-clinical-data-and-patient -[9]: https://www.silver-peak.com/resource-center/edgeconnect-us-ec-us-specification-sheet diff --git a/sources/talk/20190515 Extreme addresses networked-IoT security.md b/sources/talk/20190515 Extreme addresses networked-IoT security.md deleted file mode 100644 index 1ad756eded..0000000000 --- a/sources/talk/20190515 Extreme addresses networked-IoT security.md +++ /dev/null @@ -1,71 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Extreme addresses networked-IoT security) -[#]: via: (https://www.networkworld.com/article/3395539/extreme-addresses-networked-iot-security.html) -[#]: author: (Michael Cooney https://www.networkworld.com/author/Michael-Cooney/) - -Extreme addresses networked-IoT security -====== -The ExtremeAI security app features machine learning that can understand typical behavior of IoT devices and 
alert when it finds anomalies. -![Getty Images][1] - -[Extreme Networks][2] has taken the wraps off a new security application it says will use machine learning and artificial intelligence to help customers effectively monitor, detect and automatically remediate security issues with networked IoT devices. - -The application – ExtremeAI security—features machine-learning technology that can understand typical behavior of IoT devices and automatically trigger alerts when endpoints act in unusual or unexpected ways, Extreme said. - -**More about edge networking** - - * [How edge networking and IoT will reshape data centers][3] - * [Edge computing best practices][4] - * [How edge computing can help secure the IoT][5] - - - -Extreme said that the ExtremeAI Security application can tie into all leading threat intelligence feeds, and had close integration with its existing [Extreme Workflow Composer][6] to enable automatic threat mitigation and remediation. - -The application integrates the company’s ExtremeAnalytics application which lets customers view threats by severity, category, high-risk endpoints and geography. An automated ticketing feature integrates with variety of popular IT tools such as Slack, Jira, and ServiceNow, and the application interoperates with many popular security tools, including existing network taps, the vendor stated. - -There has been an explosion of new endpoints ranging from million-dollar smart MRI machines to five-dollar sensors, which creates a complex and difficult job for network and security administrators, said Abby Strong, vice president of product marketing for Extreme. “We need smarter, secure and more self-healing networks especially where IT cybersecurity resources are stretched to the limit.” - -Extreme is trying to address an issue that is important to enterprise-networking customers: how to get actionable, usable insights as close to real-time as possible, said Rohit Mehra, Vice President of Network Infrastructure at IDC. 
“Extreme is melding automation, analytics and security that can look at network traffic patterns and allow the system to take action when needed.” - -The ExtremeAI application, which will be available in October, is but one layer of IoT security Extreme offers. Already on the market, its [Defender for IoT][7] package, which includes a Defender application and adapter, lets customers monitor, set policies and isolate IoT devices across an enterprise. - -**[[Prepare to become a Certified Information Security Systems Professional with this comprehensive online course from PluralSight. Now offering a 10-day free trial!][8] ]** - -The Extreme AI and Defender packages are now part of what the company calls Extreme Elements, which is a menu of its new and existing Smart OmniEdge, Automated Campus and Agile Data Center software, hardware and services that customers can order to build a manageable, secure system. - -Aside from the applications, the Elements include Extreme Management Center, the company’s network management software; the company’s x86-based intelligent appliances, including the ExtremeCloud Appliance; and [ExtremeSwitching X465 premium][9], a stackable multi-rate gigabit Ethernet switch. - -The switch and applications are just the beginning of a very busy time for Extreme. In its [3Q earnings cal][10]l this month company CEO Ed Meyercord noted Extreme was in the “early stages of refreshing 70 percent of our products” and seven different products will become generally available this quarter – a record for Extreme, he said. - -Join the Network World communities on [Facebook][11] and [LinkedIn][12] to comment on topics that are top of mind. 
- --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3395539/extreme-addresses-networked-iot-security.html - -作者:[Michael Cooney][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Michael-Cooney/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2019/02/iot_security_tablet_conference_digital-100787102-large.jpg -[2]: https://www.networkworld.com/article/3289508/extreme-facing-challenges-girds-for-future-networking-battles.html -[3]: https://www.networkworld.com/article/3291790/data-center/how-edge-networking-and-iot-will-reshape-data-centers.html -[4]: https://www.networkworld.com/article/3331978/lan-wan/edge-computing-best-practices.html -[5]: https://www.networkworld.com/article/3331905/internet-of-things/how-edge-computing-can-help-secure-the-iot.html -[6]: https://www.extremenetworks.com/product/workflow-composer/ -[7]: https://www.extremenetworks.com/product/extreme-defender-for-iot/ -[8]: https://pluralsight.pxf.io/c/321564/424552/7490?u=https%3A%2F%2Fwww.pluralsight.com%2Fpaths%2Fcertified-information-systems-security-professional-cisspr -[9]: https://community.extremenetworks.com/extremeswitching-exos-223284/extremexos-30-2-and-smart-omniedge-premium-x465-switches-are-now-available-7823377 -[10]: https://seekingalpha.com/news/3457137-extreme-networks-minus-15-percent-quarterly-miss-light-guidance -[11]: https://www.facebook.com/NetworkWorld/ -[12]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190516 Will 5G be the first carbon-neutral network.md b/sources/talk/20190516 Will 5G be the first carbon-neutral network.md deleted file mode 100644 index decacfac5d..0000000000 --- a/sources/talk/20190516 Will 5G be the first carbon-neutral 
network.md +++ /dev/null @@ -1,88 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Will 5G be the first carbon-neutral network?) -[#]: via: (https://www.networkworld.com/article/3395465/will-5g-be-the-first-carbon-neutral-network.html) -[#]: author: (Patrick Nelson https://www.networkworld.com/author/Patrick-Nelson/) - -Will 5G be the first carbon-neutral network? -====== -Increased energy consumption in new wireless networks could become ecologically unsustainable. Engineers think they have solutions that apply to 5G, but all is not certain. -![Dushesina/Getty Images][1] - -If wireless networks transfer 1,000 times more data, does that mean they will use 1,000 times more energy? It probably would with the old 4G LTE wireless technologies— LTE doesn’t have much of a sleep-standby. But with 5G, we might have a more energy-efficient option. - -More customers want Earth-friendly options, and engineers are now working on how to achieve it — meaning 5G might introduce the first zero-carbon networks. It’s not all certain, though. - -**[ Related:[What is 5G wireless? And how it will change networking as we know it][2] ]** - -“When the 4G technology for wireless communication was developed, not many people thought about how much energy is consumed in transmitting bits of information,” says Emil Björnson, associate professor of communication systems at Linkoping University, [in an article on the school’s website][3]. - -Standby was never built into 4G, Björnson explains. Reasons include overbuilding — the architects wanted to ensure connections didn’t fail, so they just kept the power up. The downside to that redundancy was that almost the same amount of energy is used whether the system is transmitting data or not. - -“We now know that this is not necessary,” Björnson says. 5G networks don’t use much power during periods of low traffic, and that reduces power consumption. 
- -Björnson says he knows how to make future-networks — those 5G networks that one day may become the enterprise broadband replacement — super efficient even when there is heavy use. Massive-MIMO (multiple-in, multiple-out) antennas are the answer, he says. That’s hundreds of connected antennas taking advantage of multipath. - -I’ve written before about some of Björnson's Massive-MIMO ideas. He thinks [Massive-MIMO will remove all capacity ceilings from wireless networks][4]. However, he now adds calculations to his research that he claims prove that the Massive-MIMO antenna technology will also reduce power use. He and his group are actively promoting their academic theories in a paper ([pdf][5]). - -**[[Take this mobile device management course from PluralSight and learn how to secure devices in your company without degrading the user experience.][6] ]** - -### Nokia's plan to reduce wireless networks' CO2 emissions - -Björnson's isn’t the only 5G-aimed eco-concept out there. Nokia points out that it isn't just radios transmitting that use electricity. Cooling is actually the main electricity hog, says the telcommunications company, which is one of the world’s principal manufacturers of mobile network equipment. - -Nokia says the global energy cost of Radio Access Networks (RANs) in 2016 (the last year numbers were available), which includes base transceiver stations (BTSs) needed by mobile networks, was around $80 billion. That figure increases with more users coming on stream, something that’s probable. Of the BTS’s electricity use, about 90% “converts to waste heat,” [Harry Kuosa, a marketing executive, writes on Nokia’s blog][7]. And base station sites account for about 80% of a mobile network’s entire energy use, Nokia expands on its website. 
- -“A thousand-times more traffic that creates a thousand-times higher energy costs is unsustainable,” Nokia says in its [ebook][8] on the subject, “Turning the zero carbon vision into business opportunity,” and it’s why Nokia plans liquid-cooled 5G base stations among other things, including chip improvements. It says the liquid-cooling can reduce CO2 emissions by up to 80%. - -### Will those ideas work? - -Not all agree power consumption can be reduced when implementing 5G, though. Gabriel Brown of Heavy Reading, quotes [in a tweet][9] a China Mobile executive as saying that 5G BTSs will use three times as much power as 4G LTE ones because the higher frequencies used in 5G mean one needs more BTS units to provide the same geographic coverage: For physics reasons, higher frequencies equals shorter range. - -If, as is projected, 5G develops into the new enterprise broadband for the internet of things (IoT), along with associated private networks covering everything else, then these eco- and cost-important questions are going to be salient — and they need answers quickly. 5G will soon be here, and [Gartner estimates that 60% of organizations will adopt it][10]. - -**More about 5G networks:** - - * [How enterprises can prep for 5G networks][11] - * [5G vs 4G: How speed, latency and apps support differ][12] - * [Private 5G networks are coming][13] - * [5G and 6G wireless have security issues][14] - * [How millimeter-wave wireless could help support 5G and IoT][15] - - - -Join the Network World communities on [Facebook][16] and [LinkedIn][17] to comment on topics that are top of mind. 
- --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3395465/will-5g-be-the-first-carbon-neutral-network.html - -作者:[Patrick Nelson][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Patrick-Nelson/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2019/01/4g-versus-5g_horizon_sunrise-100784230-large.jpg -[2]: https://www.networkworld.com/article/3203489/lan-wan/what-is-5g-wireless-networking-benefits-standards-availability-versus-lte.html -[3]: https://liu.se/en/news-item/okningen-av-mobildata-kraver-energieffektivare-nat -[4]: https://www.networkworld.com/article/3262991/future-wireless-networks-will-have-no-capacity-limits.html -[5]: https://arxiv.org/pdf/1812.01688.pdf -[6]: https://pluralsight.pxf.io/c/321564/424552/7490?u=https%3A%2F%2Fwww.pluralsight.com%2Fcourses%2Fmobile-device-management-big-picture -[7]: https://www.nokia.com/blog/nokia-has-ambitious-plans-reduce-network-power-consumption/ -[8]: https://pages.nokia.com/2364.Zero.Emissions.ebook.html?did=d000000001af&utm_campaign=5g_in_action_&utm_source=twitter&utm_medium=organic&utm_term=0dbf430c-1c94-47d7-8961-edc4f0ba3270 -[9]: https://twitter.com/Gabeuk/status/1099709788676636672?ref_src=twsrc%5Etfw%7Ctwcamp%5Etweetembed%7Ctwterm%5E1099709788676636672&ref_url=https%3A%2F%2Fwww.lightreading.com%2Fmobile%2F5g%2Fpower-consumption-5g-basestations-are-hungry-hungry-hippos%2Fd%2Fd-id%2F749979 -[10]: https://www.gartner.com/en/newsroom/press-releases/2018-12-18-gartner-survey-reveals-two-thirds-of-organizations-in -[11]: https://www.networkworld.com/article/3306720/mobile-wireless/how-enterprises-can-prep-for-5g.html -[12]: 
https://www.networkworld.com/article/3330603/mobile-wireless/5g-versus-4g-how-speed-latency-and-application-support-differ.html -[13]: https://www.networkworld.com/article/3319176/mobile-wireless/private-5g-networks-are-coming.html -[14]: https://www.networkworld.com/article/3315626/network-security/5g-and-6g-wireless-technologies-have-security-issues.html -[15]: https://www.networkworld.com/article/3291323/mobile-wireless/millimeter-wave-wireless-could-help-support-5g-and-iot.html -[16]: https://www.facebook.com/NetworkWorld/ -[17]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190517 The modern data center and the rise in open-source IP routing suites.md b/sources/talk/20190517 The modern data center and the rise in open-source IP routing suites.md deleted file mode 100644 index 02063687a0..0000000000 --- a/sources/talk/20190517 The modern data center and the rise in open-source IP routing suites.md +++ /dev/null @@ -1,140 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (The modern data center and the rise in open-source IP routing suites) -[#]: via: (https://www.networkworld.com/article/3396136/the-modern-data-center-and-the-rise-in-open-source-ip-routing-suites.html) -[#]: author: (Matt Conran https://www.networkworld.com/author/Matt-Conran/) - -The modern data center and the rise in open-source IP routing suites -====== -Open source enables passionate people to come together and fabricate work of phenomenal quality. This is in contrast to a single vendor doing everything. -![fdecomite \(CC BY 2.0\)][1] - -As the cloud service providers and search engines started with the structuring process of their business, they quickly ran into the problems of managing the networking equipment. Ultimately, after a few rounds of getting the network vendors to understand their problems, these hyperscale network operators revolted. 
- -Primarily, what the operators were looking for was a level of control in managing their network which the network vendors couldn’t offer. The revolution burned the path that introduced open networking, and network disaggregation to the work of networking. Let us first learn about disaggregation followed by open networking. - -### Disaggregation - -The concept of network disaggregation involves breaking-up of the vertical networking landscape into individual pieces, where each piece can be used in the best way possible. The hardware can be separated from the software, along with open or closed IP routing suites. This enables the network operators to use the best of breed for the hardware, software and the applications. - -**[ Now see[7 free network tools you must have][2]. ]** - -Networking has always been built as an appliance and not as a platform. The mindset is that the network vendor builds an appliance and as a specialized appliance, they will completely control what you can and cannot do on that box. In plain words, they will not enable anything that is not theirs. As a result, they act as gatekeepers and not gate-enablers. - -Network disaggregation empowers the network operators with the ability to lay hands on the features they need when they need them. However, this is impossible in case of non-disaggregated hardware. - -### Disaggregation leads to using best-of-breed - -In the traditional vertically integrated networking market, you’re forced to live with the software because you like the hardware, or vice-versa. But network disaggregation drives different people to develop things that matter to them. This allows multiple groups of people to connect, with each one focused on doing what he or she does the best. Switching silicon manufacturers can provide the best merchant silicon. Routing suites can be provided by those who are the best at that. And the OS vendors can provide the glue that enables all of these to work well together. 
- -With disaggregation, people are driven to do what they are good at. One company does the hardware, whereas another does the software and other company does the IP routing suites. Hence, today the networking world looks like more of the server world. - -### Open source - -Within this rise of the modern data center, there is another element that is driving network disaggregation; the notion of open source. Open source is “denoting software for which the original source code is made freely available, it may be redistributed and modified.” It enables passionate people to come together and fabricate work of phenomenal quality. This is in contrast to a single vendor doing everything. - -As a matter of fact, the networking world has always been very vendor driven. However, the advent of open source gives the opportunity to like-minded people rather than the vendor controlling the features. This eliminates the element of vendor lock-in, thereby enabling interesting work. Open source allows more than one company to be involved. - -### Open source in the data center - -The traditional enterprise and data center networks were primarily designed by bridging and Spanning Tree Protocol (STP). However, the modern data center is driven by IP routing and the CLOS topology. As a result, you need a strong IP routing suite. - -That was the point where the need for an open-source routing suite surfaced, the suite that can help drive the modern data center. The primary open-source routing suites are [FRRouting (FRR)][3], BIRD, GoBGP and ExaBGP. - -Open-source IP routing protocol suites are slowly but steadily gaining acceptance and are used in data centers of various sizes. Why? It is because they allow a community of developers and users to work on finding solutions to common problems. Open-source IP routing protocol suites equip them to develop the specific features that they need. 
It also helps the network operators to create simple designs that make sense to them, as opposed to having everything controlled by the vendor. They also enable routing suites to run on compute nodes. Kubernetes among others uses this model of running a routing protocol on a compute node. - -Today many startups are using FRR. Out of all of the IP routing suites, FRR is preferred in the data center as the primary open-source IP routing protocol suite. Some traditional network vendors have even demonstrated the use of FRR on their networking gear. - -There are lots of new features currently being developed for FRR, not just by the developers but also by the network operators. - -### Use cases for open-source routing suites - -When it comes to use-cases, where do IP routing protocol suites sit? First and foremost, if you want to do any type of routing in the disaggregated network world, you need an IP routing suite. - -Some operators are using FRR at the edge of the network as well, thereby receiving full BGP feeds. Many solutions which use Intel’s DPDK for packet forwarding use FRR as the control plane, receiving full BGP feeds. In addition, there are other vendors using FRR as the core IP routing suite for a full leaf and spine data center architecture. You can even get a version of FRR on pfSense which is a free and open source firewall. - -We need to keep in mind that reference implementations are important. Open source allows you to test at scale. But vendors don’t allow you to do that. However, with FRR, we have the ability to spin up virtual machines (VMs) or even containers by using software like Vagrant to test your network. Some vendors do offer software versions, but they are not fully feature-compatible. - -Also, with open source you do not need to wait. This empowers you with flexibility and speed which drives the modern data center. - -### Deep dive on FRRouting (FRR) - -FRR is a Linux foundation project. 
In a technical Linux sense, FRR is a group of daemons that work together, providing a complete routing suite that includes BGP, IS-IS, LDP, OSPF, BFD, PIM, and RIP. - -Each one of these daemons communicate with the common routing information base (RIB) daemon called Zebra in order to interface with the OS and to resolve conflicts between the multiple routing protocols providing the same information. Interfacing with the OS is used to receive the link up/down events, to add and delete routes etc. - -### FRRouting (FRR) components: Zebra - -Zebra is the RIB of the routing systems. It knows everything about the state of the system relevant to routing and is able to pass and disseminate this information to all the interested parties. - -The RIB in FRR acts just like a traditional RIB. When a route wins, it goes into the Linux kernel data plane where the forwarding occurs. All of the routing protocols run as separate processes and each of them have their source code in FRR. - -For example, when BGP starts up, it needs to know, for instance, what kind of virtual routing and forwarding (VRF) and IP interfaces are available. Zebra collects and passes this information back to the interested daemons. It passes all the relevant information about state of the machine. - -Furthermore, you can also register information with Zebra. For example, if a particular route changes, the daemon can be informed. This can also be used for reverse path forwarding (RPF). FRR doesn't need to do a pull when changes happen on the network. - -There are a myriad of ways through which you can control Linux and the state. Sometimes you have to use options like the Netlink bus and sometimes you may need to read the state in proc file system of Linux. The goal of Zebra is to gather all this data for the upper level protocols. - -### FRR supports remote data planes - -FRR also has the ability to manage the remote data planes. So, what does this mean? 
Typically, the data forwarding plane and the routing protocols run on the same box. Another model, adopted by Openflow and SDN for example, is one in which the data forwarding plane can be on one box while FRR runs on a different box on behalf of the first box and pushes the computed routing state on the first box. In other words, the data plane and the control plane run on different boxes. - -If you examine the traditional world, it’s like having one large chassis with different line cards with the ability to install routes in those different line cards. FRR operates with the same model which has one control plane and the capability to offer 3 boxes, if needed. It does this via the forwarding plane manager. - -### Forwarding plane manager - -Zebra can either install routes directly into the data plane of the box it is running on or use a forwarding plane manager to install routes on a remote box. When it installs a route, the forwarding plane manager abstracts the data which displays the route and the next hops. It then pushes the data to a remote system where the remote machine processes it and programs the ASIC appropriately. - -After the data is abstracted, you can use whatever protocol you want in order to push the data to the remote machine. You can even include the data in an email. - -### What is holding people back from open source? - -Since last 30 years the networking world meant that you need to go to a vendor to solve a problem. But now with open-source routing suites, such as, FRR, there is a major drift in the mindset as to how you approach troubleshooting. - -This causes the fear of not being able to use it properly because with open source you are the one who has to fix it. This at first can be scary and daunting. But it doesn’t necessarily have to be. Also, to switch to FRR on a traditional network gear, you need the vendor to enable it, but they may be reluctant as they are on competing platforms which can be another road blocker. 
- -### The future of FRR - -If we examine FRR from the use case perspective of the data center, FRR is feature-complete. Anyone building an IP based data center FRR has everything available. The latest 7.0 release of FRR adds Yang/NetConf, BGP Enhancements and OpenFabric. - -FRR is not just about providing features, boosting the performance or being the same as or better than the traditional network vendor’s software, it is also about simplifying the process for the end user. - -Since the modern data center is focused on automation and ease of use, FRR has made such progress that the vendors have not caught up with. FRR is very automation friendly. For example, FRR takes BGP and makes it automation-friendly without having to change the protocol. It supports BGP unnumbered that is unmatched by any other vendor suite. This is where the vendors are trying to catch up. - -Also, while troubleshooting, FRR shows peer’s and host’s names and not just the IP addresses. This allows you to understand without having spent much time. However, vendors show the peer’s IP addresses which can be daunting when you need to troubleshoot. - -FRR provides the features that you need to run an efficient network and data center. It makes easier to configure and manage the IP routing suite. Vendors just add keep adding features over features whether they are significant or not. Then you need to travel the certification paths that teach you how to twiddle 20 million nobs. How many of those networks are robust and stable? - -FRR is about supporting features that matter and not every imaginable feature. FRR is an open source project that brings like-minded people together, good work that is offered isn’t turned away. As a case in point, FRR has an open source implementation of EIGRP. - -The problem surfaces when you see a bunch of things, you think you need them. But in reality, you should try to keep the network as simple as possible. 
FRR is laser-focused on the ease of use and simplifying the use rather than implementing features that are mostly not needed to drive the modern data center. - -For more information and to contribute, why not join the [FRR][4] [mailing list group][4]. - -**This article is published as part of the IDG Contributor Network.[Want to Join?][5]** - -Join the Network World communities on [Facebook][6] and [LinkedIn][7] to comment on topics that are top of mind. - --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3396136/the-modern-data-center-and-the-rise-in-open-source-ip-routing-suites.html - -作者:[Matt Conran][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Matt-Conran/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2018/12/modular_humanoid_polyhedra_connections_structure_building_networking_by_fdecomite_cc_by_2-0_via_flickr_1200x800-100782334-large.jpg -[2]: https://www.networkworld.com/article/2825879/7-free-open-source-network-monitoring-tools.html -[3]: https://frrouting.org/community/7.0-launch.html -[4]: https://frrouting.org/#participate -[5]: /contributor-network/signup.html -[6]: https://www.facebook.com/NetworkWorld/ -[7]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190521 Enterprise IoT- Companies want solutions in these 4 areas.md b/sources/talk/20190521 Enterprise IoT- Companies want solutions in these 4 areas.md deleted file mode 100644 index 9df4495f05..0000000000 --- a/sources/talk/20190521 Enterprise IoT- Companies want solutions in these 4 areas.md +++ /dev/null @@ -1,119 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Enterprise IoT: Companies 
want solutions in these 4 areas) -[#]: via: (https://www.networkworld.com/article/3396128/the-state-of-enterprise-iot-companies-want-solutions-for-these-4-areas.html) -[#]: author: (Fredric Paul https://www.networkworld.com/author/Fredric-Paul/) - -Enterprise IoT: Companies want solutions in these 4 areas -====== -Based on customer pain points, PwC identified four areas companies are seeking enterprise solutions for, including energy use and sustainability. -![Jackie Niam / Getty Images][1] - -Internet of things (IoT) vendors and pundits like to crow about the billions and billions of connected devices that make the IoT so ubiquitous and powerful. But how much of that installed base is really relevant to the enterprise? - -To find out, I traded emails with Rob Mesirow, principal at [PwC’s Connected Solutions][2], the firm’s new one-stop-shop of IoT solutions, who suggests that consumer adoption may not paint a true picture of the enterprise opportunities. If you remove the health trackers and the smart thermostats from the market, he suggested, there are very few connected devices left. - -So, I wondered, what is actually happening on the enterprise side of IoT? What kinds of devices are we talking about, and in what kinds of numbers? - -**[ Read also:[Forget 'smart homes,' the new goal is 'autonomous buildings'][3] ]** - -“When people talk about the IoT,” Mesirow told me, “they usually focus on [consumer devices, which far outnumber business devices][4]. Yet [connected buildings currently represent only 12% of global IoT projects][5],” he noted, “and that’s without including wearables and smart home projects.” (Mesirow is talking about buildings that “use various IoT devices, including occupancy sensors that determine when people are present in a room in order to keep lighting and temperature controls at optimal levels, lowering energy costs and aiding sustainability goals. 
Sensors can also detect water and gas leaks and aid in predictive maintenance for HVAC systems.”) - -### 4 key enterprise IoT opportunities - -More specifically, based on customer pain points, PwC’s Connected Solutions is focusing on a few key opportunities, which Mesirow laid out in a [blog post][6] earlier this year. (Not surprisingly, the opportunities seem tied to [the group’s products][7].) - -“A lot of these solutions came directly from our customers’ request,” he noted. “We pre-qualify our solutions with customers before we build them.” - -Let’s take a look at the top four areas, along with a quick reality check on how important they are and whether the technology is ready for prime time. - -#### **1\. Energy use and sustainability** - -The IoT makes it possible to manage buildings and spaces more efficiently, with savings of 25% or more. Occupancy sensors can tell whether anyone is actually in a room, adjusting lighting and temperature to saving money and conserve energy. - -Connected buildings can also help determine when meeting spaces are available, which can boost occupancy at large businesses and universities by 40% while cutting infrastructure and maintenance costs. Other sensors, meanwhile, can detect water and gas leaks and aid in predictive maintenance for HVAC systems. - -**Reality check:** Obviously, much of this technology is not new, but there’s a real opportunity to make it work better by integrating disparate systems and adding better analytics to the data to make planning more effective. - -#### **2. Asset tracking - -** - -“Businesses can also use the IoT to track their assets,“ Mesirow told me, “which can range from trucks to hotel luggage carts to medical equipment. 
It can even assist with monitoring trash by alerting appropriate people when dumpsters need to be emptied.” - -Asset trackers can instantly identify the location of all kinds of equipment (saving employee time and productivity), and they can reduce the number of lost, stolen, and misplaced devices and machines as well as provide complete visibility into the location of your assets. - -Such trackers can also save employees from wasting time hunting down the devices and machines they need. For example, PwC noted that during an average hospital shift, more than one-third of nurses spend at least an hour looking for equipment such as blood pressure monitors and insulin pumps. Just as important, location tracking often improves asset optimization, reduced inventory needs, and improved customer experience. - -**Reality check:** Asset tracking offers clear value. The real question is whether a given use case is cost effective or not, as well as how the data gathered will actually be used. Too often, companies spend a lot of money and effort tracking their assets, but don’t do much with the information. - -#### **3\. Security and compliance** - -Connected solutions can create better working environments, Mesirow said. “In a hotel, for example, these smart devices can ensure that air and water quality is up to standards, provide automated pest traps, monitor dumpsters and recycling bins, detect trespassers, determine when someone needs assistance, or discover activity in an unauthorized area. Monitoring the water quality of hotel swimming pools can lower chemical and filtering costs,” he said. - -Mesirow cited an innovative use case where, in response to workers’ complaints about harassment, hotel operators—in conjunction with the [American Hotel and Lodging Association][8]—are giving their employees portable devices that alert security staff when workers request assistance. - -**Reality check:** This seems useful, but the ROI might be difficult to calculate. - -#### **4\. 
Customer experience** - -According to PwC, “Sensors, facial recognition, analytics, dashboards, and notifications can elevate and even transform the customer experience. … Using connected solutions, you can identify and reward your best customers by offering perks, reduced wait times, and/or shorter lines.” - -Those kinds of personalized customer experiences can potentially boost customer loyalty and increase revenue, Mesirow said, adding that the technology can also make staff deployments more efficient and “enhance safety by identifying trespassers and criminals who are tampering with company property.” - -**Reality check:** Creating a great customer experience is critical for businesses today, and this kind of personalized targeting promises to make it more efficient and effective. However, it has to be done in a way that makes customers comfortable and not creeped out. Privacy concerns are very real, especially when it comes to working with facial recognition and other kinds of surveillance technology. For example, [San Francisco recently banned city agencies from using facial recognition][9], and others may follow. - -**More on IoT:** - - * [What is the IoT? How the internet of things works][10] - * [What is edge computing and how it’s changing the network][11] - * [Most powerful Internet of Things companies][12] - * [10 Hot IoT startups to watch][13] - * [The 6 ways to make money in IoT][14] - * [What is digital twin technology? [and why it matters]][15] - * [Blockchain, service-centric networking key to IoT success][16] - * [Getting grounded in IoT networking and security][17] - * [Building IoT-ready networks must become a priority][18] - * [What is the Industrial IoT? [And why the stakes are so high]][19] - - - -Join the Network World communities on [Facebook][20] and [LinkedIn][21] to comment on topics that are top of mind. 
- --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3396128/the-state-of-enterprise-iot-companies-want-solutions-for-these-4-areas.html - -作者:[Fredric Paul][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Fredric-Paul/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2019/02/iot_internet_of_things_by_jackie_niam_gettyimages-996958260_2400x1600-100788446-large.jpg -[2]: https://digital.pwc.com/content/pwc-digital/en/products/connected-solutions.html#get-connected -[3]: https://www.networkworld.com/article/3309420/forget-smart-homes-the-new-goal-is-autonomous-buildings.html -[4]: https://www.statista.com/statistics/370350/internet-of-things-installed-base-by-category/) -[5]: https://iot-analytics.com/top-10-iot-segments-2018-real-iot-projects/ -[6]: https://www.digitalpulse.pwc.com.au/five-unexpected-ways-internet-of-things/ -[7]: https://digital.pwc.com/content/pwc-digital/en/products/connected-solutions.html -[8]: https://www.ahla.com/ -[9]: https://www.nytimes.com/2019/05/14/us/facial-recognition-ban-san-francisco.html -[10]: https://www.networkworld.com/article/3207535/internet-of-things/what-is-the-iot-how-the-internet-of-things-works.html -[11]: https://www.networkworld.com/article/3224893/internet-of-things/what-is-edge-computing-and-how-it-s-changing-the-network.html -[12]: https://www.networkworld.com/article/2287045/internet-of-things/wireless-153629-10-most-powerful-internet-of-things-companies.html -[13]: https://www.networkworld.com/article/3270961/internet-of-things/10-hot-iot-startups-to-watch.html -[14]: https://www.networkworld.com/article/3279346/internet-of-things/the-6-ways-to-make-money-in-iot.html -[15]: 
https://www.networkworld.com/article/3280225/internet-of-things/what-is-digital-twin-technology-and-why-it-matters.html -[16]: https://www.networkworld.com/article/3276313/internet-of-things/blockchain-service-centric-networking-key-to-iot-success.html -[17]: https://www.networkworld.com/article/3269736/internet-of-things/getting-grounded-in-iot-networking-and-security.html -[18]: https://www.networkworld.com/article/3276304/internet-of-things/building-iot-ready-networks-must-become-a-priority.html -[19]: https://www.networkworld.com/article/3243928/internet-of-things/what-is-the-industrial-iot-and-why-the-stakes-are-so-high.html -[20]: https://www.facebook.com/NetworkWorld/ -[21]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190522 Experts- Enterprise IoT enters the mass-adoption phase.md b/sources/talk/20190522 Experts- Enterprise IoT enters the mass-adoption phase.md deleted file mode 100644 index 86d7bf0efe..0000000000 --- a/sources/talk/20190522 Experts- Enterprise IoT enters the mass-adoption phase.md +++ /dev/null @@ -1,78 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Experts: Enterprise IoT enters the mass-adoption phase) -[#]: via: (https://www.networkworld.com/article/3397317/experts-enterprise-iot-enters-the-mass-adoption-phase.html) -[#]: author: (Jon Gold https://www.networkworld.com/author/Jon-Gold/) - -Experts: Enterprise IoT enters the mass-adoption phase -====== -Dropping hardware prices, 5G boost business internet-of-things deployments; technical complexity encourages partnerships. -![Avgust01 / Getty Images][1] - -[IoT][2] in general has taken off quickly over the past few years, but experts at the recent IoT World highlighted that the enterprise part of the market has been particularly robust of late – it’s not just an explosion of connected home gadgets anymore. 
- -Donna Moore, chairwoman of the LoRa Alliance, an industry group that works to develop and scale low-power WAN technology for mass usage, said on a panel that she’s never seen growth this fast in the sector. “I’d say we’re now in the early mass adopters [stage],” she said. - -**More on IoT:** - - * [Most powerful Internet of Things companies][3] - * [10 Hot IoT startups to watch][4] - * [The 6 ways to make money in IoT][5] - * [What is digital twin technology? [and why it matters]][6] - * [Blockchain, service-centric networking key to IoT success][7] - * [Getting grounded in IoT networking and security][8] - * [Building IoT-ready networks must become a priority][9] - * [What is the Industrial IoT? [And why the stakes are so high]][10] - - - -The technology itself has pushed adoption to these heights, said Graham Trickey, head of IoT for the GSMA, a trade organization for mobile network operators. Along with price drops for wireless connectivity modules, the array of upcoming technologies nestling under the umbrella label of [5G][11] could simplify the process of connecting devices to [edge-computing][12] hardware – and the edge to the cloud or [data center][13]. - -“Mobile operators are not just providers of connectivity now, they’re farther up the stack,” he said. Technologies like narrow-band IoT and support for highly demanding applications like telehealth are all set to be part of the final 5G spec. - -### Partnerships needed to deal with IoT complexity** - -** - -That’s not to imply that there aren’t still huge tasks facing both companies trying to implement their own IoT frameworks and the creators of the technology underpinning them. For one thing, IoT tech requires a huge array of different sets of specialized knowledge. - -“That means partnerships, because you need an expert in your [vertical] area to know what you’re looking for, you need an expert in communications, and you might need a systems integrator,” said Trickey. 
- -Phil Beecher, the president and CEO of the Wi-SUN Alliance (the acronym stands for Smart Ubiquitous Networks, and the group is heavily focused on IoT for the utility sector), concurred with that, arguing that broad ecosystems of different technologies and different partners would be needed. “There’s no one technology that’s going to solve all these problems, no matter how much some parties might push it,” he said. - -One of the central problems – [IoT security][14] – is particularly dear to Beecher’s heart, given the consequences of successful hacks of the electrical grid or other utilities. More than one panelist praised the passage of the EU’s General Data Protection Regulation, saying that it offered concrete guidelines for entities developing IoT tech – a crucial consideration for some companies that may not have a lot of in-house expertise in that area. - -Join the Network World communities on [Facebook][15] and [LinkedIn][16] to comment on topics that are top of mind. - --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3397317/experts-enterprise-iot-enters-the-mass-adoption-phase.html - -作者:[Jon Gold][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Jon-Gold/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2019/02/iot_internet_of_things_mobile_connections_by_avgust01_gettyimages-1055659210_2400x1600-100788447-large.jpg -[2]: https://www.networkworld.com/article/3207535/what-is-iot-how-the-internet-of-things-works.html -[3]: https://www.networkworld.com/article/2287045/internet-of-things/wireless-153629-10-most-powerful-internet-of-things-companies.html -[4]: https://www.networkworld.com/article/3270961/internet-of-things/10-hot-iot-startups-to-watch.html -[5]: 
https://www.networkworld.com/article/3279346/internet-of-things/the-6-ways-to-make-money-in-iot.html -[6]: https://www.networkworld.com/article/3280225/internet-of-things/what-is-digital-twin-technology-and-why-it-matters.html -[7]: https://www.networkworld.com/article/3276313/internet-of-things/blockchain-service-centric-networking-key-to-iot-success.html -[8]: https://www.networkworld.com/article/3269736/internet-of-things/getting-grounded-in-iot-networking-and-security.html -[9]: https://www.networkworld.com/article/3276304/internet-of-things/building-iot-ready-networks-must-become-a-priority.html -[10]: https://www.networkworld.com/article/3243928/internet-of-things/what-is-the-industrial-iot-and-why-the-stakes-are-so-high.html -[11]: https://www.networkworld.com/article/3203489/what-is-5g-how-is-it-better-than-4g.html -[12]: https://www.networkworld.com/article/3224893/what-is-edge-computing-and-how-it-s-changing-the-network.html?nsdr=true -[13]: https://www.networkworld.com/article/3223692/what-is-a-data-centerhow-its-changed-and-what-you-need-to-know.html -[14]: https://www.networkworld.com/article/3269736/getting-grounded-in-iot-networking-and-security.html -[15]: https://www.facebook.com/NetworkWorld/ -[16]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190522 The Traffic Jam Whopper project may be the coolest-dumbest IoT idea ever.md b/sources/talk/20190522 The Traffic Jam Whopper project may be the coolest-dumbest IoT idea ever.md deleted file mode 100644 index be8a4833cc..0000000000 --- a/sources/talk/20190522 The Traffic Jam Whopper project may be the coolest-dumbest IoT idea ever.md +++ /dev/null @@ -1,97 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (The Traffic Jam Whopper project may be the coolest/dumbest IoT idea ever) -[#]: via: 
(https://www.networkworld.com/article/3396188/the-traffic-jam-whopper-project-may-be-the-coolestdumbest-iot-idea-ever.html) -[#]: author: (Fredric Paul https://www.networkworld.com/author/Fredric-Paul/) - -The Traffic Jam Whopper project may be the coolest/dumbest IoT idea ever -====== -Burger King uses real-time IoT data to deliver burgers to drivers stuck in traffic — and it seems to be working. -![Mike Mozart \(CC BY 2.0\)][1] - -People love to eat in their cars. That’s why we invented the drive-in and the drive-thru. - -But despite a fast-food outlet on the corner of every major intersection, it turns out we were only scratching the surface of this idea. Burger King is taking this concept to the next logical step with its new IoT-powered Traffic Jam Whopper project. - -I have to admit, when I first heard about this, I thought it was a joke, but apparently the [Traffic Jam Whopper project is totally real][2] and has already passed a month-long test in Mexico City. While the company hasn’t specified a timeline, it plans to roll out the Traffic Jam Whopper project in Los Angeles (where else?) and other traffic-plagued megacities such as São Paulo and Shanghai. - -**[ Also read:[Is IoT in the enterprise about making money or saving money?][3] | Get regularly scheduled insights: [Sign up for Network World newsletters][4] ]** - -### How Burger King's Traffic Jam Whopper project works - -According to [Nations Restaurant News][5], this is how Burger King's Traffic Jam Whopper project works: - -The project uses real-time data to target hungry drivers along congested roads and highways for food delivery by couriers on motorcycles. - -The system leverages push notifications to the Burger King app and personalized messaging on digital billboards positioned along busy roads close to a Burger King restaurant. 
- -[According to the We Believers agency][6] that put it all together, “By leveraging traffic and drivers’ real-time data [location and speed], we adjusted our billboards’ location and content, displaying information about the remaining time in traffic to order, and personalized updates about deliveries in progress.” The menu is limited to Whopper Combos to speed preparation (though the company plans to offer a wider menu as it works out the kinks). - -**[[Become a Microsoft Office 365 administrator in record time with this quick start course from PluralSight.][7] ]** - -The company said orders in Mexico City were delivered in an average of 15 minutes. Fortunately (or unfortunately, depending on how you look at it) many traffic jams hold drivers captive for far longer than that. - -Once the order is ready, the motorcyclist uses Google maps and GPS technology embedded into the app to locate the car that made the order. The delivery person then weaves through traffic to hand over the Whopper. (Lane-splitting is legal in California, but I have no idea if there are other potential safety or law-enforcement issues involved here. For drivers ordering burgers, at least, the Burger King app supports voice ordering. I also don’t know what happens if traffic somehow clears up before the burger arrives.) - -Here’s a video of the pilot program in Mexico City: - -#### **New technology = > new opportunities** - -Even more amazing, this is not _just_ a publicity stunt. NRN quotes Bruno Cardinali, head of marketing for Burger King Latin America and Caribbean, claiming the project boosted sales during rush hour, when app orders are normally slow: - -“Thanks to The Traffic Jam Whopper campaign, we’ve increased deliveries by 63% in selected locations across the month of April, adding a significant amount of orders per restaurant per day, just during rush hours." - -If nothing else, this project shows that creative thinking really can leverage IoT technology into new businesses. 
In this case, it’s turning notoriously bad traffic—pretty much required for this process to work—from a problem into an opportunity to generate additional sales during slow periods. - -**More on IoT:** - - * [What is the IoT? How the internet of things works][8] - * [What is edge computing and how it’s changing the network][9] - * [Most powerful Internet of Things companies][10] - * [10 Hot IoT startups to watch][11] - * [The 6 ways to make money in IoT][12] - * [What is digital twin technology? [and why it matters]][13] - * [Blockchain, service-centric networking key to IoT success][14] - * [Getting grounded in IoT networking and security][15] - * [Building IoT-ready networks must become a priority][16] - * [What is the Industrial IoT? [And why the stakes are so high]][17] - - - -Join the Network World communities on [Facebook][18] and [LinkedIn][19] to comment on topics that are top of mind. - --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3396188/the-traffic-jam-whopper-project-may-be-the-coolestdumbest-iot-idea-ever.html - -作者:[Fredric Paul][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Fredric-Paul/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2019/05/burger-king-gift-card-100797164-large.jpg -[2]: https://abc7news.com/food/burger-king-to-deliver-to-drivers-stuck-in-traffic/5299073/ -[3]: https://www.networkworld.com/article/3343917/the-big-picture-is-iot-in-the-enterprise-about-making-money-or-saving-money.html -[4]: https://www.networkworld.com/newsletters/signup.html -[5]: https://www.nrn.com/technology/tech-tracker-burger-king-deliver-la-motorists-stuck-traffic?cid= -[6]: https://www.youtube.com/watch?v=LXNgEZV7lNg -[7]: 
https://pluralsight.pxf.io/c/321564/424552/7490?u=https%3A%2F%2Fwww.pluralsight.com%2Fcourses%2Fadministering-office-365-quick-start -[8]: https://www.networkworld.com/article/3207535/internet-of-things/what-is-the-iot-how-the-internet-of-things-works.html -[9]: https://www.networkworld.com/article/3224893/internet-of-things/what-is-edge-computing-and-how-it-s-changing-the-network.html -[10]: https://www.networkworld.com/article/2287045/internet-of-things/wireless-153629-10-most-powerful-internet-of-things-companies.html -[11]: https://www.networkworld.com/article/3270961/internet-of-things/10-hot-iot-startups-to-watch.html -[12]: https://www.networkworld.com/article/3279346/internet-of-things/the-6-ways-to-make-money-in-iot.html -[13]: https://www.networkworld.com/article/3280225/internet-of-things/what-is-digital-twin-technology-and-why-it-matters.html -[14]: https://www.networkworld.com/article/3276313/internet-of-things/blockchain-service-centric-networking-key-to-iot-success.html -[15]: https://www.networkworld.com/article/3269736/internet-of-things/getting-grounded-in-iot-networking-and-security.html -[16]: https://www.networkworld.com/article/3276304/internet-of-things/building-iot-ready-networks-must-become-a-priority.html -[17]: https://www.networkworld.com/article/3243928/internet-of-things/what-is-the-industrial-iot-and-why-the-stakes-are-so-high.html -[18]: https://www.facebook.com/NetworkWorld/ -[19]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190523 Benchmarks of forthcoming Epyc 2 processor leaked.md b/sources/talk/20190523 Benchmarks of forthcoming Epyc 2 processor leaked.md deleted file mode 100644 index 61ae9e656b..0000000000 --- a/sources/talk/20190523 Benchmarks of forthcoming Epyc 2 processor leaked.md +++ /dev/null @@ -1,55 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Benchmarks of forthcoming Epyc 2 processor leaked) -[#]: 
via: (https://www.networkworld.com/article/3397081/benchmarks-of-forthcoming-epyc-2-processor-leaked.html) -[#]: author: (Andy Patrizio https://www.networkworld.com/author/Andy-Patrizio/) - -Benchmarks of forthcoming Epyc 2 processor leaked -====== -Benchmarks of AMD's second-generation Epyc server briefly found their way online and show the chip is larger but a little slower than the Epyc 7601 on the market now. -![Gordon Mah Ung][1] - -Benchmarks of engineering samples of AMD's second-generation Epyc server, code-named “Rome,” briefly found their way online and show a very beefy chip running a little slower than its predecessor. - -Rome is based on the Zen 2 architecture, believed to be more of an incremental improvement over the prior generation than a major leap. It’s already known that Rome would feature a 64-core, 128-thread design, but that was about all of the details. - -**[ Also read:[Who's developing quantum computers][2] ]** - -The details came courtesy of SiSoftware's Sandra PC analysis and benchmarking tool. It’s very popular and has been used by hobbyists and benchmarkers alike for more than 20 years. New benchmarks are uploaded to the Sandra database all the time, and what I suspect happened is someone running a Rome sample ran the benchmark, not realizing the results would be uploaded to the Sandra database. - -The benchmarks were from two different servers, a Dell PowerEdge R7515 and a Super Micro Super Server. The Dell product number is not on the market, so this would indicate a future server with Rome processors. The entry has since been deleted, but several sites, including the hobbyist site Tom’s Hardware Guide, managed to [take a screenshot][3]. - -According to the entry, the chip is a mid-range processor with a base clock speed of 1.4GHz, jumping up to 2.2GHz in turbo mode, with 16MB of Level 2 cache and 256MB of Level 3 cache, the latter of which is crazy. The first-generation Epyc had just 32MB of L3 cache. 
- -That’s a little slower than the Epyc 7601 on the market now, but when you double the number of cores in the same space, something’s gotta give, and in this case, it’s electricity. The thermal envelope was not revealed by the benchmark. Previous Epyc processors ranged from 120 watts to 180 watts. - -Sandra ranked the processor at #3 for arithmetic and #5 for multimedia processing, which makes me wonder what on Earth beat the Rome chip. Interestingly, the servers were running Windows 10, not Windows Server 2019. - -**[[Get certified as an Apple Technical Coordinator with this seven-part online course from PluralSight.][4] ]** - -Rome is expected to be officially launched at the massive Computex trade show in Taiwan on May 27 and will begin shipping in the third quarter of the year. - -Join the Network World communities on [Facebook][5] and [LinkedIn][6] to comment on topics that are top of mind. - --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3397081/benchmarks-of-forthcoming-epyc-2-processor-leaked.html - -作者:[Andy Patrizio][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Andy-Patrizio/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2018/11/rome_2-100779395-large.jpg -[2]: https://www.networkworld.com/article/3275385/who-s-developing-quantum-computers.html -[3]: https://www.tomshardware.co.uk/amd-epyc-rome-processor-data-center,news-60265.html -[4]: https://pluralsight.pxf.io/c/321564/424552/7490?u=https%3A%2F%2Fwww.pluralsight.com%2Fpaths%2Fapple-certified-technical-trainer-10-11 -[5]: https://www.facebook.com/NetworkWorld/ -[6]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190523 Edge-based caching and blockchain-nodes speed up data 
transmission.md b/sources/talk/20190523 Edge-based caching and blockchain-nodes speed up data transmission.md deleted file mode 100644 index 54ddf76db3..0000000000 --- a/sources/talk/20190523 Edge-based caching and blockchain-nodes speed up data transmission.md +++ /dev/null @@ -1,74 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Edge-based caching and blockchain-nodes speed up data transmission) -[#]: via: (https://www.networkworld.com/article/3397105/edge-based-caching-and-blockchain-nodes-speed-up-data-transmission.html) -[#]: author: (Patrick Nelson https://www.networkworld.com/author/Patrick-Nelson/) - -Edge-based caching and blockchain-nodes speed up data transmission -====== -Using a combination of edge-based data caches and blockchain-like distributed networks, Bluzelle claims it can significantly speed up the delivery of data across the globe. -![OlgaSalt / /getty][1] - -The combination of a blockchain-like distributed network, along with the ability to locate data at the edge will massively speed up future networks, such as those used by the internet of things (IoT), claims Bluzelle in announcing what is says is the first decentralized data delivery network (DDN). - -Distributed DDNs will be like content delivery networks (CDNs) that now cache content around the world to speed up the web, but in this case, it will be for data, the Singapore-based company explains. Distributed key-value (blockchain) networks and edge computing built into Bluzelle's system will provide significantly faster delivery than existing caching, the company claims in a press release announcing its product. - -“The future of data delivery can only ever be de-centrally distributed,” says Pavel Bains, CEO and co-founder of Bluzelle. It’s because the world requires instant access to data that’s being created at the edge, he argues. - -“But delivery is hampered by existing technology,” he says. 
- -**[ Also read:[What is edge computing?][2] and [How edge networking and IoT will reshape data centers][3]. ]** - -Bluzelle says decentralized caching is the logical next step to generalized data caching, used for reducing latency. “Decentralized caching expands the theory of caching,” the company writes in a [report][4] (Dropbox pdf) on its [website][5]. It says the cache must be expanded from simply being located at one unique location. - -“Using a combination of distributed networks, the edge and the cloud, [it’s] thereby increasing the transactional throughput of data,” the company says. - -This kind of thing is particularly important in consumer gaming now, where split-second responses from players around the world make or break a game experience, but it will likely be crucial for the IoT, higher-definition media, artificial intelligence, and virtual reality as they gain more of a role in digitization—including at critical enterprise applications. - -“Currently applications are limited to data caching technologies that require complex configuration and management of 10-plus-year-old technology constrained to a few data centers,” Bains says. “These were not designed to handle the ever-increasing volumes of data.” - -Bains says one of the key selling points of Bluzelle's network is that developers should be able to implement and run networks without having to also physically expand the networks manually. - -“Software developers don’t want to react to where their customers come from. Our architecture is designed to always have the data right where the customer is. This provides a superior consumer experience,” he says. - -Data caches are around now, but Bluzelle claims its system, written in C++ and available on Linux and Docker containers, among other platforms, is faster than others. 
It further says that if its system and a more traditional cache would connect to the same MySQL database in Virginia, say, their users will get the data three to 16 times faster than a traditional “non-edge-caching” network. Write updates to all Bluzelle nodes around the world takes 875 milliseconds (ms), it says. - -The company has been concentrating its efforts on gaming, and with a test setup in Virginia, it says it was able to deliver data 33 times faster—at 22ms to Singapore—than a normal, cloud-based data cache. That traditional cache (located near the database) took 727ms in the Bluzelle-published test. In a test to Ireland, it claims 16ms over 223ms using a traditional cache. - -An algorithm is partly the reason for the gains, the company explains. It “allows the nodes to make decisions and take actions without the need for masternodes,” the company says. Masternodes are the server-like parts of blockchain systems. - -**More about edge networking** - - * [How edge networking and IoT will reshape data centers][3] - * [Edge computing best practices][6] - * [How edge computing can help secure the IoT][7] - - - -Join the Network World communities on [Facebook][8] and [LinkedIn][9] to comment on topics that are top of mind. 
- --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3397105/edge-based-caching-and-blockchain-nodes-speed-up-data-transmission.html - -作者:[Patrick Nelson][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Patrick-Nelson/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2019/02/blockchain_crypotocurrency_bitcoin-by-olgasalt-getty-100787949-large.jpg -[2]: https://www.networkworld.com/article/3224893/internet-of-things/what-is-edge-computing-and-how-it-s-changing-the-network.html -[3]: https://www.networkworld.com/article/3291790/data-center/how-edge-networking-and-iot-will-reshape-data-centers.html -[4]: https://www.dropbox.com/sh/go5bnhdproy1sk5/AAC5MDoafopFS7lXUnmiLAEFa?dl=0&preview=Bluzelle+Report+-+The+Decentralized+Internet+Is+Here.pdf -[5]: https://bluzelle.com/ -[6]: https://www.networkworld.com/article/3331978/lan-wan/edge-computing-best-practices.html -[7]: https://www.networkworld.com/article/3331905/internet-of-things/how-edge-computing-can-help-secure-the-iot.html -[8]: https://www.facebook.com/NetworkWorld/ -[9]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190523 Online performance benchmarks all companies should try to achieve.md b/sources/talk/20190523 Online performance benchmarks all companies should try to achieve.md deleted file mode 100644 index 829fb127f8..0000000000 --- a/sources/talk/20190523 Online performance benchmarks all companies should try to achieve.md +++ /dev/null @@ -1,80 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Online performance benchmarks all companies should try to achieve) -[#]: via: 
(https://www.networkworld.com/article/3397322/online-performance-benchmarks-all-companies-should-try-to-achieve.html) -[#]: author: (Zeus Kerravala https://www.networkworld.com/author/Zeus-Kerravala/) - -Online performance benchmarks all companies should try to achieve -====== -With digital performance more important than ever, companies must ensure their online performance meets customers’ needs. A new ThousandEyes report can help them determine that. -![Thinkstock][1] - -There's no doubt about it: We have entered the experience economy, and digital performance is more important than ever. - -Customer experience is the top brand differentiator, topping price and every other factor. And businesses that provide a poor digital experience will find customers will actively seek a competitor. In fact, recent ZK Research found that in 2018, about two-thirds of millennials changed loyalties to a brand because of a bad experience. (Note: I am an employee of ZK Research.) - -To help companies determine if their online performance is leading, lacking, or on par with some of the top companies, ThousandEyes this week released its [2019 Digital Experience Performance Benchmark Report][2]. This document provides a comparative analysis of web, infrastructure, and network performance from the top 20 U.S. digital retail, travel, and media websites. Although this is a small sampling of companies, those three industries are the most competitive when it comes to using their digital platforms for competitive advantage. The aggregated data from this report can be used as an industry-agnostic performance benchmark that all companies should strive to meet. - -**[ Read also:[IoT providers need to take responsibility for performance][3] ]** - -The methodology of the study was for ThousandEyes to use its own platform to provide an independent view of performance. 
It uses active monitoring and a global network of monitoring agents to measure application and network layer performance for websites, applications, and services. The company collected data from 36 major cities scattered across the U.S. Six of the locations (Ashburn, Chicago, Dallas, Los Angeles, San Jose, and Seattle) also included vantage points connected to six major broadband ISPs (AT&T, CenturyLink, Charter, Comcast, Cox, and Verizon). This acts as a good proxy for what a user would experience. - -The test involved page load tests against the websites of the major companies in retail, media, and travel and looked at several factors, including DNS response time, round-trip latency, network time (one-way latency), HTTP response time, and page load. The averages and median times can be seen in the table below. Those can be considered the average benchmarks that all companies should try to attain. - -![][4] - -### Choice of content delivery network matters by location - -ThousandEyes' report also looked at how the various services that companies use impacts web performance. For example, the study measured the performance of the content delivery network (CDN) providers in the 36 markets. It found that in Albuquerque, Akamai and Fastly had the most latency, whereas Edgecast had the least. It also found that in Boston, all of the CDN providers were close. Companies can use this type of data to help them select a CDN. Without it, decision makers are essentially guessing and hoping. - -### CDN performance is impacted by ISP - -Another useful set of data was cross-referencing CDN performance by ISP, which lead to some fascinating information. With Comcast, Akamai, Cloudfront, Google and Incapula all had high amounts of latency. Only Edgecast and Fastly offered average latency. On the other hand, all of the CDNs worked great with CenturyLink. 
This tells a buyer, "If my customer base is largely in Comcast’s footprint, I should look at Edgecast or Fastly or my customers will be impacted." - -### DNS and latency directly impact page load times - -The ThousandEyes study also confirmed some points that many people believe as true but until now had no quantifiable evidence to support it. For example, it's widely accepted that DNS response time and network latency to the CDN edge correlate to web performance; the data in the report now supports that belief. ThousandEyes did some regression analysis and fancy math and found that in general, companies that were in the top quartile of HTTP performance had above-average DNS response time and network performance. There were a few exceptions, but in most cases, this is true. - -Based on all the data, the below are the benchmarks for the three infrastructure metrics gathered and is what businesses, even ones outside the three verticals studied, should hope to achieve to support a high-quality digital experience. - - * DNS response time 25 ms - * Round trip network latency 15 ms - * HTTP response time 250 ms - - - -### Operations teams need to focus on digital performance - -Benchmarking certainly provides value, but the report also offers some recommendations on how operations teams can use the data to improve digital performance. Those include: - - * **Measure site from distributed user vantage points**. There is no single point that will provide a view of digital performance everywhere. Instead, measure from a range of ISPs in different regions and take a multi-layered approach to visibility (application, network and routing). - * **Use internet performance information as a baseline**. Compare your organization's data to the baselines, and if you’re not meeting it in some markets, focus on improvement there. - * **Compare performance to industry peers**. In highly competitive industries, it’s important to understand how you rank versus the competition. 
Don’t be satisfied with hitting the benchmarks if your key competitors exceed them. - * **Build a strong performance stack.** The data shows that solid DNS and HTTP response times and low latency are correlated to solid page load times. Focus on optimizing those factors and consider them foundational to digital performance. - - - -Join the Network World communities on [Facebook][5] and [LinkedIn][6] to comment on topics that are top of mind. - --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3397322/online-performance-benchmarks-all-companies-should-try-to-achieve.html - -作者:[Zeus Kerravala][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Zeus-Kerravala/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2017/07/racing_speed_runners_internet-speed-100728363-large.jpg -[2]: https://www.thousandeyes.com/research/digital-experience -[3]: https://www.networkworld.com/article/3340318/iot-providers-need-to-take-responsibility-for-performance.html -[4]: https://images.idgesg.net/images/article/2019/05/thousandeyes-100797290-large.jpg -[5]: https://www.facebook.com/NetworkWorld/ -[6]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190523 Study- Most enterprise IoT transactions are unencrypted.md b/sources/talk/20190523 Study- Most enterprise IoT transactions are unencrypted.md deleted file mode 100644 index 51098dad33..0000000000 --- a/sources/talk/20190523 Study- Most enterprise IoT transactions are unencrypted.md +++ /dev/null @@ -1,93 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Study: Most enterprise IoT transactions are unencrypted) -[#]: via: 
(https://www.networkworld.com/article/3396647/study-most-enterprise-iot-transactions-are-unencrypted.html) -[#]: author: (Tim Greene https://www.networkworld.com/author/Tim-Greene/) - -Study: Most enterprise IoT transactions are unencrypted -====== -A Zscaler report finds 91.5% of IoT communications within enterprises are in plaintext and so susceptible to interference. -![HYWARDS / Getty Images][1] - -Of the millions of enterprise-[IoT][2] transactions examined in a recent study, the vast majority were sent without benefit of encryption, leaving the data vulnerable to theft and tampering. - -The research by cloud-based security provider Zscaler found that about 91.5 percent of transactions by internet of things devices took place over plaintext, while 8.5 percent were encrypted with [SSL][3]. That means if attackers could intercept the unencrypted traffic, they’d be able to read it and possibly alter it, then deliver it as if it had not been changed. - -**[ For more on IoT security, see[our corporate guide to addressing IoT security concerns][4]. | Get regularly scheduled insights by [signing up for Network World newsletters][5]. ]** - -Researchers looked through one month’s worth of enterprise traffic traversing Zscaler’s cloud seeking the digital footprints of IoT devices. It found and analyzed 56 million IoT-device transactions over that time, and identified the type of devices, protocols they used, the servers they communicated with, how often communication went in and out and general IoT traffic patterns. - -The team tried to find out which devices generate the most traffic and the threats they face. It discovered that 1,015 organizations had at least one IoT device. The most common devices were set-top boxes (52 percent), then smart TVs (17 percent), wearables (8 percent), data-collection terminals (8 percent), printers (7 percent), IP cameras and phones (5 percent) and medical devices (1 percent). 
- -While they represented only 8 percent of the devices, data-collection terminals generated 80 percent of the traffic. - -The breakdown is that 18 percent of the IoT devices use SSL to communicate all the time, and of the remaining 82 percent, half used it part of the time and half never used it. -The study also found cases of plaintext HTTP being used to authenticate devices and to update software and firmware, as well as use of outdated crypto libraries and weak default credentials. - -While IoT devices are common in enterprises, “many of the devices are employee owned, and this is just one of the reasons they are a security concern,” the report says. Without strict policies and enforcement, these devices represent potential vulnerabilities. - -**[[Prepare to become a Certified Information Security Systems Professional with this comprehensive online course from PluralSight. Now offering a 10-day free trial!][6] ]** - -Another reason employee-owned IoT devices are a concern is that many businesses don’t consider them a threat because no data is stored on them. But if the data they gather is transmitted insecurely, it is at risk. - -### 5 tips to protect enterprise IoT - -Zscaler recommends these security precautions: - - * Change default credentials to something more secure. As employees bring in devices, encourage them to use strong passwords and to keep their firmware current. - * Isolate IoT devices on networks and restrict inbound and outbound network traffic. - * Restrict access to IoT devices from external networks and block unnecessary ports from external access. - * Apply regular security and firmware updates to IoT devices, and secure network traffic. - * Deploy tools to gain visibility of shadow-IoT devices already inside the network so they can be protected. 
- - - -**More on IoT:** - - * [What is edge computing and how it’s changing the network][7] - * [Most powerful Internet of Things companies][8] - * [10 Hot IoT startups to watch][9] - * [The 6 ways to make money in IoT][10] - * [What is digital twin technology? [and why it matters]][11] - * [Blockchain, service-centric networking key to IoT success][12] - * [Getting grounded in IoT networking and security][13] - * [Building IoT-ready networks must become a priority][14] - * [What is the Industrial IoT? [And why the stakes are so high]][15] - - - -Join the Network World communities on [Facebook][16] and [LinkedIn][17] to comment on topics that are top of mind. - --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3396647/study-most-enterprise-iot-transactions-are-unencrypted.html - -作者:[Tim Greene][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Tim-Greene/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2019/05/network_security_network_traffic_scanning_by_hywards_gettyimages-673891964_2400x1600-100796830-large.jpg -[2]: https://www.networkworld.com/article/3207535/what-is-iot-how-the-internet-of-things-works.html -[3]: https://www.networkworld.com/article/3045953/5-things-you-need-to-know-about-ssl.html -[4]: https://www.networkworld.com/article/3269165/internet-of-things/a-corporate-guide-to-addressing-iot-security-concerns.html -[5]: https://www.networkworld.com/newsletters/signup.html -[6]: https://pluralsight.pxf.io/c/321564/424552/7490?u=https%3A%2F%2Fwww.pluralsight.com%2Fpaths%2Fcertified-information-systems-security-professional-cisspr -[7]: https://www.networkworld.com/article/3224893/internet-of-things/what-is-edge-computing-and-how-it-s-changing-the-network.html 
-[8]: https://www.networkworld.com/article/2287045/internet-of-things/wireless-153629-10-most-powerful-internet-of-things-companies.html -[9]: https://www.networkworld.com/article/3270961/internet-of-things/10-hot-iot-startups-to-watch.html -[10]: https://www.networkworld.com/article/3279346/internet-of-things/the-6-ways-to-make-money-in-iot.html -[11]: https://www.networkworld.com/article/3280225/internet-of-things/what-is-digital-twin-technology-and-why-it-matters.html -[12]: https://www.networkworld.com/article/3276313/internet-of-things/blockchain-service-centric-networking-key-to-iot-success.html -[13]: https://www.networkworld.com/article/3269736/internet-of-things/getting-grounded-in-iot-networking-and-security.html -[14]: https://www.networkworld.com/article/3276304/internet-of-things/building-iot-ready-networks-must-become-a-priority.html -[15]: https://www.networkworld.com/article/3243928/internet-of-things/what-is-the-industrial-iot-and-why-the-stakes-are-so-high.html -[16]: https://www.facebook.com/NetworkWorld/ -[17]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190528 Analysing D Code with KLEE.md b/sources/talk/20190528 Analysing D Code with KLEE.md deleted file mode 100644 index c93f6e2b8d..0000000000 --- a/sources/talk/20190528 Analysing D Code with KLEE.md +++ /dev/null @@ -1,680 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Analysing D Code with KLEE) -[#]: via: (https://theartofmachinery.com/2019/05/28/d_and_klee.html) -[#]: author: (Simon Arneaud https://theartofmachinery.com) - -Analysing D Code with KLEE -====== - -[KLEE][1] is symbolic execution engine that can rigorously verify or find bugs in software. It’s designed for C and C++, but it’s just an interpreter for LLVM bitcode combined with theorem prover backends, so it can work with bitcode generated by `ldc2`. 
One catch is that it needs a compatible bitcode port of the D runtime to run normal D code. I’m still interested in getting KLEE to work with normal D code, but for now I’ve done some experiments with `-betterC` D. - -### How KLEE works - -What makes KLEE special is its support for two kinds of variables: concrete and symbolic. Concrete variables are just like the normal variables in normal code: they have a deterministic value at any given point in the program. On the other hand, symbolic variables contain a bundle of logical constraints instead of values. Take this code: - -``` -int x = klee_int("x"); -klee_assume(x >= 0); -if (x > 42) -{ - doA(x); -} -else -{ - doB(x); - assert (3 * x != 21); -} -``` - -`klee_int("x")` creates a symbolic integer that will be called “`x`” in output reports. Initially it has no contraints and can imply any value that a 32b signed integer can have. `klee_assume(x >= 0)` tells KLEE to add `x >= 0` as a constraint, so now we’re only analysing the code for non-negative 32b signed integers. On hitting the `if`, KLEE checks if both branches are possible. Sure enough, `x > 42` can be true or false even with the constraint `x >= 0`, so KLEE has to _fork_. We now have two processes being interpreted on the VM: one executing `doA()` while `x` holds the constraints `x >= 0, x > 42`, and another executing `doB()` while `x` holds the contraints `x >= 0, x <= 42`. The second process will hit the `assert` statement, and KLEE will try to prove or disprove `3 * x != 21` using the assumptions `x >= 0, x <= 42` — in this case it will disprove it and report a bug with `x = 7` as a crashing example. - -### First steps - -Here’s a toy example just to get things working. Suppose we have a function that makes an assumption for a performance optimisation. Thankfully the assumption is made explicit with `assert` and is documented with a comment. Is the assumption valid? 
- -``` -int foo(int x) -{ - // 17 is a prime number, so let's use it as a sentinel value for an awesome optimisation - assert (x * x != 17); - // ... - return x; -} -``` - -Here’s a KLEE test rig. The KLEE function declarations and the `main()` entry point need to have `extern(C)` linkage, but anything else can be normal D code as long as it compiles under `-betterC`: - -``` -extern(C): - -int klee_int(const(char*) name); - -int main() -{ - int x = klee_int("x"); - foo(x); - return 0; -} -``` - -It turns out there’s just one (frustrating) complication with running `-betterC` D under KLEE. In D, `assert` is handled specially by the compiler. By default, it throws an `Error`, but for compatibility with KLEE, I’m using the `-checkaction=C` flag. In C, `assert` is usually a macro that translates to code that calls some backend implementation. That implementation isn’t standardised, so of course various C libraries work differently. `ldc2` actually has built-in logic for implementing `-checkaction=C` correctly depending on the C library used. - -KLEE uses a port of [uClibc][2], which translates `assert()` to a four-parameter `__assert()` function, which conflicts with the three-parameter `__assert()` function in other implementations. `ldc2` uses LLVM’s (target) `Triple` type for choosing an `assert()` implementation configuration, but that doesn’t recognise uClibc. As a hacky workaround, I’m telling `ldc2` to compile for Musl, which “tricks” it into using an `__assert_fail()` implementation that KLEE happens to support as well. I’ve opened [an issue report][3]. - -Anyway, if we put all that code above into a file, we can compile it to KLEE-ready bitcode like this: - -``` -ldc2 -g -checkaction=C -mtriple=x86_64-linux-musl -output-bc -betterC -c first.d -``` - -`-g` is optional, but adds debug information that can be useful for later analysis. The KLEE developers recommend disabling compiler optimisations and letting KLEE do its own optimisations instead. 
- -Now to run KLEE: - -``` -$ klee first.bc -KLEE: output directory is "/tmp/klee-out-1" -KLEE: Using Z3 solver backend -warning: Linking two modules of different target triples: klee_int.bc' is 'x86_64-pc-linux-gnu' whereas 'first.bc' is 'x86_64--linux-musl' - -KLEE: ERROR: first.d:4: ASSERTION FAIL: x * x != 17 -KLEE: NOTE: now ignoring this error at this location - -KLEE: done: total instructions = 35 -KLEE: done: completed paths = 2 -KLEE: done: generated tests = 2 -``` - -Straight away, KLEE has found two execution paths through the program: a happy path, and a path that fails the assertion. Let’s see the results: - -``` -$ ls klee-last/ -assembly.ll -info -messages.txt -run.istats -run.stats -run.stats-journal -test000001.assert.err -test000001.kquery -test000001.ktest -test000002.ktest -warnings.txt -``` - -Here’s the example that triggers the happy path: - -``` -$ ktest-tool klee-last/test000002.ktest -ktest file : 'klee-last/test000002.ktest' -args : ['first.bc'] -num objects: 1 -object 0: name: 'x' -object 0: size: 4 -object 0: data: b'\x00\x00\x00\x00' -object 0: hex : 0x00000000 -object 0: int : 0 -object 0: uint: 0 -object 0: text: .... -``` - -Here’s the example that causes an assertion error: - -``` -$ cat klee-last/test000001.assert.err -Error: ASSERTION FAIL: x * x != 17 -File: first.d -Line: 4 -assembly.ll line: 32 -Stack: - #000000032 in _D5first3fooFiZi () at first.d:4 - #100000055 in main (=1, =94262044506880) at first.d:16 -$ ktest-tool klee-last/test000001.ktest -ktest file : 'klee-last/test000001.ktest' -args : ['first.bc'] -num objects: 1 -object 0: name: 'x' -object 0: size: 4 -object 0: data: b'\xe9&\xd33' -object 0: hex : 0xe926d333 -object 0: int : 869476073 -object 0: uint: 869476073 -object 0: text: .&.3 -``` - -So, KLEE has deduced that when `x` is 869476073, `x * x` does a 32b overflow to 17 and breaks the code. 
- -It’s overkill for this simple example, but `run.istats` can be opened with [KCachegrind][4] to view things like call graphs and source code coverage. (Unfortunately, coverage stats can be misleading because correct code won’t ever hit boundary check code inserted by the compiler.) - -### MurmurHash preimage - -Here’s a slightly more useful example. D currently uses 32b MurmurHash3 as its standard non-cryptographic hash function. What if we want to find strings that hash to a given special value? In general, we can solve problems like this by asserting that something doesn’t exist (i.e., a string that hashes to a given value) and then challenging the theorem prover to prove us wrong with a counterexample. - -Unfortunately, we can’t just use `hashOf()` directly without the runtime, but we can copy [the hash code from the runtime source][5] into its own module, and then import it into a test rig like this: - -``` -import dhash; - -extern(C): - -void klee_make_symbolic(void* addr, size_t nbytes, const(char*) name); -int klee_assume(ulong condition); - -int main() -{ - // Create a buffer for 8-letter strings and let KLEE manage it symbolically - char[8] s; - klee_make_symbolic(s.ptr, s.sizeof, "s"); - - // Constrain the string to be letters from a to z for convenience - foreach (j; 0..s.length) - { - klee_assume(s[j] > 'a' && s[j] <= 'z'); - } - - assert (dHash(cast(ubyte[])s) != 0xdeadbeef); - return 0; -} -``` - -Here’s how to compile and run it. Because we’re not checking correctness, we can use `-boundscheck=off` for a slight performance boost. It’s also worth enabling KLEE’s optimiser. 
- -``` -$ ldc2 -g -boundscheck=off -checkaction=C -mtriple=x86_64-linux-musl -output-bc -betterC -c dhash.d dhash_klee.d -$ llvm-link -o dhash_test.bc dhash.bc dhash_klee.bc -$ klee -optimize dhash_test.bc -``` - -It takes just over 4s: - -``` -$ klee-stats klee-last/ -------------------------------------------------------------------------- -| Path | Instrs| Time(s)| ICov(%)| BCov(%)| ICount| TSolver(%)| -------------------------------------------------------------------------- -|klee-last/| 168| 4.37| 87.50| 50.00| 160| 99.95| -------------------------------------------------------------------------- -``` - -And it actually works: - -``` -$ ktest-tool klee-last/test000001.ktest -ktest file : 'klee-last/test000001.ktest' -args : ['dhash_test.bc'] -num objects: 1 -object 0: name: 's' -object 0: size: 8 -object 0: data: b'psgmdxvq' -object 0: hex : 0x7073676d64787671 -object 0: int : 8175854546265273200 -object 0: uint: 8175854546265273200 -object 0: text: psgmdxvq -$ rdmd --eval 'writef("%x\n", hashOf("psgmdxvq"));' -deadbeef -``` - -For comparison, here’s a simple brute force version in plain D: - -``` -import std.stdio; - -void main() -{ - char[8] buffer; - - bool find(size_t idx) - { - if (idx == buffer.length) - { - auto hash = hashOf(buffer[]); - if (hash == 0xdeadbeef) - { - writeln(buffer[]); - return true; - } - return false; - } - - foreach (char c; 'a'..'z') - { - buffer[idx] = c; - auto is_found = find(idx + 1); - if (is_found) return true; - } - - return false; - } - - find(0); -} -``` - -This takes ~17s: - -``` -$ ldc2 -O3 -boundscheck=off hash_brute.d -$ time ./hash_brute -aexkaydh - -real 0m17.398s -user 0m17.397s -sys 0m0.001s -$ rdmd --eval 'writef("%x\n", hashOf("aexkaydh"));' -deadbeef -``` - -The constraint solver implementation is simpler to write, but is still faster because it can automatically do smarter things than calculating hashes of strings from scratch every iteration. 
- -### Binary search - -Now for an example of testing and debugging. Here’s an implementation of [binary search][6]: - -``` -bool bsearch(const(int)[] haystack, int needle) -{ - while (haystack.length) - { - auto mid_idx = haystack.length / 2; - if (haystack[mid_idx] == needle) return true; - if (haystack[mid_idx] < needle) - { - haystack = haystack[mid_idx..$]; - } - else - { - haystack = haystack[0..mid_idx]; - } - } - return false; -} -``` - -Does it work? Here’s a test rig: - -``` -extern(C): - -void klee_make_symbolic(void* addr, size_t nbytes, const(char*) name); -int klee_range(int begin, int end, const(char*) name); -int klee_assume(ulong condition); - -int main() -{ - // Making an array arr and an x to find in it. - // This time we'll also parameterise the array length. - // We have to apply klee_make_symbolic() to the whole buffer because of limitations in KLEE. - int[8] arr_buffer; - klee_make_symbolic(arr_buffer.ptr, arr_buffer.sizeof, "a"); - int len = klee_range(0, arr_buffer.length+1, "len"); - auto arr = arr_buffer[0..len]; - // Keeping the values in [0, 32) makes the output easier to read. - // (The binary-friendly limit 32 is slightly more efficient than 30.) - int x = klee_range(0, 32, "x"); - foreach (j; 0..arr.length) - { - klee_assume(arr[j] >= 0); - klee_assume(arr[j] < 32); - } - - // Make the array sorted. - // We don't have to actually sort the array. - // We can just tell KLEE to constrain it to be sorted. - foreach (j; 1..arr.length) - { - klee_assume(arr[j - 1] <= arr[j]); - } - - // Test against simple linear search - bool has_x = false; - foreach (a; arr[]) - { - has_x |= a == x; - } - - assert (bsearch(arr, x) == has_x); - - return 0; -} -``` - -When run in KLEE, it keeps running for a long, long time. How do we know it’s doing anything? 
By default KLEE writes stats every 1s, so we can watch the live progress in another terminal: - -``` -$ watch klee-stats --print-more klee-last/ -Every 2.0s: klee-stats --print-more klee-last/ - ---------------------------------------------------------------------------------------------------------------------- -| Path | Instrs| Time(s)| ICov(%)| BCov(%)| ICount| TSolver(%)| States| maxStates| Mem(MB)| maxMem(MB)| ---------------------------------------------------------------------------------------------------------------------- -|klee-last/| 5834| 637.27| 79.07| 68.75| 172| 100.00| 22| 22| 24.51| 24| ---------------------------------------------------------------------------------------------------------------------- -``` - -`bsearch()` should be pretty fast, so we should see KLEE discovering new states rapidly. But instead it seems to be stuck. [At least one fork of KLEE has heuristics for detecting infinite loops][7], but plain KLEE doesn’t. There are timeout and batching options for making KLEE work better with code that might have infinite loops, but let’s just take another look at the code. In particular, the loop condition: - -``` -while (haystack.length) -{ - // ... -} -``` - -Binary search is supposed to reduce the search space by about half each iteration. `haystack.length` is an unsigned integer, so the loop must terminate as long as it goes down every iteration. Let’s rewrite the code slightly so we can verify if that’s true: - -``` -bool bsearch(const(int)[] haystack, int needle) -{ - while (haystack.length) - { - auto mid_idx = haystack.length / 2; - if (haystack[mid_idx] == needle) return true; - const(int)[] next_haystack; - if (haystack[mid_idx] < needle) - { - next_haystack = haystack[mid_idx..$]; - } - else - { - next_haystack = haystack[0..mid_idx]; - } - // This lets us verify that the search terminates - assert (next_haystack.length < haystack.length); - haystack = next_haystack; - } - return false; -} -``` - -Now KLEE can find the bug! 
- -``` -$ klee -optimize bsearch.bc -KLEE: output directory is "/tmp/klee-out-2" -KLEE: Using Z3 solver backend -warning: Linking two modules of different target triples: klee_range.bc' is 'x86_64-pc-linux-gnu' whereas 'bsearch.bc' is 'x86_64--linux-musl' - -warning: Linking two modules of different target triples: memset.bc' is 'x86_64-pc-linux-gnu' whereas 'bsearch.bc' is 'x86_64--linux-musl' - -KLEE: ERROR: bsearch.d:18: ASSERTION FAIL: next_haystack.length < haystack.length -KLEE: NOTE: now ignoring this error at this location - -KLEE: done: total instructions = 2281 -KLEE: done: completed paths = 42 -KLEE: done: generated tests = 31 -``` - -Using the failing example as input and stepping through the code, it’s easy to find the problem: - -``` -/// ... -if (haystack[mid_idx] < needle) -{ - // If mid_idx == 0, next_haystack is the same as haystack - // Nothing changes, so the loop keeps repeating - next_haystack = haystack[mid_idx..$]; -} -/// ... -``` - -Thinking about it, the `if` statement already excludes `haystack[mid_idx]` from being `needle`, so there’s no reason to include it in `next_haystack`. Here’s the fix: - -``` -// The +1 matters -next_haystack = haystack[mid_idx+1..$]; -``` - -But is the code correct now? Terminating isn’t enough; it needs to get the right answer, of course. - -``` -$ klee -optimize bsearch.bc -KLEE: output directory is "/tmp/kee-out-3" -KLEE: Using Z3 solver backend -warning: Linking two modules of different target triples: klee_range.bc' is 'x86_64-pc-linux-gnu' whereas 'bsearch.bc' is 'x86_64--linux-musl' - -warning: Linking two modules of different target triples: memset.bc' is 'x86_64-pc-linux-gnu' whereas 'bsearch.bc' is 'x86_64--linux-musl' - -KLEE: done: total instructions = 3152 -KLEE: done: completed paths = 81 -KLEE: done: generated tests = 81 -``` - -In just under 7s, KLEE has verified every possible execution path reachable with arrays of length from 0 to 8. 
Note, that’s not just coverage of individual code lines, but coverage of full pathways through the code. KLEE hasn’t ruled out stack corruption or integer overflows with large arrays, but I’m pretty confident the code is correct now. - -KLEE has generated test cases that trigger each path, which we can keep and use as a faster-than-7s regression test suite. Trouble is, the output from KLEE loses all type information and isn’t in a convenient format: - -``` -$ ktest-tool klee-last/test000042.ktest -ktest file : 'klee-last/test000042.ktest' -args : ['bsearch.bc'] -num objects: 3 -object 0: name: 'a' -object 0: size: 32 -object 0: data: b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' -object 0: hex : 0x0000000000000000000000000000000001000000100000000000000000000000 -object 0: text: ................................ -object 1: name: 'x' -object 1: size: 4 -object 1: data: b'\x01\x00\x00\x00' -object 1: hex : 0x01000000 -object 1: int : 1 -object 1: uint: 1 -object 1: text: .... -object 2: name: 'len' -object 2: size: 4 -object 2: data: b'\x06\x00\x00\x00' -object 2: hex : 0x06000000 -object 2: int : 6 -object 2: uint: 6 -object 2: text: .... -``` - -But we can write our own pretty-printing code and put it at the end of the test rig: - -``` -char[256] buffer; -char* output = buffer.ptr; -output += sprintf(output, "TestCase(["); -foreach (a; arr[]) -{ - output += sprintf(output, "%d, ", klee_get_value_i32(a)); -} -sprintf(output, "], %d, %s),\n", klee_get_value_i32(x), klee_get_value_i32(has_x) ? "true".ptr : "false".ptr); -fputs(buffer.ptr, stdout); -``` - -Ugh, that would be just one format call with D’s `%(` array formatting specs. The output needs to be buffered up and printed all at once to stop output from different parallel executions getting mixed up. 
`klee_get_value_i32()` is needed to get a concrete example from a symbolic variable (remember that a symbolic variable is just a bundle of constraints). - -``` -$ klee -optimize bsearch.bc > tests.d -... -$ # Sure enough, 81 test cases -$ wc -l tests.d -81 tests.d -$ # Look at the first 10 -$ head tests.d -TestCase([], 0, false), -TestCase([0, ], 0, true), -TestCase([16, ], 1, false), -TestCase([0, ], 1, false), -TestCase([0, 0, ], 0, true), -TestCase([0, 0, ], 1, false), -TestCase([1, 16, ], 1, true), -TestCase([0, 0, 0, ], 0, true), -TestCase([16, 16, ], 1, false), -TestCase([1, 16, ], 3, false), -``` - -Nice! An autogenerated regression test suite that’s better than anything I would write by hand. This is my favourite use case for KLEE. - -### Change counting - -One last example: - -In Australia, coins come in 5c, 10c, 20c, 50c, $1 (100c) and $2 (200c) denominations. So you can make 70c using 14 5c coins, or using a 50c coin and a 20c coin. Obviously, fewer coins is usually more convenient. There’s a simple [greedy algorithm][8] to make a small pile of coins that adds up to a given value: just keep adding the biggest coin you can to the pile until you’ve reached the target value. It turns out this trick is optimal — at least for Australian coins. Is it always optimal for any set of coin denominations? - -The hard thing about testing optimality is that you don’t know what the correct optimal values are without a known-good algorithm. Without a constraints solver, I’d compare the output of the greedy algorithm with some obviously correct brute force optimiser, run over all possible cases within some small-enough limit. But with KLEE, we can use a different approach: comparing the greedy solution to a non-deterministic solution. - -The greedy algorithm takes the list of coin denominations and the target value as input, so (like in the previous examples) we make those symbolic. 
Then we make another symbolic array that represents an assignment of coin counts to each coin denomination. We don’t specify anything about how to generate this assignment, but we constrain it to be a valid assignment that adds up to the target value. It’s [non-deterministic][9]. Then we just assert that the total number of coins in the non-deterministic assignment is at least the number of coins needed by the greedy algorithm, which would be true if the greedy algorithm were universally optimal. Finally we ask KLEE to prove the program correct or incorrect. - -Here’s the code: - -``` -// Greedily break value into coins of values in denominations -// denominations must be in strictly decreasing order -int greedy(const(int[]) denominations, int value, int[] coins_used_output) -{ - int num_coins = 0; - foreach (j; 0..denominations.length) - { - int num_to_use = value / denominations[j]; - coins_used_output[j] = num_to_use; - num_coins += num_to_use; - value = value % denominations[j]; - } - return num_coins; -} - -extern(C): - -void klee_make_symbolic(void* addr, size_t nbytes, const(char*) name); -int klee_int(const(char*) name); -int klee_assume(ulong condition); -int klee_get_value_i32(int expr); - -int main(int argc, char** argv) -{ - enum kNumDenominations = 6; - int[kNumDenominations] denominations, coins_used; - klee_make_symbolic(denominations.ptr, denominations.sizeof, "denominations"); - - // We're testing the algorithm itself, not implementation issues like integer overflow - // Keep values small - foreach (d; denominations) - { - klee_assume(d >= 1); - klee_assume(d <= 1024); - } - // Make the smallest denomination 1 so that all values can be represented - // This is just for simplicity so we can focus on optimality - klee_assume(denominations[$-1] == 1); - - // Greedy algorithm expects values in descending order - foreach (j; 1..denominations.length) - { - klee_assume(denominations[j-1] > denominations[j]); - } - - // What we're going to represent - auto 
value = klee_int("value"); - - auto num_coins = greedy(denominations[], value, coins_used[]); - - // The non-deterministic assignment - int[kNumDenominations] nd_coins_used; - klee_make_symbolic(nd_coins_used.ptr, nd_coins_used.sizeof, "nd_coins_used"); - - int nd_num_coins = 0, nd_value = 0; - foreach (j; 0..kNumDenominations) - { - klee_assume(nd_coins_used[j] >= 0); - klee_assume(nd_coins_used[j] <= 1024); - nd_num_coins += nd_coins_used[j]; - nd_value += nd_coins_used[j] * denominations[j]; - } - - // Making the assignment valid is 100% up to KLEE - klee_assume(nd_value == value); - - // If we find a counterexample, dump it and fail - if (nd_num_coins < num_coins) - { - import core.stdc.stdio; - - puts("Counterexample found."); - - puts("Denominations:"); - foreach (ref d; denominations) - { - printf("%d ", klee_get_value_i32(d)); - } - printf("\nValue: %d\n", klee_get_value_i32(value)); - - void printAssignment(const ref int[kNumDenominations] coins) - { - foreach (j; 0..kNumDenominations) - { - printf("%d * %dc\n", klee_get_value_i32(coins[j]), klee_get_value_i32(denominations[j])); - } - } - - printf("Greedy \"optimum\": %d\n", klee_get_value_i32(num_coins)); - printAssignment(coins_used); - - printf("Better assignment for %d total coins:\n", klee_get_value_i32(nd_num_coins)); - printAssignment(nd_coins_used); - assert (false); - } - - return 0; -} -``` - -And here’s the counterexample it found after 14s: - -``` -Counterexample found. -Denominations: -129 12 10 3 2 1 -Value: 80 -Greedy "optimum": 9 -0 * 129c -6 * 12c -0 * 10c -2 * 3c -1 * 2c -0 * 1c -Better assignment for 8 total coins: -0 * 129c -0 * 12c -8 * 10c -0 * 3c -0 * 2c -0 * 1c -``` - -Note that this isn’t proven to be the new optimum; it’s just a witness that the greedy algorithm isn’t always optimal. There’s a well-known [dynamic programming][10] [solution][11] that always works. - -### What’s next? - -As I said, I’m interesting in getting this to work with full D code. 
I’m also interested in using [one of the floating point forks of KLEE][12] on some D because floating point is much harder to test thoroughly than integer and string code. - --------------------------------------------------------------------------------- - -via: https://theartofmachinery.com/2019/05/28/d_and_klee.html - -作者:[Simon Arneaud][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://theartofmachinery.com -[b]: https://github.com/lujun9972 -[1]: https://klee.github.io/ -[2]: https://www.uclibc.org/ -[3]: https://github.com/ldc-developers/ldc/issues/3078 -[4]: https://kcachegrind.github.io/html/Home.html -[5]: https://github.com/dlang/druntime/blob/4ad638f61a9b4a98d8ed6eb9f9429c0ef6afc8e3/src/core/internal/hash.d#L670 -[6]: https://www.calhoun.io/lets-learn-algorithms-an-intro-to-binary-search/ -[7]: https://github.com/COMSYS/SymbolicLivenessAnalysis -[8]: https://en.wikipedia.org/wiki/Greedy_algorithm -[9]: http://people.clarkson.edu/~alexis/PCMI/Notes/lectureB03.pdf -[10]: https://www.algorithmist.com/index.php/Dynamic_Programming -[11]: https://www.topcoder.com/community/competitive-programming/tutorials/dynamic-programming-from-novice-to-advanced/ -[12]: https://github.com/srg-imperial/klee-float diff --git a/sources/talk/20190528 Managed WAN and the cloud-native SD-WAN.md b/sources/talk/20190528 Managed WAN and the cloud-native SD-WAN.md deleted file mode 100644 index 026b5d8e81..0000000000 --- a/sources/talk/20190528 Managed WAN and the cloud-native SD-WAN.md +++ /dev/null @@ -1,121 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Managed WAN and the cloud-native SD-WAN) -[#]: via: (https://www.networkworld.com/article/3398476/managed-wan-and-the-cloud-native-sd-wan.html) -[#]: author: (Matt Conran 
https://www.networkworld.com/author/Matt-Conran/) - -Managed WAN and the cloud-native SD-WAN -====== -The motivation for WAN transformation is clear, today organizations require: improved internet access and last mile connectivity, additional bandwidth and a reduction in the WAN costs. -![Gerd Altmann \(CC0\)][1] - -In recent years, a significant number of organizations have transformed their wide area network (WAN). Many of these organizations have some kind of cloud-presence across on-premise data centers and remote site locations. - -The vast majority of organizations that I have consulted with have over 10 locations. And it is common to have headquarters in both the US and Europe, along with remote site locations spanning North America, Europe, and Asia. - -A WAN transformation project requires this diversity to be taken into consideration when choosing the best SD-WAN vendor to satisfy both; networking and security requirements. Fundamentally, SD-WAN is not just about physical connectivity, there are many more related aspects. - -**[ Related:[MPLS explained – What you need to know about multi-protocol label switching][2]** - -### Motivations for transforming the WAN - -The motivation for WAN transformation is clear: Today organizations prefer improved internet access and last mile connectivity, additional bandwidth along with a reduction in the WAN costs. Replacing Multiprotocol Label Switching (MPLS) with SD-WAN has of course been the main driver for the SD-WAN evolution, but it is only a single piece of the jigsaw puzzle. - -Many SD-WAN vendors are quickly brought to their knees when they try to address security and gain direct internet access from remote site locations. The problem is how to ensure optimized cloud access that is secure, has improved visibility and predictable performance without the high costs associated with MPLS? SD-WAN is not just about connecting locations. 
Primarily, it needs to combine many other important network and security elements into one seamless worldwide experience. - -According to a recent report from [Cato Networks][3] into enterprise IT managers, a staggering 85% will confront use cases in 2019 that are poorly addressed or outright ignored by SD-WAN. Examples includes providing secure, Internet access from any location (50%) and improving visibility into and control over mobile access to cloud applications, such as Office 365 (46%). - -### Issues with traditional SD-WAN vendors - -First and foremost, SD-WAN unable to address the security challenges that arise during the WAN transformation. Such security challenges include protection against malware, ransomware and implementing the necessary security policies. Besides, there is a lack of visibility that is required to police the mobile users and remote site locations accessing resources in the public cloud. - -To combat this, organizations have to purchase additional equipment. There has always been and will always be a high cost associated with buying such security appliances. Furthermore, the additional tools that are needed to protect the remote site locations increase the network complexity and reduce visibility. Let’s us not forget that the variety of physical appliances require talented engineers for design, deployment and maintenance. - -There will often be a single network-cowboy. This means the network and security configuration along with the design essentials are stored in the mind of the engineer, not in a central database from where the knowledge can be accessed if the engineer leaves his or her employment. - -The physical appliance approach to SD-WAN makes it hard, if not impossible, to accommodate for the future. If the current SD-WAN vendors continue to focus just on connecting the devices with the physical appliances, they will have limited ability to accommodate for example, with the future of network IoT devices. 
With these factors in mind what are the available options to overcome the SD-WAN shortcomings? - -One can opt for a do it yourself (DIY) solution, or a managed service, which can fall into the category of telcos, with the improvements of either co-managed or self-managed service categories. - -### Option 1: The DIY solution - -Firstly DIY, from the experience of trying to stitch together a global network, this is not only costly but also complex and is a very constrained approach to the network transformation. We started with physical appliances decades ago and it was sufficient to an extent. The reason it worked was that it suited the requirements of the time, but our environment has changed since then. Hence, we need to accommodate these changes with the current requirements. - -Even back in those days, we always had a breachable perimeter. The perimeter-approach to networking and security never really worked and it was just a matter of time before the bad actor would penetrate the guarded walls. - -Securing a global network involves more than just firewalling the devices. A solid security perimeter requires URL filtering, anti-malware and IPS to secure the internet traffic. If you try to deploy all these functions in a single device, such as, unified threat management (UTM), you will hit scaling problems. As a result, you will be left with appliance sprawl. - -Back in my early days as an engineer, I recall stitching together a global network with a mixture of security and network appliances from a variety of vendors. It was me and just two others who used to get the job done on time and for a production network, our uptime levels were superior to most. - -However, it involved too many late nights, daily flights to our PoPs and of course the major changes required a forklift. A lot of work had to be done at that time, which made me want to push some or most of the work to a 3rd party. 
- -### Option 2: The managed service solution - -Today, there is a growing need for the managed service approach to SD-WAN. Notably, it simplifies the network design, deployment and maintenance activities while offloading the complexity, in line with what most CIOs are talking about today. - -Managed service provides a number of benefits, such as the elimination of backhauling to centralized cloud connectors or VPN concentrators. Evidently, backhauling is never favored for a network architect. More than often it will result in increased latency, congested links, internet chokepoints, and last-mile outages. - -Managed service can also authenticate mobile users at the local communication hub and not at a centralized point which would increase the latency. So what options are available when considering a managed service? - -### Telcos: An average service level - -Let’s be honest, telcos have a mixed track record and enterprises rely on them with caution. Essentially, you are building a network with 3rd party appliances and services that put the technical expertise outside of the organization. - -Secondly, the telco must orchestrate, monitor and manage numerous technical domains which are likely to introduce further complexity. As a result, troubleshooting requires close coordination with the suppliers which will have an impact on the customer experience. - -### Time equals money - -To resolve a query could easily take two or three attempts. It’s rare that you will get to the right person straight away. This eventually increases the time to resolve problems. Even for a minor feature change, you have to open tickets. Hence, with telcos, it increases the time required to solve a problem. - -In addition, it takes time to make major network changes such as opening new locations, which could take up to 45 days. 
In the same report mentioned above, 71% of the respondents are frustrated with the telco customer-service-time to resolve the problems, 73% indicated that deploying new locations requires at least 15 days and 47% claimed that “high bandwidth costs” is the biggest frustration while working with telcos. - -When it comes to lead times for projects, an engineer does not care. Does a project manager care if you have an optimum network design? No, many don’t, most just care about the timeframes. During my career, now spanning 18 years, I have never seen comments from any of my contacts saying “you must adhere to your project manager’s timelines”. - -However, out of the experience, the project managers have their ways and lead times do become a big part of your daily job. So as an engineer, 45-day lead time will certainly hit your brand hard, especially if you are an external consultant. - -There is also a problem with bandwidth costs. Telcos need to charge due to their complexity. There is always going to be a series of problems when working with them. Let’s face it, they offer an average service level. - -### Co-management and self-service management - -What is needed is a service that equips with the visibility and control of DIY to managed services. This, ultimately, opens the door to co-management and self-service management. - -Co-management allows both the telco and enterprise to make changes to the WAN. Then we have the self-service management of WAN that allows the enterprises to have sole access over the aspect of their network. - -However, these are just sticking plasters covering up the flaws. We need a managed service that not only connects locations but also synthesizes the site connectivity, along with security, mobile access, and cloud access. - -### Introducing the cloud-native approach to SD-WAN - -There should be a new style of managed services that combines the best of both worlds. 
It should offer the uptime, predictability and reach of the best telcos along with the cost structure and versatility of cloud providers. All such requirements can be met by what is known as the cloud-native carrier. - -Therefore, we should be looking for a platform that can connect and secure all the users and resources at scale, no matter where they are positioned. Eventually, such a platform will limit the costs and increase the velocity and agility. - -This is what a cloud-native carrier can offer you. You could say it’s a new kind of managed service, which is what enterprises are now looking for. A cloud-native carrier service brings the best of cloud services to the world of networking. This new style of managed service brings to SD-WAN the global reach, self-service, and agility of the cloud with the ability to easily migrate from MPLS. - -In summary, a cloud-native carrier service will improve global connectivity to on-premises and cloud applications, enable secure branch to internet access, and both securely and optimally integrate cloud datacenters. - -**This article is published as part of the IDG Contributor Network.[Want to Join?][4]** - -Join the Network World communities on [Facebook][5] and [LinkedIn][6] to comment on topics that are top of mind. 
- --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3398476/managed-wan-and-the-cloud-native-sd-wan.html - -作者:[Matt Conran][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Matt-Conran/ -[b]: https://github.com/lujun9972 -[1]: https://images.techhive.com/images/article/2017/03/network-wan-100713693-large.jpg -[2]: https://www.networkworld.com/article/2297171/sd-wan/network-security-mpls-explained.html -[3]: https://www.catonetworks.com/news/digital-transformation-survey -[4]: /contributor-network/signup.html -[5]: https://www.facebook.com/NetworkWorld/ -[6]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190528 Moving to the Cloud- SD-WAN Matters.md b/sources/talk/20190528 Moving to the Cloud- SD-WAN Matters.md deleted file mode 100644 index 8f6f46b6f2..0000000000 --- a/sources/talk/20190528 Moving to the Cloud- SD-WAN Matters.md +++ /dev/null @@ -1,69 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Moving to the Cloud? SD-WAN Matters!) -[#]: via: (https://www.networkworld.com/article/3397921/moving-to-the-cloud-sd-wan-matters.html) -[#]: author: (Rami Rammaha https://www.networkworld.com/author/Rami-Rammaha/) - -Moving to the Cloud? SD-WAN Matters! -====== - -![istock][1] - -This is the first in a two-part blog series that will explore how enterprises can realize the full transformation promise of the cloud by shifting to a business first networking model powered by a business-driven [SD-WAN][2]. The focus for this installment will be on automating secure IPsec connectivity and intelligently steering traffic to cloud providers. 
- -Over the past several years we’ve seen a major shift in data center strategies where enterprise IT organizations are shifting applications and workloads to cloud, whether private or public. More and more, enterprises are leveraging software as-a-service (SaaS) applications and infrastructure as-a-service (IaaS) cloud services from leading providers like [Amazon AWS][3], [Google Cloud][4], [Microsoft Azure][5] and [Oracle Cloud Infrastructure][6]. This represents a dramatic shift in enterprise data traffic patterns as fewer and fewer applications are hosted within the walls of the traditional corporate data center. - -There are several drivers for the shift to IaaS cloud services and SaaS apps, but business agility tops the list for most enterprises. The traditional IT model for provisioning and deprovisioning applications is rigid and inflexible and is no longer able to keep pace with changing business needs. - -According to [LogicMonitor’s Cloud Vision 2020][7] study, more than 80 percent of enterprise workloads will run in the cloud by 2020 with more than 40 percent running on public cloud platforms. This major shift in the application consumption model is having a huge [impact on organizations and infrastructure][8]. A recent article entitled “[How Amazon Web Services is luring banks to the cloud][9],” published by CNBC, reported that some companies already have completely migrated all of their applications and IT workloads to public cloud infrastructures. An interesting fact is that while many enterprises must comply with stringent regulatory compliance mandates such as PCI-DSS or HIPAA, they still have made the move to the cloud. This tells us two things – the maturity of using public cloud services and the trust these organizations have in using them is at an all-time high. Again, it is all about speed and agility – without compromising performance, security and reliability. 
- -### **Is there a direct correlation between moving to the cloud and adopting SD-WAN?** - -As the cloud enables businesses to move faster, an SD-WAN architecture where top-down business intent is the driver is critical to ensuring success, especially when branch offices are geographically distributed across the globe. Traditional router-centric WAN architectures were never designed to support today’s cloud consumption model for applications in the most efficient way. With a conventional router-centric WAN approach, access to applications residing in the cloud means traversing unnecessary hops, resulting in wasted bandwidth, additional cost, added latency and potentially higher packet loss. In addition, under the existing, traditional WAN model where management tends to be rigid, complex network changes can be lengthy, whether setting up new branches or troubleshooting performance issues. This leads to inefficiencies and a costly operational model. Therefore, enterprises greatly benefit from taking a business-first WAN approach toward achieving greater agility in addition to realizing substantial CAPEX and OPEX savings. - -A business-driven SD-WAN platform is purpose-built to tackle the challenges inherent to the traditional router-centric model and more aptly support today’s cloud consumption model. This means application policies are defined based on business intent, connecting users securely and directly to applications where ever they reside without unnecessary extra hops or security compromises. For example, if the application is hosted in the cloud and is trusted, a business-driven SD-WAN can automatically connect users to it without backhauling traffic to a POP or HQ data center. Now, in general this traffic is usually going across an internet link which, on its own, may not be secure. 
However, the right SD-WAN platform will have a unified stateful firewall built-in for local internet breakout allowing only branch-initiated sessions to enter the branch and providing the ability to service chain traffic to a cloud-based security service if necessary, before forwarding it to its final destination. If the application is moved and becomes hosted by another provider or perhaps back to a company’s own data center, traffic must be intelligently redirected, wherever the application is being hosted. Without automation and embedded machine learning, dynamic and intelligent traffic steering is impossible. - -### **A closer look at how the Silver Peak EdgeConnect™ SD-WAN edge platform addresses these challenges: ** - -**Automate traffic steering and connectivity to cloud providers** - -An [EdgeConnect][10] virtual instance is easily spun up in any of the [leading cloud providers][11] through their respective marketplaces. For an SD-WAN to intelligently steer traffic to its destination, it requires insights into both HTTP and HTTPS traffic; it must be able to identify apps on the first packet received in order to steer traffic to the right destination in accordance with business intent. This is critical capability because once a TCP connection is NAT’d with a public IP address, it cannot be switched thus it can’t be re-routed once a connection is established. So, the ability of EdgeConnect to identify, classify and automatically steer traffic based on the first packet – and not the second or tenth packet – to the correct destination will assure application SLAs, minimize wasting expensive bandwidth and deliver the highest quality of experience. - -Another critical capability is automatic performance optimization. 
Irrespective of which link the traffic ends up traversing based on business intent and the unique requirements of the application, EdgeConnect automatically optimizes application performance without human intervention by correcting for out of order packets using Packet Order Correction (POC) or even under high latency conditions that can be related to distance or other issues. This is done using adaptive Forward Error Correction (FEC) and tunnel bonding where a virtual tunnel is created, resulting in a single logical overlay that traffic can be dynamically moved between the different paths as conditions change with each underlay WAN service. In this [lightboard video][12], Dinesh Fernando, a technical marketing engineer at Silver Peak, explains how EdgeConnect automates tunnel creation between sites and cloud providers, how it simplifies data transfers between multi-clouds, and how it improves application performance. - -If your business is global and increasingly dependent on the cloud, the business-driven EdgeConnect SD-WAN edge platform enables seamless multi-cloud connectivity, turning the network into a business accelerant. EdgeConnect delivers: - - 1. A consistent deployment from the branch to the cloud, extending the reach of the SD-WAN into virtual private cloud environments - 2. Multi-cloud flexibility, making it easier to initiate and distribute resources across multiple cloud providers - 3. 
Investment protection by confidently migrating on premise IT resources to any combination of the leading public cloud platforms, knowing their cloud-hosted instances will be fully supported by EdgeConnect - - - --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3397921/moving-to-the-cloud-sd-wan-matters.html - -作者:[Rami Rammaha][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Rami-Rammaha/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2019/05/istock-899678028-100797709-large.jpg -[2]: https://www.silver-peak.com/sd-wan/sd-wan-explained -[3]: https://www.silver-peak.com/company/tech-partners/cloud/aws -[4]: https://www.silver-peak.com/company/tech-partners/cloud/google-cloud -[5]: https://www.silver-peak.com/company/tech-partners/cloud/microsoft-azure -[6]: https://www.silver-peak.com/company/tech-partners/cloud/oracle-cloud -[7]: https://www.logicmonitor.com/resource/the-future-of-the-cloud-a-cloud-influencers-survey/?utm_medium=pr&utm_source=businesswire&utm_campaign=cloudsurvey -[8]: http://www.networkworld.com/article/3152024/lan-wan/in-the-age-of-digital-transformation-why-sd-wan-plays-a-key-role-in-the-transition.html -[9]: http://www.cnbc.com/2016/11/30/how-amazon-web-services-is-luring-banks-to-the-cloud.html?__source=yahoo%257cfinance%257cheadline%257cheadline%257cstory&par=yahoo&doc=104135637 -[10]: https://www.silver-peak.com/products/unity-edge-connect -[11]: https://www.silver-peak.com/company/tech-partners?strategic_partner_type=69 -[12]: https://www.silver-peak.com/resource-center/automate-connectivity-to-cloud-networking-with-sd-wan diff --git a/sources/talk/20190529 Satellite-based internet possible by year-end, says SpaceX.md b/sources/talk/20190529 
Satellite-based internet possible by year-end, says SpaceX.md deleted file mode 100644 index 383fac66ca..0000000000 --- a/sources/talk/20190529 Satellite-based internet possible by year-end, says SpaceX.md +++ /dev/null @@ -1,63 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Satellite-based internet possible by year-end, says SpaceX) -[#]: via: (https://www.networkworld.com/article/3398940/space-internet-maybe-end-of-year-says-spacex.html) -[#]: author: (Patrick Nelson https://www.networkworld.com/author/Patrick-Nelson/) - -Satellite-based internet possible by year-end, says SpaceX -====== -Amazon, Tesla-associated SpaceX and OneWeb are emerging as just some of the potential suppliers of a new kind of data-friendly satellite internet service that could bring broadband IoT connectivity to most places on Earth. -![Getty Images][1] - -With SpaceX’s successful launch of an initial array of broadband-internet-carrying satellites last week, and Amazon’s surprising posting of numerous satellite engineering-related job openings on its [job board][2] this month, one might well be asking if the next-generation internet space race is finally getting going. (I first wrote about [OneWeb’s satellite internet plans][3] it was concocting with Airbus four years ago.) - -This new batch of satellite-driven internet systems, if they work and are eventually switched on, could provide broadband to most places, including previously internet-barren locations, such as rural areas. That would be good for high-bandwidth, low-latency remote-internet of things (IoT) and increasingly important edge-server connections for verticals like oil and gas and maritime. [Data could even end up getting stored in compliance-friendly outer space, too][4]. Leaky ground-based connections, also, perhaps a thing of the past. - -Of the principal new internet suppliers, SpaceX has gotten farthest along. 
That’s in part because it has commercial impetus. It needed to create payload for its numerous rocket projects. The Tesla electric-car-associated company (the two firms share materials science) has not only launched its first tranche of 60 satellites for its own internet constellation, called Starlink, but also successfully launched numerous batches (making up the full constellation of 75 satellites) for Iridium’s replacement, an upgraded constellation called Iridium NEXT. - -[The time of 5G is almost here][5] - -Potential competitor OneWeb launched its first six Airbus-built satellites in February. [It has plans for 900 more][6]. SpaceX has been approved for 4,365 more by the FCC, and Project Kuiper, as Amazon’s space internet project is known, wants to place 3,236 satellites in orbit, according to International Telecommunication Union filings [discovered by _GeekWire_][7] earlier this year. [Startup LeoSat, which I wrote about last year, aims to build an internet backbone constellation][8]. Facebook, too, is exploring [space-delivered internet][9]. - -### Why the move to space? - -Laser technical progress, where data is sent in open, free space, rather than via a restrictive, land-based cable or via traditional radio paths, is partly behind this space-internet rush. “Bits travel faster in free space than in glass-fiber cable,” LeoSat explained last year. Additionally, improving microprocessor tech is also part of the mix. - -One important difference from existing older-generation satellite constellations is that this new generation of internet satellites will be located in low Earth orbit (LEO). Initial Starlink satellites will be placed at about 350 miles above Earth, with later launches deployed at 710 miles. - -There’s an advantage to that. Traditional satellites in geostationary orbit, or GSO, have been deployed about 22,000 miles up. 
That extra distance versus LEO introduces latency and is one reason earlier generations of Internet satellites are plagued by slow round-trip times. Latency didn’t matter when GSO was introduced in 1964, and commercial satellites, traditionally, have been pitched as one-way video links, such as are used by sporting events for broadcast, and not for data. - -And when will we get to experience these new ISPs? “Starlink is targeted to offer service in the Northern U.S. and Canadian latitudes after six launches,” [SpaceX says on its website][10]. Each launch would deliver about 60 satellites. “SpaceX is targeting two to six launches by the end of this year.” - -Global penetration of the “populated world” could be obtained after 24 launches, it thinks. - -Join the Network World communities on [Facebook][11] and [LinkedIn][12] to comment on topics that are top of mind. - --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3398940/space-internet-maybe-end-of-year-says-spacex.html - -作者:[Patrick Nelson][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Patrick-Nelson/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2018/10/network_iot_world-map_us_globe_nodes_global-100777483-large.jpg -[2]: https://www.amazon.jobs/en/teams/projectkuiper -[3]: https://www.itworld.com/article/2938652/space-based-internet-starts-to-get-serious.html -[4]: https://www.networkworld.com/article/3200242/data-should-be-stored-data-in-space-firm-says.html -[5]: https://www.networkworld.com/article/3354477/mobile-world-congress-the-time-of-5g-is-almost-here.html -[6]: https://www.airbus.com/space/telecommunications-satellites/oneweb-satellites-connection-for-people-all-over-the-globe.html -[7]: 
https://www.geekwire.com/2019/amazon-lists-scores-jobs-bellevue-project-kuiper-broadband-satellite-operation/ -[8]: https://www.networkworld.com/article/3328645/space-data-backbone-gets-us-approval.html -[9]: https://www.networkworld.com/article/3338081/light-based-computers-to-be-5000-times-faster.html -[10]: https://www.starlink.com/ -[11]: https://www.facebook.com/NetworkWorld/ -[12]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190529 Survey finds SD-WANs are hot, but satisfaction with telcos is not.md b/sources/talk/20190529 Survey finds SD-WANs are hot, but satisfaction with telcos is not.md deleted file mode 100644 index 9b65a6c8dd..0000000000 --- a/sources/talk/20190529 Survey finds SD-WANs are hot, but satisfaction with telcos is not.md +++ /dev/null @@ -1,69 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Survey finds SD-WANs are hot, but satisfaction with telcos is not) -[#]: via: (https://www.networkworld.com/article/3398478/survey-finds-sd-wans-are-hot-but-satisfaction-with-telcos-is-not.html) -[#]: author: (Zeus Kerravala https://www.networkworld.com/author/Zeus-Kerravala/) - -Survey finds SD-WANs are hot, but satisfaction with telcos is not -====== -A recent survey of over 400 IT executives by Cato Networks found that legacy telcos might be on the outside looking in for SD-WANs. -![istock][1] - -This week SD-WAN vendor Cato Networks announced the results of its [Telcos and the Future of the WAN in 2019 survey][2]. The study was a mix of companies of all sizes, with 42% being enterprise-class (over 2,500 employees). More than 70% had a network with more than 10 locations, and almost a quarter (24%) had over 100 sites. All of the respondents have a cloud presence, and almost 80% have at least two data centers. The survey had good geographic diversity, with 57% of respondents coming from the U.S. and 24% from Europe. 
- -Highlights of the survey include the following key findings: - -## **SD-WANs are hot but not a panacea to all networking challenges** - -The survey found that 44% of respondents have already deployed or will deploy an SD-WAN within the next 12 months. This number is up sharply from 25% when Cato ran the survey a year ago. Another 33% are considering SD-WAN but have no immediate plans to deploy. The primary drivers for the evolution of the WAN are improved internet access (46%), increased bandwidth (39%), improved last-mile availability (38%) and reduced WAN costs (37%). It’s good to see cost savings drop to fourth in motivation, since there is so much more to SD-WAN. - -[The time of 5G is almost here][3] - -It’s interesting that the majority of respondents believe SD-WAN alone can’t address all challenges facing the WAN. A whopping 85% stated they would be confronting issues not addressed by SD-WAN alone. This includes secure, local internet breakout, improved visibility, and control over mobile access to cloud apps. This indicates that customers are looking for SD-WAN to be the foundation of the WAN but understand that other technologies need to be deployed as well. - -## **Telco dissatisfaction is high** - -The traditional telco has been a point of frustration for network professionals for years, and the survey spelled that out loud and clear. Prior to being an analyst, I held a number of corporate IT positions and found telcos to be the single most frustrating group of companies to deal with. The problem was, there was no choice. If you need MPLS services, you need a telco. The same can’t be said for SD-WANs, though; businesses have more choices. - -Respondents to the survey ranked telco service as “average.” It’s been well documented that we are now in the customer-experience era and “good enough” service is no longer good enough. Regarding pricing, 54% gave telcos a failing grade. 
Although price isn’t everything, this will certainly open the door to competitive SD-WAN vendors. Respondents gave the highest marks for overall experience to SaaS providers, followed by cloud computing suppliers. Global telcos scored the lowest of all vendor types. - -A look deeper explains the frustration level. The network is now mission-critical for companies, but 48% stated they are able to reach the support personnel with the right expertise to solve a problem only on a second attempt. No retailer, airline, hotel or other type of company could survive this, but telco customers had no other options for years. - -**[[Prepare to become a Certified Information Security Systems Professional with this comprehensive online course from PluralSight. Now offering a 10-day free trial!][4] ]** - -Another interesting set of data points is the speed at which telcos address customer needs. Digital businesses compete on speed, but telco process is the antithesis of fast. Moves, adds and changes take at least one business day for half of the respondents. Also, 70% indicated that opening a new location takes 15 days, and 38% stated it requires 45 days or more. - -## **Security is now part of SD-WAN** - -The use of broadband, cloud access and other trends raise the bar on security for SD-WAN, and the survey confirmed that respondents are skeptical that SD-WANs could address these issues. Seventy percent believe SD-WANs can’t address malware/ransomware, and 49% don’t think SD-WAN helps with enforcing company policies on mobile users. Because of this, network professionals are forced to buy additional security tools from other vendors, but that can drive up complexity. SD-WAN vendors that have intrinsic security capabilities can use that as a point of differentiation. - -## **Managed services are critical to the growth of SD-WANs** - -The survey found that 75% of respondents are using some kind of managed service provider, versus only 25% using an appliance vendor. 
This latter number was 32% last year. I’m not surprised by this shift and expect it to continue. Legacy WANs were inefficient but straightforward to deploy. D-WANs are highly agile and more cost-effective, but complexity has gone through the roof. Network engineers need to factor in cloud connectivity, distributed security, application performance, broadband connectivity and other issues. Managed services can help businesses enjoy the benefits of SD-WAN while masking the complexity. - -Despite the desire to use an MSP, respondents don’t want to give up total control. Eighty percent stated they preferred self-service or co-managed models. This further explains the shift away from telcos, since they typically work with fully managed models. - -Join the Network World communities on [Facebook][5] and [LinkedIn][6] to comment on topics that are top of mind. - --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3398478/survey-finds-sd-wans-are-hot-but-satisfaction-with-telcos-is-not.html - -作者:[Zeus Kerravala][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Zeus-Kerravala/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2018/02/istock-465661573-100750447-large.jpg -[2]: https://www.catonetworks.com/news/digital-transformation-survey/ -[3]: https://www.networkworld.com/article/3354477/mobile-world-congress-the-time-of-5g-is-almost-here.html -[4]: https://pluralsight.pxf.io/c/321564/424552/7490?u=https%3A%2F%2Fwww.pluralsight.com%2Fpaths%2Fcertified-information-systems-security-professional-cisspr -[5]: https://www.facebook.com/NetworkWorld/ -[6]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190601 True Hyperconvergence at Scale- HPE Simplivity With 
Composable Fabric.md b/sources/talk/20190601 True Hyperconvergence at Scale- HPE Simplivity With Composable Fabric.md deleted file mode 100644 index 97eb611ef8..0000000000 --- a/sources/talk/20190601 True Hyperconvergence at Scale- HPE Simplivity With Composable Fabric.md +++ /dev/null @@ -1,28 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (True Hyperconvergence at Scale: HPE Simplivity With Composable Fabric) -[#]: via: (https://www.networkworld.com/article/3399619/true-hyperconvergence-at-scale-hpe-simplivity-with-composable-fabric.html) -[#]: author: (HPE https://www.networkworld.com/author/Michael-Cooney/) - -True Hyperconvergence at Scale: HPE Simplivity With Composable Fabric -====== - -Many hyperconverged solutions only focus on software-defined storage. However, many networking functions and technologies can be consolidated for simplicity and scale in the data center. This video describes how HPE SimpliVity with Composable Fabric gives organizations the power to run any virtual machine anywhere, anytime. Read more about HPE SimpliVity [here][1]. 
- --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3399619/true-hyperconvergence-at-scale-hpe-simplivity-with-composable-fabric.html - -作者:[HPE][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Michael-Cooney/ -[b]: https://github.com/lujun9972 -[1]: https://hpe.com/info/simplivity diff --git a/sources/talk/20190602 IoT Roundup- New research on IoT security, Microsoft leans into IoT.md b/sources/talk/20190602 IoT Roundup- New research on IoT security, Microsoft leans into IoT.md deleted file mode 100644 index 6d955c6485..0000000000 --- a/sources/talk/20190602 IoT Roundup- New research on IoT security, Microsoft leans into IoT.md +++ /dev/null @@ -1,71 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (IoT Roundup: New research on IoT security, Microsoft leans into IoT) -[#]: via: (https://www.networkworld.com/article/3398607/iot-roundup-new-research-on-iot-security-microsoft-leans-into-iot.html) -[#]: author: (Jon Gold https://www.networkworld.com/author/Jon-Gold/) - -IoT Roundup: New research on IoT security, Microsoft leans into IoT -====== -Verizon sets up widely available narrow-band IoT service, while most Americans think IoT manufacturers should ensure their products protect personal information. -As with any technology whose use is expanding at such speed, it can be tough to track exactly what’s going on in the [IoT][1] world – everything from basic usage numbers to customer attitudes to more in-depth slices of the market is constantly changing. Fortunately, the month of May brought several new pieces of research to light, which should help provide at least a partial outline of what’s really happening in IoT. 
- -### Internet of things polls - -Not all of the news is good. An IPSOS Mori poll performed on behalf of the Internet Society and Consumers International (respectively, an umbrella organization for open development and Internet use and a broad-based consumer advocacy group) found that, despite the skyrocketing numbers of smart devices in circulation around the world, more than half of users in large parts of the western world don’t trust those devices to safeguard their privacy. - -**More on IoT:** - - * [What is the IoT? How the internet of things works][2] - * [What is edge computing and how it’s changing the network][3] - * [Most powerful Internet of Things companies][4] - * [10 Hot IoT startups to watch][5] - * [The 6 ways to make money in IoT][6] - * [What is digital twin technology? [and why it matters]][7] - * [Blockchain, service-centric networking key to IoT success][8] - * [Getting grounded in IoT networking and security][9] - * [Building IoT-ready networks must become a priority][10] - * [What is the Industrial IoT? [And why the stakes are so high]][11] - - - -While almost 70 percent of respondents owned connected devices, 55 percent said they didn’t feel their personal information was adequately protected by manufacturers. A further 28 percent said they had avoided using connected devices – smart home, fitness tracking and similar consumer gadgetry – primarily because they were concerned over privacy issues, and a whopping 85 percent of Americans agreed with the argument that manufacturers had a responsibility to produce devices that protected personal information. - -Those concerns are understandable, according to data from the Ponemon Institute, a tech-research organization. 
Its survey of corporate risk and security personnel, released in early May, found that there have been few concerted efforts to limit exposure to IoT-based security threats, and that those threats are sharply on the rise when compared to past years, with the percentage of organizations that had experienced a data breach related to unsecured IoT devices rising from 15 percent in fiscal 2017 to 26 percent in fiscal 2019. - -Beyond a lack of organizational wherewithal to address those threats, part of the problem in some verticals is technical. Security vendor Forescout said earlier this month that its research showed 40 percent of all healthcare IT environments had more than 20 different operating systems, and more than 30 percent had more than 100 – hardly an ideal situation for smooth patching and updating. - -To continue reading this article register now - -[Get Free Access][12] - -[Learn More][13] Existing Users [Sign In][12] - --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3398607/iot-roundup-new-research-on-iot-security-microsoft-leans-into-iot.html - -作者:[Jon Gold][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Jon-Gold/ -[b]: https://github.com/lujun9972 -[1]: https://www.networkworld.com/article/3207535/what-is-iot-how-the-internet-of-things-works.html -[2]: https://www.networkworld.com/article/3207535/internet-of-things/what-is-the-iot-how-the-internet-of-things-works.html -[3]: https://www.networkworld.com/article/3224893/internet-of-things/what-is-edge-computing-and-how-it-s-changing-the-network.html -[4]: https://www.networkworld.com/article/2287045/internet-of-things/wireless-153629-10-most-powerful-internet-of-things-companies.html -[5]: 
https://www.networkworld.com/article/3270961/internet-of-things/10-hot-iot-startups-to-watch.html -[6]: https://www.networkworld.com/article/3279346/internet-of-things/the-6-ways-to-make-money-in-iot.html -[7]: https://www.networkworld.com/article/3280225/internet-of-things/what-is-digital-twin-technology-and-why-it-matters.html -[8]: https://www.networkworld.com/article/3276313/internet-of-things/blockchain-service-centric-networking-key-to-iot-success.html -[9]: https://www.networkworld.com/article/3269736/internet-of-things/getting-grounded-in-iot-networking-and-security.html -[10]: https://www.networkworld.com/article/3276304/internet-of-things/building-iot-ready-networks-must-become-a-priority.html -[11]: https://www.networkworld.com/article/3243928/internet-of-things/what-is-the-industrial-iot-and-why-the-stakes-are-so-high.html -[12]: javascript:// -[13]: /learn-about-insider/ diff --git a/sources/talk/20190603 It-s time for the IoT to -optimize for trust.md b/sources/talk/20190603 It-s time for the IoT to -optimize for trust.md deleted file mode 100644 index cc5aa9db7c..0000000000 --- a/sources/talk/20190603 It-s time for the IoT to -optimize for trust.md +++ /dev/null @@ -1,102 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (It’s time for the IoT to 'optimize for trust') -[#]: via: (https://www.networkworld.com/article/3399817/its-time-for-the-iot-to-optimize-for-trust.html) -[#]: author: (Fredric Paul https://www.networkworld.com/author/Fredric-Paul/) - -It’s time for the IoT to 'optimize for trust' -====== -If we can't trust the internet of things (IoT) to gather accurate data and use it appropriately, IoT adoption and innovation are likely to suffer. -![Bose][1] - -One of the strengths of internet of things (IoT) technology is that it can do so many things well. From smart toothbrushes to predictive maintenance on jetliners, the IoT has more use cases than you can count. 
The result is that various IoT uses cases require optimization for particular characteristics, from cost to speed to long life, as well as myriad others. - -But in a recent post, "[How the internet of things will change advertising][2]" (which you should definitely read), the always-insightful Stacy Higginbotham tossed in a line that I can’t stop thinking about: “It's crucial that the IoT optimizes for trust." - -**[ Read also: Network World's[corporate guide to addressing IoT security][3] ]** - -### Trust is the IoT's most important attribute - -Higginbotham was talking about optimizing for trust as opposed to clicks, but really, trust is more important than just about any other value in the IoT. It’s more important than bandwidth usage, more important than power usage, more important than cost, more important than reliability, and even more important than security and privacy (though they are obviously related). In fact, trust is the critical factor in almost every aspect of the IoT. - -Don’t believe me? Let’s take a quick look at some recent developments in the field: - -For one thing, IoT devices often don’t take good care of the data they collect from you. Over 90% of data transactions on IoT devices are not fully encrypted, according to a new [study from security company Zscaler][4]. The [problem][5], apparently, is that many companies have large numbers of consumer-grade IoT devices on their networks. In addition, many IoT devices are attached to the companies’ general networks, and if that network is breached, the IoT devices and data may also be compromised. - -In some cases, ownership of IoT data can raise surprisingly serious trust concerns. According to [Kaiser Health News][6], smartphone sleep apps, as well as smart beds and smart mattress pads, gather amazingly personal information: “It knows when you go to sleep. It knows when you toss and turn. 
It may even be able to tell when you’re having sex.” And while companies such as Sleep Number say they don’t share the data they gather, their written privacy policies clearly state that they _can_. - -### **Lack of trust may lead to new laws** - -In California, meanwhile, "lawmakers are pushing for new privacy rules affecting smart speakers” such as the Amazon Echo. According to the _[LA Times][7]_ , the idea is “to ensure that the devices don’t record private conversations without permission,” requiring a specific opt-in process. Why is this an issue? Because consumers—and their elected representatives—don’t trust that Amazon, or any IoT vendor, will do the right thing with the data it collects from the IoT devices it sells—perhaps because it turns out that thousands of [Amazon employees have been listening in on what Alexa users are][8] saying to their Echo devices. - -The trust issues get even trickier when you consider that Amazon reportedly considered letting Alexa listen to users even without a wake word like “Alexa” or “computer,” and is reportedly working on [wearable devices designed to read human emotions][9] from listening to your voice. - -“The trust has been breached,” said California Assemblyman Jordan Cunningham (R-Templeton) to the _LA Times_. - -As critics of the bill ([AB 1395][10]) point out, the restrictions matter because voice assistants require this data to improve their ability to correctly understand and respond to requests. - -### **Some first steps toward increasing trust** - -Perhaps recognizing that the IoT needs to be optimized for trust so that we are comfortable letting it do its job, Amazon recently introduced a new Alexa voice command: “[Delete what I said today][11].” - -Moves like that, while welcome, will likely not be enough. - -For example, a [new United Nations report][12] suggests that “voice assistants reinforce harmful gender stereotypes” when using female-sounding voices and names like Alexa and Siri. 
Put simply, “Siri’s ‘female’ obsequiousness—and the servility expressed by so many other digital assistants projected as young women—provides a powerful illustration of gender biases coded into technology products, pervasive in the technology sector and apparent in digital skills education.” I'm not sure IoT vendors are eager—or equipped—to tackle issues like that. - -**More on IoT:** - - * [What is the IoT? How the internet of things works][13] - * [What is edge computing and how it’s changing the network][14] - * [Most powerful Internet of Things companies][15] - * [10 Hot IoT startups to watch][16] - * [The 6 ways to make money in IoT][17] - * [What is digital twin technology? [and why it matters]][18] - * [Blockchain, service-centric networking key to IoT success][19] - * [Getting grounded in IoT networking and security][20] - * [Building IoT-ready networks must become a priority][21] - * [What is the Industrial IoT? [And why the stakes are so high]][22] - - - -Join the Network World communities on [Facebook][23] and [LinkedIn][24] to comment on topics that are top of mind. 
- --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3399817/its-time-for-the-iot-to-optimize-for-trust.html - -作者:[Fredric Paul][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Fredric-Paul/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2018/09/bose-sleepbuds-2-100771579-large.jpg -[2]: https://mailchi.mp/iotpodcast/stacey-on-iot-how-iot-changes-advertising?e=6bf9beb394 -[3]: https://www.networkworld.com/article/3269165/internet-of-things/a-corporate-guide-to-addressing-iot-security-concerns.html -[4]: https://www.zscaler.com/blogs/research/iot-traffic-enterprise-rising-so-are-threats -[5]: https://www.csoonline.com/article/3397044/over-90-of-data-transactions-on-iot-devices-are-unencrypted.html -[6]: https://khn.org/news/a-wake-up-call-on-data-collecting-smart-beds-and-sleep-apps/ -[7]: https://www.latimes.com/politics/la-pol-ca-alexa-google-home-privacy-rules-california-20190528-story.html -[8]: https://www.usatoday.com/story/tech/2019/04/11/amazon-employees-listening-alexa-customers/3434732002/ -[9]: https://www.bloomberg.com/news/articles/2019-05-23/amazon-is-working-on-a-wearable-device-that-reads-human-emotions -[10]: https://leginfo.legislature.ca.gov/faces/billTextClient.xhtml?bill_id=201920200AB1395 -[11]: https://venturebeat.com/2019/05/29/amazon-launches-alexa-delete-what-i-said-today-voice-command/ -[12]: https://unesdoc.unesco.org/ark:/48223/pf0000367416.page=1 -[13]: https://www.networkworld.com/article/3207535/internet-of-things/what-is-the-iot-how-the-internet-of-things-works.html -[14]: https://www.networkworld.com/article/3224893/internet-of-things/what-is-edge-computing-and-how-it-s-changing-the-network.html -[15]: 
https://www.networkworld.com/article/2287045/internet-of-things/wireless-153629-10-most-powerful-internet-of-things-companies.html -[16]: https://www.networkworld.com/article/3270961/internet-of-things/10-hot-iot-startups-to-watch.html -[17]: https://www.networkworld.com/article/3279346/internet-of-things/the-6-ways-to-make-money-in-iot.html -[18]: https://www.networkworld.com/article/3280225/internet-of-things/what-is-digital-twin-technology-and-why-it-matters.html -[19]: https://www.networkworld.com/article/3276313/internet-of-things/blockchain-service-centric-networking-key-to-iot-success.html -[20]: https://www.networkworld.com/article/3269736/internet-of-things/getting-grounded-in-iot-networking-and-security.html -[21]: https://www.networkworld.com/article/3276304/internet-of-things/building-iot-ready-networks-must-become-a-priority.html -[22]: https://www.networkworld.com/article/3243928/internet-of-things/what-is-the-industrial-iot-and-why-the-stakes-are-so-high.html -[23]: https://www.facebook.com/NetworkWorld/ -[24]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190604 Data center workloads become more complex despite promises to the contrary.md b/sources/talk/20190604 Data center workloads become more complex despite promises to the contrary.md deleted file mode 100644 index 31d127e77d..0000000000 --- a/sources/talk/20190604 Data center workloads become more complex despite promises to the contrary.md +++ /dev/null @@ -1,64 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Data center workloads become more complex despite promises to the contrary) -[#]: via: (https://www.networkworld.com/article/3400086/data-center-workloads-become-more-complex-despite-promises-to-the-contrary.html) -[#]: author: (Andy Patrizio https://www.networkworld.com/author/Andy-Patrizio/) - -Data center workloads become more complex despite promises to the contrary -====== 
-The data center is shouldering a greater burden than ever, despite promises of ease and the cloud. -![gorodenkoff / Getty Images][1] - -Data centers are becoming more complex and still run the majority of workloads despite the promises of simplicity of deployment through automation and hyperconverged infrastructure (HCI), not to mention how the cloud was supposed to take over workloads. - -That’s the finding of the Uptime Institute's latest [annual global data center survey][2] (registration required). The majority of IT loads still run on enterprise data centers even in the face of cloud adoption, putting pressure on administrators to have to manage workloads across the hybrid infrastructure. - -**[ Learn[how server disaggregation can boost data center efficiency][3] | Get regularly scheduled insights: [Sign up for Network World newsletters][4] ]** - -With workloads like artificial intelligence (AI) and machine language coming to the forefront, that means facilities face greater power and cooling challenges, since AI is extremely processor-intensive. That puts strain on data center administrators and power and cooling vendors alike to keep up with the growth in demand. - -On top of it all, everyone is struggling to get enough staff with the right skills. - -### Outages, staffing problems, lack of public cloud visibility among top concerns - -Among the key findings of Uptime's report: - - * The large, privately owned enterprise data center facility still forms the bedrock of corporate IT and is expected to be running half of all workloads in 2021. - * The staffing problem affecting most of the data center sector has only worsened. Sixty-one percent of respondents said they had difficulty retaining or recruiting staff, up from 55% a year earlier. - * Outages continue to cause significant problems for operators. 
Just over a third (34%) of all respondents had an outage or severe IT service degradation in the past year, while half (50%) had an outage or severe IT service degradation in the past three years. - * Ten percent of all respondents said their most recent significant outage cost more than $1 million. - * A lack of visibility, transparency, and accountability of public cloud services is a major concern for enterprises that have mission-critical applications. A fifth of operators surveyed said they would be more likely to put workloads in a public cloud if there were more visibility. Half of those using public cloud for mission-critical applications also said they do not have adequate visibility. - * Improvements in data center facility energy efficiency have flattened out and even deteriorated slightly in the past two years. The average PUE for 2019 is 1.67. - * Rack power density is rising after a long period of flat or minor increases, causing many to rethink cooling strategies. - * Power loss was the single biggest cause of outages, accounting for one-third of outages. Sixty percent of respondents said their data center’s outage could have been prevented with better management/processes or configuration. - - - -Traditionally data centers are improving their reliability through "rigorous attention to power, infrastructure, connectivity and on-site IT replication," the Uptime report says. The solution, though, is pricy. Data center operators are getting distributed resiliency through active-active data centers where at least two active data centers replicate data to each other. Uptime found up to 40% of those surveyed were using this method. - -The Uptime survey was conducted in March and April of this year, surveying 1,100 end users in more than 50 countries and dividing them into two groups: the IT managers, owners, and operators of data centers and the suppliers, designers, and consultants that service the industry. 
- -Join the Network World communities on [Facebook][5] and [LinkedIn][6] to comment on topics that are top of mind. - --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3400086/data-center-workloads-become-more-complex-despite-promises-to-the-contrary.html - -作者:[Andy Patrizio][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Andy-Patrizio/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2019/05/cso_cloud_computing_backups_it_engineer_data_center_server_racks_connections_by_gorodenkoff_gettyimages-943065400_3x2_2400x1600-100796535-large.jpg -[2]: https://uptimeinstitute.com/2019-data-center-industry-survey-results -[3]: https://www.networkworld.com/article/3266624/how-server-disaggregation-could-make-cloud-datacenters-more-efficient.html -[4]: https://www.networkworld.com/newsletters/signup.html -[5]: https://www.facebook.com/NetworkWorld/ -[6]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190604 Moving to the Cloud- SD-WAN Matters- Part 2.md b/sources/talk/20190604 Moving to the Cloud- SD-WAN Matters- Part 2.md deleted file mode 100644 index 2f68bd6f59..0000000000 --- a/sources/talk/20190604 Moving to the Cloud- SD-WAN Matters- Part 2.md +++ /dev/null @@ -1,66 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Moving to the Cloud? SD-WAN Matters! Part 2) -[#]: via: (https://www.networkworld.com/article/3398488/moving-to-the-cloud-sd-wan-matters-part-2.html) -[#]: author: (Rami Rammaha https://www.networkworld.com/author/Rami-Rammaha/) - -Moving to the Cloud? SD-WAN Matters! 
Part 2 -====== - -![istock][1] - -This is the second installment of the blog series exploring how enterprises can realize the full transformation promise of the cloud by shifting to a business first networking model powered by a business-driven [SD-WAN][2]. The first installment explored automating secure IPsec connectivity and intelligently steering traffic to cloud providers. We also framed the direct correlation between moving to the cloud and adopting an SD-WAN. In this blog, we will expand upon several additional challenges that can be addressed with a business-driven SD-WAN when embracing the cloud: - -### Simplifying and automating security zone-based segmentation - -Securing cloud-first branches requires a robust multi-level approach that addresses following considerations: - - * Restricting outside traffic coming into the branch to sessions exclusively initiated by internal users with a built-in stateful firewall, avoiding appliance sprawl and lowering operational costs; this is referred to as the app whitelist model - * Encrypting communications between end points within the SD-WAN fabric and between branch locations and public cloud instances - * Service chaining traffic to a cloud-hosted security service like [Zscaler][3] for Layer 7 inspection and analytics for internet-bound traffic - * Segmenting traffic spanning the branch, WAN and data center/cloud - * Centralizing policy orchestration and automation of zone-based firewall, VLAN and WAN overlays - - - -A traditional device-centric WAN approach for security segmentation requires the time-consuming manual configuration of routers and/or firewalls on a device-by-device and site-by-site basis. This is not only complex and cumbersome, but it simply can’t scale to 100s or 1000s of sites. Anusha Vaidyanathan, director of product management at Silver Peak, explains how to automate end-to-end zone-based segmentation, emphasizing the advantages of a business-driven approach in this [lightboard video][4]. 
- -### Delivering the Highest Quality of Experience to IT teams - -The goal for enterprise IT is enabling business agility and increasing operational efficiency. The traditional router-centric WAN approach doesn’t provide the best quality of experience for IT as management and on-going network operations are manual and time consuming, device-centric, cumbersome, error-prone and inefficient. - -A business-driven SD-WAN such as the Silver Peak [Unity EdgeConnect™][5] unified SD-WAN edge platform centralizes the orchestration of business-driven policies. EdgeConnect automation, machine learning and open APIs easily integrate with third-party management tools and real-time visibility tools to deliver the highest quality of experience for IT, enabling them to reclaim nights and weekends. Manav Mishra, vice president of product management at Silver Peak, explains the latest Silver Peak innovations in this [lightboard video][6]. - -As enterprises become increasingly dependent on the cloud and embrace a multi-cloud strategy, they must address a number of new challenges: - - * A centralized approach to securely embracing the cloud and the internet - * How to extend the on-premise data center to a public cloud and migrating workloads between private and public cloud, taking application portability into account - * Deliver consistent high application performance and availability to hosted applications whether they reside in the data center, private or public clouds or are delivered as SaaS services - * A proactive way to quickly resolve complex issues that span the data center and cloud as well as multiple WAN transport services by harnessing the power of advanced visibility and analytics tools - - - -The business-driven EdgeConnect SD-WAN edge platform enables enterprise IT organizations to easily and consistently embrace the public cloud. 
Unified security and performance capabilities with automation deliver the highest quality of experience for both users and IT while lowering overall WAN expenditures. - --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3398488/moving-to-the-cloud-sd-wan-matters-part-2.html - -作者:[Rami Rammaha][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Rami-Rammaha/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2019/05/istock-909772962-100797711-large.jpg -[2]: https://www.silver-peak.com/sd-wan/sd-wan-explained -[3]: https://www.silver-peak.com/company/tech-partners/zscaler -[4]: https://www.silver-peak.com/resource-center/how-to-create-sd-wan-security-zones-in-edgeconnect -[5]: https://www.silver-peak.com/products/unity-edge-connect -[6]: https://www.silver-peak.com/resource-center/how-to-optimize-quality-of-experience-for-it-using-sd-wan diff --git a/sources/talk/20190604 Why Emacs.md b/sources/talk/20190604 Why Emacs.md deleted file mode 100644 index 0d9b12ba1a..0000000000 --- a/sources/talk/20190604 Why Emacs.md +++ /dev/null @@ -1,94 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Why Emacs) -[#]: via: (https://saurabhkukade.github.io/Why-Emacs/) -[#]: author: (Saurabh Kukade http://saurabhkukade.github.io/) - -Why Emacs -====== -![Image of Emacs][1] - -> “Emacs outshines all other editing software in approximately the same way that the noonday sun does the stars. It is not just bigger and brighter; it simply makes everything else vanish.” - -> -Neal Stephenson, “In the Beginning was the Command Line” - -### Introduction - -This is my first blog post about Emacs. 
I want to discuss step by step customization of Emacs for beginner. If you’re new to Emacs then you are in the right place, if you’re already familiar with Emacs then that is even better, I assure you that we will get to know many new things in here. - -Before getting into how to customize Emacs and what are the exciting features of Emacs I want to write about “why Emacs”. - -### Why Emacs? - -This was first question crossed my mind when one wise man asked me to try Emacs instead of VIM. Well, I am not writing this article to discuss a battle between two editors VIM and Emacs. That is a another story for another day. But Why Emacs? Well here are some things that justifies that Emacs is powerful and highly customizable. - -### 41 Years! - -Initial release year of Emacs is 1976 that means Emacs is standing and adapting changes from last 41 years. - -41 years of time for a software is huge and that makes Emacs is one of the best Software Engineering product. - -### Lisp (Emacs Lisp) - -If you are lisp programmer (lisper) then I don’t need to explain you. But for those who don’t know Lisp and its dialects like Scheme, Clojure then Lisp (and all dialects of Lips) is powerful programming language and it stands different from other languages because of its unique property of “Homoiconicity”. - -As Emacs is implemented in C and Emacs Lisp (Emacs Lisp is a dialect of the Lisp programming language) it makes Emacs what is because, - - * The simple syntax of Lisp, together with the powerful editing features made possible by that simple syntax, add up to a more convenient programming system than is practical with other languages. Lisp and extensible editors are made for each other. - - * The simplicity of Lisp syntax makes intelligent editing operations easier to implement, while the complexity of other languages discourages their users from implementing similar operations for them. 
- - - - -### Highly Customizable - -To any programmer, tools gives power and convenience for reading, writing and managing a code. - -Hence, if a tool is programmatic-ally customizable then that makes it even more powerful. - -Emacs has above property and in fact is itself one of best tool known for its flexibility and easy customization. Emacs provides basic commands and key configuration for editing a text. This commands and key-configuration are editable and extensible. - -Beside basic configuration, Emacs is not biased towards any specific language for customization. One can customize Emacs for any programming language or extend easily existing customization. - -Emacs provides the consistent environment for multiple programming languages, email, organizer (via org-mode), a shell/interpreter, note taking, and document writing. - -For customizing you don’t need to learn Emacs-lisp from scratch. You can use existing packages available and that’s it. Installing and managing packages in Emacs is easy, Emacs has in-built package manager for it. - -Customization is very portable, one just need to place a file or directory containing personal customization file(s) in the right place and it’s done for getting personal customization to new place. ## Huge platform Support - -Emacs supports Lisp, Ruby, Python, PHP, Java, Erlang, JavaScript, C, C++, Prolog, Tcl, AWK, PostScript, Clojure, Scala, Perl, Haskell, Elixir all of these languages and more like mysql, pgsql etc. Because of the powerful Lisp core, Emacs is easy to extend to add support for new languages if need to. - -Also one can use the built-in IRC client ERC along with BitlBee to connect to your favorite chat services, or use the Jabber package to hop on any XMPP service. - -### Org-mode - -No matter if you are programmer or not. Org mode is for everyone. Org mode lets you to plan projects and organize schedule. 
It can be also use for publish notes and documents to different formats, like LaTeX->pdf, html, and markdown. - -In fact, Org-mode is so awesome enough that many non-Emacs users started learn Emacs. - -### Final note - -There are number of reason to argue that Emacs is cool and awesome to use. But I just wanted you to give glimpse of why to try Emacs. In the upcoming post I will be writing step by step information to customize Emacs from scratch to awesome IDE. - -Thank you! - -Please don’t forget to comment your thoughts and suggestions below. - --------------------------------------------------------------------------------- - -via: https://saurabhkukade.github.io/Why-Emacs/ - -作者:[Saurabh Kukade][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: http://saurabhkukade.github.io/ -[b]: https://github.com/lujun9972 -[1]: https://saurabhkukade.github.io/img/emacs.jpeg diff --git a/sources/talk/20190606 Cloud adoption drives the evolution of application delivery controllers.md b/sources/talk/20190606 Cloud adoption drives the evolution of application delivery controllers.md deleted file mode 100644 index d7b22353c4..0000000000 --- a/sources/talk/20190606 Cloud adoption drives the evolution of application delivery controllers.md +++ /dev/null @@ -1,67 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Cloud adoption drives the evolution of application delivery controllers) -[#]: via: (https://www.networkworld.com/article/3400897/cloud-adoption-drives-the-evolution-of-application-delivery-controllers.html) -[#]: author: (Zeus Kerravala https://www.networkworld.com/author/Zeus-Kerravala/) - -Cloud adoption drives the evolution of application delivery controllers -====== -Application delivery controllers (ADCs) are on the precipice of shifting 
from traditional hardware appliances to software form factors. -![Aramyan / Getty Images / Microsoft][1] - -Migrating to a cloud computing model will obviously have an impact on the infrastructure that’s deployed. This shift has already been seen in the areas of servers, storage, and networking, as those technologies have evolved to a “software-defined” model. And it appears that application delivery controllers (ADCs) are on the precipice of a similar shift. - -In fact, a new ZK Research [study about cloud computing adoption and the impact on ADCs][2] found that, when looking at the deployment model, hardware appliances are the most widely deployed — with 55% having fully deployed or are currently testing and only 15% currently researching hardware. (Note: I am an employee of ZK Research.) - -Juxtapose this with containerized ADCs where only 34% have deployed or are testing but 24% are currently researching and it shows that software in containers will outpace hardware for growth. Not surprisingly, software on bare metal and in virtual machines showed similar although lower, “researching” numbers that support the thesis that the market is undergoing a shift from hardware to software. - -**[ Read also:[How to make hybrid cloud work][3] ]** - -The study, conducted in collaboration with Kemp Technologies, surveyed 203 respondents from the U.K. and U.S. The demographic split was done to understand regional differences. An equal number of mid and large size enterprises were looked at, with 44% being from over 5,000 employees and the other 56% from companies that have 300 to 5,000 people. - -### Incumbency helps but isn’t a fait accompli for future ADC purchases - -The primary tenet of my research has always been that incumbents are threatened when markets transition, and this is something I wanted to investigate in the study. The survey asked whether buyers would consider an alternative as they evolve their applications from legacy (mode 1) to cloud-native (mode 2). 
The results offer a bit of good news and bad news for the incumbent providers. Only 8% said they would definitely select a new vendor, but 35% said they would not change. That means the other 57% will look at alternatives. This is sensible, as the requirements for cloud ADCs are different than ones that support traditional applications. - -### IT pros want better automation capabilities - -This begs the question as to what features ADC buyers want for a cloud environment versus traditional ones. The survey asked specifically what features would be most appealing in future purchases, and the top response was automation, followed by central management, application analytics, on-demand scaling (which is a form of automation), and visibility. - -The desire to automate was a positive sign for the evolution of buyer mindset. Just a few years ago, the mere mention of automation would have sent IT pros into a panic. The reality is that IT can’t operate effectively without automation, and technology professionals are starting to understand that. - -The reason automation is needed is that manual changes are holding businesses back. The survey asked how the speed of ADC changes impacts the speed at which applications are rolled out, and a whopping 60% said it creates significant or minor delays. In an era of DevOps and continuous innovation, multiple minor delays create a drag on the business and can cause it to fall behind is more agile competitors. - -![][4] - -### ADC upgrades and service provisioning benefit most from automation - -The survey also drilled down on specific ADC tasks to see where automation would have the most impact. Respondents were asked how long certain tasks took, answering in minutes, days, weeks, or months. Shockingly, there wasn’t a single task where the majority said it could be done in minutes. The closest was adding DNS entries for new virtual IP addresses (VIPs) where 46% said they could do that in minutes. 
- -Upgrading, provisioning new load balancers, and provisioning new VIPs took the longest. Looking ahead, this foreshadows big problems. As the data center gets more disaggregated and distributed, IT will deploy more software-based ADCs in more places. Taking days or weeks or month to perform these functions will cause the organization to fall behind. - -The study clearly shows changes are in the air for the ADC market. For IT pros, I strongly recommend that as the environment shifts to the cloud, it’s prudent to evaluate new vendors. By all means, see what your incumbent vendor has, but look at least at two others that offer software-based solutions. Also, there should be a focus on automating as much as possible, so the primary evaluation criteria for ADCs should be how easy it is to implement automation. - -Join the Network World communities on [Facebook][5] and [LinkedIn][6] to comment on topics that are top of mind. - --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3400897/cloud-adoption-drives-the-evolution-of-application-delivery-controllers.html - -作者:[Zeus Kerravala][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Zeus-Kerravala/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2019/05/cw_microsoft_sharepoint_vs_onedrive_clouds_and_hands_by_aramyan_gettyimages-909772962_2400x1600-100796932-large.jpg -[2]: https://kemptechnologies.com/research-papers/adc-market-research-study-zeus-kerravala/?utm_source=zkresearch&utm_medium=referral&utm_campaign=zkresearch&utm_term=zkresearch&utm_content=zkresearch -[3]: https://www.networkworld.com/article/3119362/hybrid-cloud/how-to-make-hybrid-cloud-work.html#tk.nww-fsb -[4]: 
https://images.idgesg.net/images/article/2019/06/adc-survey-zk-research-100798593-large.jpg -[5]: https://www.facebook.com/NetworkWorld/ -[6]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190606 For enterprise storage, persistent memory is here to stay.md b/sources/talk/20190606 For enterprise storage, persistent memory is here to stay.md deleted file mode 100644 index 3da91bb311..0000000000 --- a/sources/talk/20190606 For enterprise storage, persistent memory is here to stay.md +++ /dev/null @@ -1,118 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (For enterprise storage, persistent memory is here to stay) -[#]: via: (https://www.networkworld.com/article/3398988/for-enterprise-storage-persistent-memory-is-here-to-stay.html) -[#]: author: (John Edwards ) - -For enterprise storage, persistent memory is here to stay -====== -Persistent memory – also known as storage class memory – has tantalized data center operators for many years. A new technology promises the key to success. -![Thinkstock][1] - -It's hard to remember a time when semiconductor vendors haven't promised a fast, cost-effective and reliable persistent memory technology to anxious [data center][2] operators. Now, after many years of waiting and disappointment, technology may have finally caught up with the hype to make persistent memory a practical proposition. - -High-capacity persistent memory, also known as storage class memory ([SCM][3]), is fast and directly addressable like dynamic random-access memory (DRAM), yet is able to retain stored data even after its power has been switched off—intentionally or unintentionally. The technology can be used in data centers to replace cheaper, yet far slower traditional persistent storage components, such as [hard disk drives][4] (HDD) and [solid-state drives][5] (SSD). 
- -**Learn more about enterprise storage** - - * [Why NVMe over Fabric matters][6] - * [What is hyperconvergence?][7] - * [How NVMe is changing enterprise storage][8] - * [Making the right hyperconvergence choice: HCI hardware or software?][9] - - - -Persistent memory can also be used to replace DRAM itself in some situations without imposing a significant speed penalty. In this role, persistent memory can deliver crucial operational benefits, such as lightning-fast database-server restarts during maintenance, power emergencies and other expected and unanticipated reboot situations. - -Many different types of strategic operational applications and databases, particularly those that require low-latency, high durability and strong data consistency, can benefit from persistent memory. The technology also has the potential to accelerate virtual machine (VM) storage and deliver higher performance to multi-node, distributed-cloud applications. - -In a sense, persistent memory marks a rebirth of core memory. "Computers in the ‘50s to ‘70s used magnetic core memory, which was direct access, non-volatile memory," says Doug Wong, a senior member of [Toshiba Memory America's][10] technical staff. "Magnetic core memory was displaced by SRAM and DRAM, which are both volatile semiconductor memories." - -One of the first persistent memory devices to come to market is [Intel’s Optane DC][11]. Other vendors that have released persistent memory products or are planning to do so include [Samsung][12], Toshiba America Memory and [SK Hynix][13]. - -### Persistent memory: performance + reliability - -With persistent memory, data centers have a unique opportunity to gain faster performance and lower latency without enduring massive technology disruption. 
"It's faster than regular solid-state NAND flash-type storage, but you're also getting the benefit that it’s persistent," says Greg Schulz, a senior advisory analyst at vendor-independent storage advisory firm [StorageIO.][14] "It's the best of both worlds." - -Yet persistent memory offers adopters much more than speedy, reliable storage. In an ideal IT world, all of the data associated with an application would reside within DRAM to achieve maximum performance. "This is currently not practical due to limited DRAM and the fact that DRAM is volatile—data is lost when power fails," observes Scott Nelson, senior vice president and general manager of Toshiba Memory America's memory business unit. - -Persistent memory transports compatible applications to an "always on" status, providing continuous access to large datasets through increased system memory capacity, says Kristie Mann, [Intel's][15] director of marketing for data center memory and storage. She notes that Optane DC can supply data centers with up to three-times more system memory capacity (as much as 36TBs), system restarts in seconds versus minutes, 36% more virtual machines per node, and up to 8-times better performance on [Apache Spark][16], a widely used open-source distributed general-purpose cluster-computing framework. - -System memory currently represents 60% of total platform costs, Mann says. She observes that Optane DC persistent memory provides significant customer value by delivering 1.2x performance/dollar on key customer workloads. "This value will dramatically change memory/storage economics and accelerate the data-centric era," she predicts. - -### Where will persistent memory infiltrate enterprise storage? - -Persistent memory is likely to first enter the IT mainstream with minimal fanfare, serving as a high-performance caching layer for high performance SSDs. "This could be adopted relatively-quickly," Nelson observes. 
Yet this intermediary role promises to be merely a stepping-stone to increasingly crucial applications. - -Over the next few years, persistent technology will impact data centers serving enterprises across an array of sectors. "Anywhere time is money," Schulz says. "It could be financial services, but it could also be consumer-facing or sales-facing operations." - -Persistent memory supercharges anything data-related that requires extreme speed at extreme scale, observes Andrew Gooding, vice president of engineering at [Aerospike][17], which delivered the first commercially available open database optimized for use with Intel Optane DC. - -Machine learning is just one of many applications that stand to benefit from persistent memory. Gooding notes that ad tech firms, which rely on machine learning to understand consumers' reactions to online advertising campaigns, should find their work made much easier and more effective by persistent memory. "They’re collecting information as users within an ad campaign browse the web," he says. "If they can read and write all that data quickly, they can then apply machine-learning algorithms and tailor specific ads for users in real time." - -Meanwhile, as automakers become increasingly reliant on data insights, persistent memory promises to help them crunch numbers and refine sophisticated new technologies at breakneck speeds. "In the auto industry, manufacturers face massive data challenges in autonomous vehicles, where 20 exabytes of data needs to be processed in real time, and they're using self-training machine-learning algorithms to help with that," Gooding explains. "There are so many fields where huge amounts of data need to be processed quickly with machine-learning techniques—fraud detection, astronomy... the list goes on." - -Intel, like other persistent memory vendors, expects cloud service providers to be eager adopters, targeting various types of in-memory database services. 
Google, for example, is applying persistent memory to big data workloads on non-relational databases from vendors such as Aerospike and [Redis Labs][18], Mann says. - -High-performance computing (HPC) is yet another area where persistent memory promises to make a tremendous impact. [CERN][19], the European Organization for Nuclear Research, is using Intel's Optane DC to significantly reduce wait times for scientific computing. "The efficiency of their algorithms depends on ... persistent memory, and CERN considers it a major breakthrough that is necessary to the work they are doing," Mann observes. - -### How to prepare storage infrastructure for persistent memory - -Before jumping onto the persistent memory bandwagon, organizations need to carefully scrutinize their IT infrastructure to determine the precise locations of any existing data bottlenecks. This task will be primary application-dependent, Wong notes. "If there is significant performance degradation due to delays associated with access to data stored in non-volatile storage—SSD or HDD—then an SCM tier will improve performance," he explains. Yet some applications will probably not benefit from persistent memory, such as compute-bound applications where CPU performance is the bottleneck. - -Developers may need to reevaluate fundamental parts of their storage and application architectures, Gooding says. "They will need to know how to program with persistent memory," he notes. "How, for example, to make sure writes are flushed to the actual persistent memory device when necessary, as opposed to just sitting in the CPU cache." - -To leverage all of persistent memory's potential benefits, significant changes may also be required in how code is designed. When moving applications from DRAM and flash to persistent memory, developers will need to consider, for instance, what happens when a program crashes and restarts. 
"Right now, if they write code that leaks memory, that leaked memory is recovered on restart," Gooding explains. With persistent memory, that isn't necessarily the case. "Developers need to make sure the code is designed to reconstruct a consistent state when a program restarts," he notes. "You may not realize how much your designs rely on the traditional combination of fast volatile DRAM and block storage, so it can be tricky to change your code designs for something completely new like persistent memory." - -Older versions of operating systems may also need to be updated to accommodate the new technology, although newer OSes are gradually becoming persistent memory aware, Schulz says. "In other words, if they detect that persistent memory is available, then they know how to utilize that either as a cache, or some other memory." - -Hypervisors, such as [Hyper-V][20] and [VMware][21], now know how to leverage persistent memory to support productivity, performance and rapid restarts. By utilizing persistent memory along with the latest versions of VMware, a whole system can see an uplift in speed and also maximize the number of VMs to fit on a single host, says Ian McClarty, CEO and president of data center operator [PhoenixNAP Global IT Services][22]. "This is a great use case for companies who want to own less hardware or service providers who want to maximize hardware to virtual machine deployments." - -Many key enterprise applications, particularly databases, are also becoming persistent memory aware. SQL Server and [SAP’s][23] flagship [HANA][24] database management platform have both embraced persistent memory. "The SAP HANA platform is commonly used across multiple industries to process data and transactions, and then run advanced analytics ... to deliver real-time insights," Mann observes. - -In terms of timing, enterprises and IT organizations should begin persistent memory planning immediately, Schulz recommends. 
"You should be talking with your vendors and understanding their roadmap, their plans, for not only supporting this technology, but also in what mode: as storage, as memory." - -Join the Network World communities on [Facebook][25] and [LinkedIn][26] to comment on topics that are top of mind. - --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3398988/for-enterprise-storage-persistent-memory-is-here-to-stay.html - -作者:[John Edwards][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2017/08/file_folder_storage_sharing_thinkstock_477492571_3x2-100732889-large.jpg -[2]: https://www.networkworld.com/article/3353637/the-data-center-is-being-reimagined-not-disappearing.html -[3]: https://www.networkworld.com/article/3026720/the-next-generation-of-storage-disruption-storage-class-memory.html -[4]: https://www.networkworld.com/article/2159948/hard-disk-drives-vs--solid-state-drives--are-ssds-finally-worth-the-money-.html -[5]: https://www.networkworld.com/article/3326058/what-is-an-ssd.html -[6]: https://www.networkworld.com/article/3273583/why-nvme-over-fabric-matters.html -[7]: https://www.networkworld.com/article/3207567/what-is-hyperconvergence -[8]: https://www.networkworld.com/article/3280991/what-is-nvme-and-how-is-it-changing-enterprise-storage.html -[9]: https://www.networkworld.com/article/3318683/making-the-right-hyperconvergence-choice-hci-hardware-or-software -[10]: https://business.toshiba-memory.com/en-us/top.html -[11]: https://www.intel.com/content/www/us/en/architecture-and-technology/optane-dc-persistent-memory.html -[12]: https://www.samsung.com/semiconductor/ -[13]: https://www.skhynix.com/eng/index.jsp -[14]: https://storageio.com/ -[15]: 
https://www.intel.com/content/www/us/en/homepage.html -[16]: https://spark.apache.org/ -[17]: https://www.aerospike.com/ -[18]: https://redislabs.com/ -[19]: https://home.cern/ -[20]: https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/about/ -[21]: https://www.vmware.com/ -[22]: https://phoenixnap.com/ -[23]: https://www.sap.com/index.html -[24]: https://www.sap.com/products/hana.html -[25]: https://www.facebook.com/NetworkWorld/ -[26]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190606 Self-learning sensor chips won-t need networks.md b/sources/talk/20190606 Self-learning sensor chips won-t need networks.md deleted file mode 100644 index c5abec5426..0000000000 --- a/sources/talk/20190606 Self-learning sensor chips won-t need networks.md +++ /dev/null @@ -1,82 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Self-learning sensor chips won’t need networks) -[#]: via: (https://www.networkworld.com/article/3400659/self-learning-sensor-chips-wont-need-networks.html) -[#]: author: (Patrick Nelson https://www.networkworld.com/author/Patrick-Nelson/) - -Self-learning sensor chips won’t need networks -====== -Scientists working on new, machine-learning networks aim to embed everything needed for artificial intelligence (AI) onto a processor, eliminating the need to transfer data to the cloud or computers. -![Jiraroj Praditcharoenkul / Getty Images][1] - -Tiny, intelligent microelectronics should be used to perform as much sensor processing as possible on-chip rather than wasting resources by sending often un-needed, duplicated raw data to the cloud or computers. So say scientists behind new, machine-learning networks that aim to embed everything needed for artificial intelligence (AI) onto a processor. 
- -“This opens the door for many new applications, starting from real-time evaluation of sensor data,” says [Fraunhofer Institute for Microelectronic Circuits and Systems][2] on its website. No delays sending unnecessary data onwards, along with speedy processing, means theoretically there is zero latency. - -Plus, on-microprocessor, self-learning means the embedded, or sensor, devices can self-calibrate. They can even be “completely reconfigured to perform a totally different task afterwards,” the institute says. “An embedded system with different tasks is possible.” - -**[ Also read:[What is edge computing?][3] and [How edge networking and IoT will reshape data centers][4] ]** - -Much internet of things (IoT) data sent through networks is redundant and wastes resources: a temperature reading taken every 10 minutes, say, when the ambient temperature hasn’t changed, is one example. In fact, one only needs to know when the temperature has changed, and maybe then only when thresholds have been met. - -### Neural network-on-sensor chip - -The commercial German research organization says it’s developing a specific RISC-V microprocessor with a special hardware accelerator designed for a [brain-copying, artificial neural network (ANN) it has developed][5]. The architecture could ultimately be suitable for the condition-monitoring or predictive sensors of the kind we will likely see more of in the industrial internet of things (IIoT). - -Key to Fraunhofer IMS’s [Artificial Intelligence for Embedded Systems (AIfES)][6] is that the self-learning takes place at chip level rather than in the cloud or on a computer, and that it is independent of “connectivity towards a cloud or a powerful and resource-hungry processing entity.” But it still offers a “full AI mechanism, like independent learning,” - -It’s “decentralized AI,” says Fraunhofer IMS. 
"It’s not focused towards big-data processing.” - -Indeed, with these kinds of systems, no connection is actually required for the raw data, just for the post-analytical results, if indeed needed. Swarming can even replace that. Swarming lets sensors talk to one another, sharing relevant information without even getting a host network involved. - -“It is possible to build a network from small and adaptive systems that share tasks among themselves,” Fraunhofer IMS says. - -Other benefits in decentralized neural networks include that they can be more secure than the cloud. Because all processing takes place on the microprocessor, “no sensitive data needs to be transferred,” Fraunhofer IMS explains. - -### Other edge computing research - -The Fraunhofer researchers aren’t the only academics who believe entire networks become redundant with neuristor, brain-like AI chips. Binghamton University and Georgia Tech are working together on similar edge-oriented tech. - -“The idea is we want to have these chips that can do all the functioning in the chip, rather than messages back and forth with some sort of large server,” Binghamton said on its website when [I wrote about the university's work last year][7]. - -One of the advantages of no major communications linking: Not only don't you have to worry about internet resilience, but also that energy is saved creating the link. Energy efficiency is an ambition in the sensor world — replacing batteries is time consuming, expensive, and sometimes, in the case of remote locations, extremely difficult. - -Memory or storage for swaths of raw data awaiting transfer to be processed at a data center, or similar, doesn’t have to be provided either — it’s been processed at the source, so it can be discarded. 
- -**More about edge networking:** - - * [How edge networking and IoT will reshape data centers][4] - * [Edge computing best practices][8] - * [How edge computing can help secure the IoT][9] - - - -Join the Network World communities on [Facebook][10] and [LinkedIn][11] to comment on topics that are top of mind. - --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3400659/self-learning-sensor-chips-wont-need-networks.html - -作者:[Patrick Nelson][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Patrick-Nelson/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2019/02/industry_4-0_industrial_iot_smart_factory_automation_by_jiraroj_praditcharoenkul_gettyimages-902668940_2400x1600-100788458-large.jpg -[2]: https://www.ims.fraunhofer.de/en.html -[3]: https://www.networkworld.com/article/3224893/internet-of-things/what-is-edge-computing-and-how-it-s-changing-the-network.html -[4]: https://www.networkworld.com/article/3291790/data-center/how-edge-networking-and-iot-will-reshape-data-centers.html -[5]: https://www.ims.fraunhofer.de/en/Business_Units_and_Core_Competencies/Electronic_Assistance_Systems/News/AIfES-Artificial_Intelligence_for_Embedded_Systems.html -[6]: https://www.ims.fraunhofer.de/en/Business_Units_and_Core_Competencies/Electronic_Assistance_Systems/technologies/Artificial-Intelligence-for-Embedded-Systems-AIfES.html -[7]: https://www.networkworld.com/article/3326557/edge-chips-could-render-some-networks-useless.html -[8]: https://www.networkworld.com/article/3331978/lan-wan/edge-computing-best-practices.html -[9]: https://www.networkworld.com/article/3331905/internet-of-things/how-edge-computing-can-help-secure-the-iot.html -[10]: https://www.facebook.com/NetworkWorld/ 
-[11]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190606 What to do when yesterday-s technology won-t meet today-s support needs.md b/sources/talk/20190606 What to do when yesterday-s technology won-t meet today-s support needs.md deleted file mode 100644 index 622537f2f9..0000000000 --- a/sources/talk/20190606 What to do when yesterday-s technology won-t meet today-s support needs.md +++ /dev/null @@ -1,53 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (What to do when yesterday’s technology won’t meet today’s support needs) -[#]: via: (https://www.networkworld.com/article/3399875/what-to-do-when-yesterday-s-technology-won-t-meet-today-s-support-needs.html) -[#]: author: (Anand Rajaram ) - -What to do when yesterday’s technology won’t meet today’s support needs -====== - -![iStock][1] - -You probably already know that end user technology is exploding and are feeling the effects of it in your support organization every day. Remember when IT sanctioned and standardized every hardware and software instance in the workplace? Those days are long gone. Today, it’s the driving force of productivity that dictates what will or won’t be used – and that can be hard on a support organization. - -Whatever users need to do their jobs better, faster, more efficiently is what you are seeing come into the workplace. So naturally, that’s what comes into your service desk too. Support organizations see all kinds of [devices, applications, systems, and equipment][2], and it’s adding a great deal of complexity and demand to keep up with. In fact, four of the top five factors causing support ticket volumes to rise are attributed to new and current technology. - -To keep up with the steady [rise of tickets][3] and stay out in front of this surge, support organizations need to take a good, hard look at the processes and technologies they use. 
Yesterday’s methods won’t cut it. The landscape is simply changing too fast. Supporting today’s users and getting them back to work fast requires an expanding set of skills and tools. - -So where do you start with a new technology project? Just because a technology is new or hyped doesn’t mean it’s right for your organization. It’s important to understand your project goals and the experience you really want to create and match your technology choices to those goals. But don’t go it alone. Talk to your teams. Get intimately familiar with how your support organization works today. Understand your customers’ needs at a deep level. And bring the right people to the table to cover: - - * Business problem analysis: What existing business issue are stakeholders unhappy with? - * The impact of that problem: How does that issue justify making a change? - * Process automation analysis: What area(s) can technology help automate? - * Other solutions: Have you considered any other options besides technology? - - - -With these questions answered, you’re ready to entertain your technology options. Put together your “must-haves” in a requirements document and reach out to potential suppliers. During the initial information-gathering stage, assess if the supplier understands your goals and how their technology helps you meet them. To narrow the field, compare solutions side by side against your goals. Select the top two or three for more in-depth product demos before moving into product evaluations. By the time you’re ready for implementation, you have empirical, practical knowledge of how the solution will perform against your business goals. - -The key takeaway is this: Technology for technology’s sake is just technology. But technology that drives business value is a solution. If you want a solution that drives results for your organization and your customers, it’s worth following a strategic selection process to match your goals with the best technology for the job. 
- -For more insight, check out the [LogMeIn Rescue][4] and HDI webinar “[Technology and the Service Desk: Expanding Mission, Expanding Skills”][5]. - --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3399875/what-to-do-when-yesterday-s-technology-won-t-meet-today-s-support-needs.html - -作者:[Anand Rajaram][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2019/06/istock-1019006240-100798168-large.jpg -[2]: https://www.logmeinrescue.com/resources/datasheets/infographic-mobile-support-are-your-employees-getting-what-they-need?utm_source=idg%20media&utm_medium=display&utm_campaign=native&sfdc= -[3]: https://www.logmeinrescue.com/resources/analyst-reports/the-importance-of-remote-support-in-a-shift-left-world?utm_source=idg%20media&utm_medium=display&utm_campaign=native&sfdc= -[4]: https://www.logmeinrescue.com/?utm_source=idg%20media&utm_medium=display&utm_campaign=native&sfdc= -[5]: https://www.brighttalk.com/webcast/8855/312289?utm_source=LogMeIn7&utm_medium=brighttalk&utm_campaign=312289 diff --git a/sources/talk/20190611 6 ways to make enterprise IoT cost effective.md b/sources/talk/20190611 6 ways to make enterprise IoT cost effective.md deleted file mode 100644 index 492262c617..0000000000 --- a/sources/talk/20190611 6 ways to make enterprise IoT cost effective.md +++ /dev/null @@ -1,89 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (6 ways to make enterprise IoT cost effective) -[#]: via: (https://www.networkworld.com/article/3401082/6-ways-to-make-enterprise-iot-cost-effective.html) -[#]: author: (Fredric Paul https://www.networkworld.com/author/Fredric-Paul/) - -6 ways to make 
enterprise IoT cost effective -====== -Rob Mesirow, a principal at PwC’s Connected Solutions unit, offers tips for successfully implementing internet of things (IoT) projects without breaking the bank. -![DavidLeshem / Getty][1] - -There’s little question that the internet of things (IoT) holds enormous potential for the enterprise, in everything from asset tracking to compliance. - -But enterprise uses of IoT technology are still evolving, and it’s not yet entirely clear which use cases and practices currently make economic and business sense. So, I was thrilled to trade emails recently with [Rob Mesirow][2], a principal at [PwC’s Connected Solutions][3] unit, about how to make enterprise IoT implementations as cost effective as possible. - -“The IoT isn’t just about technology (hardware, sensors, software, networks, communications, the cloud, analytics, APIs),” Mesirow said, “though tech is obviously essential. It also includes ensuring cybersecurity, managing data governance, upskilling the workforce and creating a receptive workplace culture, building trust in the IoT, developing interoperability, and creating business partnerships and ecosystems—all part of a foundation that’s vital to a successful IoT implementation.” - -**[ Also read:[Enterprise IoT: Companies want solutions in these 4 areas][4] ]** - -Yes, that sounds complicated—and a lot of work for a still-hard-to-quantify return. Fortunately, though, Mesirow offered up some tips on how companies can make their IoT implementations as cost effective as possible. - -### 1\. Don’t wait for better technology - -Mesirow advised against waiting to implement IoT projects until you can deploy emerging technology such as [5G networks][5]. That makes sense, as long as your implementation doesn’t specifically require capabilities available only in the new technology. - -### 2\. 
Start with the basics, and scale up as needed - -“Companies need to start with the basics—building one app/task at a time—instead of jumping ahead with enterprise-wide implementations and ecosystems,” Mesirow said. - -“There’s no need to start an IoT initiative by tackling a huge, expensive ecosystem. Instead, begin with one manageable use case, and build up and out from there. The IoT can inexpensively automate many everyday tasks to increase effectiveness, employee productivity, and revenue.” - -After you pick the low-hanging fruit, it’s time to become more ambitious. - -“After getting a few successful pilots established, businesses can then scale up as needed, building on the established foundation of business processes, people experience, and technology," Mesirow said, - -### 3\. Make dumb things smart - -Of course, identifying the ripest low-hanging fruit isn’t always easy. - -“Companies need to focus on making dumb things smart, deploying infrastructure that’s not going to break the bank, and providing enterprise customers the opportunity to experience what data intelligence can do for their business,” Mesirow said. “Once they do that, things will take off.” - -### 4\. Leverage lower-cost networks - -“One key to building an IoT inexpensively is to use low-power, low-cost networks (Low-Power Wide-Area Networks (LPWAN)) to provide IoT services, which reduces costs significantly,” Mesirow said. - -Naturally, he mentioned that PwC has three separate platforms with some 80 products that hang off those platforms, which he said cost “a fraction of traditional IoT offerings, with security and privacy built in.” - -Despite the product pitch, though, Mesirow is right to call out the efficiencies involved in using low-cost, low-power networks instead of more expensive existing cellular. - -### 5\. Balance security vs. cost - -Companies need to plan their IoT network with costs vs. security in mind, Mesirow said. 
“Open-source networks will be less expensive, but there may be security concerns,” he said. - -That’s true, of course, but there may be security concerns in _any_ network, not just open-source solutions. Still, Mesirow’s overall point remains valid: Enterprises need to carefully consider all the trade-offs they’re making in their IoT efforts. - -### 6\. Account for _all_ the value IoT provides - -Finally, Mesirow pointed out that “much of the cost-effectiveness comes from the _value_ the IoT provides,” and its important to consider the return, not just the investment. - -“For example,” Mesirow said, the IoT “increases productivity by enabling the remote monitoring and control of business operations. It saves on energy costs by automatically turning off lights and HVAC when spaces are vacant, and predictive maintenance alerts lead to fewer machine repairs. And geolocation can lead to personalized marketing to customer smartphones, which can increase sales to nearby stores.” - -**[ Now read this:[5 reasons the IoT needs its own networks][6] ]** - -Join the Network World communities on [Facebook][7] and [LinkedIn][8] to comment on topics that are top of mind. 
- --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3401082/6-ways-to-make-enterprise-iot-cost-effective.html - -作者:[Fredric Paul][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Fredric-Paul/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2019/02/money_financial_salary_growth_currency_by-davidleshem-100787975-large.jpg -[2]: https://twitter.com/robmesirow -[3]: https://digital.pwc.com/content/pwc-digital/en/products/connected-solutions.html -[4]: https://www.networkworld.com/article/3396128/the-state-of-enterprise-iot-companies-want-solutions-for-these-4-areas.html -[5]: https://www.networkworld.com/article/3203489/what-is-5g-how-is-it-better-than-4g.html -[6]: https://www.networkworld.com/article/3284506/5-reasons-the-iot-needs-its-own-networks.html -[7]: https://www.facebook.com/NetworkWorld/ -[8]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190611 The carbon footprints of IT shops that train AI models are huge.md b/sources/talk/20190611 The carbon footprints of IT shops that train AI models are huge.md deleted file mode 100644 index b440b8d65b..0000000000 --- a/sources/talk/20190611 The carbon footprints of IT shops that train AI models are huge.md +++ /dev/null @@ -1,68 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (The carbon footprints of IT shops that train AI models are huge) -[#]: via: (https://www.networkworld.com/article/3401919/the-carbon-footprints-of-it-shops-that-train-ai-models-are-huge.html) -[#]: author: (Andy Patrizio https://www.networkworld.com/author/Andy-Patrizio/) - -The carbon footprints of IT shops that train AI models are huge 
-====== -Artificial intelligence (AI) model training can generate five times more carbon dioxide than a car does in a lifetime, researchers at the University of Massachusetts, Amherst find. -![ipopba / Getty Images][1] - -A new research paper from the University of Massachusetts, Amherst looked at the carbon dioxide (CO2) generated over the course of training several common large artificial intelligence (AI) models and found that the process can generate nearly five times the amount as an average American car over its lifetime plus the process of making the car itself. - -The [paper][2] specifically examined the model training process for natural-language processing (NLP), which is how AI handles natural language interactions. The study found that during the training process, more than 626,000 pounds of carbon dioxide is generated. - -This is significant, since AI training is one IT process that has remained firmly on-premises and not moved to the cloud. Very expensive equipment is needed, as is large volumes of data, so the cloud isn’t right work for most AI training, and the report notes this. Plus, IT shops want to keep that kind of IP in house. So, if you are experimenting with AI, that power bill is going to go up. - -**[ Read also:[How to plan a software-defined data-center network][3] ]** - -While the report used carbon dioxide as a measure, that’s still the product of electricity generation. Training involves the use of the most powerful processors, typically Nvidia GPUs, and they are not known for being low-power draws. And as the paper notes, “model training also incurs a substantial cost to the environment due to the energy required to power this hardware for weeks or months at a time.” - -Training is the most processor-intensive portion of AI. It can take days, weeks, or even months to “learn” what the model needs to know. That means power-hungry Nvidia GPUs running at full utilization for the entire time. 
In this case, how to handle and process natural language questions rather than broken sentences of keywords like your typical Google search. - -The report said training one model with a neural architecture generated 626,155 pounds of CO2. By contrast, one passenger flying round trip between New York and San Francisco would generate 1,984 pounds of CO2, an average American would generate 11,023 pounds in one year, and a car would generate 126,000 pounds over the course of its lifetime. - -### How the researchers calculated the CO2 amounts - -The researchers used four models in the NLP field that have been responsible for the biggest leaps in performance. They are Transformer, ELMo, BERT, and GPT-2. They trained all of the models on a single Nvidia Titan X GPU, with the exception of ELMo which was trained on three Nvidia GTX 1080 Ti GPUs. Each model was trained for a maximum of one day. - -**[[Learn Java from beginning concepts to advanced design patterns in this comprehensive 12-part course!][4] ]** - -They then used the number of training hours listed in the model’s original papers to calculate the total energy consumed over the complete training process. That number was converted into pounds of carbon dioxide equivalent based on the average energy mix in the U.S. - -The big takeaway is that computational costs start out relatively inexpensive, but they mushroom when additional tuning steps were used to increase the model’s final accuracy. A tuning process known as neural architecture search ([NAS][5]) is the worst offender because it does so much processing. NAS is an algorithm that searches for the best neural network architecture. It is seriously advanced AI and requires the most processing time and power. - -The researchers suggest it would be beneficial to directly compare different models to perform a cost-benefit (accuracy) analysis. 
- -“To address this, when proposing a model that is meant to be re-trained for downstream use, such as re-training on a new domain or fine-tuning on a new task, authors should report training time and computational resources required, as well as model sensitivity to hyperparameters. This will enable direct comparison across models, allowing subsequent consumers of these models to accurately assess whether the required computational resources,” the authors wrote. - -They also say researchers who are cost-constrained should pool resources and avoid the cloud, as cloud compute time is more expensive. In an example, it said a GPU server with eight Nvidia 1080 Ti GPUs and supporting hardware is available for approximately $20,000. To develop the sample models used in their study, that hardware would cost $145,000, plus electricity to run the models, about half the estimated cost to use on-demand cloud GPUs. - -“Unlike money spent on cloud compute, however, that invested in centralized resources would continue to pay off as resources are shared across many projects. A government-funded academic compute cloud would provide equitable access to all researchers,” they wrote. - -Join the Network World communities on [Facebook][6] and [LinkedIn][7] to comment on topics that are top of mind. 
- --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3401919/the-carbon-footprints-of-it-shops-that-train-ai-models-are-huge.html - -作者:[Andy Patrizio][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Andy-Patrizio/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2019/05/ai-vendor-relationship-management_artificial-intelligence_hand-on-virtual-screen-100795246-large.jpg -[2]: https://arxiv.org/abs/1906.02243 -[3]: https://www.networkworld.com/article/3284352/data-center/how-to-plan-a-software-defined-data-center-network.html -[4]: https://pluralsight.pxf.io/c/321564/424552/7490?u=https%3A%2F%2Fwww.pluralsight.com%2Fpaths%2Fjava -[5]: https://www.oreilly.com/ideas/what-is-neural-architecture-search -[6]: https://www.facebook.com/NetworkWorld/ -[7]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190612 IoT security vs. privacy- Which is a bigger issue.md b/sources/talk/20190612 IoT security vs. privacy- Which is a bigger issue.md deleted file mode 100644 index 2f06f6afc1..0000000000 --- a/sources/talk/20190612 IoT security vs. privacy- Which is a bigger issue.md +++ /dev/null @@ -1,95 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (IoT security vs. privacy: Which is a bigger issue?) -[#]: via: (https://www.networkworld.com/article/3401522/iot-security-vs-privacy-which-is-a-bigger-issue.html) -[#]: author: (Fredric Paul https://www.networkworld.com/author/Fredric-Paul/) - -IoT security vs. privacy: Which is a bigger issue? -====== -When it comes to the internet of things (IoT), security has long been a key concern. But privacy issues could be an even bigger threat. 
-![Ring][1] - -If you follow the news surrounding the internet of things (IoT), you know that security issues have long been a key concern for IoT consumers, enterprises, and vendors. Those issues are very real, but I’m becoming increasingly convinced that related but fundamentally different _privacy_ vulnerabilities may well be an even bigger threat to the success of the IoT. - -In June alone, we’ve seen a flood of IoT privacy issues inundate the news cycle, and observers are increasingly sounding the alarm that IoT users should be paying attention to what happens to the data collected by IoT devices. - -**[ Also read:[It’s time for the IoT to 'optimize for trust'][2] and [A corporate guide to addressing IoT security][2] ]** - -Predictably, most of the teeth-gnashing has come on the consumer side, but that doesn’t mean enterprises users are immune to the issue. One the one hand, just like consumers, companies are vulnerable to their proprietary information being improperly shared and misused. More immediately, companies may face backlash from their own customers if they are seen as not properly guarding the data they collect via the IoT. Too often, in fact, enterprises shoot themselves in the foot on privacy issues, with practices that range from tone-deaf to exploitative to downright illegal—leading almost [two-thirds (63%) of consumers to describe IoT data collection as “creepy,”][3] while more than half (53%) “distrust connected devices to protect their privacy and handle information in a responsible manner.” - -### Ring becoming the poster child for IoT privacy issues - -As a case in point, let’s look at the case of [Ring, the IoT doorbell company now owned by Amazon][4]. Ring is [reportedly working with police departments to build a video surveillance network in residential neighborhoods][5]. 
Police in more than 50 cities and towns across the country are apparently offering free or discounted Ring doorbells, and sometimes requiring the recipients to share footage for use in investigations. (While [Ring touts the security benefits][6] of working with law enforcement, it has asked police departments to end the practice of _requiring_ users to hand over footage, as it appears to violate the devices’ terms of service.) - -Many privacy advocates are troubled by this degree of cooperation between police and Ring, but that’s only part of the problem. Last year, for example, [Ring workers in Ukraine reportedly watched customer feeds][7]. Amazingly, though, even that only scratches the surface of the privacy flaps surrounding Ring. - -### Guilty by video? - -According to [Motherboard][8], “Ring is using video captured by its doorbell cameras in Facebook advertisements that ask users to identify and call the cops on a woman whom local police say is a suspected thief.” While the police are apparently appreciative of the “additional eyes that may see this woman and recognize her,” the ad calls the woman a thief even though she has not been charged with a crime, much less convicted! - -Ring may be today’s poster child for IoT privacy issues, but IoT privacy complaints are widespread. In many cases, it comes down to what IoT users—or others nearby—are getting in return for giving up their privacy. According to the [Guardian][9], for example, Google’s Sidewalk Labs smart city project is little more than “surveillance capitalism.” And while car owners may get a discount on auto insurance in return for sharing their driving data, that relationship is hardly set in stone. It may not be long before drivers have to give up their data just to get insurance at all. - -**[[Prepare to become a Certified Information Security Systems Professional with this comprehensive online course from PluralSight. 
Now offering a 10-day free trial!][10] ]** - -And as the recent [data breach at the U.S. Customs and Border Protection][11] once again demonstrates, private data is “[a genie without a bottle][12].” No matter what legal or technical protections are put in place, the data may always be revealed or used in unforeseen ways. Heck, when you put it all together, it’s enough to make you wonder [whether doorbells really need to be smart][13] at all? - -**Read more about IoT:** - - * [Google’s biggest, craziest ‘moonshot’ yet][14] - * [What is the IoT? How the internet of things works][15] - * [What is edge computing and how it’s changing the network][16] - * [Most powerful internet of things companies][17] - * [10 Hot IoT startups to watch][18] - * [The 6 ways to make money in IoT][19] - * [What is digital twin technology? [and why it matters]][20] - * [Blockchain, service-centric networking key to IoT success][21] - * [Getting grounded in IoT networking and security][22] - * [Building IoT-ready networks must become a priority][23] - * [What is the Industrial IoT? [And why the stakes are so high]][24] - - - -Join the Network World communities on [Facebook][25] and [LinkedIn][26] to comment on topics that are top of mind. 
- --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3401522/iot-security-vs-privacy-which-is-a-bigger-issue.html - -作者:[Fredric Paul][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Fredric-Paul/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2019/04/ringvideodoorbellpro-100794084-large.jpg -[2]: https://www.networkworld.com/article/3269165/internet-of-things/a-corporate-guide-to-addressing-iot-security-concerns.html -[3]: https://www.cpomagazine.com/data-privacy/consumers-still-concerned-about-iot-security-and-privacy-issues/ -[4]: https://www.cnbc.com/2018/02/27/amazon-buys-ring-a-former-shark-tank-reject.html -[5]: https://www.cnet.com/features/amazons-helping-police-build-a-surveillance-network-with-ring-doorbells/ -[6]: https://blog.ring.com/2019/02/14/how-rings-neighbors-creates-safer-more-connected-communities/ -[7]: https://www.theinformation.com/go/b7668a689a -[8]: https://www.vice.com/en_us/article/pajm5z/amazon-home-surveillance-company-ring-law-enforcement-advertisements -[9]: https://www.theguardian.com/cities/2019/jun/06/toronto-smart-city-google-project-privacy-concerns -[10]: https://pluralsight.pxf.io/c/321564/424552/7490?u=https%3A%2F%2Fwww.pluralsight.com%2Fpaths%2Fcertified-information-systems-security-professional-cisspr -[11]: https://www.washingtonpost.com/technology/2019/06/10/us-customs-border-protection-says-photos-travelers-into-out-country-were-recently-taken-data-breach/?utm_term=.0f3a38aa40ca -[12]: https://smartbear.com/blog/test-and-monitor/data-scientists-are-sexy-and-7-more-surprises-from/ -[13]: https://slate.com/tag/should-this-thing-be-smart -[14]: https://www.networkworld.com/article/3058036/google-s-biggest-craziest-moonshot-yet.html 
-[15]: https://www.networkworld.com/article/3207535/internet-of-things/what-is-the-iot-how-the-internet-of-things-works.html -[16]: https://www.networkworld.com/article/3224893/internet-of-things/what-is-edge-computing-and-how-it-s-changing-the-network.html -[17]: https://www.networkworld.com/article/2287045/internet-of-things/wireless-153629-10-most-powerful-internet-of-things-companies.html -[18]: https://www.networkworld.com/article/3270961/internet-of-things/10-hot-iot-startups-to-watch.html -[19]: https://www.networkworld.com/article/3279346/internet-of-things/the-6-ways-to-make-money-in-iot.html -[20]: https://www.networkworld.com/article/3280225/internet-of-things/what-is-digital-twin-technology-and-why-it-matters.html -[21]: https://www.networkworld.com/article/3276313/internet-of-things/blockchain-service-centric-networking-key-to-iot-success.html -[22]: https://www.networkworld.com/article/3269736/internet-of-things/getting-grounded-in-iot-networking-and-security.html -[23]: https://www.networkworld.com/article/3276304/internet-of-things/building-iot-ready-networks-must-become-a-priority.html -[24]: https://www.networkworld.com/article/3243928/internet-of-things/what-is-the-industrial-iot-and-why-the-stakes-are-so-high.html -[25]: https://www.facebook.com/NetworkWorld/ -[26]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190612 Software Defined Perimeter (SDP)- Creating a new network perimeter.md b/sources/talk/20190612 Software Defined Perimeter (SDP)- Creating a new network perimeter.md deleted file mode 100644 index 88a540e875..0000000000 --- a/sources/talk/20190612 Software Defined Perimeter (SDP)- Creating a new network perimeter.md +++ /dev/null @@ -1,121 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Software Defined Perimeter (SDP): Creating a new network perimeter) -[#]: via: 
(https://www.networkworld.com/article/3402258/software-defined-perimeter-sdp-creating-a-new-network-perimeter.html) -[#]: author: (Matt Conran https://www.networkworld.com/author/Matt-Conran/) - -Software Defined Perimeter (SDP): Creating a new network perimeter -====== -Considering the way networks work today and the change in traffic patterns; both internal and to the cloud, this limits the effect of the fixed perimeter. -![monsitj / Getty Images][1] - -Networks were initially designed to create internal segments that were separated from the external world by using a fixed perimeter. The internal network was deemed trustworthy, whereas the external was considered hostile. However, this is still the foundation for most networking professionals even though a lot has changed since the inception of the design. - -More often than not the fixed perimeter consists of a number of network and security appliances, thereby creating a service chained stack, resulting in appliance sprawl. Typically, the appliances that a user may need to pass to get to the internal LAN may vary. But generally, the stack would consist of global load balancers, external firewall, DDoS appliance, VPN concentrator, internal firewall and eventually LAN segments. - -The perimeter approach based its design on visibility and accessibility. If an entity external to the network can’t see an internal resource, then access cannot be gained. As a result, external entities were blocked from coming in, yet internal entities were permitted to passage out. However, it worked only to a certain degree. Realistically, the fixed network perimeter will always be breachable; it's just a matter of time. Someone with enough skill will eventually get through. 
- -**[ Related:[MPLS explained – What you need to know about multi-protocol label switching][2]** - -### Environmental changes – the cloud and mobile workforce - -Considering the way networks work today and the change in traffic patterns; both internal and to the cloud, this limits the effect of the fixed perimeter. Nowadays, we have a very fluid network perimeter with many points of entry. - -Imagine a castle with a portcullis that was used to gain access. To gain entry into the portcullis was easy as we just needed to pass one guard. There was only one way in and one way out. But today, in this digital world, we have so many small doors and ways to enter, all of which need to be individually protected. - -This boils down to the introduction of cloud-based application services and changing the location of the perimeter. Therefore, the existing networking equipment used for the perimeter is topologically ill-located. Nowadays, everything that is important is outside the perimeter, such as, remote access workers, SaaS, IaaS and PaaS-based applications. - -Users require access to the resources in various cloud services regardless of where the resources are located, resulting in complex-to-control multi-cloud environments. Objectively, the users do not and should not care where the applications are located. They just require access to the application. Also, the increased use of mobile workforce that demands anytime and anywhere access from a variety of devices has challenged the enterprises to support this dynamic workforce. - -There is also an increasing number of devices, such as, BYOD, on-site contractors, and partners that will continue to grow internal to the network. This ultimately leads to a world of over-connected networks. - -### Over-connected networks - -Over-connected networks result in complex configurations of network appliances. This results in large and complex policies without any context. 
- -They provide a level of coarse-grained access to a variety of services where the IP address does not correspond to the actual user. Traditional appliances that use static configurations to limit the incoming and outgoing traffic are commonly based on information in the IP packet and the port number. - -Essentially, there is no notion of policy and explanation of why a given source IP address is on the list. This approach fails to take into consideration any notion of trust and dynamically adjust access in relation to the device, users and application request events. - -### Problems with IP addresses - -Back in the early 1990s, RFC 1597 declared three IP ranges reserved for private use: 10.0.0.0/8, 172.16.0.0/12 and 192.168.0.0/16. If an end host was configured with one of these addresses, it was considered more secure. However, this assumption of trust was shattered with the passage of time and it still haunts us today. - -Network Address Translation (NAT) also changed things to a great extent. NAT allowed internal trusted hosts to communicate directly with the external untrusted hosts. However, since Transmission Control Protocol (TCP) is bidirectional, it allows the data to be injected by the external hosts while connecting back to the internal hosts. - -Also, there is no contextual information regarding the IP addresses as the sole purpose revolved around connectivity. If you have the IP address of someone, you can connect to them. The authentication was handled higher up in the stack. - -Not only do user’s IP addresses change regularly, but there’s also not a one-to-one correspondence between the users and IP addresses. Anyone can communicate from any IP address they please and also insert themselves between you and the trusted resource. - -Have you ever heard of the 20-year old computer that responds to an internet control message protocol (ICMP) request, yet no one knows where it is? 
But this would not exist on a zero trust network as the network is dark until the administrator turns the lights on with a whitelist policy rule set. This is contrary to the legacy black policy rule set. You can find more information on zero trust in my course: [Zero Trust Networking: The Big Picture][3]. - -Therefore, we can’t just rely on the IP addresses and expect them to do much more other than connect. As a result, we have to move away from the IP addresses and network location as the proxy for access trust. The network location can longer be the driver of network access levels. It is not fully equipped to decide the trust of a device, user or application. - -### Visibility – a major gap - -When we analyze networking and its flaws, visibility is a major gap in today’s hybrid environments. By and large, enterprise networks are complex beasts. More than often networking pros do not have accurate data or insights into who or what is accessing the network resource. - -I.T does not have the visibility in place to detect, for example, insecure devices, unauthorized users and potentially harmful connections that could propagate malware or perform data exfiltration. - -Also, once you know how network elements connect, how do you ensure that they don’t reconnect through a broader definition of connectivity? For this, you need contextual visibility. You need full visibility into the network to see who, what, when, and how they are connecting with the device. - -### What’s the workaround? - -A new approach is needed that enables the application owners to protect the infrastructure located in a public or private cloud and on-premise data center. This new network architecture is known as [software-defined perimeter][4] (SDP). Back in 2013, Cloud Security Alliance (CSA) launched the SDP initiative, a project designed to develop the architecture for creating more robust networks. - -The principles behind SDPs are not entirely new. 
Organizations within the DoD and Intelligence Communities (IC) have implemented a similar network architecture that is based on authentication and authorization prior to network access. - -Typically, every internal resource is hidden behind an appliance. And a user must authenticate before visibility of the authorized services is made available and access is granted. - -### Applying the zero trust framework - -SDP is an extension to [zero trust][5] which removes the implicit trust from the network. The concept of SDP started with Google’s BeyondCorp, which is the general direction that the industry is heading to right now. - -Google’s BeyondCorp puts forward the idea that the corporate network does not have any meaning. The trust regarding accessing an application is set by a static network perimeter containing a central appliance. This appliance permits the inbound and outbound access based on a very coarse policy. - -However, access to the application should be based on other parameters such as who the user is, the judgment of the security stance of the device, followed by some continuous assessment of the session. Rationally, only then should access be permitted. - -Let’s face it, the assumption that internal traffic can be trusted is flawed and zero trust assumes that all hosts internal to the network are internet facing, thereby hostile. - -### What is software-defined perimeter (SDP)? - -The SDP aims to deploy perimeter functionality for dynamically provisioned perimeters meant for clouds, hybrid environments, and on-premise data center infrastructures. There is often a dynamic tunnel that automatically gets created during the session. That is a one-to-one mapping between the requesting entity and the trusted resource. The important point to note here is that perimeters are formed not solely to obey a fixed location already design by the network team. - -SDP relies on two major pillars and these are the authentication and authorization stages. 
SDPs require endpoints to authenticate and be authorized first before obtaining network access to the protected entities. Then, encrypted connections are created in real-time between the requesting systems and application infrastructure. - -Authenticating and authorizing the users and their devices before even allowing a single packet to reach the target service, enforces what's known as least privilege at the network layer. Essentially, the concept of least privilege is for an entity to be granted only the minimum privileges that it needs to get its work done. Within a zero trust network, privilege is more dynamic than it would be in traditional networks since it uses many different attributes of activity to determine the trust score. - -### The dark network - -Connectivity is based on a need-to-know model. Under this model, no DNS information, internal IP addresses or visible ports of internal network infrastructure are transmitted. This is the reason why SDP assets are considered as “dark”. As a result, SDP isolates any concerns about the network and application. The applications and users are considered abstract, be it on-premise or in the cloud, which becomes irrelevant to the assigned policy. - -Access is granted directly between the users and their devices to the application and resource, regardless of the underlying network infrastructure. There simply is no concept of inside and outside of the network. This ultimately removes the network location point as a position of advantage and also eliminates the excessive implicit trust that IP addresses offer. - -**This article is published as part of the IDG Contributor Network.[Want to Join?][6]** - -Join the Network World communities on [Facebook][7] and [LinkedIn][8] to comment on topics that are top of mind. 
- --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3402258/software-defined-perimeter-sdp-creating-a-new-network-perimeter.html - -作者:[Matt Conran][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Matt-Conran/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2019/03/sdn_software-defined-network_architecture-100791938-large.jpg -[2]: https://www.networkworld.com/article/2297171/sd-wan/network-security-mpls-explained.html -[3]: http://pluralsight.com/courses/zero-trust-networking-big-picture -[4]: https://network-insight.net/2018/09/software-defined-perimeter-zero-trust/ -[5]: https://network-insight.net/2018/10/zero-trust-networking-ztn-want-ghosted/ -[6]: /contributor-network/signup.html -[7]: https://www.facebook.com/NetworkWorld/ -[8]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190613 Data centers should sell spare UPS capacity to the grid.md b/sources/talk/20190613 Data centers should sell spare UPS capacity to the grid.md deleted file mode 100644 index 69b4356661..0000000000 --- a/sources/talk/20190613 Data centers should sell spare UPS capacity to the grid.md +++ /dev/null @@ -1,59 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Data centers should sell spare UPS capacity to the grid) -[#]: via: (https://www.networkworld.com/article/3402039/data-centers-should-sell-spare-ups-capacity-to-the-grid.html) -[#]: author: (Patrick Nelson https://www.networkworld.com/author/Patrick-Nelson/) - -Data centers should sell spare UPS capacity to the grid -====== -Distributed Energy is gaining traction, providing an opportunity for data centers to sell excess power in 
data center UPS batteries to the grid. -![Getty Images][1] - -The energy storage capacity in uninterruptable power supply (UPS) batteries, languishing often dormant in data centers, could provide new revenue streams for those data centers, says Eaton, a major electrical power management company. - -Excess, grid-generated power, created during times of low demand, should be stored on the now-proliferating lithium-backup power systems strewn worldwide in data centers, Eaton says. Then, using an algorithm tied to grid-demand, electricity should be withdrawn as necessary for grid use. It would then be slid back onto the backup batteries when not needed. - -**[ Read also:[How server disaggregation can boost data center efficiency][2] | Get regularly scheduled insights: [Sign up for Network World newsletters][3] ]** - -The concept is called Distributed Energy and has been gaining traction in part because electrical generation is changing—emerging green power, such as wind and solar, being used now at the grid-level have considerations that differ from the now-retiring, fossil-fuel power generation. You can generate solar only in daylight, yet much demand takes place on dark evenings, for example. - -Coal, gas, and oil deliveries have always been, to a great extent, pre-planned, just-in-time, and used for electrical generation in real time. Nowadays, though, fluctuations between supply, storage, and demand are kicking in. Electricity storage on the grid is required. - -Eaton says that by piggy-backing on existing power banks, electricity distribution could be evened out better. The utilities would deliver power more efficiently, despite the peaks and troughs in demand—with the data center UPS, in effect, acting like a quasi-grid-power storage battery bank, or virtual power plant. - -The objective of this UPS use case, called EnergyAware, is to regulate frequency in the grid. 
Abnormalities happen if there’s a sudden spike in demand but no power on hand to supply the surge.
- --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3402039/data-centers-should-sell-spare-ups-capacity-to-the-grid.html - -作者:[Patrick Nelson][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Patrick-Nelson/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2018/10/business_continuity_server-100777720-large.jpg -[2]: https://www.networkworld.com/article/3266624/how-server-disaggregation-could-make-cloud-datacenters-more-efficient.html -[3]: https://www.networkworld.com/newsletters/signup.html -[4]: https://www.eaton.com/us/en-us/products/backup-power-ups-surge-it-power-distribution/backup-power-ups/dual-purpose-ups-technology.html -[5]: https://www.facebook.com/NetworkWorld/ -[6]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190617 5 transferable higher-education skills.md b/sources/talk/20190617 5 transferable higher-education skills.md deleted file mode 100644 index db0f584aaf..0000000000 --- a/sources/talk/20190617 5 transferable higher-education skills.md +++ /dev/null @@ -1,64 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (5 transferable higher-education skills) -[#]: via: (https://opensource.com/article/19/6/5-transferable-higher-education-skills) -[#]: author: (Stephon Brown https://opensource.com/users/stephb) - -5 transferable higher-education skills -====== -If you're moving from the Ivory Tower to the Matrix, you already have -the foundation for success in the developer role. 
-![Two hands holding a resume with computer, clock, and desk chair ][1] - -My transition from a higher-education professional into the tech realm was comparable to moving from a pond into an ocean. There was so much to learn, and after learning, there was still so much more to learn! - -Rather than going down the rabbit hole and being overwhelmed by what I did not know, in the last two to three months, I have been able to take comfort in the realization that I was not entirely out of my element as a developer. The skills I acquired during my six years as a university professional gave me the foundation to be successful in the developer role. - -These skills are transferable in any direction you plan to go within or outside tech, and it's valuable to reflect on how they apply to your new position. - -### 1\. Composition and documentation - -Higher education is replete with opportunities to develop skills related to composition and communication. In most cases, clear writing and communication are mandatory requirements for university administrative and teaching positions. Although you may not yet be well-versed in deep technical concepts, learning documentation and writing your progress may be two of the strongest skills you bring as a former higher education administrator. All of those "In response to…" emails will finally come in handy when describing the inner workings of a class or leaving succinct comments for other developers to follow what you have implemented. - -### 2\. Problem-solving and critical thinking - -Whether you've been an adviser who sits with students and painstakingly develops class schedules for graduation or a finance buff who balances government funds, you will not leave critical thinking behind as you transition into a developer role. Although your critical thinking may have seemed specialized for your work, the skill of turning problems into opportunities is not lost when contributing to code. 
The experience gained while spending long days and nights revising recruitment strategies will be necessary when composing algorithms and creative ways of delivering data. Continue to foster a passion for solving problems, and you will not have any trouble becoming an efficient and skillful developer. - -### 3\. Communication - -Though it may seem to overlap with writing (above), communication spans verbal and written disciplines. When you're interacting with clients and leadership, you may have a leg up over your peers because of your higher-education experience. Being approachable and understanding how to manage interactions are skills that some software practitioners may not have fostered to an impactful level. Although you will experience days of staring at a screen and banging your head against the keyboard, you can rest well in knowing you can describe technical concepts and interact with a wide range of audiences, from clients to peers. - -### 4\. Leadership - -Sitting on that panel; planning that event; leading that workshop. All of those experiences provide you with the grounding to plan and lead smaller projects as a new developer. Leadership is not limited to heading up large and small teams; its essence lies in taking initiative. This can be volunteering to do research on a new feature or do more extensive unit tests for your code. However you use it, your foundation as an educator will allow you to go further in technology development and maintenance. - -### 5\. Research - -You can Google with the best of them. Being able to clearly truncate your query into the idea you are searching for is characteristic of a higher-education professional. Most administrator or educator jobs focus on solving problems in a defined process for qualitative, quantitative, or mixed results; therefore, cultivating your scientific mind is valuable when providing software solutions. 
Being able to reach across various offices and fields for event planning and program implementation fits well within team collaboration—both within your new team and across development teams.
- --------------------------------------------------------------------------------- - -via: https://opensource.com/article/19/6/5-transferable-higher-education-skills - -作者:[Stephon Brown][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://opensource.com/users/stephb -[b]: https://github.com/lujun9972 -[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/resume_career_document_general.png?itok=JEaFL2XI (Two hands holding a resume with computer, clock, and desk chair ) -[2]: https://en.wikipedia.org/wiki/Impostor_syndrome diff --git a/sources/talk/20190618 17 predictions about 5G networks and devices.md b/sources/talk/20190618 17 predictions about 5G networks and devices.md deleted file mode 100644 index d8833f9887..0000000000 --- a/sources/talk/20190618 17 predictions about 5G networks and devices.md +++ /dev/null @@ -1,82 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (17 predictions about 5G networks and devices) -[#]: via: (https://www.networkworld.com/article/3403358/17-predictions-about-5g-networks-and-devices.html) -[#]: author: (Fredric Paul https://www.networkworld.com/author/Fredric-Paul/) - -17 predictions about 5G networks and devices -====== -Not surprisingly, the new Ericsson Mobility Report is bullish on the potential of 5G technology. Here’s a quick look at the most important numbers. -![Vertigo3D / Getty Images][1] - -_“As market after market switches on 5G, we are at a truly momentous point in time. No previous generation of mobile technology has had the potential to drive economic growth to the extent that 5G promises. 
These days, everyone expects new technologies to be met with gushing optimism and dreamy visions of how it’s going to change the world in the blink of an eye.
“Despite challenging 5G timelines, device suppliers are expected to be ready with different band and architecture support in a range of devices during 2019.” - 8. “Spectrum sharing … chipsets are currently in development and are anticipated to be in 5G commercial devices in late 2019." - 9. “[VoLTE][5] is the foundation for enabling voice and communication services on 5G devices. Subscriptions are expected to reach 2.1 billion by the end of 2019. … The number of VoLTE subscriptions is projected to reach 5.9 billion by the end of 2024, accounting for more than 85 percent of combined LTE and 5G subscriptions.” - - - -![][6] - -### Regional 5G projections - - 1. “In North America, … service providers have already launched commercial 5G services, both for fixed wireless access and mobile. … By the end of 2024, we anticipate close to 270 million 5G subscriptions in the region, accounting for more than 60 percent of mobile subscriptions.” - 2. “In Western Europe … The momentum for 5G in the region was highlighted by the first commercial launch in April. By the end of 2024, 5G is expected to account for around 40 percent of mobile subscriptions. - 3. In Central and Eastern Europe, … The first 5G subscriptions are expected in 2019, and will make up 15 percent of subscriptions in 2024.” - 4. “In North East Asia, … the region’s 5G subscription penetration is projected to reach 47 percent [by the end of 2024]. - 5. “[In India,] 5G subscriptions are expected to become available in 2022 and will represent 6 percent of mobile subscriptions at the end of 2024.” - 6. “In the Middle East and North Africa, we anticipate commercial 5G deployments with leading communications service providers during 2019, and significant volumes in 2021. … Around 60 million 5G subscriptions are forecast for the end of 2024, representing 3 percent of total mobile subscriptions.” - 7. “Initial 5G commercial devices are expected in the [South East Asia and Oceania] region during the first half of 2019. 
By the end of 2024, it is anticipated that almost 12 percent of subscriptions in the region will be for 5G.”
- --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3403358/17-predictions-about-5g-networks-and-devices.html - -作者:[Fredric Paul][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Fredric-Paul/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2019/02/5g_wireless_technology_network_connections_by_credit-vertigo3d_gettyimages-1043302218_3x2-100787550-large.jpg -[2]: https://www.ericsson.com/assets/local/mobility-report/documents/2019/ericsson-mobility-report-june-2019.pdf -[3]: https://www.gartner.com/en/research/methodologies/gartner-hype-cycle -[4]: https://www.networkworld.com/article/3354477/mobile-world-congress-the-time-of-5g-is-almost-here.html -[5]: https://www.gsma.com/futurenetworks/technology/volte/ -[6]: https://images.idgesg.net/images/article/2019/06/ericsson-mobility-report-june-2019-graph-100799481-large.jpg -[7]: https://www.facebook.com/NetworkWorld/ -[8]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190618 Why your workplace arguments aren-t as effective as you-d like.md b/sources/talk/20190618 Why your workplace arguments aren-t as effective as you-d like.md deleted file mode 100644 index 54a0cca26f..0000000000 --- a/sources/talk/20190618 Why your workplace arguments aren-t as effective as you-d like.md +++ /dev/null @@ -1,144 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Why your workplace arguments aren't as effective as you'd like) -[#]: via: (https://opensource.com/open-organization/19/6/barriers-productive-arguments) -[#]: author: (Ron McFarland https://opensource.com/users/ron-mcfarland/users/ron-mcfarland) - -Why your workplace arguments 
aren't as effective as you'd like -====== -Open organizations rely on open conversations. These common barriers to -productive argument often get in the way. -![Arrows pointing different directions][1] - -Transparent, frank, and often contentious arguments are part of life in an open organization. But how can we be sure those conversations are _productive_ —not _destructive_? - -This is the second installment of a two-part series on how to argue and actually achieve something. In the [first article][2], I mentioned what arguments are (and are not), according to author Sinnott-Armstrong in his book _Think Again: How to Reason and Argue._ I also offered some suggestions for making arguments as productive as possible. - -In this article, I'll examine three barriers to productive arguments that Sinnott-Armstrong elaborates in his book: incivility, polarization, and language issues. Finally, I'll explain his suggestions for addressing those barriers. - -### Incivility - -"Incivility" has become a social concern in recent years. Consider this: As a tactic in arguments, incivility _can_ have an effect in certain situations—and that's why it's a common strategy. Sinnott-Armstrong notes that incivility: - - * **Attracts attention:** Incivility draws people's attention in one direction, sometimes to misdirect attention from or outright obscure other issues. It redirects people's attention to shocking statements. Incivility, exaggeration, and extremism can increase the size of an audience. - - * **Energizes:** Sinnott-Armstrong writes that seeing someone being uncivil on a topic of interest can generate energy from a state of powerlessness. - - * **Stimulates memory:** Forgetting shocking statements is difficult; they stick in our memory more easily than statements that are less surprising to us. - - * **Excites the powerless:** The groups most likely to believe and invest in someone being uncivil are those that feel they're powerless and being treated unfairly. 
One of the ways people polarize is by not talking to each other and withholding information.
**Discounting:** This involves anticipating what the other person will say and attempting to weaken it as much as possible by framing an argument in a negative way.
Being a specialist in one field doesn't necessarily make that person an expert in another.
Do those sources have the expertise on the specific issue addressed? - - 4. **Agreement:** Is there agreement among many experts within the same specialty? - - - - -### Let's argue - -If you really want to strengthen your ability to argue, find someone that totally disagrees with you but wants to learn and understand your beliefs. - -When I was a university student, I would usually sit toward the front of the classroom. When I didn't understand something, I would start asking questions for clarification. Everyone else in the class would just sit silently, saying nothing. After class, however, other students would come up to me and thank me for asking those questions—because everyone else in the room was confused, too. - -Clarification is a powerful act—not just in the classroom, but during arguments anywhere. Building an organizational culture in which people feel empowered to ask for clarification is critical for productive arguments (I've [given presentations on this topic][3] before). If members have the courage to clarify premises, and they can do so in an environment where others don't think they're being belligerent, then this might be the key to a successful and productive argument. - -If you really want to strengthen your ability to argue, find someone that totally disagrees with you but wants to learn and understand your beliefs. Then, practice some of Sinnott-Armstrong's suggestions. Arguing productively will enhance [transparency, inclusivity, and collaboration][4] in your organization—leading to a more open culture. 
- --------------------------------------------------------------------------------- - -via: https://opensource.com/open-organization/19/6/barriers-productive-arguments - -作者:[Ron McFarland][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://opensource.com/users/ron-mcfarland/users/ron-mcfarland -[b]: https://github.com/lujun9972 -[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/directions-arrows.png?itok=EE3lFewZ (Arrows pointing different directions) -[2]: https://opensource.com/open-organization/19/5/productive-arguments -[3]: https://www.slideshare.net/RonMcFarland1/argue-successfully-achieve-something -[4]: https://opensource.com/open-organization/resources/open-org-definition diff --git a/sources/talk/20190619 With Tableau, SaaS king Salesforce becomes a hybrid cloud company.md b/sources/talk/20190619 With Tableau, SaaS king Salesforce becomes a hybrid cloud company.md deleted file mode 100644 index d0d1d24cb6..0000000000 --- a/sources/talk/20190619 With Tableau, SaaS king Salesforce becomes a hybrid cloud company.md +++ /dev/null @@ -1,67 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (With Tableau, SaaS king Salesforce becomes a hybrid cloud company) -[#]: via: (https://www.networkworld.com/article/3403442/with-tableau-saas-king-salesforce-becomes-a-hybrid-cloud-company.html) -[#]: author: (Andy Patrizio https://www.networkworld.com/author/Andy-Patrizio/) - -With Tableau, SaaS king Salesforce becomes a hybrid cloud company -====== -Once dismissive of software, Salesforce acknowledges the inevitability of the hybrid cloud. 
-![Martyn Williams/IDGNS][1] - -I remember a time when people at Salesforce events would hand out pins that read “Software” inside a red circle with a slash through it. The High Priest of SaaS (a.k.a. CEO Marc Benioff) was so adamant against installed, on-premises software that his keynotes were always comical. - -Now, Salesforce is prepared to [spend $15.7 billion to acquire Tableau Software][2], the leader in on-premises data analytics. - -On the hell-freezes-over scale, this is up there with Microsoft embracing Linux or Apple PR people returning a phone call. Well, we know at least one of those has happened. - -**[ Also read:[Hybrid Cloud: The time for adoption is upon us][3] | Stay in the know: [Subscribe and get daily newsletter updates][4] ]** - -So, why would a company that is so steeped in the cloud, so anti-on-premises software, make such a massive purchase? - -Partly it is because Benioff and company are finally coming to the same conclusion as most everyone else: The hybrid cloud, a mix of on-premises systems and public cloud, is the wave of the future, and pure cloud plays are in the minority. - -The reality is that data is hybrid and does not sit in a single location, and Salesforce is finally acknowledging this, said Tim Crawford, president of Avoa, a strategic CIO advisory firm. - -“I see the acquisition of Tableau by Salesforce as less about getting into the on-prem game as it is a reality of the world we live in. Salesforce needed a solid analytics tool that went well beyond their existing capability. Tableau was that tool,” he said. - -**[[Become a Microsoft Office 365 administrator in record time with this quick start course from PluralSight.][5] ]** - -Salesforce also understands that they need a better understanding of customers and those data insights that drive customer decisions. That data is both on-prem and in the cloud, Crawford noted. 
It is in Salesforce, other solutions, and the myriad of Excel spreadsheets spread across employee systems. Tableau crosses the hybrid boundaries and brings a straightforward way to visualize data. - -Salesforce had analytics features as part of its SaaS platform, but it was geared around their own platform, whereas everyone uses Tableau and Tableau supports all manner of analytics. - -“There’s a huge overlap between Tableau customers and Salesforce customers,” Crawford said. “The data is everywhere in the enterprise, not just in Salesforce. Salesforce does a great job with its own data, but Tableau does great with data in a lot of places because it’s not tied to one platform. So, it opens up where the data comes from and the insights you get from the data.” - -Crawford said that once the deal is done and Tableau is under some deeper pockets, the organization may be able to innovate faster or do things they were unable to do prior. That hardly indicates Tableau was struggling, though. It pulled in [$1.16 billion in revenue][6] in 2018. - -Crawford also expects Salesforce to push Tableau to open up new possibilities for customer insights by unlocking customer data inside and outside of Salesforce. One challenge for the two companies is to maintain that neutrality so that they don’t lose the ability to use Tableau for non-customer centric activities. - -“It’s a beautiful way to visualize large sets of data that have nothing to do with customer centricity,” he said. - -Join the Network World communities on [Facebook][7] and [LinkedIn][8] to comment on topics that are top of mind. 
- --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3403442/with-tableau-saas-king-salesforce-becomes-a-hybrid-cloud-company.html - -作者:[Andy Patrizio][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Andy-Patrizio/ -[b]: https://github.com/lujun9972 -[1]: https://images.techhive.com/images/article/2015/09/150914-salesforce-dreamforce-2-100614575-large.jpg -[2]: https://www.cio.com/article/3402026/how-salesforces-tableau-acquisition-will-impact-it.html -[3]: http://www.networkworld.com/article/2172875/cloud-computing/hybrid-cloud--the-year-of-adoption-is-upon-us.html -[4]: https://www.networkworld.com/newsletters/signup.html -[5]: https://pluralsight.pxf.io/c/321564/424552/7490?u=https%3A%2F%2Fwww.pluralsight.com%2Fcourses%2Fadministering-office-365-quick-start -[6]: https://www.geekwire.com/2019/tableau-hits-841m-annual-recurring-revenue-41-transition-subscription-model-continues/ -[7]: https://www.facebook.com/NetworkWorld/ -[8]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190620 Carrier services help expand healthcare, with 5G in the offing.md b/sources/talk/20190620 Carrier services help expand healthcare, with 5G in the offing.md deleted file mode 100644 index 072b172fda..0000000000 --- a/sources/talk/20190620 Carrier services help expand healthcare, with 5G in the offing.md +++ /dev/null @@ -1,85 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Carrier services help expand healthcare, with 5G in the offing) -[#]: via: (https://www.networkworld.com/article/3403366/carrier-services-help-expand-healthcare-with-5g-in-the-offing.html) -[#]: author: (Jon Gold 
https://www.networkworld.com/author/Jon-Gold/) - -Carrier services help expand healthcare, with 5G in the offing -====== -Many telehealth initiatives tap into wireless networking supplied by service providers that may start offering services such as Citizen's Band and 5G to support remote medical care. -![Thinkstock][1] - -There are connectivity options aplenty for most types of [IoT][2] deployment, but the idea of simply handing the networking part of the equation off to a national licensed wireless carrier could be the best one for certain kinds of deployments in the medical field. - -Telehealth systems, for example, are still a relatively new facet of modern medicine, but they’re already among the most important applications that use carrier networks to deliver care. One such system is operated by the University of Mississippi Medical Center, for the treatment and education of diabetes patients. - -**[More on wireless:[The time of 5G is almost here][3]]** - -**[ Now read[20 hot jobs ambitious IT pros should shoot for][4]. ]** - -Greg Hall is the director of IT at UMMC’s center for telehealth. He said that the remote patient monitoring system is relatively simple by design – diabetes patients receive a tablet computer that they can use to input and track their blood sugar levels, alert clinicians to symptoms like nerve pain or foot sores, and even videoconference with their doctors directly. The tablet connects via Verizon, AT&T or CSpire – depending on who’s got the best coverage in a given area – back to UMMC’s servers. - -According to Hall, there are multiple advantages to using carrier connectivity instead of unlicensed (i.e. purpose-built [Wi-Fi][5] or other technology) to connect patients – some of whom live in remote parts of the state – to their caregivers. 
- -“We weren’t expecting everyone who uses the service to have Wi-Fi,” he said, “and they can take their tablet with them if they’re traveling.” - -The system serves about 250 patients in Mississippi, up from roughly 175 in the 2015 pilot program that got the effort off the ground. Nor is it strictly limited to diabetes care – Hall said that it’s already been extended to patients suffering from chronic obstructive pulmonary disease, asthma and even used for prenatal care, with further expansion in the offing. - -“The goal of our program isn’t just the monitoring piece, but also the education piece, teaching a person to live with their [condition] and thrive,” he said. - -It hasn’t all been smooth sailing. One issue was caused by the natural foliage of the area, as dense areas of pine trees can cause transmission problems, thanks to their needles being a particularly troublesome length and interfering with 2.5GHz wireless signals. But Hall said that the team has been able to install signal boosters or repeaters to overcome that obstacle. - -Neurologist Dr. Allen Gee’s practice in Wyoming attempts to address a similar issue – far-flung patients with medical needs that might not be addressed by the sparse local-care options. From his main office in Cody, he said, he can cover half the state via telepresence, using a purpose-built system that is based on cellular-data connectivity from TCT, Spectrum and AT&T, as well as remote audiovisual equipment and a link to electronic health records stored in distant locations. That allows him to receive patient data, audio/visual information and even imaging diagnostics remotely. Some specialists in the state are able to fly to those remote locations, others are not. - -While Gee’s preference is to meet with patients in person, that’s just not always possible, he said. - -“Medical specialists don’t get paid for windshield time,” he noted. 
“Being able to transfer information from an EHR facilitates the process of learning about the patient.” - -### 5G is coming - -According to Alan Stewart-Brown, vice president at infrastructure management vendor Opengear, there’s a lot to like about current carrier networks for medical use – particularly wide coverage and a lack of interference – but there are bigger things to come. - -“We have customers that have equipment in ambulances for instance, where they’re livestreaming patients’ vital signs to consoles that doctors can monitor,” he said. “They’re using carrier 4G for that right now and it works well enough, but there are limitations, namely latency, which you don’t get on [5G][6].” - -Beyond the simple fact of increased throughput and lower latency, widespread 5G deployments could open a wide array of new possibilities for medical technology, mostly involving real-time, very-high-definition video streaming. These include medical VR, remote surgery and the like. - -“The process you use to do things like real-time video – right now on a 4G network, that may or may not have a delay,” said Stewart-Brown. “Once you can get rid of the delay, the possibilities are endless as to what you can use the technology for.” - -### Citizens band - -Ron Malenfant, chief architect for service provider IoT at Cisco, agreed that the future of 5G for medical IoT is bright, but said that the actual applications of the technology have to be carefully thought out. - -“The use cases need to be worked on,” he said. “The innovative [companies] are starting to say ‘OK, what does 5G mean to me’ and starting to plan use cases.” - -One area that the carriers themselves have been eyeing recently is the CBRS band of radio frequencies, which sits around 3.5GHz. It’s what’s referred to as “lightly licensed” spectrum, in that parts of it are used for things like CB radio and other parts are the domain of the U.S. 
armed forces, and it could be used to build private networks for institutional users like hospitals, instead of deploying small but expensive 4G cells. The idea is that the institutions would be able to lease those frequencies for their specific area from the carrier directly for private LTE/CBRS networks, and, eventually 5G, Malenfant said. - -There’s also the issue, of course, that there are still a huge amount of unknowns around 5G, which isn’t expected to supplant LTE in the U.S. for at least another year or so. The medical field’s stiff regulatory requirements could also prove a stumbling block for the adoption of newer wireless technology. - -Join the Network World communities on [Facebook][7] and [LinkedIn][8] to comment on topics that are top of mind. - --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3403366/carrier-services-help-expand-healthcare-with-5g-in-the-offing.html - -作者:[Jon Gold][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Jon-Gold/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2018/07/stethoscope_mobile_healthcare_ipad_tablet_doctor_patient-100765655-large.jpg -[2]: https://www.networkworld.com/article/3207535/what-is-iot-how-the-internet-of-things-works.html -[3]: https://www.networkworld.com/article/3354477/mobile-world-congress-the-time-of-5g-is-almost-here.html -[4]: https://www.networkworld.com/article/3276025/careers/20-hot-jobs-ambitious-it-pros-should-shoot-for.html -[5]: https://www.networkworld.com/article/3238664/80211-wi-fi-standards-and-speeds-explained.html -[6]: https://www.networkworld.com/article/3203489/what-is-5g-how-is-it-better-than-4g.html -[7]: https://www.facebook.com/NetworkWorld/ -[8]: 
https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190620 Cracks appear in Intel-s grip on supercomputing.md b/sources/talk/20190620 Cracks appear in Intel-s grip on supercomputing.md deleted file mode 100644 index 5ff550d3fc..0000000000 --- a/sources/talk/20190620 Cracks appear in Intel-s grip on supercomputing.md +++ /dev/null @@ -1,63 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Cracks appear in Intel’s grip on supercomputing) -[#]: via: (https://www.networkworld.com/article/3403443/cracks-appear-in-intels-grip-on-supercomputing.html) -[#]: author: (Andy Patrizio https://www.networkworld.com/author/Andy-Patrizio/) - -Cracks appear in Intel’s grip on supercomputing -====== -New competitors threaten to take Intel’s dominance in the high-performance computing (HPC) world, and we’re not even talking about AMD (yet). -![Randy Wong/LLNL][1] - -It’s June, so it’s that time again for the twice-yearly Top 500 supercomputer list, where bragging rights are established or, in most cases, reaffirmed. The list constantly shifts as new trends appear, and one of them might be a break in Intel’s dominance. - -[Supercomputers in the top 10 list][2] include a lot of IBM Power-based systems, and almost all run Nvidia GPUs. But there’s more going on than that. - -For starters, an ARM supercomputer has shown up, at #156. [Astra][3] at Sandia National Laboratories is an HPE system running Cavium (now Marvell) ThunderX2 processors. It debuted on the list at #204 last November, but thanks to upgrades, it has moved up the list. It won’t be the last ARM server to show up, either. - -**[ Also see:[10 of the world's fastest supercomputers][2] | Get daily insights: [Sign up for Network World newsletters][4] ]** - -Second is the appearance of four Nvidia DGX servers, with the [DGX SuperPOD][5] ranking the highest at #22. 
[DGX systems][6] are basically compact GPU boxes with a Xeon just to boot the thing. The GPUs do all the heavy lifting. - -AMD hasn’t shown up yet with the Epyc processors, but it will, given Cray is building them for the government. - -This signals a breaking up of the hold Intel has had on the high-performance computing (HPC) market for a long time, said Ashish Nadkarni, group vice president in IDC's worldwide infrastructure practice. “The Intel hold has already been broken up by all the accelerators in the supercomputing space. The more accelerators they use, the less need they have for Xeons. They can go with other processors that do justice to those accelerators,” he told me. - -With so much work in HPC and artificial intelligence (AI) being done by GPUs, the x86 processor becomes just a boot processor in a way. I wasn’t kidding about the DGX box. It’s got one Xeon and eight Tesla GPUs. And the Xeon is an E5, a midrange part. - -**[[Get certified as an Apple Technical Coordinator with this seven-part online course from PluralSight.][7] ]** - -“They don’t need high-end Xeons in servers any more, although there’s a lot of supercomputers that just use CPUs. The fact is there are so many options now,” said Nadkarni. One example of an all-CPU system is [Frontera][8], a Dell-based system at the Texas Advanced Computing Center in Austin. - -The top two computers, Sierra and Summit, both run IBM Power9 RISC processors, as well as Nvidia GPUs. All told, Nvidia is in 125 of the 500 supercomputers, including five of the top 10, the fastest computer in the world, the fastest in Europe (Piz Daint) and the fastest in Japan (ABCI). - -Lenovo was the top hardware provider, beating out Dell, HPE, and IBM combined. That’s because of its large presence in its native China. Nadkari said Lenovo, which acquired the IBM x86 server business in 2014, has benefitted from the IBM installed base, which has continued wanting the same tech from Lenovo under new ownership. 
- -Join the Network World communities on [Facebook][9] and [LinkedIn][10] to comment on topics that are top of mind. - --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3403443/cracks-appear-in-intels-grip-on-supercomputing.html - -作者:[Andy Patrizio][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Andy-Patrizio/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2018/10/sierra875x500-100778404-large.jpg -[2]: https://www.networkworld.com/article/3236875/embargo-10-of-the-worlds-fastest-supercomputers.html -[3]: https://www.top500.org/system/179565 -[4]: https://www.networkworld.com/newsletters/signup.html -[5]: https://www.top500.org/system/179691 -[6]: https://www.networkworld.com/article/3196088/nvidias-new-volta-based-dgx-1-supercomputer-puts-400-servers-in-a-box.html -[7]: https://pluralsight.pxf.io/c/321564/424552/7490?u=https%3A%2F%2Fwww.pluralsight.com%2Fpaths%2Fapple-certified-technical-trainer-10-11 -[8]: https://www.networkworld.com/article/3236875/embargo-10-of-the-worlds-fastest-supercomputers.html#slide7 -[9]: https://www.facebook.com/NetworkWorld/ -[10]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190620 Several deals solidify the hybrid cloud-s status as the cloud of choice.md b/sources/talk/20190620 Several deals solidify the hybrid cloud-s status as the cloud of choice.md deleted file mode 100644 index ade07dcb10..0000000000 --- a/sources/talk/20190620 Several deals solidify the hybrid cloud-s status as the cloud of choice.md +++ /dev/null @@ -1,77 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Several deals solidify the hybrid cloud’s status as 
the cloud of choice) -[#]: via: (https://www.networkworld.com/article/3403354/several-deals-solidify-the-hybrid-clouds-status-as-the-cloud-of-choice.html) -[#]: author: (Andy Patrizio https://www.networkworld.com/author/Andy-Patrizio/) - -Several deals solidify the hybrid cloud’s status as the cloud of choice -====== -On-premises and cloud connections are being built by all the top vendors to bridge legacy and modern systems, creating hybrid cloud environments. -![Getty Images][1] - -The hybrid cloud market is expected to grow from $38.27 billion in 2017 to $97.64 billion by 2023, at a Compound Annual Growth Rate (CAGR) of 17.0% during the forecast period, according to Markets and Markets. - -The research firm said the hybrid cloud is rapidly becoming a leading cloud solution, as it provides various benefits, such as cost, efficiency, agility, mobility, and elasticity. One of the many reasons is the need for interoperability standards between cloud services and existing systems. - -Unless you are a startup company and can be born in the cloud, you have legacy data systems that need to be bridged, which is where the hybrid cloud comes in. - -So, in very short order we’ve seen a bunch of new alliances involving the old and new guard, reiterating that the need for hybrid solutions remains strong. - -**[ Read also:[What hybrid cloud means in practice][2] | Get regularly scheduled insights: [Sign up for Network World newsletters][3] ]** - -### HPE/Google - -In April, the Hewlett Packard Enterprise (HPE) and Google announced a deal where HPE introduced a variety of server solutions for Google Cloud’s Anthos, along with a consumption-based model for the validated HPE on-premises infrastructure that is integrated with Anthos. - -Following up with that, the two just announced a strategic partnership to create a hybrid cloud for containers by combining HPE’s on-premises infrastructure, Cloud Data Services, and GreenLake consumption model with Anthos. 
This allows for: - - * Bi-directional data mobility for data mobility and consistent data services between on-premises and cloud - * Application workload mobility to move containerized app workloads across on-premises and multi-cloud environments - * Multi-cloud flexibility, offering the choice of HPE Cloud Volumes and Anthos for what works best for the workload - * Unified hybrid management through Anthos, so customers can get a unified and consistent view of their applications and workloads regardless of where they reside - * Charged as a service via HPE GreenLake - - - -### IBM/Cisco - -This is a furthering of an already existing partnership between IBM and Cisco designed to deliver a common and secure developer experience across on-premises and public cloud environments for building modern applications. - -[Cisco said it will support IBM Cloud Private][4], an on-premises container application development platform, on Cisco HyperFlex and HyperFlex Edge hyperconverged infrastructure. This includes support for IBM Cloud Pak for Applications. IBM Cloud Paks deliver enterprise-ready containerized software solutions and developer tools for building apps and then easily moving to any cloud—public or private. - -This architecture delivers a common and secure Kubernetes experience across on-premises (including edge) and public cloud environments. IBM’s Multicloud Manager covers monitoring and management of clusters and container-based applications running from on-premises to the edge, while Cisco’s Virtual Application Centric Infrastructure (ACI) will allow customers to extend their network fabric from on-premises to the IBM Cloud. - -### IBM/Equinix - -Equinix expanded its collaboration with IBM Cloud to bring private and scalable connectivity to global enterprises via Equinix Cloud Exchange Fabric (ECX Fabric). 
This provides private connectivity to IBM Cloud, including Direct Link Exchange, Direct Link Dedicated and Direct Link Dedicated Hosting, that is secure and scalable. - -ECX Fabric is an on-demand, SDN-enabled interconnection service that allows any business to connect between its own distributed infrastructure and any other company’s distributed infrastructure, including cloud providers. Direct Link provides IBM customers with a connection between their network and IBM Cloud. So ECX Fabric provides IBM customers with a secured and scalable network connection to the IBM Cloud service. - -At the same time, ECX Fabric provides secure connections to other cloud providers, and most customers prefer a multi-vendor approach to avoid vendor lock-in. - -“Each of the partnerships focus on two things: 1) supporting a hybrid-cloud platform for their existing customers by reducing the friction to leveraging each solution and 2) leveraging the unique strength that each company brings. Each of the solutions are unique and would be unlikely to compete directly with other partnerships,” said Tim Crawford, president of Avoa, an IT consultancy. - -Join the Network World communities on [Facebook][5] and [LinkedIn][6] to comment on topics that are top of mind. 
- --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3403354/several-deals-solidify-the-hybrid-clouds-status-as-the-cloud-of-choice.html - -作者:[Andy Patrizio][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Andy-Patrizio/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2019/02/cloud_hand_plus_sign_private-100787051-large.jpg -[2]: https://www.networkworld.com/article/3249495/what-hybrid-cloud-mean-practice -[3]: https://www.networkworld.com/newsletters/signup.html -[4]: https://www.networkworld.com/article/3403363/cisco-connects-with-ibm-in-to-simplify-hybrid-cloud-deployment.html -[5]: https://www.facebook.com/NetworkWorld/ -[6]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190626 Where are all the IoT experts going to come from.md b/sources/talk/20190626 Where are all the IoT experts going to come from.md deleted file mode 100644 index 22a303d2f6..0000000000 --- a/sources/talk/20190626 Where are all the IoT experts going to come from.md +++ /dev/null @@ -1,78 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Where are all the IoT experts going to come from?) -[#]: via: (https://www.networkworld.com/article/3404489/where-are-all-the-iot-experts-going-to-come-from.html) -[#]: author: (Fredric Paul https://www.networkworld.com/author/Fredric-Paul/) - -Where are all the IoT experts going to come from? -====== -The fast growth of the internet of things (IoT) is creating a need to train cross-functional experts who can combine traditional networking and infrastructure expertise with database and reporting skills. 
-![Kevin \(CC0\)][1] - -If the internet of things (IoT) is going to fulfill its enormous promise, it’s going to need legions of smart, skilled, _trained_ workers to make everything happen. And right now, it’s not entirely clear where those people are going to come from. - -That’s why I was interested in trading emails with Keith Flynn, senior director of product management, R&D at asset-optimization software company [AspenTech][2], who says that when dealing with the slew of new technologies that fall under the IoT umbrella, you need people who can understand how to configure the technology and interpret the data. Flynn sees a growing need for existing educational institutions to house IoT-specific programs, as well as an opportunity for new IoT-focused private colleges, offering a well-rounded curriculum. - -“In the future,” Flynn told me, “IoT projects will differ tremendously from the general data management and automation projects of today. … The future requires a more holistic set of skills and cross-trading capabilities so that we’re all speaking the same language.” - -**[ Also read:  [20 hot jobs ambitious IT pros should shoot for][3] ]** - -With the IoT growing 30% a year, Flynn added, rather than a few specific skills, “everything from traditional deployment skills, like networking and infrastructure, to database and reporting skills and, frankly, even basic data science, need to be understood together and used together.” - -### Calling all IoT consultants - -“The first big opportunity for IoT-educated people is in the consulting field,” Flynn predicted. “As consulting companies adapt or die to the industry trends … having IoT-trained people on staff will help position them for IoT projects and make a claim in the new line of business: IoT consulting.” - -The problem is especially acute for startups and smaller companies. 
“The bigger the organization, the more likely they have a means to hire different people across different lines of skillsets,” Flynn said. “But for smaller organizations and smaller IoT projects, you need someone who can do both.” - -Both? Or _everything?_ The IoT “requires a combination of all knowledge and skillsets,” Flynn said, noting that “many of the skills aren’t new, they’ve just never been grouped together or taught together before.” - -**[ [Looking to upgrade your career in tech? This comprehensive online course teaches you how.][4] ]** - -### The IoT expert of the future - -True IoT expertise starts with foundational instrumentation and electrical skills, Flynn said, which can help workers implement new wireless transmitters and boost technology for better battery life and power consumption. - -“IT skills, like networking, IP addressing, subnet masks, cellular and satellite are also pivotal IoT needs,” Flynn said. He also sees a need for database management skills and cloud management and security expertise, “especially as things like [advanced process control] APC and sending sensor data directly to databases and data lakes become the norm.” - -### Where will IoT experts come from? - -Flynn said standardized formal education courses would be the best way to make sure that graduates or certificate holders have the right set of skills. He even laid out a sample curriculum: “Start in chronological order with the basics like [Electrical & Instrumentation] E&I and measurement. Then teach networking, and then database administration and cloud courses should follow that. This degree could even be looped into an existing engineering course, and it would probably take two years … to complete the IoT component.” - -While corporate training could also play role, “that’s easier said than done,” Flynn warned. 
“Those trainings will need to be organization-specific efforts and pushes.” - -Of course, there are already [plenty of online IoT training courses and certificate programs][5]. But, ultimately, the responsibility lies with the workers themselves. - -“Upskilling is incredibly important in this world as tech continues to transform industries,” Flynn said. “If that upskilling push doesn’t come from your employer, then online courses and certifications would be an excellent way to do that for yourself. We just need those courses to be created. ... I could even see organizations partnering with higher-education institutions that offer these courses to give their employees better access to it. Of course, the challenge with an IoT program is that it will need to constantly evolve to keep up with new advancements in tech.” - -**[ For more on IoT, see [tips for securing IoT on your network][6], our list of [the most powerful internet of things companies][7] and learn about the [industrial internet of things][8]. | Get regularly scheduled insights by [signing up for Network World newsletters][9]. ]** - -Join the Network World communities on [Facebook][10] and [LinkedIn][11] to comment on topics that are top of mind. 
- --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3404489/where-are-all-the-iot-experts-going-to-come-from.html - -作者:[Fredric Paul][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Fredric-Paul/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2018/07/programmer_certification-skills_code_devops_glasses_student_by-kevin-unsplash-100764315-large.jpg -[2]: https://www.aspentech.com/ -[3]: https://www.networkworld.com/article/3276025/careers/20-hot-jobs-ambitious-it-pros-should-shoot-for.html -[4]: https://pluralsight.pxf.io/c/321564/424552/7490?u=https%3A%2F%2Fwww.pluralsight.com%2Fpaths%2Fupgrading-your-technology-career -[5]: https://www.google.com/search?client=firefox-b-1-d&q=iot+training -[6]: https://www.networkworld.com/article/3254185/internet-of-things/tips-for-securing-iot-on-your-network.html#nww-fsb -[7]: https://www.networkworld.com/article/2287045/internet-of-things/wireless-153629-10-most-powerful-internet-of-things-companies.html#nww-fsb -[8]: https://www.networkworld.com/article/3243928/internet-of-things/what-is-the-industrial-iot-and-why-the-stakes-are-so-high.html#nww-fsb -[9]: https://www.networkworld.com/newsletters/signup.html#nww-fsb -[10]: https://www.facebook.com/NetworkWorld/ -[11]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190630 Data Still Dominates.md b/sources/talk/20190630 Data Still Dominates.md deleted file mode 100644 index e9a86acf68..0000000000 --- a/sources/talk/20190630 Data Still Dominates.md +++ /dev/null @@ -1,100 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Data Still Dominates) -[#]: via: 
(https://theartofmachinery.com/2019/06/30/data_still_dominates.html) -[#]: author: (Simon Arneaud https://theartofmachinery.com) - -Data Still Dominates -====== - -Here’s [a quote from Linus Torvalds in 2006][1]: - -> I’m a huge proponent of designing your code around the data, rather than the other way around, and I think it’s one of the reasons git has been fairly successful… I will, in fact, claim that the difference between a bad programmer and a good one is whether he considers his code or his data structures more important. Bad programmers worry about the code. Good programmers worry about data structures and their relationships. - -Which sounds a lot like [Eric Raymond’s “Rule of Representation” from 2003][2]: - -> Fold knowledge into data, so program logic can be stupid and robust. - -Which was just his summary of ideas like [this one from Rob Pike in 1989][3]: - -> Data dominates. If you’ve chosen the right data structures and organized things well, the algorithms will almost always be self-evident. Data structures, not algorithms, are central to programming. - -Which cites [Fred Brooks from 1975][4]: - -> ### Representation is the Essence of Programming -> -> Beyond craftsmanship lies invention, and it is here that lean, spare, fast programs are born. Almost always these are the result of strategic breakthrough rather than tactical cleverness. Sometimes the strategic breakthrough will be a new algorithm, such as the Cooley-Tukey Fast Fourier Transform or the substitution of an n log n sort for an n² set of comparisons. -> -> Much more often, strategic breakthrough will come from redoing the representation of the data or tables. This is where the heart of your program lies. Show me your flowcharts and conceal your tables, and I shall continue to be mystified. Show me your tables, and I won’t usually need your flowcharts; they’ll be obvious. - -So, smart people have been saying this again and again for nearly half a century: focus on the data first. 
But sometimes it feels like the most famous piece of smart programming advice that everyone forgets. - -Let me give some real examples. - -### The Highly Scalable System that Couldn’t - -This system was designed from the start to handle CPU-intensive loads with incredible scalability. Nothing was synchronous. Everything was done with callbacks, task queues and worker pools. - -But there were two problems: The first was that the “CPU-intensive load” turned out not to be that CPU-intensive after all — a single task took a few milliseconds at worst. So most of the architecture was doing more harm than good. The second problem was that although it sounded like a highly scalable distributed system, it wasn’t one — it only ran on one machine. Why? Because all communication between asynchronous components was done using files on the local filesystem, which was now the bottleneck for any scaling. The original design didn’t say much about data at all, except to advocate local files in the name of “simplicity”. Most of the document was about all the extra architecture that was “obviously” needed to handle the “CPU-intensiveness” of the load. - -### The Service-Oriented Architecture that was Still Data-Oriented - -This system followed a microservices design, made up of single-purpose apps with REST-style APIs. One component was a database that stored documents (basically responses to standard forms, and other electronic paperwork). Naturally it exposed an API for basic storage and retrieval, but pretty quickly there was a need for more complex search functionality. The designers felt that adding this search functionality to the existing document API would have gone against the principles of microservices design. They could talk about “search” as being a different kind of service from “get/put”, so their architecture shouldn’t couple them together. 
Besides, the tool they were planning to use for search indexing was separate from the database itself, so creating a new service made sense for implementation, too. - -In the end, a search API was created containing a search index that was essentially a duplicate of the data in the main database. This data was being updated dynamically, so any component that mutated document data through the main database API had to also update the search API. It’s impossible to do this with REST APIs without race conditions, so the two sets of data kept going out of sync every now and then, anyway. - -Despite what the architecture diagram promised, the two APIs were tightly coupled through their data dependencies. Later on it was recognised that the search index should be an implementation detail of a unified document service, and this made the system much more maintainable. “Do one thing” works at the data level, not the verb level. - -### The Fantastically Modular and Configurable Ball of Mud - -This system was a kind of automated deployment pipeline. The original designers wanted to make a tool that was flexible enough to solve deployment problems across the company. It was written as a set of pluggable components, with a configuration file system that not only configured the components, but acted as a [DSL][5] for programming how the components fitted into the pipeline. - -Fast forward a few years and it’s turned into “that program”. There was a long list of known bugs that no one was ever fixing. No one wanted to touch the code out of fear of breaking things. No one used any of the flexibility of the DSL. Everyone who used the program copy-pasted the same known-working configuration that everyone else used. - -What had gone wrong? Although the original design document used words like “modular”, “decoupled”, “extensible” and “configurable” a lot, it never said anything about data. 
So, data dependencies between components ended up being handled in an ad-hoc way using a globally shared blob of JSON. Over time, components made more and more undocumented assumptions about what was in or not in the JSON blob. Sure, the DSL allowed rearranging components into any order, but most configurations didn’t work. - -### Lessons - -I chose these three examples because they’re easy to explain, not to pick on others. I once tried to build a website, and failed trying to instead build some cringe-worthy XML database that didn’t even solve the data problems I had. Then there’s the project that turned into a broken mockery of half the functionality of `make`, again because I didn’t think about what I really needed. I wrote a post before based on a time I wrote [a castle-in-the-sky OOP class hierarchy that should have been encoded in data instead][6]. - -Update: - -Apparently many people still thought I wrote this to make fun of others. People who’ve actually worked with me will know I’m much more interested in the things I’m fixing than in blaming the people who did most of the work building them, but, okay, here’s what I think of the engineers involved. - -Honestly, the first example obviously happened because the designer was more interested in bringing a science project to work than in solving the problem at hand. Most of us have done that (mea culpa), but it’s really annoying to our colleagues who’ll probably have to help maintain them when we’re bored of them. If this sounds like you, please don’t get offended; please just stop. (I’d still rather work on the single-node distributed system than anything built around my “XML database”.) - -There’s nothing personal in the second example. Sometimes it feels like everyone is talking about how wonderful it is to split up services, but no one is talking about exactly when not to. People are learning the hard way all the time. 
- -The third example was actually from some of the smartest people I’ve ever had the chance to work with. - -(End update.) - -“Does this talk about the problems created by data?” turns out to be a pretty useful litmus test for good systems design. It’s also pretty handy for detecting false expert advice. The hard, messy systems design problems are data problems, so false experts love to ignore them. They’ll show you a wonderfully beautiful architecture, but without talking about what kind of data it’s appropriate for, and (crucially) what kind of data it isn’t. - -For example, a false expert might tell you that you should use a pub/sub system because pub/sub systems are loosely coupled, and loosely coupled components are more maintainable. That sounds nice and results in pretty diagrams, but it’s backwards thinking. Pub/sub doesn’t _make_ your components loosely coupled; pub/sub _is_ loosely coupled, which may or may not match your data needs. - -On the flip side, a well-designed data-oriented architecture goes a long way. Functional programming, service meshes, RPCs, design patterns, event loops, whatever, all have their merits, but personally I’ve seen tools like [boring old databases][7] be responsible for a lot more successfully shipped software. 
- --------------------------------------------------------------------------------- - -via: https://theartofmachinery.com/2019/06/30/data_still_dominates.html - -作者:[Simon Arneaud][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://theartofmachinery.com -[b]: https://github.com/lujun9972 -[1]: https://lwn.net/Articles/193245/ -[2]: http://www.catb.org/~esr/writings/taoup/html/ch01s06.html -[3]: http://doc.cat-v.org/bell_labs/pikestyle -[4]: https://archive.org/stream/mythicalmanmonth00fred/mythicalmanmonth00fred_djvu.txt -[5]: https://martinfowler.com/books/dsl.html -[6]: https://theartofmachinery.com/2016/06/21/code_vs_data.html -[7]: https://theartofmachinery.com/2017/10/28/rdbs_considered_useful.html diff --git a/sources/talk/20190702 SD-WAN Buyers Should Think Application Performance as well as Resiliency.md b/sources/talk/20190702 SD-WAN Buyers Should Think Application Performance as well as Resiliency.md deleted file mode 100644 index 3bddd4cdc3..0000000000 --- a/sources/talk/20190702 SD-WAN Buyers Should Think Application Performance as well as Resiliency.md +++ /dev/null @@ -1,49 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (SD-WAN Buyers Should Think Application Performance as well as Resiliency) -[#]: via: (https://www.networkworld.com/article/3406456/sd-wan-buyers-should-think-application-performance-as-well-as-resiliency.html) -[#]: author: (Zeus Kerravala https://www.networkworld.com/author/Zeus-Kerravala/) - -SD-WAN Buyers Should Think Application Performance as well as Resiliency -====== - -![istock][1] - -As an industry analyst, not since the days of WAN Optimization have I seen a technology gain as much interest as I am seeing with [SD-WANs][2] today. 
Although full deployments are still limited, nearly every network manager, and often many IT leaders I talk to, are interested in it. The reason for this is two-fold – the WAN has grown in importance for cloud-first enterprises and is badly in need of an overhaul. This hasn’t gone unnoticed by the vendor community as there has been an explosion of companies bringing a broad range of SD-WAN offerings to market. The great news for buyers is that there is no shortage of choices. The bad news is there are too many choices and making the right decision difficult. - -One area of differentiation for SD-WAN vendors is how they handle application performance.  I think of the SD-WAN market as being split into two categories – basic and advanced SD-WANs.  A good analogy is to think of the virtualization market.  There are many vendors that offer hypervisors – in fact there are a number of free ones.  So why do companies pay a premium for VMware? It’s because VMware offers many advanced features and capabilities that make its solution do more than just virtualize servers. - -Similarly, basic SD-WAN solutions do a great job of helping to lower costs and to increase application resiliency through path selection capabilities but do nothing to improve application performance. One myth that needs busting is that all SD-WANs make your applications perform better. That’s simply not true as application availability and performance are two different things. It’s possible to have great performance and poor availability or high availability with lackluster performance.  - -Consider the case where a business runs a hybrid WAN and voice and video traffic is sent over the MPLS connection and broadband is used for other traffic. If the MPLS link becomes congested, but doesn’t go down, most SD-WAN solutions will continue to send video and voice over it, which obviously degrades the performance. 
If multiple broadband connections are used, the chances of congestion related issues are even more likely.  - -This is an important point for IT professionals to understand. The business justification for SD-WAN was initially built around saving money but if application performance suffers, the entire return on investment (ROI) for the project might as well be tossed out the window.  For many companies, the network is the business, so a poor performing network means equally poor performing applications which results in lost productivity, lower revenues and possibly brand damage from customer experience issues.  - -I’ve talked to many organizations that had digital initiatives fail because the network wasn’t transformed. For example, a luxury retailer implemented a tablet program for in store personnel to be able to show merchandise to customers. High end retail is almost wholly impulse purchases so the more inventory that can be shown to a customer, the larger the resulting sales. The WAN that was in place was causing the mobile application to perform poorly causing the digital initiative to have a negative effect. Instead of driving sales, the mobile initiative was chasing customers from the store.  The idea was right but the poor performing WAN caused the project to fail. - -SD-WAN decision makers need to look to suppliers that have specific technologies integrated into it that can act when congestion occurs.  A great example of this is the Silver Peak [Unity EdgeConnect™][3] SD-WAN edge platform with [path conditioning][4], [traffic shaping][5] and sub-second link failover. This ensures the best possible quality for all critical applications, even when an underlying link experiences congestion or an outage, even for [voice and video over broadband][6]. This is a foundational component of advanced SD-WAN providers as they offer the same resiliency and cost benefits as a basic SD-WAN but also ensure application performance remains high.  
- -The SD-WAN era is here, and organizations should be aggressive with deployments as it will transform the WAN and make it a digital transformation enabler. Decision makers should choose their provider carefully and ensure the vendor also improves application performance.  Without it, the digital initiatives will likely fail and negate any ROI the company was hoping to realize. - --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3406456/sd-wan-buyers-should-think-application-performance-as-well-as-resiliency.html - -作者:[Zeus Kerravala][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Zeus-Kerravala/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2019/07/istock-157647179-100800860-large.jpg -[2]: https://www.silver-peak.com/sd-wan/sd-wan-explained -[3]: https://www.silver-peak.com/products/unity-edge-connect -[4]: https://www.silver-peak.com/products/unity-edge-connect/path-conditioning -[5]: https://www.silver-peak.com/products-solutions/unity/traffic-shaping -[6]: https://www.silver-peak.com/sd-wan/voice-video-over-broadband diff --git a/sources/talk/20190703 An eco-friendly internet of disposable things is coming.md b/sources/talk/20190703 An eco-friendly internet of disposable things is coming.md deleted file mode 100644 index 8d1e827aa8..0000000000 --- a/sources/talk/20190703 An eco-friendly internet of disposable things is coming.md +++ /dev/null @@ -1,92 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (An eco-friendly internet of disposable things is coming) -[#]: via: (https://www.networkworld.com/article/3406462/an-eco-friendly-internet-of-disposable-things-is-coming.html) -[#]: 
author: (Patrick Nelson https://www.networkworld.com/author/Patrick-Nelson/) - -An eco-friendly internet of disposable things is coming -====== -Researchers are creating a non-hazardous, bacteria-powered miniature battery that can be implanted into shipping labels and packaging to monitor temperature and track packages in real time. -![Thinkstock][1] - -Get ready for a future of disposable internet of things (IoT) devices, one that will mean everything is connected to networks. It will be particularly useful in logistics, being used in single-use plastics in retail packaging and throw-away shippers’ cardboard boxes. - -How will it happen? The answer is when non-hazardous, disposable bio-batteries make it possible. And that moment might be approaching. Researchers say they’re closer to commercializing a bacteria-powered miniature battery that they say will propel the IoDT. - -**[ Learn more: [Download a PDF bundle of five essential articles about IoT in the enterprise][2] ]** - -The “internet of disposable things is a new paradigm for the rapid evolution of wireless sensor networks,” says Seokheun Choi, an associate professor at Binghamton University, [in an article on the school’s website][3]. - -“Current IoDTs are mostly powered by expensive and environmentally hazardous batteries,” he says. Those costs can be significant in any kind of large-scale deployment, he says. And furthermore, with exponential growth, the environmental concerns would escalate rapidly. - -The miniaturized battery that Choi’s team has come up with is uniquely charged through power created by bacteria. It doesn’t have metals and acids in it. And it’s designed specifically to provide energy to sensors and radios in single-use IoT devices. Those could be the kinds of sensors ideal for supply-chain logistics where the container is ultimately going to end up in a landfill, creating a hazard. 
- -Another use case is real-time analysis of packaged food, with sensors monitoring temperature and location, preventing spoilage and providing safer food handling. For example, a farm product could be tracked for on-time delivery, as well as have its temperature measured, all within the packaging, as it moves from packaging facility to consumer. In the event of a food-borne illness outbreak, say, one can quickly find out where the product originated—which apparently is hard to do now. - -Other use cases could be battery-impregnated shipping labels that send real-time data to the internet. Importantly, in both use cases, packaging can be discarded without added environmental concerns. - -### How the bacteria-powered batteries work - -A slow release of nutrients provide the energy to the bacteria-powered batteries, which the researchers say can last up to eight days. “Slow and continuous reactions” convert the microbial nutrients into “long standing power,” they say in [their paper's abstract][4]. - -“Our biobattery is low-cost, disposable, and environmentally-friendly,” Choi says. - -Origami, the Japanese paper-folding skill used to create objects, was an inspiration for a similar microbial-based battery project the group wrote about last year in a paper. This one is liquid-based and not as long lasting. A bacteria-containing liquid was absorbed along the porous creases in folded paper, creating the paper-delivered power source, perhaps to be used in a shipping label. - -“Low-cost microbial fuel cells (MFCs) can be done efficiently by using a paper substrate and origami techniques,” [the group wrote then][5]. - -Scientists, too, envisage electronics now printed on circuit boards (PCBs) and can be toxic on disposal being printed entirely on eco-friendly paper. Product cycles, such as those found now in mobile devices and likely in future IoT devices, are continually getting tighter—thus PCBs are increasingly being disposed. Solutions are needed, experts say. 
- -Put the battery in the paper, too, is the argument here. And while you’re at it, get the biodegradation of the used-up biobattery to help break-down the organic-matter paper. - -Ultimately, Choi believes that the power-creating bacteria could even be introduced naturally by the environment—right now it’s added on by the scientists. - -**More on IoT:** - - * [What is the IoT? How the internet of things works][6] - * [What is edge computing and how it’s changing the network][7] - * [Most powerful Internet of Things companies][8] - * [10 Hot IoT startups to watch][9] - * [The 6 ways to make money in IoT][10] - * [What is digital twin technology? [and why it matters]][11] - * [Blockchain, service-centric networking key to IoT success][12] - * [Getting grounded in IoT networking and security][2] - * [Building IoT-ready networks must become a priority][13] - * [What is the Industrial IoT? [And why the stakes are so high]][14] - - - -Join the Network World communities on [Facebook][15] and [LinkedIn][16] to comment on topics that are top of mind. 
- --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3406462/an-eco-friendly-internet-of-disposable-things-is-coming.html - -作者:[Patrick Nelson][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Patrick-Nelson/ -[b]: https://github.com/lujun9972 -[1]: https://images.techhive.com/images/article/2017/04/green-data-center-intro-100719502-large.jpg -[2]: https://www.networkworld.com/article/3269736/internet-of-things/getting-grounded-in-iot-networking-and-security.html -[3]: https://www.binghamton.edu/news/story/1867/everything-will-connect-to-the-internet-someday-and-this-biobattery-could-h -[4]: https://www.sciencedirect.com/science/article/abs/pii/S0378775319305580 -[5]: https://www.sciencedirect.com/science/article/pii/S0960148117311606 -[6]: https://www.networkworld.com/article/3207535/internet-of-things/what-is-the-iot-how-the-internet-of-things-works.html -[7]: https://www.networkworld.com/article/3224893/internet-of-things/what-is-edge-computing-and-how-it-s-changing-the-network.html -[8]: https://www.networkworld.com/article/2287045/internet-of-things/wireless-153629-10-most-powerful-internet-of-things-companies.html -[9]: https://www.networkworld.com/article/3270961/internet-of-things/10-hot-iot-startups-to-watch.html -[10]: https://www.networkworld.com/article/3279346/internet-of-things/the-6-ways-to-make-money-in-iot.html -[11]: https://www.networkworld.com/article/3280225/internet-of-things/what-is-digital-twin-technology-and-why-it-matters.html -[12]: https://www.networkworld.com/article/3276313/internet-of-things/blockchain-service-centric-networking-key-to-iot-success.html -[13]: https://www.networkworld.com/article/3276304/internet-of-things/building-iot-ready-networks-must-become-a-priority.html 
-[14]: https://www.networkworld.com/article/3243928/internet-of-things/what-is-the-industrial-iot-and-why-the-stakes-are-so-high.html -[15]: https://www.facebook.com/NetworkWorld/ -[16]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190705 Lessons in Vendor Lock-in- Google and Huawei.md b/sources/talk/20190705 Lessons in Vendor Lock-in- Google and Huawei.md deleted file mode 100644 index fe92b389a9..0000000000 --- a/sources/talk/20190705 Lessons in Vendor Lock-in- Google and Huawei.md +++ /dev/null @@ -1,58 +0,0 @@ -[#]: collector: "lujun9972" -[#]: translator: " " -[#]: reviewer: " " -[#]: publisher: " " -[#]: url: " " -[#]: subject: "Lessons in Vendor Lock-in: Google and Huawei" -[#]: via: "https://www.linuxjournal.com/content/lessons-vendor-lock-google-and-huawei" -[#]: author: "Kyle Rankin https://www.linuxjournal.com/users/kyle-rankin" - -Lessons in Vendor Lock-in: Google and Huawei -====== -![](https://www.linuxjournal.com/sites/default/files/styles/850x500/public/nodeimage/story/bigstock-Us--China-Trade-War-Boxing-F-252887971_1.jpg?itok=oZBwXDrP) - -What happens when you're locked in to a vendor that's too big to fail, but is on the opposite end of a trade war? - -The story of Google no longer giving Huawei access to Android updates is still developing, so by the time you read this, the situation may have changed. At the moment, Google has granted Huawei a 90-day window whereby it will have access to Android OS updates, the Google Play store and other Google-owned Android assets. After that point, due to trade negotiations between the US and China, Huawei no longer will have that access. - -Whether or not this new policy between Google and Huawei is still in place when this article is published, this article isn't about trade policy or politics. 
Instead, I'm going to examine this as a new lesson in vendor lock-in that I don't think many have considered before: what happens when the vendor you rely on is forced by its government to stop you from being a customer? - -### Too Big to Fail - -Vendor lock-in isn't new, but until the last decade or so, it generally was thought of by engineers as a bad thing. Companies would take advantage of the fact that you used one of their products that was legitimately good to use the rest of their products that may or may not be as good as those from their competitors. People felt the pain of being stuck with inferior products and rebelled. - -These days, a lot of engineers have entered the industry in a world where the new giants of lock-in are still growing and have only flexed their lock-in powers a bit. Many engineers shrug off worries about choosing a solution that requires you to use only products from one vendor, in particular if that vendor is a large enough company. There is an assumption that those companies are too big ever to fail, so why would it matter that you rely on them (as many companies in the cloud do) for every aspect of their technology stack? - -Many people who justify lock-in with companies who are too big to fail point to all of the even more important companies who use that vendor who would have even bigger problems should that vendor have a major bug, outage or go out of business. It would take so much effort to use cross-platform technologies, the thinking goes, when the risk of going all-in with a single vendor seems so small. - -Huawei also probably figured (rightly) that Google and Android were too big to fail. Why worry about the risks of being beholden to a single vendor for your OS when that vendor was used by other large companies and would have even bigger problems if the vendor went away? 
- -### The Power of Updates - -Google held a particularly interesting and subtle bit of lock-in power over Huawei (and any phone manufacturer who uses Android)—the power of software updates. This form of lock-in isn't new. Microsoft famously used the fact that software updates in Microsoft Office cost money (naturally, as it was selling that software) along with the fact that new versions of Office had this tendency to break backward compatibility with older document formats to encourage everyone to upgrade. The common scenario was that the upper-level folks in the office would get brand-new, cutting-edge computers with the latest version of Office on them. They would start saving new documents and sharing them, and everyone else wouldn't be able to open them. It ended up being easier to upgrade everyone's version of Office than to have the bosses remember to save new documents in old formats every time. - -The main difference with Android is that updates are critical not because of compatibility, but for security. Without OS updates, your phone ultimately will become vulnerable to exploits that attackers continue to find in your software. The Android OS that ships on phones is proprietary and therefore requires permission from Google to get those updates. - -Many people still don't think of the Android OS as proprietary software. Although people talk about the FOSS underpinnings in Android, only people who go to the extra effort of getting a pure-FOSS version of Android, like LineageOS, on their phones actually experience it. The version of Android most people tend to use has a bit of FOSS in the center, surrounded by proprietary Google Apps code. - -It's this Google Apps code that gives Google the kind of powerful leverage over a company like Huawei. With traditional Android releases, Google controls access to OS updates including security updates. All of this software is signed with Google's signing keys. 
This system is built with security in mind—attackers can't easily build their own OS update to install on your phone—but it also has a convenient side effect of giving Google control over the updates. - -What's more, the Google Apps suite isn't just a convenient way to load Gmail or Google Docs, it also includes the tight integration with your Google account and the Google Play store. Without those hooks, you don't have access to the giant library of applications that everyone expects to use on their phones. As anyone with a LineageOS phone that uses F-Droid can attest, while a large number of applications are available in the F-Droid market, you can't expect to see those same apps as on Google Play. Although you can side-load some Google Play apps, many applications, such as Google Maps, behave differently without a Google account. Note that this control isn't unique to Google. Apple uses similar code-signing features with similar restrictions on its own phones and app updates. - -### Conclusion - -Without access to these OS updates, Huawei now will have to decide whether to create its own LineageOS-style Android fork or a whole new phone OS of its own. In either case, it will have to abandon the Google Play Store ecosystem and use F-Droid-style app repositories, or if it goes 100% alone, it will need to create a completely new app ecosystem. If its engineers planned for this situation, then they likely are working on this plan right now; otherwise, they are all presumably scrambling to address an event that "should never happen". Here's hoping that if you find yourself in a similar case of vendor lock-in with an overseas company that's too big to fail, you never get caught in the middle of a trade war. 
- --------------------------------------------------------------------------------- - -via: https://www.linuxjournal.com/content/lessons-vendor-lock-google-and-huawei - -作者:[Kyle Rankin][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.linuxjournal.com/users/kyle-rankin -[b]: https://github.com/lujun9972 diff --git a/sources/talk/20190708 Colocation facilities buck the cloud-data-center trend.md b/sources/talk/20190708 Colocation facilities buck the cloud-data-center trend.md deleted file mode 100644 index add6ef0093..0000000000 --- a/sources/talk/20190708 Colocation facilities buck the cloud-data-center trend.md +++ /dev/null @@ -1,96 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Colocation facilities buck the cloud-data-center trend) -[#]: via: (https://www.networkworld.com/article/3407756/colocation-facilities-buck-the-cloud-data-center-trend.html) -[#]: author: (Andy Patrizio https://www.networkworld.com/author/Andy-Patrizio/) - -Colocation facilities buck the cloud-data-center trend -====== -Lower prices and latency plus easy access to multiple cloud providers make colocation facilities an attractive option compared to building on-site data centers. -![gorodenkoff / Getty Images][1] - -[Data center][2] workloads are moving but not only to the cloud. Increasingly, they are shifting to colocation facilities as an alternative to privately owned data centers. - -### What is colocation? - -A colocation facility or colo is a data center in which a business can rent space for servers and other computing hardware that they purchase but that the colo provider manages. - -[Read about IPv6 and cloud-access security brokers][3] - -The colo company provides the building, cooling, power, bandwidth and physical security. 
Space is leased by the rack, cabinet, cage or room. Many colos started out as managed services and continue  to offer those specialized services. - -Some prominent providers include Equinix, Digital Reality Trust, CenturyLink, and NTT Communications, and there are several Chinese providers that only serve the China market. Unlike the data centers of cloud vendors like Amazon and Microsoft, these colo facilities are generally in large metropolitan areas. - -“Colos have been around a long time, but their initial use case was Web servers,” said Rick Villars, vice president of data centers and cloud research at IDC. “What’s changed now is the ratio of what’s customer-facing is much greater than in 2000, [with the]  expansion of companies needing to have more assets that are network-facing.” - -### Advantages of colos: Cost, cloud interconnect - -Homegrown data centers are often sized correctly, with either too much capacity or too little, said Jim Poole, vice president of business development at Equinix. “Customers come to us all the time and say, ‘Would you buy my data center? Because I only use 25 percent of it,’” he said. - -Poole said the average capital expenditure for a stand-alone enterprise data center that is not a part of the corporate campus is $9 million. Companies are increasingly realizing that it makes sense to buy the racks of hardware but place it in someone else’s secure facility that handles the power and cooling. “It’s the same argument for doing cloud computing but at the physical-infrastructure level,” he said. - -Mike Satter, vice president for OceanTech, a data-center-decommissioning service provider, says enterprises should absolutely outsource data-center construction or go the colo route. Just as there are contractors who specialize in building houses, there are experts who specialize in data-center design, he said. - -He added that with many data-center closures there is subsequent consolidation. 
“For every decommissioning we do, that same company is adding to another environment somewhere else. With the new hardware out there now, the servers can do the same work in 20 racks as they did in 80 racks five years ago. That means a reduced footprint and energy cost,” he said. - -Often these closures mean moving to a colo. OceanTech recently decommissioned a private data center for a major media outlet he declined to identify that involved shutting down a data center in New Jersey that held 70 racks of gear. The firm was going to move its apps to the cloud but ended up expanding to a colo facility in New York City. - -### Cloud isn't cheaper than private data centers - -Satter said he’s had conversations with companies that planned to go to the cloud but changed their minds when they saw what it would cost if they later decided to move workloads out. Cloud providers can “kill you with guidelines and costs” because your data is in their infrastructure, and they can set fees that make it expensive to move it to another provider, he said. “The cloud not a money saver.” - -That can drive decisions to keep data in-house or in a colo in order to keep tighter possession of their data. “Early on, when people weren’t hip to the game for how much it cost to move to the cloud, you had decision makers with influence say the cloud sounded good. Now they are realizing it costs a lot more dollars to do that vs. doing something on-prem, on your own,” said Satter. - -Guy Churchward, CEO of Datera, developer of software designed storage platforms for enterprises, has noticed a new trend among CIOs making a cloud vs. private decision for apps based on the lifespan of the app. - -“Organizations don’t know how much resource they need to throw at a task. The cloud makes more sense for [short-term apps],” he said. For applications that will be used for five years or more, it makes more sense to place them in company-controlled facilities, he said. 
That's because with three-to-five-year hardware-refresh cycles, the hardware lasts the entire lifespan of the app, and the hardware and app can be retired at the same time. - -Another force driving the decision of private data center vs. the cloud is machine learning. Churchward said that’s because machine learning is often done using large amounts of highly sensitive data, so customers wanted data kept securely in house. They also wanted a low-latency loop between their ML apps and the data lake from which they draw. - -### Colos connect to mulitple cloud providers - -Another allure of colocation providers is that they can act as a pipeline between enterprises and multiple cloud providers. So rather than directly connecting to AWS, Azure, etc., businesses can connect to a colo, and that colo acts like a giant switch, connecting them to cloud providers through dedicated, high-speed networks. - -Villars notes the typical corporate data center is either inside corporate HQ or someplace remote, like South Dakota where land was cheap. But the trade-off is that network connectivity to remote locations is often slower and more expensive. - -That’s where a data-center colo providers with a large footprints come in, since they have points of presence in major cities. No one would fault a New York City-based firm for putting its data center in upstate New York or even further away. But when Equinix, DTR, and others all have data centers right in New York City, customers might get faster and sometimes cheaper connections plus lower latency. - -Steve Cretney, vice president and CIO for food distributor Colony Brands, is in the midst of migrating the company to the cloud and moving everything he can from his data center to AWS. Rather than connect directly to AWS, Colony’s Wisconsin headquarters is connected to an Equinix data center in Chicago. - -Going with Equinix provides more and cheaper bandwidth to the cloud than buying direct connectivity on his own. 
“I effectively moved my data center into Chicago. Now I can compete with a better price on data communication and networks,” he said. - -Cretney estimates that by moving Colony’s networking from a smaller, local provider to Chicago, the company is seeing an annual cost savings of 50 percent for network connectivity that includes telecommunications. - -Also, Colony wants to adopt a mult-cloud-provider strategy to avoid vendor lock-in, and he gets that by using Equinix as his network connection. As the company eventually uses Microsoft Azure and Google Cloud and other providers, Equinex can provide flexible and economic interconnections, he said. - -### **Colos reduce the need for enterprise data-center real estate** - -In 2014, 80 percent of data-centers were owned by enterprises, while colos and the early cloud accounted for 20 percent, said Villars. Today that’s a 50-50 split, and by 2022-2023, IDC projects service providers will own 70 percent of the large-data-center space. - -For the past five years, the amount of new data-center construction by enterprises has been falling steadily at  5 to 10 percent per year, said Villars. “They are not building new ones because they are coming to the realization that being an expert at data-center construction is not something a company has.” - -Enterprises across many sectors are looking at their data-center environment and leveraging things like virtual machines and SSD, thereby compressing the size of their data centers and getting more work done within smaller physical footprints. “So at some point they ask if they are spending appropriately for this space. That’s when they look at colo,” said Villars. - -Join the Network World communities on [Facebook][4] and [LinkedIn][5] to comment on topics that are top of mind. 
- --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3407756/colocation-facilities-buck-the-cloud-data-center-trend.html - -作者:[Andy Patrizio][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Andy-Patrizio/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2019/05/cso_cloud_computing_backups_it_engineer_data_center_server_racks_connections_by_gorodenkoff_gettyimages-943065400_3x2_2400x1600-100796535-large.jpg -[2]: https://www.networkworld.com/article/3223692/what-is-a-data-centerhow-its-changed-and-what-you-need-to-know.html -[3]: https://www.networkworld.com/article/3391380/does-your-cloud-access-security-broker-support-ipv6-it-should.html -[4]: https://www.facebook.com/NetworkWorld/ -[5]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190709 Improving IT Operations - Key to Business Success in Digital Transformation.md b/sources/talk/20190709 Improving IT Operations - Key to Business Success in Digital Transformation.md deleted file mode 100644 index 5ce4f7edfe..0000000000 --- a/sources/talk/20190709 Improving IT Operations - Key to Business Success in Digital Transformation.md +++ /dev/null @@ -1,79 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Improving IT Operations – Key to Business Success in Digital Transformation) -[#]: via: (https://www.networkworld.com/article/3407698/improving-it-operations-key-to-business-success-in-digital-transformation.html) -[#]: author: (Rami Rammaha https://www.networkworld.com/author/Rami-Rammaha/) - -Improving IT Operations – Key to Business Success in Digital Transformation -====== - -![Artem Peretiatko][1] - -Forty seven 
percent of CEOs say they are being “challenged” by their board of directors to show progress in shifting toward a digital business model according to the [Gartner 2018 CIO][2] Agenda Industry Insights Report. By improving IT operations, organizations can progress and even accelerate their digital transformation initiatives efficiently and successfully. The biggest barrier to success is that IT currently spends around 78 percent of their budget and 80 percent of their time just maintaining IT operations, leaving little time and resource left for innovation according to ZK Research[*][3]. - -### **Do you cut the operations budget or invest more in transforming operations? ** - -The Cisco IT Operations Readiness Index 2018 predicted a dramatic change in IT operations as CIOs embrace analytics and automation. The study reported that 88 percent of respondents identify investing in IT operations as key to driving preemptive practices and enhancing customer experience. - -### What does this have to do with the wide area network? - -According to the IT Operations Readiness Index, 73 percent of respondents will collect WAN operational or performance data and 70 percent will analyze WAN data and leverage the results to further automate network operations. However, security is the most data-driven infrastructure today compared to other IT infrastructure functions (i.e. IoT, IP telephony, network infrastructure, data center infrastructure, WAN connectivity, etc.). The big questions are: - - * How do you collect operations data and what data should you collect? - * How do you analyze it? - * How do you then automate IT operations based on the results? - - - -By no means, is this a simple task. IT departments use a combination of data collected internally and by outside vendors to aggregate information used to transform operations and make better business decisions. 
- -In a recent [survey][4] by Frost & Sullivan, 94 percent of respondents indicated they will deploy a Software-defined Wide Area Network ([SD-WAN][5]) in the next 24 months. SD-WAN addresses the gap that router-centric WAN architectures were not designed to fill. A business-driven SD-WAN, designed from the ground up to support a cloud-first business model, provides significantly more network and application performance visibility, significantly assisting enterprises to realize the transformational promise of a digital business model. In fact, Gartner indicates that 90 percent of WAN edge decisions will be based on SD-WAN by 2023. - -### How an SD-WAN can improve IT operations leading to successful digital transformation - -All SD-WAN solutions are not created alike. One of the key components that organizations need to consider and evaluate is having complete observability across the network and applications through a single pane of glass. Without visibility, IT risks running inefficient operations that will stifle digital transformation initiatives. This real-time visibility must provide: - - * Operational metrics enabling IT/CIO’s to shift from a reactive toward a predictive practice - * A centralized dashboard that allows IT to monitor, in real-time, all aspects of network operations – a dashboard that has flexible knobs to adjust and collect metrics from all WAN edge appliances to accelerate problem resolution - - - -The Silver Peak Unity [EdgeConnect™][6] SD-WAN edge platform provides granular visibility into network and application performance. The EdgeConnect platform ensures the highest quality of experience for both end users and IT. End users enjoy always-consistent, always-available application performance including the highest quality of voice and video, even over broadband. 
Utilizing the [Unity Orchestrator™][7] comprehensive management dashboard as shown below, IT gains complete observability into the performance attributes of the network and applications in real-time. Customizable widgets provide a wealth of operational data including a health heatmap for every SD-WAN appliance deployed, flow counts, active tunnels, logical topologies, top talkers, alarms, bandwidth consumed by each application and location, application latency and jitter and much more. Furthermore, the platform maintains a week’s worth of data with context allowing IT to playback and see what has transpired at a specific time and location, analogous to a DVR. - -By providing complete observability of the entire WAN, IT spends less time troubleshooting network and application bottlenecks and fielding support/help desk calls day and night, and more time focused on strategic business initiatives. - -![][8] - -This solution brief, “[Simplify SD-WAN Operations with Greater Visibility][9]”, provides additional detail on the capabilities offered in the business-driven EdgeConnect SD-WAN edge platform that enables businesses to accelerate their shift toward a digital business model. 
- -![][10] - -* ZK Research quote from [Cisco IT Operations Readiness Index 2018][11] - --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3407698/improving-it-operations-key-to-business-success-in-digital-transformation.html - -作者:[Rami Rammaha][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Rami-Rammaha/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2019/07/istock-1096811078_1200x800-100801264-large.jpg -[2]: https://www.gartner.com/smarterwithgartner/is-digital-a-priority-for-your-industry/ -[3]: https://blog.silver-peak.com/improving-it-operations-key-to-business-success-in-digital-transformation#footnote -[4]: https://www.silver-peak.com/sd-wan-edge-survey -[5]: https://www.silver-peak.com/sd-wan -[6]: https://www.silver-peak.com/products/unity-edge-connect -[7]: https://www.silver-peak.com/products/unity-orchestrator -[8]: https://images.idgesg.net/images/article/2019/07/silver-peak-unity-edgeconnect-sdwan-100801265-large.jpg -[9]: https://www.silver-peak.com/resource-center/simplify-sd-wan-operations-greater-visibility -[10]: https://images.idgesg.net/images/article/2019/07/simplify-sd-wan-operations-with-greater-visibility-100801266-large.jpg -[11]: https://s3-us-west-1.amazonaws.com/connectedfutures-prod/wp-content/uploads/2018/11/CF_transforming_IT_operations_report_3-2.pdf diff --git a/sources/talk/20190709 Linux a key player in the edge computing revolution.md b/sources/talk/20190709 Linux a key player in the edge computing revolution.md deleted file mode 100644 index df1dba9344..0000000000 --- a/sources/talk/20190709 Linux a key player in the edge computing revolution.md +++ /dev/null @@ -1,112 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: 
reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Linux a key player in the edge computing revolution) -[#]: via: (https://www.networkworld.com/article/3407702/linux-a-key-player-in-the-edge-computing-revolution.html) -[#]: author: (Sandra Henry-Stocker https://www.networkworld.com/author/Sandra-Henry_Stocker/) - -Linux a key player in the edge computing revolution -====== -Edge computing is augmenting the role that Linux plays in our day-to-day lives. A conversation with Jaromir Coufal from Red Hat helps to define what the edge has become. -![Dominic Smith \(CC BY 2.0\)][1] - -In the past few years, [edge computing][2] has been revolutionizing how some very familiar services are provided to individuals like you and me, as well as how services are managed within major industries. Try to get your arms around what edge computing is today, and you might just discover that your arms aren’t nearly as long or as flexible as you’d imagined. And Linux is playing a major role in this ever-expanding edge. - -One reason why edge computing defies easy definition is that it takes many different forms. As Jaromir Coufal, principal product manager at Red Hat, recently pointed out to me, there is no single edge. Instead, there are lots of edges – depending on what compute features are needed. He suggests that we can think of the edge as something of a continuum of capabilities with the problem being resolved determining where along that particular continuum any edge solution will rest. - -**[ Also read: [What is edge computing?][3] and [How edge networking and IoT will reshape data centers][4] ]** - -Some forms of edge computing include consumer electronics that are used and installed in millions of homes, others that serve tens of thousands of small businesses with operating their facilities, and still others that tie large companies to their remote sites. 
Key to this elusive definition is the idea that edge computing always involves distributing the workload in such a way that the bulk of the computing work is done remotely from the central core of the business and close to the business problem being addressed. - -Done properly, edge computing can provide services that are both faster and more reliable. Applications running on the edge can be more resilient and run considerably faster because their required data resources are local. In addition, data can be processed or analyzed locally, often requiring only periodic transfer of results to central sites. - -While physical security might be lower at the edge, edge devices often implement security features that allow them to detect 1) manipulation of the device, 2) malicious software, and 3) a physical breach and wipe data. - -### Benefits of edge computing - -Some of the benefits of edge computing include: - - * A quick response to intrusion detection, including the ability for a remote device to detach or self-destruct - * The ability to instantly stop communication when needed - * Constrained functionality and fewer generic entry points - * Rugged and reliable problem resistance - * Making the overall computing system harder to attack because computing is distributed - * Less data-in-transit exposure - - - -Some examples of edge computing devices include those that provide: - - * Video surveillance – watching for activity, reporting only if seen - * Controlling autonomous vehicles - * Production monitoring and control - - - -### Edge computing success story: Chick-fil-A - -One impressive example of highly successful edge computing caught me by surprise. It turns out Chick-fil-A uses edge computing devices to help manage its food preparation services. At Chick-fil-A, edge devices: - - 1. Analyze a fryer’s cleaning and cooking - 2. Aggregate data as a failsafe in case internet connectivity is lost - 3. 
Help with decision-making about cooking – how much and how long to cook - 4. Enhance business operations - 5. Help automate the complex food cooking and holding decisions so that even newbies get things right - 6. Function even when the connection with the central site is down - - - -As Coufal pointed out, Chick-fil-A runs [Kubernetes][5] at the edge in every one of its restaurants. Their key motivators are low-latency, scale of operations, and continuous business. And it seems to be working extremely well. - -[Chick-fil-A’s hypothesis][6] captures it all: By making smarter kitchen equipment, we can collect more data. By applying data to our restaurant, we can build more intelligent systems. By building more intelligent systems, we can better scale our business. - -### Are you edge-ready? - -There’s no quick answer as to whether your organization is “edge ready.” Many factors determine what kind of services can be deployed on the edge and whether and when those services need to communicate with more central devices. Some of these include: - - * Whether your workload can be functionally distributed - * If it’s OK for devices to have infrequent contact with the central services - * If devices can work properly when cut off from their connection back to central services - * Whether the devices can be secured (e.g., trusted not to provide an entry point) - - - -Implementing an edge computing network will likely take a long time from initial planning to implementation. Still, this kind of technology is taking hold and offers some strong advantages. While edge computing initially took hold 15 or more years ago, the last few years have seen renewed interest thanks to tech advances that have enabled new uses. - -Coufal noted that it's been 15 or more years since edge computing concepts and technologies were first introduced, but renewed interest has come about due to tech advances enabling new uses that require this technology. 
- -**More about edge computing:** - - * [How edge networking and IoT will reshape data centers][4] - * [Edge computing best practices][7] - * [How edge computing can help secure the IoT][8] - - - -Join the Network World communities on [Facebook][9] and [LinkedIn][10] to comment on topics that are top of mind. - --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3407702/linux-a-key-player-in-the-edge-computing-revolution.html - -作者:[Sandra Henry-Stocker][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Sandra-Henry_Stocker/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2019/07/telecom-100801330-large.jpg -[2]: https://www.networkworld.com/article/3224893/what-is-edge-computing-and-how-it-s-changing-the-network.html -[3]: https://www.networkworld.com/article/3224893/internet-of-things/what-is-edge-computing-and-how-it-s-changing-the-network.html -[4]: https://www.networkworld.com/article/3291790/data-center/how-edge-networking-and-iot-will-reshape-data-centers.html -[5]: https://www.infoworld.com/article/3268073/what-is-kubernetes-container-orchestration-explained.html -[6]: https://medium.com/@cfatechblog/edge-computing-at-chick-fil-a-7d67242675e2 -[7]: https://www.networkworld.com/article/3331978/lan-wan/edge-computing-best-practices.html -[8]: https://www.networkworld.com/article/3331905/internet-of-things/how-edge-computing-can-help-secure-the-iot.html -[9]: https://www.facebook.com/NetworkWorld/ -[10]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190711 Smarter IoT concepts reveal creaking networks.md b/sources/talk/20190711 Smarter IoT concepts reveal creaking networks.md deleted file mode 100644 index 86150bc580..0000000000 --- 
a/sources/talk/20190711 Smarter IoT concepts reveal creaking networks.md +++ /dev/null @@ -1,79 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Smarter IoT concepts reveal creaking networks) -[#]: via: (https://www.networkworld.com/article/3407852/smarter-iot-concepts-reveal-creaking-networks.html) -[#]: author: (Patrick Nelson https://www.networkworld.com/author/Patrick-Nelson/) - -Smarter IoT concepts reveal creaking networks -====== -Today’s networks don’t meet the needs of emergent internet of things systems. IoT systems need their own modern infrastructure, researchers at the University of Magdeburg say. -![Thinkstock][1] - -The internet of things (IoT) needs its own infrastructure ecosystem — one that doesn't use external clouds at all, researchers at the University of Magdeburg say. - -The computer scientists recently obtained funding from the German government to study how to build a future-generation of revolutionary, emergent IoT systems. They say networks must be fault tolerant, secure, and traverse disparate protocols, which they aren't now. - -**[ Read also: [What is edge computing?][2] and [How edge networking and IoT will reshape data centers][3] ]** - -The researchers say a smarter, unique, and organic infrastructure needs to be developed for the IoT and that simply adapting the IoT to traditional networks won't work. They say services must self-organize and function autonomously and that people must accept the fact that we are using the internet in ways never originally intended.  - -"The internet, as we know it, is based on network architectures of the 70s and 80s, when it was designed for completely different applications,” the researchers say in their [media release][4]. 
The internet has centralized security, which causes choke points, and an inherent lack of dynamic controls, which translates to inflexibility in access rights — all of which make it difficult to adapt the IoT to it.
“Which, in turn, makes it possible to make much more precise statements, for example when predicting climate models, observing traffic flows, or managing large factories in Industry 4.0.” - -[Industry 4.0][8] refers to smart factories that have connected machines autonomously self-managing their own supply chain, production output, and logistics without human intervention. - -Managing risks better than the current internet is one of DoRIoT's goals. The idea is to “guarantee full sovereignty over proprietary data.” To get there, though, one has to eliminate dependency on the cloud and access to data via third parties, they say. - -“This allows companies to be independent of the server infrastructures of external service providers such as Google, Microsoft or Amazon, which are subject to constant changes and even may not be accessible,” they say. - -**More about edge networking** - - * [How edge networking and IoT will reshape data centers][3] - * [Edge computing best practices][9] - * [How edge computing can help secure the IoT][10] - - - -Join the Network World communities on [Facebook][11] and [LinkedIn][12] to comment on topics that are top of mind. 
- --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3407852/smarter-iot-concepts-reveal-creaking-networks.html - -作者:[Patrick Nelson][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Patrick-Nelson/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2018/02/industry_4-0_industrial_iot_internet_of_things_network_thinkstock_613880008-100749946-large.jpg -[2]: https://www.networkworld.com/article/3224893/internet-of-things/what-is-edge-computing-and-how-it-s-changing-the-network.html -[3]: https://www.networkworld.com/article/3291790/data-center/how-edge-networking-and-iot-will-reshape-data-centers.html -[4]: http://www.ovgu.de/en/University/In+Profile/Key+Profile+Areas/Research/Secure+data+protection+in+the+new+internet+of+things.html -[5]: http://www.doriot.net/ -[6]: https://www.networkworld.com/article/3224893/what-is-edge-computing-and-how-it-s-changing-the-network.html -[7]: http://iks.cs.ovgu.de/iks/en/ICS.html -[8]: https://www.networkworld.com/article/3199671/what-is-industry-4-0.html -[9]: https://www.networkworld.com/article/3331978/lan-wan/edge-computing-best-practices.html -[10]: https://www.networkworld.com/article/3331905/internet-of-things/how-edge-computing-can-help-secure-the-iot.html -[11]: https://www.facebook.com/NetworkWorld/ -[12]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190716 Server hardware makers shift production out of China.md b/sources/talk/20190716 Server hardware makers shift production out of China.md deleted file mode 100644 index be29283977..0000000000 --- a/sources/talk/20190716 Server hardware makers shift production out of China.md +++ /dev/null @@ -1,73 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: 
( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Server hardware makers shift production out of China) -[#]: via: (https://www.networkworld.com/article/3409784/server-hardware-makers-shift-production-out-of-china.html) -[#]: author: (Andy Patrizio https://www.networkworld.com/author/Andy-Patrizio/) - -Server hardware makers shift production out of China -====== -Tariffs on Chinese products and unstable U.S./China relations cause server makers to speed up their move out of China. -![Etereuti \(CC0\)][1] - -The supply chain of vendors that build servers and network communication devices is accelerating its shift of production out of China to Taiwan and North America, along with other nations not subject to the trade war between the U.S. and China. - -Last May, the Trump Administration levied tariffs on a number of imported Chinese goods, computer components among them. The tariffs ranged from 10-25%. Consumers were hit hardest, since they are more price sensitive than IT buyers. PC World said the [average laptop price could rise by $120][2] just for the tariffs. - -But since the tariff was based on the value of the product, that means server hardware prices could skyrocket, since servers cost much more than PCs. - -**[ Read also: [HPE’s CEO lays out his technology vision][3] ]** - -### Companies that are moving production out of China - -The Taiwanese tech publication DigiTimes reported (article now locked behind a paywall) that Mitac Computing Technology, a server ODM, reactivated an old production line at Hsinchu Science Park (HSP) in Taiwan at the end of 2018 and restarted another for motherboard SMT process in March 2019. The company plans to establish one more SMT production line prior to the end of 2019. - -It went on to say Mitac plans to produce all of its high-end U.S.-bound servers in Taiwan and is looking to move 30% of its overall server production lines back to Taiwan in the next three years. 
- -Wiwynn, a cloud computing server subsidiary of Wistron, is primarily assembling its U.S.-bound servers in Mexico and has also recently established a production site in southern Taiwan per clients' requests. - -Taiwan-based server chassis and assembly player AIC recently expanded the number of its factories in Taiwan to four and has been aggressively forming cooperation with its partners to expand its capacity. Many Taiwan-based component suppliers are also expanding their capacity in Taiwan. - -**[ [Get certified as an Apple Technical Coordinator with this seven-part online course from PluralSight.][4] ]** - -Several ODMs, such as Inventec, Wiwynn, Wistron, and Foxconn, all have plants in Mexico, while Quanta Computer has production lines in the U.S. Wiwynn also plans to open manufacturing facilities in eastern U.S. - -“This is not something that just happened overnight, it’s a process that started a few years ago. The tariffs just accelerated the desire of ODMs to do it,” said Ashish Nadkarni, group vice president for infrastructure systems, platforms and technologies at IDC. “Since [President] Trump has come into office there has been saber rattling about China and a trade war. There has also been a focus on margins.” - -He added that component makers are definitely moving out of China to other parts of Asia, like Korea, the Philippines, and Vietnam. - -### HPE, Dell and Lenovo should remain unaffected - -The big three branded server makers are all largely immunized against the tariffs. HP Enterprise, Dell, and Lenovo all have U.S.-based assemblies and their contract manufacturers are in Taiwan, said Nadkarni. So, their costs should remain unaffected by tariffs. - -The tariffs are not affecting sales as much as revenue for hyperscale whitebox vendors is being stressed. Hyperscale companies such as Amazon Web Services (AWS), Microsoft, Google, etc. have contracts with vendors such as Inspur and Super Micro, and if prices fluctuate, that’s not their problem. 
The hardware vendor is expected to deliver at the agreed cost. - -So margins, already paper thin, can’t be passed on to the customer, unlike the aforementioned laptop example. - -“It’s not the end customers who are affected by it, it’s the vendors who are affected by it. Certain things they can pass on, like component prices. But if the build value goes up, that’s not the customers problem, that’s the vendor’s problem,” said Nadkarni. - -So while it may cost you more to buy a laptop as this trade fracas goes on, it shouldn’t cost more to buy a server. - -Join the Network World communities on [Facebook][5] and [LinkedIn][6] to comment on topics that are top of mind. - --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3409784/server-hardware-makers-shift-production-out-of-china.html - -作者:[Andy Patrizio][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Andy-Patrizio/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2018/07/asia_china_flag_grunge-stars_pixabay_etereuti-100763424-large.jpg -[2]: https://www.pcworld.com/article/3403405/trump-tariffs-on-chinese-goods-could-cost-you-120-more-for-notebook-pcs-say-dell-hp-and-cta.html -[3]: https://www.networkworld.com/article/3394879/hpe-s-ceo-lays-out-his-technology-vision.html -[4]: https://pluralsight.pxf.io/c/321564/424552/7490?u=https%3A%2F%2Fwww.pluralsight.com%2Fpaths%2Fapple-certified-technical-trainer-10-11 -[5]: https://www.facebook.com/NetworkWorld/ -[6]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190717 How edge computing is driving a new era of CDN.md b/sources/talk/20190717 How edge computing is driving a new era of CDN.md deleted file mode 100644 index 643d3aa713..0000000000 --- 
a/sources/talk/20190717 How edge computing is driving a new era of CDN.md +++ /dev/null @@ -1,107 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (How edge computing is driving a new era of CDN) -[#]: via: (https://www.networkworld.com/article/3409027/how-edge-computing-is-driving-a-new-era-of-cdn.html) -[#]: author: (Matt Conran https://www.networkworld.com/author/Matt-Conran/) - -How edge computing is driving a new era of CDN -====== -A CDN is an edge application and an edge application is a superset of what your CDN is doing. -![geralt \(CC0\)][1] - -We are living in a hyperconnected world where anything can now be pushed to the cloud. The idea of having content located in one place, which could be useful from the management’s perspective, is now redundant. Today, the users and data are omnipresent. - -The customer’s expectations have up-surged because of this evolution. There is now an increased expectation of high-quality service and a decrease in customer’s patience. In the past, one could patiently wait 10 hours to download the content. But this is certainly not the scenario at the present time. Nowadays we have high expectations and high-performance requirements but on the other hand, there are concerns as well. The internet is a weird place, with unpredictable asymmetric patterns, buffer bloat and a list of other [performance-related problems][2] that I wrote about on Network Insight. _[Disclaimer: the author is employed by Network Insight.]_ - -Also, the internet is growing at an accelerated rate. By the year 2020, the internet is expected to reach 1.5 Gigabyte of traffic per day per person. In the coming times, the world of the Internet of Things (IoT) driven by objects will far supersede these data figures as well. For example, a connected airplane will generate around 5 Terabytes of data per day. 
This spiraling level of volume requires a new approach to data management and forces us to re-think how we delivery applications. - -[RELATED: How Notre Dame is going all in with Amazon’s cloud][3] - -Why? Because all this information cannot be processed by a single cloud or an on-premise location. Latency will always be a problem. For example, in virtual reality (VR) anything over 7 milliseconds will cause motion sickness. When decisions are required to be taken in real-time, you cannot send data to the cloud. You can, however, make use of edge computing and a multi-CDN design. - -### Introducing edge computing and multi-CDN - -The rate of cloud adoption, all-things-video, IoT and edge computing are bringing life back to CDNs and multi-CDN designs. Typically, a multi-CDN is an implementation pattern that includes more than one CDN vendor. The traffic direction is performed by using different metrics, whereby traffic can either be load balanced or failed across the different vendors. - -Edge computing moves actions as close as possible to the source. It is the point where the physical world interacts with the digital world. Logically, the decentralized approach of edge computing will not take over the centralized approach. They will be complementary to each other, so that the application can run at its peak level, depending on its position in the network. - -For example, in IoT, saving battery life is crucial. Let’s assume an IoT device can conduct the transaction in 10ms round trip time (RTT), instead of 100ms RTT. As a result, it can use 10 times less battery. - -### The internet, a performance bottleneck - -The internet is designed on the principle that everyone can talk to everyone, thereby providing universal connectivity whether required or not. There has been a number of design changes with network address translation (NAT) being the biggest. However, essentially the role of the internet has remained the same in terms of connectivity, regardless of location. 
- -With this type of connectivity model, distance is an important determinant for the application’s performance. Users on the other side of the planet will suffer regardless of buffer sizes or other device optimizations. Long RTT is experienced as packets go back and forth before the actual data transmission. Although caching and traffic redirection is being used but limited success has been achieved so far. - -### The principles of application delivery - -When transmission control protocol (TCP) starts, it thinks it is back in the late 1970s. It assumes that all services are on a local area network (LAN) and there is no packet loss. It then starts to work backward from there. Back when it was designed, we didn't have real-time traffic, such as voice and video that is latency and jitter sensitive. - -Ideally, TCP was designed for the ease of use and reliability, not to boost the performance. You actually need to optimize the TCP stack. And this is why CDNs are very good at performing such tasks. For example, if a connection is received from a mobile phone, a CDN will start with the assumption that there is going to be high jitter and packet loss. This allows them to size the TCP window correctly that accurately match network conditions. - -How do you magnify the performance, what options do you have? In a generic sense, many look to lowering the latency. However, with applications, such as video streaming, latency does not tell you if the video is going to buffer. One can only assume that lower latency will lead to less buffering. In such a scenario, measurement-based on throughput is a far better performance metric since will tell you how fast an object will load. - -We have also to consider the page load times. At the network level, it's the time to first byte (TTFB) and ping. However, these mechanisms don’t tell you much about the user experience as everything fits into one packet. Using ping will not inform you about the bandwidth problems. 
- -And if a web page goes slower by 25% once packet loss exceeds 5% and you are measuring time to the first byte which is the 4th packet - what exactly can you learn? TTFB is comparable to an internet control message protocol (ICMP) request just one layer up the stack. It's good if something is broken but not if there is underperformance issue. - -When you examine the history of TTFB measuring, you will find that it was deployed due to the lack of Real User Monitoring (RUM) measurements. Previously TTFB was as good in approximating how fast something was going to load, but we don't have to approximate anymore as we can measure it with RUM. RUM is measurements from the end-users. An example could be the metrics generated from a webpage that is being served to an actual user. - -Conclusively, TTFB, ping and page load times are not sophisticated measurements. We should prefer RUM time measurements as much as we can. This provides a more accurate picture of the user experience. This is something which has become critical over the last decade. - -Now we are living in a world of RUM which lets us build our network based on what matters to the business users. All CDNs should aim for RUM measurements. For this, they may need to integrate with traffic management systems that intelligently measure on what the end-user really sees. - -### The need for multi-CDN - -Primarily, the reasons one would opt for a multi-CDN environment are availability and performance. No single CDN can be the fastest to everyone and everywhere in the world. It is impossible due to the internet's connectivity model. However, combining the best of two or even more CDN providers will increase the performance. - -A multi-CDN will give a faster performance and higher availability than what can be achieved with a single CDN. A good design is what runs two availability zones. A better design is what runs two availability zones with a single CDN provider. 
However, superior design is what runs two availability zones in a multi-CDN environment. - -### Edge applications will be the new norm - -It’s not that long ago that there was a transition from the heavy physical monolithic architecture to the agile cloud. But all that really happened was the transition from the physical appliance to a virtual cloud-based appliance. Maybe now is the time that we should ask, is this the future that we really want? - -One of the main issues in introducing edge applications is the mindset. It is challenging to convince yourself or your peers that the infrastructure you have spent all your time working on and investing in is not the best way forward for your business.  - -Although the cloud has created a big buzz, just because you migrate to the cloud does not mean that your applications will run faster. In fact, all you are really doing is abstracting the physical pieces of the architecture and paying someone else to manage it. The cloud has, however, opened the door for the edge application conversation. We have already taken the first step to the cloud and now it's time to make the second move. - -Basically, when you think about edge applications: its simplicity is a programmable CDN. A CDN is an edge application and an edge application is a superset of what your CDN is doing. Edge applications denote cloud computing at the edge. It is a paradigm to distribute the application closer to the source for lower latency, additional resilience, and simplified infrastructure, where you still have control and privacy. - -From an architectural point of view, an edge application provides more resilience than deploying centralized applications. In today's world of high expectations, resilience is a necessity for the continuity of business. Edge applications allow you to collapse the infrastructure into an architecture that is cheaper, simpler and more attentive to the application. 
The less in the expanse of infrastructure, the more time you can focus on what really matters to your business - the customer. - -### An example of an edge architecture - -An example of edge architecture is within each PoP, every application has its own isolated JavaScript (JS) environment. JavaScript is great for security isolation and the performance guarantees scale. The JavaScript is a dedicated isolated instance that executes the code at the edge. - -Most likely, each JavaScript has its own virtual machine (VM). The sole operation that the VM is performing is the JavaScript runtime engine and the only thing it is running is the customer's code. One could use Google V8 open-source high-performance JavaScript and WebAssembly engine. - -Let’s face it, if you continue building more PoPs, you will hit the law of diminishing returns. When it comes to application such as mobile, you really are maxed out when throwing PoPs to form a solution. So we need to find another solution. - -In the coming times, we are going to witness a trend where most applications will become global, which means edge applications. It certainly makes little sense to place all the application in one location when your users are everywhere else. - -**This article is published as part of the IDG Contributor Network. [Want to Join?][4]** - -Join the Network World communities on [Facebook][5] and [LinkedIn][6] to comment on topics that are top of mind. 
- --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3409027/how-edge-computing-is-driving-a-new-era-of-cdn.html - -作者:[Matt Conran][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Matt-Conran/ -[b]: https://github.com/lujun9972 -[1]: https://images.techhive.com/images/article/2017/02/network-traffic-100707086-large.jpg -[2]: https://network-insight.net/2016/12/buffers-packet-drops/ -[3]: https://www.networkworld.com/article/3014599/cloud-computing/how-notre-dame-is-going-all-in-with-amazon-s-cloud.html#tk.nww-fsb -[4]: https://www.networkworld.com/contributor-network/signup.html -[5]: https://www.facebook.com/NetworkWorld/ -[6]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190717 Public internet should be all software-defined.md b/sources/talk/20190717 Public internet should be all software-defined.md deleted file mode 100644 index 3b834bea66..0000000000 --- a/sources/talk/20190717 Public internet should be all software-defined.md +++ /dev/null @@ -1,73 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Public internet should be all software-defined) -[#]: via: (https://www.networkworld.com/article/3409783/public-internet-should-be-all-software-defined.html) -[#]: author: (Patrick Nelson https://www.networkworld.com/author/Patrick-Nelson/) - -Public internet should be all software-defined -====== -Having a programmable public internet will correct inefficiencies in the current system, engineers at NOIA say. 
-![Thinkstock][1] - -The public internet should migrate to a programmable backbone-as-a-service architecture, says a team of network engineers behind NOIA, a startup promising to revolutionize global traffic. They say the internet will be more efficient if internet protocols and routing technologies are re-worked and then combined with a traffic-trading blockchain. - -It’s “impossible to use internet for modern applications,” the company says on its website. “Almost all global internet companies struggle to ensure uptime and reliable user experience.” - -That’s because modern techniques aren’t being introduced fully, NOIA says. The engineers say algorithms should be implemented to route traffic and that segment routing technology should be adopted. Plus, blockchain should be instigated to trade internet transit capacity. A “programmable internet solves the web’s inefficiencies,” a representative from NOIA told me. - -**[ Read also: [What is IPv6, and why aren’t we there yet?][2] ]** - -### Deprecate the public internet - -NOIA has started introducing a caching, distributed content delivery application to improve website loading times, but it wants to ultimately deprecate the existing internet completely. - -The company currently has 353 active cache nodes around the world, with a total 27 terabytes of storage for that caching system—NOIA clients contribute spare bandwidth and storage. It’s also testing a network backbone using four providers with European and American locations that it says will be the [development environment for its envisaged software-defined and radical internet replacement][3]. - -### The problem with today's internet - -The “internet is a mesh of tangled up cables,” [NOIA says][4]. “Thousands of physically connected networks” are involved. Any configuration alterations in any of the jumble of networks causes issues with the protocols, it explains. 
The company is referring to Border Gateway Protocol (BGP), which lets routers discover paths to IP addresses through the disparate network. Because BGP only forwards to a neighboring router, it doesn’t manage the entire route. That introduces “severe variability” or unreliability. - -“It is impossible to guarantee service reliability without using overlay networks. Low-latency, performance-critical applications, and games cannot operate on public Internet,” the company says. - -### How a software-defined internet works - -NOIA's idea is to use [IPv6][5], the latest internet protocol. IPv6 features an expanded packet size and allows custom headers. The company then adds segment routing to create Segment Routing over IPv6 (SRv6). That SRv6 combo adds routing information to each data packet sent—a packet-level programmable network, in other words. - -Segment routing, roughly, is an updated internet protocol that lets routers comprehend routing information in packet headers and then perform the routing. Cisco has been using it, too. - -NOIA’s network then adds the SRv6 amalgamation to distributed ledger technology (blockchain) in order to let ISPs and data centers buy and sell the routes—buyers can choose their routes in the exchange, too. - -In addition to trade, blockchain introduces security. It's worth noting that routings aren’t the only internet technologies that could be disrupted due to blockchain. In April I wrote about [organizations that propose moving data storage transactions over to distributed ledgers][6]. They say that will be more secure than anything seen before. [Ethernet’s lack of inherent security could be corrected by smart contract, trackable verifiable transactions][7], say some. And, of course, supply chain, the automotive vertical, and the selling of sensor data overall may emerge as [use-contenders for secure, blockchain in the internet of things][8]. 
- -In NOIA’s case, with SRv6 blended with distributed ledgers, the encrypted ledger holds the IP addresses, but it is architecturally decentralized—no one controls it. That’s one element of added security, along with the aforementioned trading, provided by the ledger. - -That trading could handle the question of who’s paying for all this. However, NOIA says current internet hardware will be able to understand the segment routings, so no new equipment investments are needed. - -Join the Network World communities on [Facebook][9] and [LinkedIn][10] to comment on topics that are top of mind. - --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3409783/public-internet-should-be-all-software-defined.html - -作者:[Patrick Nelson][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Patrick-Nelson/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2018/05/dns_browser_http_web_internet_thinkstock-100758191-large.jpg -[2]: https://www.networkworld.com/article/3254575/lan-wan/what-is-ipv6-and-why-aren-t-we-there-yet.html -[3]: https://medium.com/noia/development-update-06-20-07-04-2879f9fce3cb -[4]: https://noia.network/ -[5]: https://www.networkworld.com/article/3254575/what-is-ipv6-and-why-aren-t-we-there-yet.html -[6]: https://www.networkworld.com/article/3390722/how-data-storage-will-shift-to-blockchain.html -[7]: https://www.networkworld.com/article/3356496/how-blockchain-will-manage-networks.html -[8]: https://www.networkworld.com/article/3330937/how-blockchain-will-transform-the-iot.html -[9]: https://www.facebook.com/NetworkWorld/ -[10]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190718 Worst DNS attacks and how to mitigate them.md 
b/sources/talk/20190718 Worst DNS attacks and how to mitigate them.md deleted file mode 100644 index b490eeeea1..0000000000 --- a/sources/talk/20190718 Worst DNS attacks and how to mitigate them.md +++ /dev/null @@ -1,151 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Worst DNS attacks and how to mitigate them) -[#]: via: (https://www.networkworld.com/article/3409719/worst-dns-attacks-and-how-to-mitigate-them.html) -[#]: author: (Michael Cooney https://www.networkworld.com/author/Michael-Cooney/) - -Worst DNS attacks and how to mitigate them -====== -DNS threats, including DNS hijacking, tunneling, phishing, cache poisoning and DDoS attacks, are all on the rise. -![Max Bender \(CC0\)][1] - -The Domain Name System remains under constant attack, and there seems to be no end in sight as threats grow increasingly sophisticated. - -DNS, known as the internet’s phonebook, is part of the global internet infrastructure that translates between familiar names and the numbers computers need to access a website or send an email. While DNS has long been the target of assailants looking to steal all manner of corporate and private information, the threats in the [past year][2] or so indicate a worsening of the situation. - -**More about DNS:** - - * [DNS in the cloud: Why and why not][3] - * [DNS over HTTPS seeks to make internet use more private][4] - * [How to protect your infrastructure from DNS cache poisoning][5] - * [ICANN housecleaning revokes old DNS security key][6] - - - -IDC reports that 82% of companies worldwide have faced a DNS attack over the past year. The research firm recently published its fifth annual [Global DNS Threat Report][7], which is based on a survey IDC conducted on behalf of DNS security vendor EfficientIP of 904 organizations across the world during the first half of 2019. 
- -According to IDC's research, the average costs associated with a DNS attack rose by 49% compared to a year earlier. In the U.S., the average cost of a DNS attack tops out at more than $1.27 million. Almost half of respondents (48%) report losing more than $500,000 to a DNS attack, and nearly 10% say they lost more than $5 million on each breach. In addition, the majority of U.S. organizations say that it took more than one day to resolve a DNS attack. - -“Worryingly, both in-house and cloud applications were damaged, with growth of over 100% for in-house application downtime, making it now the most prevalent damage suffered,” IDC wrote. "DNS attacks are moving away from pure brute-force to more sophisticated attacks acting from the internal network. This will force organizations to use intelligent mitigation tools to cope with insider threats." - -### Sea Turtle DNS hijacking campaign - -An ongoing DNS hijacking campaign known as Sea Turtle is one example of what's occuring in today's DNS threat landscape. - -This month, [Cisco Talos][8] security researchers said the people behind the Sea Turtle campaign have been busy [revamping their attacks][9] with new infrastructure and going after new victims. - -**[ [Prepare to become a Certified Information Security Systems Professional with this comprehensive online course from PluralSight. Now offering a 10-day free trial!][10] ]** - -In April, Talos released a [report detailing][11] Sea Turtle and calling it the “first known case of a domain name registry organization that was compromised for cyber espionage operations.” Talos says the ongoing DNS threat campaign is a state-sponsored attack that abuses DNS to harvest credentials to gain access to sensitive networks and systems in a way that victims are unable to detect, which displays unique knowledge on how to manipulate DNS. 
- -By obtaining control of victims’ DNS, the attackers can change or falsify any data on the Internet and illicitly modify DNS name records to point users to actor-controlled servers; users visiting those sites would never know, Talos reports.  - -The hackers behind Sea Turtle appear to have regrouped after the April report from Talos and are redoubling their efforts with new infrastructure – a move Talos researchers find to be unusual: “While many actors will slow down once they are discovered, this group appears to be unusually brazen, and will be unlikely to be deterred going forward,” Talos [wrote][9] in July. - -“Additionally, we discovered a new DNS hijacking technique that we assess with moderate confidence is connected to the actors behind Sea Turtle. This new technique is similar in that the threat actors compromise the name server records and respond to DNS requests with falsified A records,” Talos stated.  - -“This new technique has only been observed in a few highly targeted operations. We also identified a new wave of victims, including a country code top-level domain (ccTLD) registry, which manages the DNS records for every domain [that] uses that particular country code; that access was used to then compromise additional government entities. Unfortunately, unless there are significant changes made to better secure DNS, these sorts of attacks are going to remain prevalent,” Talos wrote. - -### DNSpionage attack upgrades its tools - -Another newer threat to DNS comes in the form of an attack campaign called [DNSpionage][12].  - -DNSpionage initially used two malicious websites containing job postings to compromise targets via crafted Microsoft Office documents with embedded macros. The malware supported HTTP and DNS communication with the attackers. And the attackers are continuing to develop new assault techniques. - -“The threat actor's ongoing development of DNSpionage malware shows that the attacker continues to find new ways to avoid detection. 
DNS tunneling is a popular method of exfiltration for some actors, and recent examples of DNSpionage show that we must ensure DNS is monitored as closely as an organization's normal proxy or weblogs,” [Talos wrote][13]. “DNS is essentially the phonebook of the internet, and when it is tampered with, it becomes difficult for anyone to discern whether what they are seeing online is legitimate.” - -The DNSpionage campaign targeted various businesses in the Middle East as well as United Arab Emirates government domains. - -“One of the biggest problems with DNS attacks or the lack of protection from them is complacency,” said Craig Williams, director of Talos outreach. Companies think DNS is stable and that they don’t need to worry about it. “But what we are seeing with attacks like DNSpionage and Sea Turtle are kind of the opposite, because attackers have figured out how to use it to their advantage – how to use it to do damage to credentials in a way, in the case of Sea Turtle, that the victim never even knows it happened. And that’s a real potential problem.” - -If you know, for example, your name server has been compromised, then you can force everyone to change their passwords. But if instead they go after the registrar and the registrar points to the bad guy’s name, you never knew it happened because nothing of yours was touched – that’s why these new threats are so nefarious, Williams said. - -“Once attackers start using it publicly, successfully, other bad guys are going to look at it and say, ‘Hey, why don't I use that to harvest a bunch of credentials from the sites I am interested in,’” Williams said. - -### **The DNS IoT risk** - -Another developing risk would be the proliferation of IoT devices.  The Internet Corporation for Assigned Names and Numbers (ICANN) recently wrote a [paper on the risk that IoT brings to DNS][14].  
- -“The IoT is a risk to the DNS because various measurement studies suggest that IoT devices could stress the DNS infrastructure in ways that we have not seen before,” ICANN stated.   “For example, a software update for a popular IP-enabled IoT device that causes the device to use the DNS more frequently (e.g., regularly lookup random domain names to check for network availability) could stress the DNS in individual networks when millions of devices automatically install the update at the same time.” - -While this is a programming error from the perspective of individual devices, it could result in a significant attack vector from the perspective of DNS infrastructure operators. Incidents like this have already occurred on a small scale, but they may occur more frequently in the future due to the growth of heterogeneous IoT devices from manufacturers that equip their IoT devices with controllers that use the DNS, ICANN stated. - -ICANN also suggested that IoT botnets will represent an increased threat to DNS operators. “Larger DDoS attacks, partly because IoT bots are more difficult to eradicate. Current botnet sizes are on the order of hundreds of thousands. The most well-known example is the Mirai botnet, which involved 400K (steady-state) to 600K (peak) infected IoT devices.  The Hajime botnet hovers around 400K infected IoT devices, but has not launched any DDoS attacks yet. With the growth of the IoT, these attacks may grow to involve millions of bots and as a result larger DDoS attacks.” - -### **DNS security warnings grow** - -The UK's [National Cyber Security Centre (NCSC)][15] issued a warning this month about ongoing DNS attacks, particularly focusing on DNS hijacking. It cited a number of risks associated with the uptick in DNS hijacking including: - -**Creating malicious DNS records.** A malicious DNS record could be used, for example, to create a phishing website that is present within an organization’s familiar domain. 
This may be used to phish employees or customers. - -**Obtaining SSL certificates.** Domain-validated SSL certificates are issued based on the creation of DNS records; thus an attacker may obtain valid SSL certificates for a domain name, which could be used to create a phishing website intended to look like an authentic website, for example. - -**Transparent proxying.** One serious risk employed recently involves transparently proxying traffic to intercept data. The attacker modifies an organization’s configured domain zone entries (such as “A” or “CNAME” records) to point traffic to their own IP address, which is infrastructure they manage. - -“An organization may lose total control of their domain and often the attackers will change the domain ownership details making it harder to recover,” the NCSC wrote. - -These new threats, as well as other dangers, led the U.S. government to issue a warning earlier this year about DNS attacks on federal agencies.  - -The Department of Homeland Security’s Cybersecurity and Infrastructure Security Agency (CISA) told all federal agencies to bolt down their DNS in the face of a series of global hacking campaigns. - -CISA said in its [Emergency Directive][16] that it was tracking a series of incidents targeting DNS infrastructure. CISA wrote that it “is aware of multiple executive branch agency domains that were impacted by the tampering campaign and has notified the agencies that maintain them.” - -CISA says that attackers have managed to intercept and redirect web and mail traffic and could target other networked services. The agency said the attacks start with compromising user credentials of an account that can make changes to DNS records.  Then the attacker alters DNS records, like Address, Mail Exchanger, or Name Server records, replacing the legitimate address of the services with an address the attacker controls. 
- -These actions let the attacker direct user traffic to their own infrastructure for manipulation or inspection before passing it on to the legitimate service, should they choose. This creates a risk that persists beyond the period of traffic redirection, CISA stated.  - -“Because the attacker can set DNS record values, they can also obtain valid encryption certificates for an organization’s domain names. This allows the redirected traffic to be decrypted, exposing any user-submitted data. Since the certificate is valid for the domain, end users receive no error warnings,” CISA stated. - -### **Get on the DNSSEC bandwagon** - -“Enterprises that are potential targets – in particular those that capture or expose user and enterprise data through their applications – should heed this advisory by the NCSC and should pressure their DNS and registrar vendors to make DNSSEC and other domain security best practices easy to implement and standardized,” said Kris Beevers, co-founder and CEO of DNS security vendor [NS1][17]. “They can easily implement DNSSEC signing and other domain security best practices with technologies in the market today. At the very least, they should work with their vendors and security teams to audit their implementations.” - -DNSSEC was in the news earlier this year when in response to increased DNS attacks, ICANN called for an intensified community effort to install stronger DNS security technology.  - -Specifically, ICANN wants full deployment of the Domain Name System Security Extensions ([DNSSEC][18]) across all unsecured domain names. DNSSEC adds a layer of security on top of DNS. Full deployment of DNSSEC ensures end users are connecting to the actual web site or other service corresponding to a particular domain name, ICANN said. 
“Although this will not solve all the security problems of the Internet, it does protect a critical piece of it – the directory lookup – complementing other technologies such as SSL (https:) that protect the ‘conversation’, and provide a platform for yet-to-be-developed security improvements,” ICANN stated. - -DNSSEC technologies have been around since about 2010 but are not widely deployed, with less than 20% of the world’s DNS registrars having deployed it, according to the regional internet address registry for the Asia-Pacific region ([APNIC][19]). - -DNSSEC adoption has been lagging because it was viewed as optional and can require a tradeoff between security and functionality, said NS1's Beevers. - -### **Traditional DNS threats** - -While DNS hijacking may be the front line attack method, other more traditional threats still exist.  - -The IDC/EfficientIP study found most popular DNS threats have changed compared with last year. Phishing (47%) is now more popular than last year’s favorite, DNS-based malware (39%), followed by DDoS attacks (30%), false positive triggering (26%), and lock-up domain attacks (26%). 
- --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3409719/worst-dns-attacks-and-how-to-mitigate-them.html - -作者:[Michael Cooney][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Michael-Cooney/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2018/08/anonymous_faceless_hooded_mand_in_scary_halloween_mask_finger_to_lips_danger_threat_stealth_attack_hacker_hush_silence_warning_by_max_bender_cc0_via_unsplash_1200x800-100766358-large.jpg -[2]: https://www.fireeye.com/blog/threat-research/2019/01/global-dns-hijacking-campaign-dns-record-manipulation-at-scale.html -[3]: https://www.networkworld.com/article/3273891/hybrid-cloud/dns-in-the-cloud-why-and-why-not.html -[4]: https://www.networkworld.com/article/3322023/internet/dns-over-https-seeks-to-make-internet-use-more-private.html -[5]: https://www.networkworld.com/article/3298160/internet/how-to-protect-your-infrastructure-from-dns-cache-poisoning.html -[6]: https://www.networkworld.com/article/3331606/security/icann-housecleaning-revokes-old-dns-security-key.html -[7]: https://www.efficientip.com/resources/idc-dns-threat-report-2019/ -[8]: https://www.talosintelligence.com/ -[9]: https://blog.talosintelligence.com/2019/07/sea-turtle-keeps-on-swimming.html -[10]: https://pluralsight.pxf.io/c/321564/424552/7490?u=https%3A%2F%2Fwww.pluralsight.com%2Fpaths%2Fcertified-information-systems-security-professional-cisspr -[11]: https://blog.talosintelligence.com/2019/04/seaturtle.html -[12]: https://www.networkworld.com/article/3390666/cisco-dnspionage-attack-adds-new-tools-morphs-tactics.html -[13]: https://blog.talosintelligence.com/2019/04/dnspionage-brings-out-karkoff.html -[14]: 
https://www.icann.org/en/system/files/files/sac-105-en.pdf -[15]: https://www.ncsc.gov.uk/news/ongoing-dns-hijacking-and-mitigation-advice -[16]: https://cyber.dhs.gov/ed/19-01/ -[17]: https://ns1.com/ -[18]: https://www.icann.org/resources/pages/dnssec-qaa-2014-01-29-en -[19]: https://www.apnic.net/ diff --git a/sources/talk/20190724 Data centers may soon recycle heat into electricity.md b/sources/talk/20190724 Data centers may soon recycle heat into electricity.md deleted file mode 100644 index 92298e1a01..0000000000 --- a/sources/talk/20190724 Data centers may soon recycle heat into electricity.md +++ /dev/null @@ -1,67 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Data centers may soon recycle heat into electricity) -[#]: via: (https://www.networkworld.com/article/3410578/data-centers-may-soon-recycle-heat-into-electricity.html) -[#]: author: (Patrick Nelson https://www.networkworld.com/author/Patrick-Nelson/) - -Data centers may soon recycle heat into electricity -====== -Rice University researchers are developing a system that converts waste heat into light and then that light into electricity, which could help data centers reduce computing costs. -![Gordon Mah Ung / IDG][1] - -Waste heat is the scourge of computing. In fact, much of the cost of powering a computer is from creating unwanted heat. That’s because the inefficiencies in electronic circuits, caused by resistance in the materials, generates that heat. The processors, without computing anything, are essentially converting expensively produced electrical energy into waste energy. - -It’s a fundamental problem, and one that hasn’t been going away. But what if you could convert the unwanted heat back into electricity—recycle the heat back into its original energy form? The data center heat, instead of simply disgorging into the atmosphere to be gotten rid of with dubious eco-effects, could actually run more machines. 
Plus, your cooling costs would be taken care of—there’s nothing to cool because you’ve already grabbed the hot air. - -**[ Read also: [How server disaggregation can boost data center efficiency][2] | Get regularly scheduled insights: [Sign up for Network World newsletters][3] ]** - -Scientists at Rice University are trying to make that a reality by developing heat scavenging and conversion solutions. - -Currently, the most efficient way to convert heat into electricity is through the use of traditional turbines. - -Turbines “can give you nearly 50% conversion efficiency,” says Chloe Doiron, a graduate student at Rice University and co-lead on the project, in a [news article][4] on the school’s website. Turbines convert the kinetic energy of moving fluids, like steam or combustion gases, into mechanical energy. The moving steam then shifts blades mounted on a shaft, which turns a generator, thus creating the power. - -Not a bad solution. The problem, though, is “those systems are not easy to implement,” the researchers explain. The issue is that turbines are full of moving parts, and they’re big, noisy, and messy. - -### Thermal emitter better than turbines for converting heat to energy - -A better option would be a solid-state, thermal device that could absorb heat at the source and simply convert it, perhaps straight into attached batteries. - -The researchers say a thermal emitter could absorb heat, jam it into tight, easy-to-capture bandwidth and then emit it as light. Cunningly, they would then simply turn the light into electricity, as we see all the time now in solar systems. - -“Thermal photons are just photons emitted from a hot body,” says Rice University professor Junichiro Kono in the article. “If you look at something hot with an infrared camera, you see it glow. The camera is capturing these thermally excited photons.” Indeed, all heated surfaces, to some extent, send out light as thermal radiation. 
- -The Rice team wants to use a film of aligned carbon nanotubes to do the job. The test system will be structured as an actual solar panel. That’s because solar panels, too, lose energy through heat, so are a good environment in which to work. The concept applies to other inefficient technologies, too. “Anything else that loses energy through heat [would become] far more efficient,” the researchers say. - -Around 20% of industrial energy consumption is unwanted heat, Doiron says. That's a lot of wasted energy. - -### Other heat conversion solutions - -Other heat scavenging devices are making inroads, too. Now-commercially available thermoelectric technology can convert a temperature difference into power, also with no moving parts. They function by exposing a specially made material to heat. [Electrons flow when one part is cold and one is hot][5]. And the University of Utah is working on [silicon for chips that generates electricity][6] as one of two wafers heat up. - -Join the Network World communities on [Facebook][7] and [LinkedIn][8] to comment on topics that are top of mind. 
- --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3410578/data-centers-may-soon-recycle-heat-into-electricity.html - -作者:[Patrick Nelson][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Patrick-Nelson/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2019/07/flir_20190711t191326-100801627-large.jpg -[2]: https://www.networkworld.com/article/3266624/how-server-disaggregation-could-make-cloud-datacenters-more-efficient.html -[3]: https://www.networkworld.com/newsletters/signup.html -[4]: https://news.rice.edu/2019/07/12/rice-device-channels-heat-into-light/ -[5]: https://www.networkworld.com/article/2861438/how-to-convert-waste-data-center-heat-into-electricity.html -[6]: https://unews.utah.edu/beat-the-heat/ -[7]: https://www.facebook.com/NetworkWorld/ -[8]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190724 Reports- As the IoT grows, so do its threats to DNS.md b/sources/talk/20190724 Reports- As the IoT grows, so do its threats to DNS.md deleted file mode 100644 index d9647304b9..0000000000 --- a/sources/talk/20190724 Reports- As the IoT grows, so do its threats to DNS.md +++ /dev/null @@ -1,78 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Reports: As the IoT grows, so do its threats to DNS) -[#]: via: (https://www.networkworld.com/article/3411437/reports-as-the-iot-grows-so-do-its-threats-to-dns.html) -[#]: author: (Michael Cooney https://www.networkworld.com/author/Michael-Cooney/) - -Reports: As the IoT grows, so do its threats to DNS -====== -ICANN and IBM's security researchers separately spell out how the growth of the internet of things will increase 
opportunities for malicious actors to attack the Domain Name System with hyperscale botnets and worm their malware into the cloud. -The internet of things is shaping up to be a more significant threat to the Domain Name System through larger IoT botnets, unintentional adverse effects of IoT-software updates and the continuing development of bot-herding software. - -The Internet Corporation for Assigned Names and Numbers (ICANN) and IBM’s X-Force security researchers have recently issued reports outlining the interplay between DNS and IoT that includes warnings about the pressure IoT botnets will put on the availability of DNS systems. - -**More about DNS:** - - * [DNS in the cloud: Why and why not][1] - * [DNS over HTTPS seeks to make internet use more private][2] - * [How to protect your infrastructure from DNS cache poisoning][3] - * [ICANN housecleaning revokes old DNS security key][4] - - - -ICANN’s Security and Stability Advisory Committee (SSAC) wrote in a [report][5] that “a significant number of IoT devices will likely be IP enabled and will use the DNS to locate the remote services they require to perform their functions. As a result, the DNS will continue to play the same crucial role for the IoT that it has for traditional applications that enable human users to interact with services and content,” ICANN stated. “The role of the DNS might become even more crucial from a security and stability perspective with IoT devices interacting with people’s physical environment.” - -IoT represents both an opportunity and a risk to the DNS, ICANN stated. “It is an opportunity because the DNS provides functions and data that can help make the IoT more secure, stable, and transparent, which is critical given the IoT's interaction with the physical world. 
It is a risk because various measurement studies suggest that IoT devices may stress the DNS, for instance, because of complex DDoS attacks carried out by botnets that grow to hundreds of thousands or in the future millions of infected IoT devices within hours,” ICANN stated. - -Unintentional DDoS attacks - -One risk is that the IoT could place new burdens on the DNS. “For example, a software update for a popular IP-enabled IoT device that causes the device to use the DNS more frequently (e.g., regularly lookup random domain names to check for network availability) could stress the DNS in individual networks when millions of devices automatically install the update at the same time,” ICANN stated. - -While this is a programming error from the perspective of individual devices, it could result in a significant attack vector from the perspective of DNS infrastructure operators. Incidents like this have already occurred on a small scale, but they may occur more frequently in the future due to the growth of heterogeneous IoT devices from manufacturers that equip their IoT devices with controllers that use the DNS, ICANN stated. - -Massively larger botnets, threat to clouds - -The report also suggested that the scale of IoT botnets could grow from hundreds of thousands of devices to millions. The best known IoT botnet is Mirai, responsible for DDoS attacks involving 400,000 to 600,000 devices. The Hajime botnet hovers around 400K infected IoT devices but has not launched any DDoS attacks yet. But as the IoT grows, so will the botnets and as a result larger DDoS attacks. - -Cloud-connected IoT devices could endanger cloud resources. “IoT devices connected to cloud architecture could allow Mirai adversaries to gain access to cloud servers. 
They could infect a server with additional malware dropped by Mirai or expose all IoT devices connected to the server to further compromise,” wrote Charles DeBeck,  a senior cyber threat intelligence strategic analyst with [IBM X-Force Incident Response][6] in a recent report.  - - “As organizations increasingly adopt cloud architecture to scale efficiency and productivity, disruption to a cloud environment could be catastrophic.” - -For enterprises that are rapidly adopting both IoT technology and cloud architecture, insufficient security controls could expose the organization to elevated risk, calling for the security committee to conduct an up-to-date risk assessment, DeBeck stated. - -Attackers continue malware development - -“Since this activity is highly automated, there remains a strong possibility of large-scale infection of IoT devices in the future,” DeBeck stated. “Additionally, threat actors are continuing to expand their targets to include new types of IoT devices and may start looking at industrial IoT devices or connected wearables to increase their footprint and profits.” - -Botnet bad guys are also developing new Mirai variants and IoT botnet malware outside of the Mirai family to target IoT devices, DeBeck stated. 
- -To continue reading this article register now - -[Get Free Access][7] - -[Learn More][8]   Existing Users [Sign In][7] - --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3411437/reports-as-the-iot-grows-so-do-its-threats-to-dns.html - -作者:[Michael Cooney][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Michael-Cooney/ -[b]: https://github.com/lujun9972 -[1]: https://www.networkworld.com/article/3273891/hybrid-cloud/dns-in-the-cloud-why-and-why-not.html -[2]: https://www.networkworld.com/article/3322023/internet/dns-over-https-seeks-to-make-internet-use-more-private.html -[3]: https://www.networkworld.com/article/3298160/internet/how-to-protect-your-infrastructure-from-dns-cache-poisoning.html -[4]: https://www.networkworld.com/article/3331606/security/icann-housecleaning-revokes-old-dns-security-key.html -[5]: https://www.icann.org/en/system/files/files/sac-105-en.pdf -[6]: https://securityintelligence.com/posts/i-cant-believe-mirais-tracking-the-infamous-iot-malware-2/?cm_mmc=OSocial_Twitter-_-Security_Security+Brand+and+Outcomes-_-WW_WW-_-SI+TW+blog&cm_mmca1=000034XK&cm_mmca2=10009814&linkId=70790642 -[7]: javascript:// -[8]: https://www.networkworld.com/learn-about-insider/ diff --git a/sources/talk/20190724 When it comes to the IoT, Wi-Fi has the best security.md b/sources/talk/20190724 When it comes to the IoT, Wi-Fi has the best security.md deleted file mode 100644 index 2b5014dfa8..0000000000 --- a/sources/talk/20190724 When it comes to the IoT, Wi-Fi has the best security.md +++ /dev/null @@ -1,88 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (When it comes to the IoT, Wi-Fi has the best security) -[#]: via: 
(https://www.networkworld.com/article/3410563/when-it-comes-to-the-iot-wi-fi-has-the-best-security.html) -[#]: author: (Fredric Paul https://www.networkworld.com/author/Fredric-Paul/) - -When it comes to the IoT, Wi-Fi has the best security -====== -It’s easy to dismiss good ol’ Wi-Fi’s role in internet of things networking. But Wi-Fi has more security advantages than other IoT networking choices. -![Ralph Gaithe / Soifer / Getty Images][1] - -When it comes to connecting internet of things (IoT) devices, there is a wide variety of networks to choose from, each with its own set of capabilities, advantages and disadvantages, and ideal use cases. Good ol’ Wi-Fi is often seen as a default networking choice, available in many places, but of limited range and not particularly suited for IoT implementations. - -According to [Aerohive Networks][2], however, Wi-Fi is “evolving to help IT address security complexities and challenges associated with IoT devices.” Aerohive sells cloud-managed networking solutions and was [acquired recently by software-defined networking company Extreme Networks for some $272 million][3]. And Aerohive's director of product marketing, Mathew Edwards, told me via email that Wi-Fi brings a number of security advantages compared to other IoT networking choices. - -It’s not a trivial problem. According to Gartner, in just the last three years, [approximately one in five organizations have been subject to an IoT-based attack][4]. And as more and more IoT devices come on line, the attack surface continues to grow quickly. - -**[ Also read: [Extreme targets cloud services, SD-WAN, Wi-Fi 6 with $210M Aerohive grab][3] and [Smart cities offer window into the evolution of enterprise IoT technology][5] ]** - -### What makes Wi-Fi more secure for IoT? - -What exactly are Wi-Fi’s IoT security benefits? Some of it is simply 20 years of technological maturity, Edwards said. 
- -“Extending beyond the physical boundaries of organizations, Wi-Fi has always had to be on the front foot when it comes to securely onboarding and monitoring a range of corporate, guest, and BYOD devices, and is now prepared with the next round of connectivity complexities with IoT,” he said. - -Specifically, Edwards said, “Wi-Fi has evolved … to increase the visibility, security, and troubleshooting of edge devices by combining edge security with centralized cloud intelligence.” - -Just as important, though, new Wi-Fi capabilities from a variety of vendors are designed to help identify and isolate IoT devices to integrate them into the wider network while limiting the potential risks. The goal is to incorporate IoT device awareness and protection mechanisms to prevent breaches and attacks through vulnerable headless devices. Edwards cited Aerohive’s work to “securely onboard IoT devices with its PPSK (private pre-shared key) technology, an authentication and encryption method providing 802.1X-equivalent role-based access, without the equivalent management complexities.” - -**[ [Prepare to become a Certified Information Security Systems Professional with this comprehensive online course from PluralSight. Now offering a 10-day free trial!][6] ]** - -### The IoT is already here—and so is Wi-Fi - -Unfortunately, enterprise IoT security is not always a carefully planned and monitored operation. - -“Much like BYOD,” Edwards said, “many organizations are dealing with IoT without them even knowing it.” On the plus side, even as “IoT devices have infiltrated many networks , ... 
administrators are already leveraging some of the tools to protect against IoT threats without them even realizing it.” - -He noted that customers who have already deployed PPSK to secure guest and BYOD networks can easily extend those capabilities to cover IoT devices such as “smart TVs, projectors, printers, security systems, sensors and more.” - -In addition, Edwards said, “vendors have introduced methods to assign performance and security limits through context-based profiling, which is easily extended to IoT devices once the vendor can utilize signatures to identify an IoT device.” - -Once an IoT device is identified and tagged, Wi-Fi networks can assign it to a particular VLAN, set minimum and maximum data rates, data limits, application access, firewall rules, and other protections. That way, Edwards said, “if the device is lost, stolen, or launches a DDoS attack, the Wi-Fi network can kick it off, restrict it, or quarantine it.” - -### Wi-Fi still isn’t for every IoT deployment - -All that hardly turns Wi-Fi into the perfect IoT network. Relatively high costs and limited range mean it won’t find a place in many large-scale IoT implementations. But Edwards says Wi-Fi’s mature identification and control systems can help enterprises incorporate new IoT-based systems and sensors into their networks with more confidence. - -**More about 802.11ax (Wi-Fi 6)** - - * [Why 802.11ax is the next big thing in wireless][7] - * [FAQ: 802.11ax Wi-Fi][8] - * [Wi-Fi 6 (802.11ax) is coming to a router near you][9] - * [Wi-Fi 6 with OFDMA opens a world of new wireless possibilities][10] - * [802.11ax preview: Access points and routers that support Wi-Fi 6 are on tap][11] - - - -Join the Network World communities on [Facebook][12] and [LinkedIn][13] to comment on topics that are top of mind. 
- --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3410563/when-it-comes-to-the-iot-wi-fi-has-the-best-security.html - -作者:[Fredric Paul][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Fredric-Paul/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2019/03/hack-your-own-wi-fi_neon-wi-fi_keyboard_hacker-100791531-large.jpg -[2]: http://www.aerohive.com/ -[3]: https://www.networkworld.com/article/3405440/extreme-targets-cloud-services-sd-wan-wifi-6-with-210m-aerohive-grab.html -[4]: https://www.gartner.com/en/newsroom/press-releases/2018-03-21-gartner-says-worldwide-iot-security-spending-will-reach-1-point-5-billion-in-2018. -[5]: https://www.networkworld.com/article/3409787/smart-cities-offer-window-into-the-evolution-of-enterprise-iot-technology.html -[6]: https://pluralsight.pxf.io/c/321564/424552/7490?u=https%3A%2F%2Fwww.pluralsight.com%2Fpaths%2Fcertified-information-systems-security-professional-cisspr -[7]: https://www.networkworld.com/article/3215907/mobile-wireless/why-80211ax-is-the-next-big-thing-in-wi-fi.html -[8]: https://%20https//www.networkworld.com/article/3048196/mobile-wireless/faq-802-11ax-wi-fi.html -[9]: https://www.networkworld.com/article/3311921/mobile-wireless/wi-fi-6-is-coming-to-a-router-near-you.html -[10]: https://www.networkworld.com/article/3332018/wi-fi/wi-fi-6-with-ofdma-opens-a-world-of-new-wireless-possibilities.html -[11]: https://www.networkworld.com/article/3309439/mobile-wireless/80211ax-preview-access-points-and-routers-that-support-the-wi-fi-6-protocol-on-tap.html -[12]: https://www.facebook.com/NetworkWorld/ -[13]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190725 IoT-s role in expanding drone use.md 
b/sources/talk/20190725 IoT-s role in expanding drone use.md deleted file mode 100644 index a9281dcf40..0000000000 --- a/sources/talk/20190725 IoT-s role in expanding drone use.md +++ /dev/null @@ -1,61 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (IoT’s role in expanding drone use) -[#]: via: (https://www.networkworld.com/article/3410564/iots-role-in-expanding-drone-use.html) -[#]: author: (Fredric Paul https://www.networkworld.com/author/Fredric-Paul/) - -IoT’s role in expanding drone use -====== -Collision avoidance technology that uses internet of things (IoT) connectivity, AI, machine learning, and computer vision could be the key to expanding drone applications. -![Thinkstock][1] - -As faithful readers of [TechWatch][2] (love you, Mom) may know, the rollout of many companies’ ambitious drone delivery services has not gone as quickly as promised. Despite recent signs of progress in Australia and the United States—not to mention [clever ideas for burger deliveries to cars stuck in traffic][3]—drone delivery remains a long way from becoming a viable option in the vast majority of use cases. And the problem affects many areas of drone usage, not just the heavily hyped drone delivery applications. - -According to [Grace McKenzie][4], director of operations and controller at [Iris Automation][5], one key restriction to economically viable drone deliveries is that the “skies are not safe enough for many drone use cases.” - -Speaking at a recent [SF New Tech “Internet of Everything” event in San Francisco][6], McKenzie said fear of collisions with manned aircraft is the big reason why the Federal Aviation Association (FAA) and international regulators typically prohibit drones from flying beyond the line of the sight of the remote pilot. 
Obviously, she added, that restriction greatly constrains where and how drones can make deliveries and is working to keep the market from growing test and pilot programs into full-scale commercial adoption. - -**[ Read also: [No, drone delivery still isn’t ready for prime time][7] | Get regularly scheduled insights: [Sign up for Network World newsletters][8] ]** - -### Detect and avoid technology is critical - -Iris Automation, not surprisingly, is in the business of creating workable collision avoidance systems for drones in an attempt to solve this issue. Variously called “detect and avoid” or “sense and avoid” technologies, these automated solutions are required for “beyond visual line of sight” (BVLOS) drone operations. There are multiple issues in play. - -As explained on Iris’ website, “Drone pilots are skilled aviators, but even they struggle to see and avoid obstacles and aircraft when operating drones at extended range [and] no pilot on board means low situational awareness. This risk is huge, and the potential conflicts can be extremely dangerous.” - -As “a software company with a hardware problem,” McKenzie said, Iris’ systems use artificial intelligence (AI), machine learning, computer vision, and IoT connectivity to identify and focus on the “small group of pixels that could be a risk.” Working together, those technologies are creating an “exponential curve” in detect-and-avoid technology improvements, she added. The result? Drones that “see better than a human pilot,” she claimed. - -### Bigger market and new use cases for drones - -It’s hardly an academic issue. “Not being able to show adequate mitigation of operational risk means regulators are forced to limit drone uses and applications to closed environments,” the company says. - -Solving this problem would open up a wide range of industrial and commercial applications for drones. 
Far beyond delivering burritos, McKenzie said that with confidence in drone “sense and avoid” capabilities, drones could be used for all kinds of aerial data gathering, from inspecting hydro-electric dams, power lines, and railways to surveying crops to fighting forest fires and conducting search-and-rescue operations. - -Join the Network World communities on [Facebook][9] and [LinkedIn][10] to comment on topics that are top of mind. - --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3410564/iots-role-in-expanding-drone-use.html - -作者:[Fredric Paul][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Fredric-Paul/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2018/01/drone_delivery_package_future-100745961-large.jpg -[2]: https://www.networkworld.com/blog/techwatch/ -[3]: https://www.networkworld.com/article/3396188/the-traffic-jam-whopper-project-may-be-the-coolestdumbest-iot-idea-ever.html -[4]: https://www.linkedin.com/in/withgracetoo/ -[5]: https://www.irisonboard.com/ -[6]: https://sfnewtech.com/event/iot/ -[7]: https://www.networkworld.com/article/3390677/drone-delivery-not-ready-for-prime-time.html -[8]: https://www.networkworld.com/newsletters/signup.html -[9]: https://www.facebook.com/NetworkWorld/ -[10]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190725 Report- Smart-city IoT isn-t smart enough yet.md b/sources/talk/20190725 Report- Smart-city IoT isn-t smart enough yet.md deleted file mode 100644 index da6d4ee57a..0000000000 --- a/sources/talk/20190725 Report- Smart-city IoT isn-t smart enough yet.md +++ /dev/null @@ -1,73 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: 
( ) -[#]: subject: (Report: Smart-city IoT isn’t smart enough yet) -[#]: via: (https://www.networkworld.com/article/3411561/report-smart-city-iot-isnt-smart-enough-yet.html) -[#]: author: (Jon Gold https://www.networkworld.com/author/Jon-Gold/) - -Report: Smart-city IoT isn’t smart enough yet -====== -A report from Forrester Research details vulnerabilities affecting smart-city internet of things (IoT) infrastructure and offers some methods of mitigation. -![Aleksandr Durnov / Getty Images][1] - -Security arrangements for smart-city IoT technology around the world are in an alarming state of disrepair, according to a report from Forrester Research that argues serious changes are needed in order to avoid widespread compromises. - -Much of what’s wrong has to do with a lack of understanding on the part of the people in charge of those systems and a failure to follow well-known security best practices, like centralized management, network visibility and limiting attack-surfaces. - -**More on IoT:** - - * [What is the IoT? How the internet of things works][2] - * [What is edge computing and how it’s changing the network][3] - * [Most powerful Internet of Things companies][4] - * [10 Hot IoT startups to watch][5] - * [The 6 ways to make money in IoT][6] - * [What is digital twin technology? [and why it matters]][7] - * [Blockchain, service-centric networking key to IoT success][8] - * [Getting grounded in IoT networking and security][9] - * [Building IoT-ready networks must become a priority][10] - * [What is the Industrial IoT? [And why the stakes are so high]][11] - - - -Those all pose stiff challenges, according to “Making Smart Cities Safe And Secure,” the Forrester report by Merritt Maxim and Salvatore Schiano. The attack surface for a smart city is, by default, enormous, given the volume of Internet-connected hardware involved. 
Some device, somewhere, is likely to be vulnerable, and with the devices geographically spread out it’s difficult to secure all types of access to them. - -Worse still, some legacy systems can be downright impossible to manage and update in a safe way. Older technology often contains no provision for live updates, and its vulnerabilities can be severe, according to the report. Physical access to some types of devices also remains a serious challenge. The report gives the example of wastewater treatment plants in remote locations in Australia, which were sabotaged by a contractor who accessed the SCADA systems directly. - -In addition to the risk of compromised control systems, the generalized insecurity of smart city IoT makes the vast amounts of data that it generates highly suspect. Improperly configured devices could collect more information than they’re supposed to, including personally identifiable information, which could violate privacy regulations. Also, the data collected is analyzed to glean useful information about such things as parking patterns, water flow and electricity use, and inaccurate or compromised information can badly undercut the value of smart city technology to a given user. - -“Security teams are just gaining maturity in the IT environment with the necessity for data inventory, classification, and flow mapping, together with thorough risk and privacy impact assessments, to drive appropriate protection,” the report says. “In OT environments, they’re even further behind.” - -Yet, despite the fact that IoT planning and implementation doubled between 2017 and 2018, according to Forrester’s data, comparatively little work has been done on the security front. The report lists 13 cyberattacks on smart-city technology between 2014 and 2019 that had serious consequences, including widespread electricity outages, ransomware infections on hospital computers and emergency-service interruptions. 
- -Still, there are ways forward, according to Forrester. Careful log monitoring can keep administrators abreast of what’s normal and what’s suspicious on their networks. Asset mapping and centralizing control-plane functionality should make it much more difficult for bad actors to insert malicious devices into a smart-city network or take control of less-secure items. And intelligent alerting – the kind that provides contextual information, differentiating between “this system just got rained on and has poor connectivity” and “someone is tampering with this system” – should help cities be more responsive to security threats when they arise. - -Join the Network World communities on [Facebook][12] and [LinkedIn][13] to comment on topics that are top of mind. - --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3411561/report-smart-city-iot-isnt-smart-enough-yet.html - -作者:[Jon Gold][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Jon-Gold/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2019/02/smart_city_smart_cities_iot_internet_of_things_by_aleksandr_durnov_gettyimages-971455374_2400x1600-100788363-large.jpg -[2]: https://www.networkworld.com/article/3207535/internet-of-things/what-is-the-iot-how-the-internet-of-things-works.html -[3]: https://www.networkworld.com/article/3224893/internet-of-things/what-is-edge-computing-and-how-it-s-changing-the-network.html -[4]: https://www.networkworld.com/article/2287045/internet-of-things/wireless-153629-10-most-powerful-internet-of-things-companies.html -[5]: https://www.networkworld.com/article/3270961/internet-of-things/10-hot-iot-startups-to-watch.html -[6]: 
https://www.networkworld.com/article/3279346/internet-of-things/the-6-ways-to-make-money-in-iot.html -[7]: https://www.networkworld.com/article/3280225/internet-of-things/what-is-digital-twin-technology-and-why-it-matters.html -[8]: https://www.networkworld.com/article/3276313/internet-of-things/blockchain-service-centric-networking-key-to-iot-success.html -[9]: https://www.networkworld.com/article/3269736/internet-of-things/getting-grounded-in-iot-networking-and-security.html -[10]: https://www.networkworld.com/article/3276304/internet-of-things/building-iot-ready-networks-must-become-a-priority.html -[11]: https://www.networkworld.com/article/3243928/internet-of-things/what-is-the-industrial-iot-and-why-the-stakes-are-so-high.html -[12]: https://www.facebook.com/NetworkWorld/ -[13]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190725 Storage management a weak area for most enterprises.md b/sources/talk/20190725 Storage management a weak area for most enterprises.md deleted file mode 100644 index 859c1caa32..0000000000 --- a/sources/talk/20190725 Storage management a weak area for most enterprises.md +++ /dev/null @@ -1,82 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Storage management a weak area for most enterprises) -[#]: via: (https://www.networkworld.com/article/3411400/storage-management-a-weak-area-for-most-enterprises.html) -[#]: author: (Andy Patrizio https://www.networkworld.com/author/Andy-Patrizio/) - -Storage management a weak area for most enterprises -====== -Survey finds companies are adopting technology for such things as AI, machine learning, edge computing and IoT, but still use legacy storage that can't handle those workloads. -![Miakievy / Getty Images][1] - -Stop me if you’ve heard this before: Companies are racing to a new technological paradigm but are using yesterday’s tech to do it. - -I know. Shocking. 
- -A survey of more than 300 storage professionals by storage vendor NGD Systems found only 11% of the companies they talked to would give themselves an “A” grade for their compute and storage capabilities. - -Why? The chief reason given is that while enterprises are rapidly deploying technologies for edge networks, real-time analytics, machine learning, and internet of things (IoT) projects, they are still using legacy storage solutions that are not designed for such data-intensive workloads. More than half — 54% — said their processing of edge applications is a bottleneck, and they want faster and more intelligent storage solutions. - -**[ Read also: [What is NVMe, and how is it changing enterprise storage][2] ]** - -### NVMe SSD use increases, but doesn't solve all needs - -It’s not all bad news. The study, entitled ["The State of Storage and Edge Computing"][3] and conducted by Dimensional Research, found 60% of storage professionals are using NVMe SSDs to speed up the processing of large data sets being generated at the edge. - -However, this has not solved their needs. As artificial intelligence (AI) and other data-intensive deployments increase, data needs to be moved over increasingly longer distances, which causes network bottlenecks and delays analytic results. And edge computing systems tend to have a smaller footprint than a traditional data center, so they are performance constrained. - -The solution is to process the data where it is ingested, in this case, the edge device. Separate the wheat from the chafe and only send relevant data upstream to a data center to be processed. This is called computational storage, processing data where it is stored rather than moving it around. - -According to the survey, 89% of respondents said they expect real value from computational storage. Conveniently, NGD is a vendor of computational storage systems. So, yes, this is a self-serving finding. This happens a lot. 
That doesn’t mean they don’t have a valid point, though. Processing the data where it lies is the point of edge computing. - -Among the survey’s findings: - - * 55% use edge computing - * 71% use edge computing for real-time analytics - * 61% said the cost of traditional storage solutions continues to plague their applications - * 57% said faster access to storage would improve their compute abilities - - - -The study also found that [NVMe][2] is being adopted very quickly but is being hampered by price. - - * 86% expect storage’s future to rely on NVMe SSDs - * 60% use NVMe SSDs in their work environments - * 63% said NVMe SSDs helped with superior storage speed - * 67% reported budget and cost as issues preventing the use of NVMe SSDs - - - -That last finding is why so many enterprises are hampered in their work. For whatever reason they are using old storage systems rather than new NVMe systems, and it hurts them. - -### GPUs won't improve workload performance - -One interesting finding: 70% of respondents said they are using GPUs to help improve workload performance, but NGD said those are no good. - -“We were not surprised to find that while more than half of respondents are actively using edge computing, more than 70% are using legacy GPUs, which will not reduce the network bandwidth, power and footprint necessary to analyze mass data-sets in real time,” said Nader Salessi, CEO and founder of NGD Systems, in a statement. - -That’s because GPUs lend themselves well to repetitive tasks and parallel processing jobs, while computational storage is very much a serial processing job, with the task constantly changing. So while some processing jobs will benefit from a GPU, a good number will not and the GPU is essentially wasted. - -Join the Network World communities on [Facebook][4] and [LinkedIn][5] to comment on topics that are top of mind. 
- --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3411400/storage-management-a-weak-area-for-most-enterprises.html - -作者:[Andy Patrizio][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Andy-Patrizio/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2019/02/edge_computing_by_miakievy_gettyimages-957694592_2400x1600-100788315-large.jpg -[2]: https://www.networkworld.com/article/3280991/what-is-nvme-and-how-is-it-changing-enterprise-storage.html -[3]: https://ngd.dnastaging.net/brief/NGD_Systems_Storage_Edge_Computing_Survey_Report -[4]: https://www.facebook.com/NetworkWorld/ -[5]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190726 NVMe over Fabrics enterprise storage spec enters final review process.md b/sources/talk/20190726 NVMe over Fabrics enterprise storage spec enters final review process.md deleted file mode 100644 index f14dcd7d67..0000000000 --- a/sources/talk/20190726 NVMe over Fabrics enterprise storage spec enters final review process.md +++ /dev/null @@ -1,71 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (NVMe over Fabrics enterprise storage spec enters final review process) -[#]: via: (https://www.networkworld.com/article/3411958/nvme-over-fabrics-enterprise-storage-spec-enters-final-review-process.html) -[#]: author: (Andy Patrizio https://www.networkworld.com/author/Andy-Patrizio/) - -NVMe over Fabrics enterprise storage spec enters final review process -====== -The NVMe over Fabric (NVMe-oF) architecture is closer to becoming a formal specification. It's expected improve storage network fabric communications and network performance. 
-![Gremlin / Getty Images][1] - -NVM Express Inc., the developer of the [NVMe][2] spec for enterprise SSDs, announced that its NVMe-oF architecture has entered a final 45-day review, an important step toward release of a formal specification for enterprise SSD makers. - -NVMe-oF stands for [NVMe over Fabrics][3], a mechanism to transfer data between a host computer and a target SSD or system over a network, such as Ethernet, Fibre Channel (FC), or InfiniBand. NVM Express first released the 1.0 spec of NVMe-oF in 2016, so this is long overdue. - -**[ Read also: [NVMe over Fabrics creates data-center storage disruption][3] ]** - -NVMe has become an important advance in enterprise storage because it allows for intra-network data sharing. Before, when PCI Express-based SSDs first started being used in servers, they could not easily share data with another physical server. The SSD was basically for the machine it was in, and moving data around was difficult. - -With NVMe over Fabrics, it’s possible for one machine to directly reach out to another for data and have it transmitted over a variety of high-speed fabrics rather than just Ethernet. - -### How NVMe-oF 1.1 improves storage network fabric communication - -The NVMe-oF 1.1 architecture is designed to improve storage network fabric communications in several ways: - - * Adds TCP transport supports NVMe-oF on current data center TCP/IP network infrastructure. - * Asynchronous discovery events inform hosts of addition or removal of target ports in a fabric-independent manner. - * Fabric I/O Queue Disconnect enables finer-grain I/O resource management. - * End-to-end (command to response) flow control improves concurrency. - - - -### New enterprise features for NVMe 1.4 - -The organization also announced the release of the NVMe 1.4 base specification with new “enterprise features” described as a further maturation of the protocol. 
The specification provides important benefits, such as improved quality of service (QoS), faster performance, improvements for high-availability deployments, and scalability optimizations for data centers. - -Among the new features: - - * Rebuild Assist simplifies data recovery and migration scenarios. - * Persistent Event Log enables robust drive history for issue triage and debug at scale. - * NVM Sets and IO Determinism allow for better performance, isolation, and QoS. - * Multipathing enhancements or Asymmetric Namespace Access (ANA) enable optimal and redundant paths to namespaces for high availability and full multi-controller scalability. - * Host Memory Buffer feature reduces latency and SSD design complexity, benefiting client SSDs. - - - -The upgraded NVMe 1.4 base specification and the pending over-fabric spec will be demonstrated at the Flash Memory Summit August 6-8, 2019 in Santa Clara, California. - -Join the Network World communities on [Facebook][4] and [LinkedIn][5] to comment on topics that are top of mind. 
- --------------------------------------------------------------------------------- - -via: https://www.networkworld.com/article/3411958/nvme-over-fabrics-enterprise-storage-spec-enters-final-review-process.html - -作者:[Andy Patrizio][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://www.networkworld.com/author/Andy-Patrizio/ -[b]: https://github.com/lujun9972 -[1]: https://images.idgesg.net/images/article/2019/02/big_data_storage_businessman_walks_through_futuristic_data_center_by_gremlin_gettyimages-1098116540_2400x1600-100788347-large.jpg -[2]: https://www.networkworld.com/article/3280991/what-is-nvme-and-how-is-it-changing-enterprise-storage.html -[3]: https://www.networkworld.com/article/3394296/nvme-over-fabrics-creates-data-center-storage-disruption.html -[4]: https://www.facebook.com/NetworkWorld/ -[5]: https://www.linkedin.com/company/network-world diff --git a/sources/talk/20190729 Do you prefer a live demo to be perfect or broken.md b/sources/talk/20190729 Do you prefer a live demo to be perfect or broken.md deleted file mode 100644 index b4b76aadd6..0000000000 --- a/sources/talk/20190729 Do you prefer a live demo to be perfect or broken.md +++ /dev/null @@ -1,82 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Do you prefer a live demo to be perfect or broken?) -[#]: via: (https://opensource.com/article/19/7/live-demo-perfect-or-broken) -[#]: author: (Lauren Maffeo https://opensource.com/users/lmaffeohttps://opensource.com/users/don-watkinshttps://opensource.com/users/jamesf) - -Do you prefer a live demo to be perfect or broken? -====== -Do you learn more from flawless demos or ones the presenter de-bugs in -real-time? Let us know by answering our poll. 
-![video editing dashboard][1] - -At [DevFest DC][2] in June, [Sara Robinson][3], developer advocate at Google Cloud, gave the most seamless live demo I've ever witnessed. - -Sara live-coded a machine model from scratch using TensorFlow and Keras. Then she trained the model live, deployed it to Google's Cloud AI platform, and used the deployed model to make predictions. - -With the exception of perhaps one small hiccup, the whole thing went smoothly, and I learned a lot as an audience member. - -At that evening's reception, I congratulated Sara on the live demo's success and told her I've never seen a live demo go so well. It turns out that this subject was already on her mind; Sara asked this question on Twitter less than two hours before her live demo: - -> Do you prefer watching a live demo where everything works perfectly or one that breaks and the presenter has to de-bug? -> -> — Sara Robinson (@SRobTweets) [June 14, 2019][4] - -Contrary to my preference for flawless demos, two-thirds of Sara's followers prefer to watch de-bugging. The replies to her poll were equally enlightening: - -> I prefer ones that break once or twice, just so you know it's real. "Break" can be something small like a typo or skipping a step. -> -> — Seth Vargo (@sethvargo) [June 14, 2019][5] - -> Broken demos which are fixed in real time seem to get a better reaction from the audience. This was our experience with the All-Demo Super Session at NEXT SF. Audible gasps followed by applause from the audience when the broken demo was fixed in real-time 🤓 -> -> — Jamie Kinney (@jamiekinney) [June 14, 2019][6] - -This made me reconsider my preference for perfection. When I attend live demos at events, I'm looking for tools that I'm unfamiliar with. I want to learn the basics of those tools, then see real-world applications. I don't expect magic, but I do want to see how the tools intend to work so I can gain and retain some knowledge. - -I've gone to several live demos that break. 
In my experience, this has caught most presenters off-guard; they seemed unfamiliar with the bugs at hand and, in one case, the error derailed the rest of the presentation. In short, it was like this: - -> Hmm, at least when the live demo fails you know it's not a video 😁 -> But I don't like when the presenter start to struggle, when everything becomes silent, it becomes so awkward (especially when I'm the one presenting) -> -> — Sylvain Nouts Ⓥ (@SylvainNouts) [June 14, 2019][7] - -Reading the replies to Sara's thread made me wonder what I'm really after when attending live demos. Is "perfection" what I seek? Or is it presenters who are more skilled at de-bugging in real-time? Upon reflection, I suspect that it's the latter. - -After all, "perfect" code is a lofty (if impossible) concept. Mistakes will happen, and I don't expect them not to. But I _do_ expect conference presenters to know their tools well enough that when things go sideways during live demos, they won't get so flustered that they can't keep going. - -Overall, this reply to Sara resonates with me the most. I attend live demos as a new coder with the goal to learn, and those that veer too far off-course aren't as effective for me: - -> I don’t necessarily prefer a broken demo, but I do think they show a more realistic view. -> That said, when you are newer to coding if the error takes things too far off the rails it can make it challenging to understand the original concept. -> -> — April Bowler (@A_Bowler2) [June 14, 2019][8] - -I don't expect everyone to attend live demos with the same goals and perspective as me. That's why we want to learn what the open source community thinks. - -_Do you prefer for live demos to be perfect? Or do you gain more from watching presenters de-bug in real-time? Do you attend live demos primarily to learn or for other reasons? 
Let us know by taking our poll or leaving a comment below._ - --------------------------------------------------------------------------------- - -via: https://opensource.com/article/19/7/live-demo-perfect-or-broken - -作者:[Lauren Maffeo][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/译者ID) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://opensource.com/users/lmaffeohttps://opensource.com/users/don-watkinshttps://opensource.com/users/jamesf -[b]: https://github.com/lujun9972 -[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/video_editing_folder_music_wave_play.png?itok=-J9rs-My (video editing dashboard) -[2]: https://www.devfestdc.org/ -[3]: https://twitter.com/SRobTweets -[4]: https://twitter.com/SRobTweets/status/1139619990687162368?ref_src=twsrc%5Etfw -[5]: https://twitter.com/sethvargo/status/1139620990546145281?ref_src=twsrc%5Etfw -[6]: https://twitter.com/jamiekinney/status/1139636109585989632?ref_src=twsrc%5Etfw -[7]: https://twitter.com/SylvainNouts/status/1139637154731237376?ref_src=twsrc%5Etfw -[8]: https://twitter.com/A_Bowler2/status/1139648492953976832?ref_src=twsrc%5Etfw diff --git a/sources/talk/20190729 I Used The Web For A Day On A 50 MB Budget - Smashing Magazine.md b/sources/talk/20190729 I Used The Web For A Day On A 50 MB Budget - Smashing Magazine.md deleted file mode 100644 index b0a7f4a7e0..0000000000 --- a/sources/talk/20190729 I Used The Web For A Day On A 50 MB Budget - Smashing Magazine.md +++ /dev/null @@ -1,538 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (I Used The Web For A Day On A 50 MB Budget — Smashing Magazine) -[#]: via: (https://www.smashingmagazine.com/2019/07/web-on-50mb-budget/) -[#]: author: (Chris Ashton https://www.smashingmagazine.com/author/chrisbashton) - -I Used The Web For A Day On A 50 MB 
Budget -====== - -Data can be prohibitively expensive, especially in developing countries. Chris Ashton puts himself in the shoes of someone on a tight data budget and offers practical tips for reducing our websites’ data footprint. - -This article is part of a series in which I attempt to use the web under various constraints, representing a given demographic of user. I hope to raise the profile of difficulties faced by real people, which are avoidable if we design and develop in a way that is sympathetic to their needs. - -Last time, I [navigated the web for a day using Internet Explorer 8][7]. This time, I browsed the web for a day on a 50 MB budget. - -### Why 50 MB? - -Many of us are lucky enough to be on mobile plans which allow several gigabytes of data transfer per month. Failing that, we are usually able to connect to home or public WiFi networks that are on fast broadband connections and have effectively unlimited data. - -But there are parts of the world where mobile data is prohibitively expensive, and where there is little or no broadband infrastructure. - -> People often buy data packages of just tens of megabytes at a time, making a gigabyte a relatively large and therefore expensive amount of data to buy. -> — Dan Howdle, consumer telecoms analyst at Cable.co.uk - -Just how expensive are we talking? - -#### The Cost Of Mobile Data - -A 2018 [study by cable.co.uk][8] found that Zimbabwe was the most expensive country in the world for mobile data, where 1 GB cost an average of $75.20, ranging from $12.50 to $138.46. The enormous range in price is due to smaller amounts of data being very expensive, getting proportionally cheaper the bigger the data plan you commit to. You can read the [study methodology][9] for more information. - -Zimbabwe is by no means a one-off. Equatorial Guinea, Saint Helena and the Falkland Islands are next in line, with 1 GB of data costing $65.83, $55.47 and $47.39 respectively. 
These countries generally have a combination of poor technical infrastructure and low adoption, meaning data is both costly to deliver and doesn’t have the economy of scale to drive costs down. - -Data is expensive in parts of Europe too. A gigabyte of data in Greece will set you back $32.71; in Switzerland, $20.22. For comparison, the same amount of data costs $6.66 in the UK, or $12.37 in the USA. On the other end of the scale, India is the cheapest place in the world for data, at an average cost of $0.26. Kyrgyzstan, Kazakhstan and Ukraine follow at $0.27, $0.49 and $0.51 per GB respectively. - -The speed of mobile networks, too, varies considerably between countries. Perhaps surprisingly, [users experience faster speeds over a mobile network than WiFi][10] in at least 30 countries worldwide, including Australia and France. South Korea has the [fastest mobile download speed][11], averaging 52.4 Mbps, but Iraq has the slowest, averaging 1.6 Mbps download and 0.7 Mbps upload. The USA ranks 40th in the world for mobile download speeds, at around 34 Mbps, and is [at risk of falling further behind][12] as the world moves towards 5G. - -As for mobile network connection type, 84.7% of user connections in the UK are on 4G, compared to 93% in the USA, and 97.5% in South Korea. This compares with less than 50% in Uzbekistan and less than 60% in Algeria, Ecuador, Nepal and Iraq. - -#### The Cost Of Broadband Data - -Meanwhile, a [study of the cost of broadband in 2018][13] shows that a broadband connection in Niger costs $263 ‘per megabit per month’. This metric is a little difficult to comprehend, so here’s an example: if the average cost of broadband packages in a country is $22, and the average download speed offered by the packages is 10 Mbps, then the cost ‘per megabit per month’ would be $2.20. - -It’s an interesting metric, and one that acknowledges that broadband speed is as important a factor as the data cap. 
A cost of $263 suggests a combination of extremely slow and extremely expensive broadband. For reference, the metric is $1.19 in the UK and $1.26 in the USA. - -What’s perhaps easier to comprehend is the average cost of a broadband package. Note that this study was looking for the cheapest broadband packages on offer, ignoring whether or not these packages had a data cap, so provides a useful ballpark figure rather than the cost of data per se. - -On package cost alone, Mauritania has the most expensive broadband in the world, at an average of $768.16 (a range of $307.26 to $1,368.72). This enormous cost includes building physical lines to the property, since few already exist in Mauritania. At 0.7 Mbps, Mauritania also has one of the slowest broadband networks in the world. - -[Taiwan has the fastest broadband in the world][14], at a mean speed of 85 Mbps. Yemen has the slowest, at 0.38 Mbps. But even countries with good established broadband infrastructure have so-called ‘not-spots’. The United Kingdom is ranked 34th out of 207 countries for broadband speed, but in July 2019 there was [still a school in the UK without broadband][15]. - -The average cost of a broadband package in the UK is $39.58, and in the USA is $67.69. The cheapest average in the world is Ukraine’s, at just $5, although the cheapest broadband deal of them all was found in Kyrgystan ($1.27 — against the country average of $108.22). - -Zimbabwe was the most costly country for mobile data, and the statistics aren’t much better for its broadband, with an average cost of $128.71 and a ‘per megabit per month’ cost of $6.89. - -#### Absolute Cost vs Cost In Real Terms - -All of the costs outlined so far are the absolute costs in USD, based on the exchange rates at the time of the study. These costs have [not been accounted for cost of living][16], meaning that for many countries the cost is actually far higher in real terms. 
- -I’m going to limit my browsing today to 50 MB, which in Zimbabwe would cost around $3.67 on a mobile data tariff. That may not sound like much, but teachers in Zimbabwe were striking this year because their [salaries had fallen to just $2.50 a day][17]. - -For comparison, $3.67 is around half the [$7.25 minimum wage in the USA][18]. As a Zimbabwean, I’d have to work for around a day and a half to earn the money to buy this 50MB data, compared to just half an hour in the USA. It’s not easy to compare cost of living between countries, but on wages alone the $3.67 cost of 50 MB of data in Zimbabwe would feel like $52 to an American on minimum wage. - -### Setting Up The Experiment - -I launched Chrome and opened the dev tools, where I throttled the network to a slow 3G connection. I wanted to simulate a slow connection like those experienced by users in Uzbekistan, to see what kind of experience websites would give me. I also throttled my CPU to simulate being on a lower end device. - -[![][19]][20]I opted to throttle my network to Slow 3G and my CPU to 6x slowdown. ([Large preview][20]) - -I installed [ModHeader][21] and set the [‘Save-Data’ header][22] to let websites know I want to minimise my data usage. This is also the header set by Chrome for Android’s ‘Lite mode’, which I’ll cover in more detail later. - -I downloaded [TripMode][23]; an application for Mac which gives you control over which apps on your Mac can access the internet. Any other application’s internet access is automatically blocked. - -You can enable/disable individual apps from connecting to the internet with TripMode. I enabled Chrome. ([Large preview][24]) - -How far do I predict my 50 MB budget will take me? With the [average weight of a web page being almost 1.7 MB][25], that suggests I’ve got around 29 pages in my budget, although probably a few more than that if I’m able to stay on the same sites and leverage browser caching. 
- -Throughout the experiment I will suggest performance tips to speed up the [first contentful paint][26] and perceived loading time of the page. Some of these tips may not affect the amount of data transferred directly, but do generally involve deferring the download of less important resources, which on slow connections may mean the resources are never downloaded and data is saved. - -### The Experiment - -Without any further ado, I loaded google.com, using 402 KB of my budget and spending $0.03 (around 1% of my Zimbabwe budget). - -[![402 KB transferred, 1.1 MB resources, 24 requests][27]][28]402 KB transferred, 1.1 MB resources, 24 requests. ([Large preview][28]) - -All in all, not a bad page size, but I wondered where those 24 network requests were coming from and whether or not the page could be made any lighter. - -#### Google Homepage — DOM - -[![][29]][30]Chrome devtools screenshot of the DOM, where I’ve expanded one inline `style` tag. ([Large preview][30]) - -Looking at the page markup, there are no external stylesheets — all of the CSS is inline. - -##### Performance Tip #1: Inline Critical CSS - -This is good for performance as it saves the browser having to make an additional network request in order to fetch an external stylesheet, so the styles can be parsed and applied immediately for the first contentful paint. There’s a trade-off to be made here, as external stylesheets can be cached but inline ones cannot (unless you [get clever with JavaScript][31]). - -The general advice is for your [critical styles][32] (anything [above-the-fold][33]) to be inline, and for the rest of your styling to be external and loaded asynchronously. Asynchronous loading of CSS can be achieved in [one remarkably clever line of HTML][34]: - -``` - -``` - -The devtools show a prettified version of the DOM. If you want to see what was actually downloaded to the browser, switch to the Sources tab and find the document. 
- -[![A wall of minified code.][35]][36]Switching to Sources and finding the index shows the ‘raw’ HTML that was delivered to the browser. What a mess! ([Large preview][36]) - -You can see there is a LOT of inline JavaScript here. It’s worth noting that it has been uglified rather than merely minified. - -##### Performance Tip #2: Minify And Uglify Your Assets - -Minification removes unnecessary spaces and characters, but uglification actually ‘mangles’ the code to be shorter. The tell-tale sign is that the code contains short, machine-generated variable names rather than untouched source code. This is good as it means the script is smaller and quicker to download. - -Even so, inline scripts look to be roughly 120 KB of the 210 KB page resource (about half the 60 KB gzipped size). In addition, there are five external JavaScript files amounting to 291 KB of the 402 KB downloaded: - -[![Network tab of DevTools showing the external javascript files][37]][38]Five external JavaScript files in the Network tab of the devtools. ([Large preview][38]) - -This means that JavaScript accounts for about 80 percent of the overall page weight. - -This isn’t useless JavaScript; Google has to have some in order to display suggestions as you type. But I suspect a lot of it is tracking code and advertising setup. - -For comparison, I disabled JavaScript and reloaded the page: - -[![DevTools showing only 5 network requests][39]][40]The disabled JS version of Google search was only 102 KB and had just 5 network requests. ([Large preview][40]) - -The JS-disabled version of Google search is just 102 KB, as opposed to 402 KB. Although Google can’t provide autosuggestions under these conditions, the site is still functional, and I’ve just cut my data usage down to a quarter of what it was. If I really did have to limit my data usage in the long term, one of the first things I’d do is disable JavaScript. [It’s not as bad as it sounds][41]. 
- -##### Performance Tip #3: Less Is More - -Inlining, uglifying and minifying assets is all well and good, but the best performance comes from not sending down the assets in the first place. - - * Before adding any new features, do you have a [performance budget][42] in place? - * Before adding JavaScript to your site, can your feature be accomplished using plain HTML? (For example, [HTML5 form validation][43]). - * Before pulling a large JavaScript or CSS library into your application, use something like [bundlephobia.com][44] to measure how big it is. Is the convenience worth the weight? Can you accomplish the same thing using vanilla code at a much smaller data size? - - - -#### Analysing The Resource Info - -There’s a lot to unpack here, so let’s get cracking. I’ve only got 50 MB to play with, so I’m going to milk every bit of this page load. Settle in for a short Chrome Devtools tutorial. - -402 KB transferred, but 1.1 MB of resources: what does that actually mean? - -It means 402 KB of content was actually downloaded, but in its compressed form (using a compression algorithm such as [gzip or brotli][45]). The browser then had to do some work to unpack it into something meaningful. The total size of the unpacked data is 1.1 MB. - -This unpacking isn’t free — [there are a few milliseconds of overhead in decompressing the resources][46]. But that’s a negligible overhead compared to sending 1.1MB down the wire. - -##### Performance Tip #4: Compress Text-based Assets - -As a general rule, always compress your assets, using something like gzip. But don’t use compression on your images and other binary files — you should optimize these in advance at source. Compression could actually end up [making them bigger][47]. - -And, if you can, [avoid compressing files that are 1500 bytes or smaller][47]. The smallest TCP packet size is 1500 bytes, so by compressing to, say, 800 bytes, you save nothing, as it’s still transmitted in the same byte packet. 
Again, the cost is negligible, but wastes some compression CPU time on the server and decompression CPU time on the client. - -Now back to the Network tab in Chrome: let’s dig into those priorities. Notice that resources have priority “Highest” to “Lowest” — these are the browser’s best guess as to what are the more important resources to download. The higher the priority, the sooner the browser will try to download the asset. - -##### Performance Tip #5: Give Resource Hints To The Browser - -The browser will guess at what the highest priority assets are, but you can [provide a resource hint][48] using the `` tag, instructing the browser to download the asset as soon as possible. It’s a good idea to preload fonts, logos and anything else that appears above the fold. - -Let’s talk about caching. I’m going to hold ALT and right-click to change my column headers to unlock some more juicy information. We’re going to check out Cache-Control. - -There are lots of interesting fields tucked away behind ALT. ([Large preview][49]) - -Cache-Control denotes whether or not a resource can be cached, how long it can be cached for, and what rules it should follow around [revalidating][50]. Setting proper cache values is crucial to keeping the data cost of repeat visits down. - -##### Performance Tip #6: Set cache-control Headers On All Cacheable Assets - -Note that the cache-control value begins with a directive of `public` or `private`, followed by an expiration value (e.g. `max-age=31536000`). What does the directive mean, and why the oddly specific `max-age` value? - -[![Screenshot of Google network tab with cache-control column visible][51]][52]A mixture of max-age values and public/private. ([Large preview][52]) - -The value `31536000` is the number of seconds there are in a year, and is the theoretical maximum value allowed by the cache-control specification. 
It is common to see this value applied to all static assets and effectively means “this resource isn’t going to change”. In practice, [no browser is going to cache for an entire year][53], but it will cache the asset for as long as makes sense. - -To explain the public/private directive, we must explain the two main caches that exist off the server. First, there is the traditional browser cache, where the resource is stored on the user’s machine (the ‘client’). And then there is the CDN cache, which sits between the client and the server; resources are cached at the CDN level to prevent the CDN from requesting the resource from the origin server over and over again. - -A `Cache-Control` directive of `public` allows the resource to be cached in both the client and the CDN. A value of `private` means only the client can cache it; the CDN is not supposed to. This latter value is typically used for pages or assets that exist behind authentication, where it is fine to be cached on the client but we wouldn’t want to leak private information by caching it in the CDN and delivering it to other users. - -[![Screenshot of Google logo cache-control setting: private, max-age=31536000][54]][55]A mixture of max-age values and public/private. ([Large preview][55]) - -One thing that got my attention was that the Google logo has a cache control of “private”. Other images on the page do have a public cache, and I don’t know why the logo would be treated any differently. If you have any ideas, let me know in the comments! - -I refreshed the page and most of the resources were served from cache, apart from the page itself, which as you’ve seen already is `private, max-age=0`, meaning it cannot be cached. This is normal for dynamic web pages where it is important that the user always gets the very latest page when they refresh. 
- -It was at this point I accidentally clicked on an ‘Explanation’ URL in the devtools, which took me to the [network analysis reference][56], costing me about 5 MB of my budget. Oops. - -### Google Dev Docs - -4.2 MB of this new 5 MB page was down to images; specifically SVGs. The weightiest of these was 186 KB, which isn’t particularly big — there were just so many of them, and they all downloaded at once. - -This is a loooong page. All the images downloaded on page load. ([Large preview][57]) - -That 5 MB page load was 10% of my budget for today. So far I’ve used 5.5 MB, including the no-JavaScript reload of the Google homepage, and spent $0.40. I didn’t even mean to open this page. - -What would have been a better user experience here? - -##### Performance Tip #7: Lazy-load Your Images - -Ordinarily, if I accidentally clicked on a link, I would hit the back button in my browser. I’d have received no benefit whatsoever from downloading those images — what a waste of 4.2 MB! - -Apart from video, where you generally know what you’re getting yourself into, images are by far the biggest culprit to data usage on the web. A [study of the world’s top 500 websites][58] found that images take up to 53% of the average page weight. “This means they have a big impact on page-loading times and subsequently overall performance”. - -Instead of downloading all of the images on page load, it is good practice to lazy-load the images so that only users who are engaged with the page pay the cost of downloading them. Users who choose not to scroll below the fold therefore don’t waste any unnecessary bandwidth downloading images they’ll never see. - -There’s a great [css-tricks.com guide to rolling out lazy-loading for images][59] which offers a good balance between those on good connections, those on poor connections, and those with JavaScript disabled. 
- -If this page had implemented lazy loading as per the guide above, each of the 38 SVGs would have been represented by a 1 KB placeholder image by default, and only loaded into view on scroll. - -##### Performance Tip #8: Use The Right Format For Your Images - -I thought that Google had missed a trick by not using [WebP][60], which is an image format that is 26% smaller in size compared to PNGs (with no loss in quality) and 25-34% smaller in size compared to JPEGs (and of a comparable quality). I thought I’d have a go at converting SVG to WebP. - -Converting to WebP did bring one of the SVGs down from 186 KB to just 65 KB, but actually, looking at the images side by side, the WebP came out grainy: - -[![Comparison of the two images][61]][62]The SVG (left) is noticeably crisper than the WebP (right). ([Large preview][62]) - -I then tried converting one of the PNGs to WebP, which is supposed to be lossless and should come out smaller. However, the WebP output was *heavier* (127 KB, from 109 KB)! - -[![Comparison of the two images][63]][64]The PNG (left) is a similar quality to the WebP (right) but is smaller at 109 KB compared to 127 KB. ([Large preview][64]) - -This surprised me. WebP isn’t necessarily the silver bullet we think it is, and even Google have neglected to use it on this page. - -So my advice would be: where possible, experiment with different image formats on a per-image basis. The format that keeps the best quality for the smallest size may not be the one you expect. - -Now back to the DOM. I came across this: - -Notice the `async` keyword on the Google analytics script? - -[![Screenshot of performance analysis output of devtools][65]][66]Google analytics has ‘low’ priority. ([Large preview][66]) - -Despite being one of the first things in the head of the document, this was given a low priority, as we’ve explicitly opted out of being a blocking request by using the `async` keyword. - -A blocking request is one that stops the rendering of the page. 
A ` -``` - -_editor_url_ 是文档编辑器的链接接口。 -打开每个文件以供查看的按钮: - -``` - -``` - -现在我们需要添加带有 _id_ 的 `div` 标签,打开文档编辑器: - -``` -

-``` - -要打开编辑器,必须调用调用一个函数: - -``` - -``` - -DocEditor 函数有两个参数:将在其中打开编辑器的元素 `id` 和带有编辑器设置的 `JSON`。 -在此示例中,使用了以下必需参数: - - * _documentType_ 由其格式标识(`.docx,.xlsx,.pptx` 用于相应的文本,电子表格和演示文稿) - * _document.url_ 是您要打开的文件链接。 - * _editorConfig.mode_。 - -我们还可以添加将在编辑器中显示的 _title_。 -接下来,我们可以在 Python 应用程序中查看文档。 - -**3\. 如何在 Python 应用中利用 ONLYOFFICE 编辑文档** -首先,添加 “Edit”(编辑) 按钮: - -``` - -``` - -然后创建一个新功能,打开文件进行编辑。类似于查看功能。 -现在创建3个函数: - -``` - -``` - -_destroyEditor_ 被调用以关闭一个打开的编辑器。 -您可能会注意到,_edit()_ 函数中缺少 _editorConfig_ 参数,因为默认情况下它的值是 **{"mode":"edit"}.* - -现在,我们拥有了打开文档以在 Python 应用程序中进行协同编辑的所有功能。 - -**4\. 如何在 Python 应用中利用 ONLYOFFICE 协同编辑文档** -通过在编辑器中设置对同一文档使用相同的 `document.key` 来实现协同编辑。 如果没有此键,则每次打开文件时,编辑器都会创建编辑会话。 - -为每个文档设置唯一键,以使用户连接到同一编辑会话时进行协同编辑。 密钥格式应为以下格式:_filename +"_key"_。下一步是将其添加到当前文档的所有配置中。 - -``` -document: { -url: "host_url" + '/' + filepath, -title: filename, -key: filename + '_key' -}, -``` - -**5\. 如何在 Python 应用中利用 ONLYOFFICE 保存文档** -每次我们更改并保存文件时,ONLYOFFICE 都会存储其所有版本。 让我们仔细看看它是如何工作的。 关闭编辑器后,文档服务器将构建要保存的文件版本并将请求发送到 `callbackUrl` 地址。 该请求包含 `document.key`和指向刚刚构建的文件的链接。 -`document.key` 用于查找文件的旧版本并将其替换为新版本。 由于这里没有任何数据库,因此仅使用 `callbackUrl` 发送文件名。 -在 _editorConfig.callbackUrl_ 的设置中指定 _callbackUrl_ 参数并将其添加到 _edit()method_ 中: - -``` -function edit(filename) { -const filepath = 'files/' + filename; -if (editor) { -editor.destroyEditor() -} -editor = new DocsAPI.DocEditor("editor", -{ -documentType: get_file_type(filepath), -document: { -url: "host_url" + '/' + filepath, -title: filename, -key: filename + '_key' -} -, -editorConfig: { -mode: 'edit', -callbackUrl: "host_url" + '/callback' + '&filename=' + filename // add file name as a request parameter -} -}); -} -``` - -编写一种方法,在获取到 POST 请求发送到 */callback* 地址后将保存文件: - -``` -@post("/callback") # processing post requests for /callback -def callback(): -if request.json['status'] == 2: -file = requests.get(request.json['url']).content -with open('files/' + request.query['filename'], 'wb') as f: -f.write(file) -return "{\"error\":0}" 
-``` - -*# status 2* 是已生成的文件 -当我们关闭编辑器时,新版本的文件将保存到存储器中。 - -**6\. 如何在 Python 应用中利用 ONLYOFFICE 管理用户** -如果应用中有用户,并且您需要查看谁在编辑文档,请在编辑器的配置中输入其标识符(`id`和`name`)。 -在界面中添加选择用户的功能: - -``` - -``` - -如果在标记 _&lt;script&gt;_ 的开头添加对函数 *pick_user()* 的调用,负责初始化函数自身 `id`和`name`变量。 - -``` -function pick_user() { -const user_selector = document.getElementById("user_selector"); -this.current_user_name = user_selector.options[user_selector.selectedIndex].text; -this.current_user_id = user_selector.options[user_selector.selectedIndex].value; -} -``` - -使用 _editorConfig.user.id_ 和 _editorConfig.user.name_ 来配置用户设置。将这些参数添加到文件编辑函数中的编辑器配置中。 - -``` -function edit(filename) { -const filepath = 'files/' + filename; -if (editor) { -editor.destroyEditor() -} -editor = new DocsAPI.DocEditor("editor", -{ -documentType: get_file_type(filepath), -document: { -url: "host_url" + '/' + filepath, -title: filename -}, -editorConfig: { -mode: 'edit', -callbackUrl: "host_url" + '/callback' + '?filename=' + filename, -user: { -id: this.current_user_id, -name: this.current_user_name -} -} -}); -} -``` - -使用这种方法,您可以将 ONLYOFFICE 编辑器集成到用 Python 编写的应用程序中,并获得用于在文档上进行协同工作的所有必要工具。有关更多集成示例(Java,Node.js,PHP,Ruby),请参考官方的 [_API文档_][5]。 - -**By: Maria Pashkina** - --------------------------------------------------------------------------------- - -via: https://opensourceforu.com/2019/09/integrate-online-documents-editors-into-a-python-web-app-using-onlyoffice/ - -作者:[Aashima Sharma][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/stevenzdg988) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://opensourceforu.com/author/aashima-sharma/ -[b]: https://github.com/lujun9972 -[1]: https://i1.wp.com/opensourceforu.com/wp-content/uploads/2016/09/Typist-composing-text-in-laptop.jpg?resize=696%2C420&ssl=1 (Typist composing text in laptop) -[2]: 
https://i1.wp.com/opensourceforu.com/wp-content/uploads/2016/09/Typist-composing-text-in-laptop.jpg?fit=900%2C543&ssl=1 -[3]: https://www.onlyoffice.com/en/ -[4]: https://www.onlyoffice.com/en/developer-edition.aspx -[5]: https://api.onlyoffice.com/editors/basic diff --git a/translated/tech/20191224 Top articles for learning Python in 2020.md b/translated/tech/20191224 Top articles for learning Python in 2020.md deleted file mode 100644 index 93d0164876..0000000000 --- a/translated/tech/20191224 Top articles for learning Python in 2020.md +++ /dev/null @@ -1,72 +0,0 @@ -[#]: collector: (lujun9972) -[#]: translator: (stevenzdg988) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Top articles for learning Python in 2020) -[#]: via: (https://opensource.com/article/19/12/learn-python) -[#]: author: (Matthew Broberg https://opensource.com/users/mbbroberg) - -2020 年学习 Python 的热门文章 - -====== -无论您在Python编程过程中处于什么阶段,**Opensource.com** 在2019年发布的Python热门文章将会对您有很大帮助。 - -![Hands on a keyboard with a Python book][1] ?[Python 手书][1] - -Python 在 2019 年是丰收的一年。根据 [GitHub][2] 和 [Stack Overflow][3] 的受欢迎资源程度来看,它正在成为全球第二大流行语言。 - -> “ Python 是增长最快的程序设计语言,在我们的调查中发现它再次在程序设计语言中排名上升,今年排在 Java 之前,成为第二大最受欢迎的程序设计语言(仅次于 Rust )。” -> — [Stack Overflow Insights][3] ?[堆栈溢出顿悟][3] - -同样,Python 在 **Opensource.com** 上的读者人数呈跳跃式激增。 以下是按主题分组的 2019 年以来最热门的 Python 文章,供您仔细阅读。 - -### 为什么选择 Python ? - -在众多的程序设计语言中,是什么使 Python 成为首选呢? 
从大多数阅读的文章阅读量来看,那就是因为它的灵活性。 正如 Jigyasa Grover 解释的那样,Python 开发人员可以访问 [多功能范例][4],包括流行的 [面向对象程序设计][5] 的 Seth Kenlon 教程。 - -如果您是 Python 的长期用户并且正在寻找 Python 为什么是完美的程序设计语言的例子,那么可以看 Moshe Zadka 的 [喜欢Python的5大理由][6]。 如果这还不够的话,您也可以使用功能强大的工具来尝试,无需编写大量代码,例如 Parul Pandey 关于 [图像处理][7] 的教程。 - -### 配置 Python - -随着 Python 的受欢迎程度不断提高,使用它的人越来越多。这些新手中的许多人是利用 Mac 操作系统,并且正在使用 Moshe 和我写的[Python3配置向导][8]。 - -安装Python之后,然后决定利用什么工具编写代码。关于文本编辑器和集成开发环境(IDE),有很多选择,但是读者似乎更喜欢图形界面,在 2019 年阅读有关该主题的文章中,史蒂芬·阿文维德(Stephan Avenwedde)的关于 [Pythonic][9] 和我的关于 [JupyterLab][10] 的读者最多。 - -在对程序设计语言充满信心的途径上,开发人员将不得不采用众多可选项来管理程序设计语言版本和项目依赖项。幸运的是,László Kiss Kollár 的文章 [Python 包管理][11] 让其变得更加容易。 - -当您准备配置 IDE 时,充分利用该语言的所有功能,请确保尝试 Moshe 的 [opinionated linter Black][12],以保持代码的清洁。 - -### 小结 - -无论您处在 Python 程序设计的哪个阶段,2019年以来的热门 Python 文章都将为您提供帮助。 在没有进行关键测试确认的情况下,我无法对此进行总结,为此目的,Moshe 提供了另一本广为阅读的文章 [on tox][13]。 - -感谢所有在 2019 年为 Opensource.com 撰写的作者!如果您正在学习利用 Python 编程,请在评论中告诉我们您想知道的内容。而如果您是经验丰富的老手,请考虑通过 [writing an article][14] 与您感兴趣的 Python 主题来与我们分享您的技巧和窍门。 - --------------------------------------------------------------------------------- - -via: https://opensource.com/article/19/12/learn-python - -作者:[Matthew Broberg][a] -选题:[lujun9972][b] -译者:[译者ID](https://github.com/stevenzdg988) -校对:[校对者ID](https://github.com/校对者ID) - -本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 - -[a]: https://opensource.com/users/mbbroberg -[b]: https://github.com/lujun9972 -[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/python-programming-code-keyboard.png?itok=fxiSpmnd (Hands on a keyboard with a Python book ) -[2]: https://octoverse.github.com/#top-languages -[3]: https://insights.stackoverflow.com/survey/2019 -[4]: https://opensource.com/article/19/10/python-programming-paradigms -[5]: https://opensource.com/article/19/7/get-modular-python-classes -[6]: https://opensource.com/article/19/10/why-love-python -[7]: 
https://opensource.com/article/19/3/python-image-manipulation-tools -[8]: https://opensource.com/article/19/5/python-3-default-mac -[9]: https://opensource.com/article/19/5/graphically-programming-pythonic -[10]: https://opensource.com/article/19/5/jupyterlab-python-developers-magic -[11]: https://opensource.com/article/19/4/managing-python-packages -[12]: https://opensource.com/article/19/5/python-black -[13]: https://opensource.com/article/19/5/python-tox -[14]: https://opensource.com/how-submit-article diff --git a/sources/tech/20200129 Ansible Playbooks Quick Start Guide with Examples.md b/translated/tech/20200129 Ansible Playbooks Quick Start Guide with Examples.md similarity index 57% rename from sources/tech/20200129 Ansible Playbooks Quick Start Guide with Examples.md rename to translated/tech/20200129 Ansible Playbooks Quick Start Guide with Examples.md index 93b17b0fd3..b2e80cc517 100644 --- a/sources/tech/20200129 Ansible Playbooks Quick Start Guide with Examples.md +++ b/translated/tech/20200129 Ansible Playbooks Quick Start Guide with Examples.md @@ -1,96 +1,90 @@ -[#]: collector: (lujun9972) -[#]: translator: ( ) -[#]: reviewer: ( ) -[#]: publisher: ( ) -[#]: url: ( ) -[#]: subject: (Ansible Playbooks Quick Start Guide with Examples) -[#]: via: (https://www.2daygeek.com/ansible-playbooks-quick-start-guide-with-examples/) -[#]: author: (Magesh Maruthamuthu https://www.2daygeek.com/author/magesh/) +[#]: collector: "lujun9972" +[#]: translator: "MjSeven" +[#]: reviewer: " " +[#]: publisher: " " +[#]: url: " " +[#]: subject: "Ansible Playbooks Quick Start Guide with Examples" +[#]: via: "https://www.2daygeek.com/ansible-playbooks-quick-start-guide-with-examples/" +[#]: author: "Magesh Maruthamuthu https://www.2daygeek.com/author/magesh/" -Ansible Playbooks Quick Start Guide with Examples +Ansible 剧本快速入门指南 ====== -We have already written two articles about Ansible, this is the third article. 
+我们已经写了两篇关于 Ansible 的文章,这是第三篇。 -If you are new to Ansible, I advise you to read the two topics below, which will teach you the basics of Ansible and what it is. +如果你是 Ansible 新手,我建议你阅读下面这两篇文章,它会教你一些 Ansible 的基础以及它是什么。 - * **Part-1: [How to Install and Configure Ansible on Linux][1]** - * **Part-2: [Ansible ad-hoc Command Quick Start Guide][2]** + * **第一篇: [在 Linux 如何安装和配置 Ansible][1]** + * **第二篇: [Ansible ad-hoc 命令快速入门指南][2]** +如果你已经阅读过了,那么在阅读本文时你才不会感到突兀。 +### 什么是 Ansible 剧本? -If you have finished them, you will feel the continuity as you read this article. +剧本比临时命令模式更强大,而且完全不同。 -### What is the Ansible Playbook? +它使用了 **"/usr/bin/ansible-playbook"** 二进制文件,并且提供丰富的特性使得复杂的任务变得更容易。 -Playbooks are much more powerful and completely different way than ad-hoc command mode. +如果你想经常运行一个任务,剧本是非常有用的。 -It uses the **“/usr/bin/ansible-playbook”** binary. It provides rich features to make complex task easier. +此外,如果你想在服务器组上执行多个任务,它也是非常有用的。 -Playbooks are very useful if you want to run a task often. +剧本由 YAML 语言编写。YAML 代表一种标记语言,它比其它常见的数据格式(如 XML 或 JSON)更容易读写。 -Also, this is useful if you want to perform multiple tasks at the same time on the group of server. - -Playbooks are written in YAML language. YAML stands for Ain’t Markup Language, which is easier for humans to read and write than other common data formats such as XML or JSON. - -The Ansible Playbook Flow Chart below will tell you its detailed structure. +下面这张 Ansible 剧本流程图将告诉你它的详细结构。 ![][3] -### Understanding the Ansible Playbooks Terminology +### 理解 Ansible 剧本的术语 - * **Control Node:** The machine where Ansible is installed. It is responsible for managing client nodes. - * **Managed Nodes:** List of hosts managed by the control node - * **Playbook:** A Playbook file contains a set of procedures used to automate a task. - * **Inventory:** The inventory file contains information about the servers you manage. 
- * **Task:** Each play has multiple tasks, tasks that are executed one by one against a given machine (it a host or multiple host or a group of host). - * **Module:** Modules are a unit of code that is used to gather information from the client node. - * **Role:** Roles are ways to automatically load some vars_files, tasks, and handlers based on known file structure. - * **Play:** Each playbook has multiple plays, and a play is the implementation of a particular automation from beginning to end. - * **Handlers:** This helps you reduce any service restart in a play. Lists of handler tasks are not really different from regular tasks, and changes are notified by notifiers. If the handler does not receive any notification, it will not work. + * **控制节点:** Ansible 安装的机器,它负责管理客户端节点。 + * **被控节点:** 被控制节点管理的主机列表。 + * **剧本:** 一个剧本文件,包含一组自动化任务。 + * **主机清单:*** 这个文件包含有关管理的服务器的信息。 + * **任务:** 每个剧本都有大量的任务。任务在指定机器上依次执行(一个主机或多个主机)。 + * **模块:** 模块是一个代码单元,用于从客户端节点收集信息。 + * **角色:** 角色是根据已知文件结构自动加载一些变量文件、任务和处理程序的方法。 + * **Play:** 每个剧本含有大量的 play, 一个 play 从头到尾执行一个特定的自动化。 + * **Handlers:** 它可以帮助你减少在剧本中的重启任务。处理程序任务列表实际上与常规任务没有什么不同,更改由通知程序通知。如果处理程序没有收到任何通知,它将不起作用。 +### 基本的剧本是怎样的? +下面是一个剧本的模板: -### How Does the Basic Playbook looks Like? - -Here’s how the basic playbook looks. - -``` ---- [YAML file should begin with a three dash] -- name: [Description about a script] - hosts: group [Add a host or host group] - become: true [It requires if you want to run a task as a root user] - tasks: [What action do you want to perform under task] - - name: [Enter the module options] - module: [Enter a module, which you want to perform] - module_options-1: value [Enter the module options] +```yaml +--- [YAML 文件应该以三个破折号开头] +- name: [脚本描述] + hosts: group [添加主机或主机组] + become: true [如果你想以 root 身份运行任务,则标记它] + tasks: [你想在任务下执行什么动作] + - name: [输入模块选项] + module: [输入要执行的模块] + module_options-1: value [输入模块选项] module_options-2: value . 
module_options-N: value ``` -### How to Understand Ansible Output +### 如何理解 Ansible 的输出 -The Ansible Playbook output comes with 4 colors, see below for color definitions. +Ansible 剧本的输出有四种颜色,下面是具体含义: - * **Green:** **ok –** If that is correct, the associated task data already exists and configured as needed. - * **Yellow: changed –** Specific data has updated or modified according to the needs of the tasks. - * **Red: FAILED –** If there is any problem while doing a task, it returns a failure message, it may be anything and you need to fix it accordingly. - * **White:** It comes with multiple parameters + * **绿色:** **ok –** 代表成功,关联的任务数据已经存在,并且已经根据需要进行了配置。 + * **黄色: 已更改 –** 指定的数据已经根据任务的需要更新或修改。 + * **红色: 失败–** 如果在执行任务时出现任何问题,它将返回一个失败消息,它可能是任何东西,你需要相应地修复它。 + * **白色:** 表示有多个参数。 +为此,创建一个剧本目录,将它们都放在同一个地方。 - -To do so, create a playbook directory to keep them all in one place. - -``` +```bash $ sudo mkdir /etc/ansible/playbooks ``` -### Playbook-1: Ansible Playbook to Install Apache Web Server on RHEL Based Systems +### 剧本-1:在 RHEL 系统上安装 Apache Web 服务器 -This sample playbook allows you to install the Apache web server on a given target node. +这个示例剧本允许你在指定的目标机器上安装 Apache Web 服务器: -``` +```bash $ sudo nano /etc/ansible/playbooks/apache.yml --- @@ -108,17 +102,17 @@ $ sudo nano /etc/ansible/playbooks/apache.yml state: started ``` -``` +```bash $ ansible-playbook apache1.yml ``` ![][3] -### How to Understand Playbook Execution in Ansible +### 如何理解 Ansible 中剧本的执行 -To check the syntax error, run the following command. If it finds no error, it only shows the given file name. If it detects any error, you will get an error as follows, but the contents may differ based on your input file. +使用以下命令来查看语法错误。如果没有发现错误,它只显示剧本文件名。如果它检测到任何错误,你将得到一个如下所示的错误,但内容可能根据你的输入文件而有所不同。 -``` +```bash $ ansible-playbook apache1.yml --syntax-check ERROR! Syntax Error while loading YAML. @@ -143,11 +137,11 @@ Should be written as: # ^--- all spaces here. 
``` -Alternatively, you can check your ansible-playbook content from online using the following url @ [YAML Lint][4] +或者,你可以使用这个 URL [YAML Lint][4] 在线检查 Ansible 剧本内容。 -Run the following command to perform a **“Dry Run”**. When you run a ansible-playbook with the **“–check”** option, it does not make any changes to the remote machine. Instead, it will tell you what changes they have made rather than create them. +执行以下命令进行**“演练”**。当你运行带有 **"-check"** 选项的剧本时,它不会对远程机器进行任何修改。相反,它会告诉你它将要做什么改变但不是真的执行。 -``` +```bash $ ansible-playbook apache.yml --check PLAY [Install and Configure Apache Webserver] ******************************************************************** @@ -169,9 +163,9 @@ node1.2g.lab : ok=3 changed=2 unreachable=0 failed=0 s node2.2g.lab : ok=3 changed=2 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 ``` -If you want detailed information about your ansible playbook implementation, use the **“-vv”** verbose option. It shows what it really does to gather this information. +如果你想要知道 ansible 剧本实现的详细信息,使用 **"-vv"** 选项,它会展示如何收集这些信息。 -``` +```bash $ ansible-playbook apache.yml --check -vv ansible-playbook 2.9.2 @@ -212,11 +206,11 @@ node1.2g.lab : ok=3 changed=2 unreachable=0 failed=0 s node2.2g.lab : ok=3 changed=2 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 ``` -### Playbook-2: Ansible Playbook to Install Apache Web Server on Ubuntu Based Systems +### 剧本-2:在 Ubuntu 系统上安装 Apache Web 服务器 -This sample playbook allows you to install the Apache web server on a given target node. +这个示例剧本允许你在指定的目标节点上安装 Apache Web 服务器。 -``` +```bash $ sudo nano /etc/ansible/playbooks/apache-ubuntu.yml --- @@ -250,13 +244,13 @@ $ sudo nano /etc/ansible/playbooks/apache-ubuntu.yml enabled: yes ``` -### Playbook-3: Ansible Playbook to Install a List of Packages on Red Hat Based Systems +### 剧本-3:在 Red Hat 系统上安装软件包列表 -This sample playbook allows you to install a list of packages on a given target node. 
+这个示例剧本允许你在指定的目标节点上安装一个软件包列表。
+![](https://fedoramagazine.org/wp-content/uploads/2020/07/latex-series-redux-1536x650.jpg) + +本[系列 ][1] 介绍了 LaTeX 中的基本格式。[第 1 部分 ][2] 介绍了列表。[第 2 部分 ][3] 阐述了表格。在第 3 部分中,您将了解 LaTeX 的另一个重要特性:细腻灵活的文档排版。本文介绍如何自定义页面布局、目录、标题部分和页面样式。 + +### 页面维度 + +当您第一次编写 LaTeX 文档时,您可能已经注意到默认边距比您想象的要大一些。页边距与指定的纸张类型有关,例如 A4、letter 和 documentclass(article、book、report) 等等。要修改页边距,有几个选项,最简单的选项之一是使用 [fullpage][4] 包。 + +> 该软件包设置页面的主体,可以使主体几乎占满整个页面。 +> +> ——FULLPAGE PACKAGE DOCUMENTATION + +下图演示了使用 fullpage 包和没有使用的区别。 +<!-- 但是原文中并没有这个图 --> + +另一个选择是使用 [geometry][5] 包。在探索 geometry 包如何操纵页边距之前,请首先查看如下所示的页面尺寸。 + +![](https://fedoramagazine.org/wp-content/uploads/2020/07/image.png) + +1。1 英寸 + \hoffset +2。1 英寸 + \voffset +3。\oddsidemargin = 31pt +4。\topmargin = 20pt +5。\headheight = 12pt +6。\headsep = 25pt +7。\textheight = 592pt +8。\textwidth = 390pt +9。\marginparsep = 35pt +10。\marginparwidth = 35pt +11。\footskip = 30pt + +要使用 geometry 包将边距设置为 1 英寸,请使用以下示例 + +``` +\usepackage{geometry} +\geometry{a4paper, margin=1in} +``` + +除上述示例外,geometry 命令还可以修改纸张尺寸和方向。要更改纸张尺寸,请使用以下示例: + +``` +\usepackage[a4paper, total={7in, 8in}]{geometry} +``` + +![](https://fedoramagazine.org/wp-content/uploads/2020/07/image-2-1024x287.png) + +要更改页面方向,需要将横向添加到 geometery 选项中,如下所示: + +``` +\usepackage{geometery} +\geometry{a4paper, landscape, margin=1.5in +``` + +![](https://fedoramagazine.org/wp-content/uploads/2020/07/image-9.png) + +### 目录 + +默认情况下,目录的标题为 “contents”。有时,您更想将标题改为 “Table of Content”,更改目录和章节第一节之间的垂直间距,或者只更改文本的颜色。 + +若要更改文本,请在导言区中添加以下行,用所需语言替换英语: + +``` +\usepackage[english]{babel} +\addto\captionsenglish{ +\renewcommand{\contentsname} +{\bfseries{Table of Contents}}} +``` +要操纵目录与图,小节和章节列表之间的虚拟间距,请使用 tocloft 软件包。本文中使用的两个选项是 cftbeforesecskip 和 cftaftertoctitleskip。 + +> tocloft 包提供了控制目录、图表列表和表格列表的排版方法。 +> +> ——TOCLOFT PACKAGE DOUCMENTATION + +``` +\usepackage{tocloft} +\setlength\ctfbeforesecskip{2pt} +\setlength\cftaftertoctitleskip{30pt} +``` + 
+![](https://fedoramagazine.org/wp-content/uploads/2020/07/image-3.png) +默认目录 + +![](https://fedoramagazine.org/wp-content/uploads/2020/07/image-4.png) +定制目录 + +### 边框 + +在文档中使用包 [hyperref][6] 时,目录中的 LaTeX 章节列表和包含 `\url` 的引用都有边框,如下图所示。 + +![](https://fedoramagazine.org/wp-content/uploads/2020/07/image-5.png) + +要删除这些边框,请在导言区中包括以下内容,您将看到目录中没有任何边框。 + +``` +\usepackage{hyperref} +\hypersetup{ pdfborder = {0 0 0}} +``` + +要修改标题部分的字体、样式或颜色,请使用程序包 [titlesec][7]。在本例中,您将更改节、子节和子节的字体大小、字体样式和字体颜色。首先,在导言区中增加以下内容。 + +``` +\usepackage{titlesec} +\titleformat*{\section}{\Huge\bfseries\color{darkblue}} +\titleformat*{\subsection}{\huge\bfseries\color{darkblue}} +\titleformat*{\subsubsection}{\Large\bfseries\color{darkblue}} +``` + +仔细看看代码,`\titleformat*{\section}` 指定要使用的节的深度。上面的示例最多使用第三个深度。`{\Huge\bfseries\color{darkblue}}` 部分指定字体大小、字体样式和字体颜色 + +### 页面样式 + +要自定义的页眉和页脚,请使用 [fancyhdr][8]。此示例使用此包修改页面样式、页眉和页脚。下面的代码简要描述了每个选项的作用。 + +``` +\pagestyle{fancy} %for header to be on each page +\fancyhead[L]{} %keep left header blank +\fancyhead[C]{} %keep centre header blank +\fancyhead[R]{\leftmark} %add the section/chapter to the header right +\fancyfoot[L]{Static Content} %add static test to the left footer +\fancyfoot[C]{} %keep centre footer blank +\fancyfoot[R]{\thepage} %add the page number to the right footer +\setlength\voffset{-0.25in} %space between page border and header (1in + space) +\setlength\headheight{12pt} %height of the actual header. +\setlength\headsep{25pt} %separation between header and text. 
+\renewcommand{\headrulewidth}{2pt} % add header horizontal line +\renewcommand{\footrulewidth}{1pt} % add footer horizontal line +``` +结果如下所示 + +![](https://fedoramagazine.org/wp-content/uploads/2020/07/image-7.png) +页眉 + +![](https://fedoramagazine.org/wp-content/uploads/2020/07/image-8.png) +页脚 + +### 小贴士 + +#### 集中导言区 + +如果要编写许多 TeX 文档,可以根据文档类别创建一个包含所有导言区的 `.tex` 文件并引用此文件。例如,我使用结构 `.tex` 如下所示。 + +``` +$ cat article_structure.tex +\usepackage[english]{babel} +\addto\captionsenglish{ +\renewcommand{\contentsname} +{\bfseries{\color{darkblue}Table of Contents}} +} % Relable the contents +%\usepackage[margin=0.5in]{geometry} % specifies the margin of the document +\usepackage[utf8]{inputenc} +\usepackage[T1]{fontenc} +\usepackage{graphicx} % allows you to add graphics to the document +\usepackage{hyperref} % permits redirection of URL from a PDF document +\usepackage{fullpage} % formate the content to utilise the full page +%\usepackage{a4wide} +\usepackage[export]{adjustbox} % to force image position +%\usepackage[section]{placeins} % to have multiple images in a figure +\usepackage{tabularx} % for wrapping text in a table +%\usepackage{rotating} +\usepackage{multirow} +\usepackage{subcaption} % to have multiple images in a figure +%\usepackage{smartdiagram} % initialise smart diagrams +\usepackage{enumitem} % to manage the spacing between lists and enumeration +\usepackage{fancyhdr} %, graphicx} %for header to be on each page +\pagestyle{fancy} %for header to be on each page +%\fancyhf{} +\fancyhead[L]{} +\fancyhead[C]{} +\fancyhead[R]{\leftmark} +\fancyfoot[L]{Static Content} %\includegraphics[width=0.02\textwidth]{virgin_voyages.png}} +\fancyfoot[C]{} % clear center +\fancyfoot[R]{\thepage} +\setlength\voffset{-0.25in} %Space between page border and header (1in + space) +\setlength\headheight{12pt} %Height of the actual header. +\setlength\headsep{25pt} %Separation between header and text. 
+\renewcommand{\headrulewidth}{2pt} % adds horizontal line +\renewcommand{\footrulewidth}{1pt} % add horizontal line (footer) +%\renewcommand{\oddsidemargin}{2pt} % adjuct the margin spacing +%\renewcommand{\pagenumbering}{roman} % change the numbering style +%\renewcommand{\hoffset}{20pt} +%\usepackage{color} +\usepackage[table]{xcolor} +\hypersetup{ pdfborder = {0 0 0}} % removes the red boarder from the table of content +%\usepackage{wasysym} %add checkbox +%\newcommand\insq[1]{% +% \Square\ #1\quad% +%} % specify the command to add checkbox +%\usepackage{xcolor} +%\usepackage{colortbl} +%\definecolor{Gray}{gray}{0.9} % create new colour +%\definecolor{LightCyan}{rgb}{0.88,1,1} % create new colour +%\usepackage[first=0,last=9]{lcg} +%\newcommand{\ra}{\rand0.\arabic{rand}} +%\newcolumntype{g}{>{\columncolor{LightCyan}}c} % create new column type g +%\usesmartdiagramlibrary{additions} +%\setcounter{figure}{0} +\setcounter{secnumdepth}{0} % sections are level 1 +\usepackage{csquotes} % the proper was of using double quotes +%\usepackage{draftwatermark} % Enable watermark +%\SetWatermarkText{DRAFT} % Specify watermark text +%\SetWatermarkScale{5} % Toggle watermark size +\usepackage{listings} % add code blocks +\usepackage{titlesec} % Manipulate section/subsection +\titleformat{\section}{\Huge\bfseries\color{darkblue}} % update sections to bold with the colour blue \titleformat{\subsection}{\huge\bfseries\color{darkblue}} % update subsections to bold with the colour blue +\titleformat*{\subsubsection}{\Large\bfseries\color{darkblue}} % update subsubsections to bold with the colour blue +\usepackage[toc]{appendix} % Include appendix in TOC +\usepackage{xcolor} +\usepackage{tocloft} % For manipulating Table of Content virtical spacing +%\setlength\cftparskip{-2pt} +\setlength\cftbeforesecskip{2pt} %spacing between the sections +\setlength\cftaftertoctitleskip{30pt} % space between the first section and the text ``Table of Contents'' 
+\definecolor{navyblue}{rgb}{0.0,0.0,0.5} +\definecolor{zaffre}{rgb}{0.0, 0.08, 0.66} +\definecolor{white}{rgb}{1.0, 1.0, 1.0} +\definecolor{darkblue}{rgb}{0.0, 0.2, 0.6} +\definecolor{darkgray}{rgb}{0.66, 0.66, 0.66} +\definecolor{lightgray}{rgb}{0.83, 0.83, 0.83} +%\pagenumbering{roman} +``` + +在您的文章中,请参考以下示例中所示的方法引用 `structure.tex` 文件: + +``` +\documentclass[a4paper,11pt]{article} +\input{/path_to_structure.tex}} +\begin{document} +…... +\end{document} +``` + +#### 添加水印 + +要在 LaTeX 文档中启用水印,请使用 [draftwatermark][9] 软件包。下面的代码段和图像演示了如何在文档中添加水印。默认情况下,水印颜色为灰色,可以将其修改为所需的颜色。 + +``` +\usepackage{draftwatermark} +\SetWatermarkText{\color{red}Classified} %add watermark text +\SetWatermarkScale{4} %specify the size of the text +``` +![](https://fedoramagazine.org/wp-content/uploads/2020/07/image-10.png) + +### 结论 + +在本系列中,您了解了 LaTeX 提供的一些基本但丰富的功能,这些功能可用于自定义文档以满足您的需要或将文档呈现给的受众。LaTeX 海洋中,还有许多软件包需要大家自行去探索。 + +-------------------------------------------------------------------------------- + +via: https://fedoramagazine.org/latex-typesetting-part-3-formatting/ + +作者:[Earl Ramirez][a] +选题:[Chao-zhi][b] +译者:[Chao-zhi](https://github.com/Chao-zhi) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://fedoramagazine.org/author/earlramirez/ +[b]: https://github.com/Chao-zhi +[1]:https://fedoramagazine.org/tag/latex/ +[2]:https://fedoramagazine.org/latex-typesetting-part-1/ +[3]:https://fedoramagazine.org/latex-typesetting-part-2-tables/ +[4]:https://www.ctan.org/pkg/fullpage +[5]:https://www.ctan.org/geometry +[6]:https://www.ctan.org/pkg/hyperref +[7]:https://www.ctan.org/pkg/titlesec +[8]:https://www.ctan.org/pkg/fancyhdr +[9]:https://www.ctan.org/pkg/draftwatermark \ No newline at end of file diff --git a/translated/tech/20201103 How the Kubernetes scheduler works.md b/translated/tech/20201103 How the Kubernetes scheduler works.md new file mode 100644 index 0000000000..7e7ac9eddc --- 
/dev/null +++ b/translated/tech/20201103 How the Kubernetes scheduler works.md @@ -0,0 +1,158 @@ +[#]: collector: (lujun9972) +[#]: translator: (MZqk) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (How the Kubernetes scheduler works) +[#]: via: (https://opensource.com/article/20/11/kubernetes-scheduler) +[#]: author: (Mike Calizo https://opensource.com/users/mcalizo) + +Kubernetes 调度器是如何工作的 +===== +了解 Kubernetes 调度器是如何发现新的 pod 并将其分配到节点。 +![Parts, modules, containers for software][1] + +[Kubernetes][2] 作为容器和容器化工作负载标的准编标准引擎出现。它供提供一个了通用、开放的抽象层,跨公有云和私有云的环境。 + +对于那些已经熟悉 Kuberbetes 及其组件的人,他们讨论通常围绕着最大化使用 Kuberbetes 的功能。但当您只是 Kubernetes 初学者时或尝试在生产环境中使用前,明智的做法是从一些关于 Kubernetes 相关组件(包括 [Kubernetes 调度器][3]) 开始学习,如下抽象视图中所示。 + +![][4] + +Kubernetes 也分为控制平面和节点。 + + 1. **控制平面:** 也称为 Master,负责对群集做出全局决策,以及检测和响应集群事件。控制平面组件包括: + * etcd + * kube-apiserver + * kube-controller-manager + * scheduler + 2. **工作节点:** 也称 Node,是工作负载所在的位置。它始终和 Master 联系,以获取工作负载运行、集群外部进行通讯和连接所需的信息。工作节点组件包括: + * kubelet + * kube-proxy + * CRI + + + +我希望在这种背景下可以帮助您理解 Kubernetes 组件是如何关联在一起的。 + +### Kubernetes 调度器是如何工作的 + +Kubernetes [pod][5] 由一个或多个容器组成组成,共享存储和网络资源。Kubernetes 调度器的任务是确保每个 pod 分配到节点上运行。 + +在更深层次下,Kubernetes 调度器的工作方式是这样的: + + 1. 每个被调度的 pod 都需要加入到队列 + 2. 新的 pod 被创建后,它们也会加入到队列 + 3. 调度器持续从队列中取出 pod 并对其进行调度 + + + +[调度器源码][6] (`scheduler.go`) 很大,约 9000 行,且相当复杂,但解决了重要问题: + + 1. **等待/监视 pod 创建的代码** +监视 pod 创建的代码 `scheduler.go` 从 8970 行开始,它持续等待新的 pod: + + +``` +// Run begins watching and scheduling. It waits for cache to be synced, then starts a goroutine and returns immediately. + +func (sched *Scheduler) Run() { + if !sched.config.WaitForCacheSync() { + return + } + + go wait.Until(sched.scheduleOne, 0, sched.config.StopEverything) +``` + + 2. 
**负责对 pod 进行排队的代码** +负责 pod 进行排队的功能是: + + +``` +// queue for pods that need scheduling + podQueue *cache.FIFO +``` + +负责 pod 进行排队的代码从 7360 行开始 `scheduler.go`。当新的 pod 显示可用时事件处理程序触发,这段代码将新的 pod 加入队列中: + + +``` +func (f *ConfigFactory) getNextPod() *v1.Pod { + for { + pod := cache.Pop(f.podQueue).(*v1.Pod) + if f.ResponsibleForPod(pod) { + glog.V(4).Infof("About to try and schedule pod %v", pod.Name) + return pod + } + } +} +``` + + 3. **处理错误代码** +在 pod 调度中您不可避免会遇到调度错误。以下代码是处理调度程序错误的方法。它监听 `podInformer` 然后抛出一个错误,提示此 pod 尚未调度并被终止: + + +``` +// scheduled pod cache + podInformer.Informer().AddEventHandler( + cache.FilteringResourceEventHandler{ + FilterFunc: func(obj interface{}) bool { + switch t := obj.(type) { + case *v1.Pod: + return assignedNonTerminatedPod(t) + default: + runtime.HandleError(fmt.Errorf("unable to handle object in %T: %T", c, obj)) + return false + } + }, +``` + + + + +I换句话说,Kubernetes 调度器负责如下: + + * 将新创建的 pod 调度至具有足够空间的节点上,以满足 pod 的资源需求。 + * 监听 kube-apiserver 和控制器是否创建新的 pod,然后调度它至集群内一个可用的节点。 + * 监听未安排的 pod,并使用 `/binding` 子资源 API 将 pod 绑定至节点。 + + + +例如,假设正在部署一个需要 1 GB 内存和双核 CPU 的应用。因此创建应用 pod 的节点上需有足够资源可用,然后调度器会持续运行监听是否有 pod 需要调度。 + +### 了解更多 + +要使 Kubernetes 集群工作,你需要使用以上所有组件一起同步运行。调度器有一段复杂的的代码,但是 Kubernetes 是一个很棒的软件,目前它仍是我们在讨论或采用云原生应用程序时的首选。 + +学习 Kubernetes 需要精力和时间,但是将其作为您的专业技能之一能为您的职业生涯带来优势和回报。有很多很好的学习资源可供使用,而且[官方文档][7]也很棒。如果您有兴趣了解更多,建议从以下内容开始: + + * [Kubernetes the hard way][8] + * [Kubernetes the hard way on bare metal][9] + * [Kubernetes the hard way on AWS][10] + + + +你喜欢的 Kubernetes 学习方法是什么?请在评论中分享吧。 + +-------------------------------------------------------------------------------- + +via: https://opensource.com/article/20/11/kubernetes-scheduler + +作者:[Mike Calizo][a] +选题:[lujun9972][b] +译者:[译者ID](https://github.com/MZqk) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensource.com/users/mcalizo +[b]: https://github.com/lujun9972 +[1]: 
https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/containers_modules_networking_hardware_parts.png?itok=rPpVj92- (Parts, modules, containers for software) +[2]: https://kubernetes.io/ +[3]: https://kubernetes.io/docs/concepts/scheduling-eviction/kube-scheduler/ +[4]: https://lh4.googleusercontent.com/egB0SSsAglwrZeWpIgX7MDF6u12oxujfoyY6uIPa8WLqeVHb8TYY_how57B4iqByELxvitaH6-zjAh795wxAB8zenOwoz2YSMIFRqHsMWD9ohvUTc3fNLCzo30r7lUynIHqcQIwmtRo +[5]: https://kubernetes.io/docs/concepts/workloads/pods/ +[6]: https://github.com/kubernetes/kubernetes/blob/e4551d50e57c089aab6f67333412d3ca64bc09ae/plugin/pkg/scheduler/scheduler.go +[7]: https://kubernetes.io/docs/home/ +[8]: https://github.com/kelseyhightower/kubernetes-the-hard-way +[9]: https://github.com/Praqma/LearnKubernetes/blob/master/kamran/Kubernetes-The-Hard-Way-on-BareMetal.md +[10]: https://github.com/Praqma/LearnKubernetes/blob/master/kamran/Kubernetes-The-Hard-Way-on-AWS.md diff --git a/translated/tech/20210216 How to install Linux in 3 steps.md b/translated/tech/20210216 How to install Linux in 3 steps.md new file mode 100644 index 0000000000..6e50ef1eb6 --- /dev/null +++ b/translated/tech/20210216 How to install Linux in 3 steps.md @@ -0,0 +1,145 @@ +[#]: collector: (lujun9972) +[#]: translator: (wxy) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (How to install Linux in 3 steps) +[#]: via: (https://opensource.com/article/21/2/linux-installation) +[#]: author: (Seth Kenlon https://opensource.com/users/seth) + +安装 Linux,只需三步 +====== + +> 操作系统的安装看似神秘,但其实很简单。以下是成功安装 Linux 的步骤。 + +![绿色背景的bash标志][1] + +在 2021 年,有更多让人们喜欢 Linux 的理由。在这个系列中,我将分享 21 种使用 Linux 的不同理由。下面是如何安装 Linux。  + +安装一个操作系统(OS)总是令人生畏。对大多数人来说,这是一个难题。安装操作系统不能从操作系统内部进行,因为它要么没有被安装,要么即将被另一个操作系统取代,那么它是如何发生的呢?更糟糕的是,它通常会涉及到硬盘格式、安装目的地、时区、用户名、密码等一系列你通常不会想到的混乱问题。Linux 发行版知道这一点,所以它们多年来一直在努力将你在操作系统安装程序中花费的时间减少到最低限度。 + +### 安装时发生了什么 + 
+无论你安装的是一个应用程序还是整个操作系统,*安装*的过程只是将文件从一种媒介复制到另一种媒介的一种花哨方式。不管是什么用户界面,还是用动画将安装过程伪装成多么高度专业化的东西,最终都是一回事:曾经存储在光盘或驱动器上的文件被复制到硬盘上的特定位置。 + +当安装的是一个应用程序时,放置这些文件的有效位置被高度限制在你的*文件系统*或你的操作系统知道它可以使用的硬盘驱动器的部分。这一点很重要,因为它可以将硬盘分割成不同的空间(苹果公司在世纪初的 Bootcamp 中使用了这一技巧,允许用户将 macOS 和 Windows 安装到一个硬盘上,但作为单独的实体)。当你安装一个操作系统时,一些特殊的文件会被安装到硬盘上通常是禁区的地方。更重要的是,至少在默认情况下,你的硬盘上的所有现有数据都会被擦除,以便为新系统腾出空间,所以创建一个备份是*必要的*。 + +### 安装程序 + +从技术上讲,你实际上不需要安装程序来安装应用程序甚至操作系统。不管你信不信,有些人通过挂载一块空白硬盘、编译代码并复制文件来手动安装 Linux。这是在一个名为 [Linux From Scratch(LFS)][2] 的项目的帮助下完成的。这个项目旨在帮助爱好者、学生和未来的操作系统设计者更多地了解计算机的工作原理以及每个组件执行的功能。这并不是推荐的安装 Linux 的方法,但你会发现,在开源中,通常是这样的:*如果*有些事情可以做,那么就有人在做。而这也是一件好事,因为这些小众的兴趣往往会带来令人惊讶的有用的创新。 + +假设你不是想对 Linux 进行逆向工程,那么正常的安装方式是使用安装光盘或安装镜像。 + +### 3 个简单的步骤来安装 Linux + +当你从一个 Linux 安装 DVD 或 U 盘启动时,你会置身于一个最小化的操作环境中,这个环境是为了运行一个或多个有用的应用程序。安装程序是最主要的应用程序,但由于 Linux 是一个如此灵活的系统,你通常也可以运行标准的桌面应用程序,以在你决定安装它之前感受一下这个操作系统是什么样子的。 + +不同的 Linux 发行版有不同的安装程序界面。下面是两个例子。 + +Fedora Linux 有一个灵活的安装程序(称为 Anaconda),能够进行复杂的系统配置: + +![Fedora 上的 Anaconda 安装界面][3] + +*Fedora 上的 Anaconda 安装程序* + +Elementary OS 有一个简单的安装程序,主要是为了在个人电脑上安装而设计的: + +![Elementary OS 安装程序][4] + +*Elementary OS 安装程序* + +#### 1、获取安装程序 + +安装 Linux 的第一步是下载一个安装程序。你可以从你选择尝试的发行版中获得一个 Linux 安装镜像。 + + * [Fedora][5] 以率先更新软件而闻名。 + * [Linux Mint][6] 提供了安装缺失驱动程序的简易选项。 + * [Elementary][7] 提供了一个美丽的桌面体验和几个特殊的、定制的应用程序。 + +Linux 安装程序是 `.iso` 文件,是 DVD 介质的“蓝图”。如果你还在使用光学介质,你可以把 `.iso` 文件刻录到 DVD-R 上,或者你可以把它烧录到 USB 驱动器上(确保它是一个空的 USB 驱动器,因为当镜像被烧录到它上时,它的所有内容都会被删除)。要将镜像烧录到 USB 驱动器上,你可以 [使用开源的 Etcher 应用程序][8]。 + +![Etcher 用于烧录 USB 驱动器][9] + +*Etcher 应用程序可以烧录 USB 驱动器。* + +现在你可以安装 Linux 了。 + +#### 2、引导顺序 + +要在电脑上安装操作系统,你必须引导到操作系统安装程序。这对于一台电脑来说并不是常见的行为,因为很少有人这样做。理论上,你只需要安装一次操作系统,然后你就会不断更新它。当你选择在电脑上安装不同的操作系统时,你就中断了这个正常的生命周期。这不是一件坏事。这是你的电脑,所以你有权力对它进行重新规划。然而,这与电脑的默认行为不同,它的默认行为是开机后立即启动到硬盘上找到的任何操作系统。 + +在安装 Linux 之前,你必须备份你在目标计算机上的任何数据,因为这些数据在安装时都会被清除。 + +假设你已经将数据保存到了一个外部硬盘上,然后你将它秘密地存放在安全的地方(而不是连接到你的电脑上),那么你就可以继续了。 + +首先,将装有 Linux 安装程序的 USB 驱动器连接到电脑上。打开电脑电源,观察屏幕上是否有一些如何中断其默认启动序列的指示。这通常是像 
`F2`、`F8`、`Esc` 甚至 `Del` 这样的键,但根据你的主板制造商的不同而不同。如果你错过了这个时间窗口,只需等待默认操作系统加载,然后重新启动并再次尝试。 + +当你中断启动序列时,电脑会提示你引导指令。具体来说,嵌入主板的固件需要知道该到哪个驱动器寻找可以加载的操作系统。在这种情况下,你希望计算机从包含 Linux 镜像的 USB 驱动器启动。如何提示你这些信息取决于主板制造商。有时,它会直接问你,并配有一个菜单: + +![引导设备菜单][10] + +*启动设备选择菜单* + +其他时候,你会被带入一个简陋的界面,你可以用来设置启动顺序。计算机通常默认设置为先查看内部硬盘。如果引导失败,它就会移动到 USB 驱动器、网络驱动器或光驱。你需要告诉你的计算机先寻找一个 USB 驱动器,这样它就会绕过自己的内部硬盘驱动器,而引导 USB 驱动器上的 Linux 镜像。 + +![BIOS 选择屏幕][11] + +*BIOS 选择屏幕* + +起初,这可能会让人望而生畏,但一旦你熟悉了界面,这就是一个快速而简单的任务。一旦安装了Linux,你就不必这样做了,因为,在这之后,你会希望你的电脑再次从内部硬盘启动。这是一个很好的技巧,因为它是在 USB 驱动器上使用 Linux 的关键,在安装前测试计算机的 Linux 兼容性,以及无论涉及什么操作系统的一般性故障排除。 + +一旦你选择了你的 USB 驱动器作为引导设备,保存你的设置,让电脑复位,然后启动到 Linux 镜像。 + +#### 3、安装 Linux + +一旦你启动进入 Linux 安装程序,就只需通过提示进行操作。 + +Fedora 安装程序 Anaconda 为你提供了一个“菜单”,上面有你在安装前可以自定义的所有事项。大多数设置为合理的默认值,可能不需要你的互动,但有些则用警示符号标记,表示你的配置不能被安全地猜测,因此需要设置。这些配置包括你想安装操作系统的硬盘位置,以及你想为账户使用的用户名。在你解决这些问题之前,你不能继续安装。 + +对于硬盘的位置,你必须知道你要擦除哪个硬盘,然后用你选择的 Linux 发行版重新写入。对于只有一个硬盘的笔记本来说,这可能是一个显而易见的选择。 + +![选择安装驱动器的屏幕][12] + +*选择要安装操作系统的硬盘(本例中只有一个硬盘)。* + +如果你的电脑里有不止一个硬盘,而你只想在其中一个硬盘上安装 Linux,或者你想把两个硬盘当作一个硬盘,那么你必须帮助安装程序了解你的目标。最简单的方法是只给 Linux 分配一个硬盘,让安装程序执行自动分区和格式化,但对于高级用户来说,还有很多其他的选择。 + +你的电脑必须至少有一个用户,所以要为自己创建一个用户账户。完成后,你可以最后点击 **Done** 按钮,安装 Linux。 + +![Anaconda 选项完成并准备安装][13] + +*Anaconda 选项已经完成,可以安装了* + +其他的安装程序可能会更简单,不管你信不信,所以你看到的可能与本文中的图片不同。无论怎样,除了预装的操作系统之外,这个安装过程都是最简单的操作系统安装之一,所以不要让安装操作系统的想法吓到你。这是你的电脑。你可以也应该安装一个你拥有所有权的操作系统。 + +### 拥有你的电脑 + +最终,Linux 是你的操作系统。它是一个由来自世界各地的人们开发的操作系统,其核心是一个:创造一种参与、共同拥有、合作管理的计算文化。如果你有兴趣更好地了解开源,那么就请你迈出一步,了解它的一个光辉典范,并安装 Linux。 + + +-------------------------------------------------------------------------------- + +via: https://opensource.com/article/21/2/linux-installation + +作者:[Seth Kenlon][a] +选题:[lujun9972][b] +译者:[wxy](https://github.com/wxy) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://opensource.com/users/seth +[b]: https://github.com/lujun9972 +[1]: 
https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/bash_command_line.png?itok=k4z94W2U (bash logo on green background) +[2]: http://www.linuxfromscratch.org +[3]: https://opensource.com/sites/default/files/anaconda-installer.png +[4]: https://opensource.com/sites/default/files/elementary-installer.png +[5]: http://getfedora.org +[6]: http://linuxmint.com +[7]: http://elementary.io +[8]: https://opensource.com/article/18/7/getting-started-etcherio +[9]: https://opensource.com/sites/default/files/etcher_0.png +[10]: https://opensource.com/sites/default/files/boot-menu.jpg +[11]: https://opensource.com/sites/default/files/bios_1.jpg +[12]: https://opensource.com/sites/default/files/install-harddrive-chooser.png +[13]: https://opensource.com/sites/default/files/anaconda-done.png diff --git a/translated/tech/20210216 Meet Plots- A Mathematical Graph Plotting App for Linux Desktop.md b/translated/tech/20210216 Meet Plots- A Mathematical Graph Plotting App for Linux Desktop.md new file mode 100644 index 0000000000..e732ddf738 --- /dev/null +++ b/translated/tech/20210216 Meet Plots- A Mathematical Graph Plotting App for Linux Desktop.md @@ -0,0 +1,96 @@ +[#]: collector: (lujun9972) +[#]: translator: (geekpi) +[#]: reviewer: ( ) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (Meet Plots: A Mathematical Graph Plotting App for Linux Desktop) +[#]: via: (https://itsfoss.com/plots-graph-app/) +[#]: author: (Abhishek Prakash https://itsfoss.com/author/abhishek/) + +认识 Plots:一款适用于 Linux 桌面的数学图形绘图应用 +====== + +Plots 是一款图形绘图应用,它可以轻松实现数学公式的可视化。你可以用它来绘制任意三角函数、双曲函数、指数函数和对数函数的和和积。 + +### 在 Linux 上使用 Plots 绘制数学图形 + +[Plots][1] 是一款简单的应用,它的灵感来自于像 [Desmos][2] 这样的网络图形绘图应用。它能让你绘制不同数学函数的图形,你可以交互式地输入这些函数,还可以自定义绘图的颜色。 + +Plots 是用 Python 编写的,它使用 [OpenGL][3] 来利用现代硬件。它使用 GTK 3,因此可以很好地与 GNOME 桌面集成。 + +![][4] + +使用 plots 是很直接的。要添加一个新的方程,点击加号。点击垃圾箱图标可以删除方程。还可以选择撤销和重做。你也可以放大和缩小。 + +![][5] + +你输入方程的文本框是友好的。菜单中有一个“帮助”选项可以访问文档。你可以在这里找到关于如何编写各种数学符号的有用提示。你也可以复制粘贴方程。 + 
+![][6] + +在深色模式下,侧栏公式区域变成了深色,但主绘图区域仍然是白色。我相信这也许是设计好的。 + +你可以使用多个函数,并将它们全部绘制在一张图中: + +![][7] + +我发现它在尝试粘贴一些它无法理解的方程时崩溃了。如果你写了一些它不能理解的东西,或者与现有的方程冲突,所有图形都会消失,去掉不正确的方程就会恢复图形。 + +不幸的是,没有导出绘图或复制到剪贴板的选项。你可以随时[在 Linux 中截图][8],并在你要添加图像的文档中使用它。 + +### 在 Linux 上安装 Plots + +Plots 为各种发行版提供了不同的安装方式。 + +Ubuntu 20.04 和 20.10 用户可以[使用 PPA][11]: + +``` +sudo add-apt-repository ppa:apandada1/plots +sudo apt update +sudo apt install plots +``` + +对于其他基于 Debian 的发行版,你可以使用[这里][13]的 [deb 文件安装][12]。 + +我没有在 AUR 软件包列表中找到它,但是作为 Arch Linux 用户,你可以使用 Flatpak 软件包或者使用 Python 安装它。 + +[Plots Flatpak Package][14] + +如果你感兴趣,可以在它的 GitHub 仓库中查看源代码。如果你喜欢这款应用,请考虑在 GitHub 上给它 star。 + +[GitHub 上的 Plots 源码][1] + +**结论** + +Plots 主要用于帮助学生学习数学或相关科目,但它在很多其他场景下也能发挥作用。我知道不是每个人都需要,但肯定会对学术界和学校的人有帮助。 + +不过我倒是希望能有导出图片的功能。也许开发者可以在未来的版本中加入这个功能。 + +你知道有什么类似的绘图应用吗?Plots 与它们相比如何? + +-------------------------------------------------------------------------------- + +via: https://itsfoss.com/plots-graph-app/ + +作者:[Abhishek Prakash][a] +选题:[lujun9972][b] +译者:[geekpi](https://github.com/geekpi) +校对:[校对者ID](https://github.com/校对者ID) + +本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出 + +[a]: https://itsfoss.com/author/abhishek/ +[b]: https://github.com/lujun9972 +[1]: https://github.com/alexhuntley/Plots/ +[2]: https://www.desmos.com/ +[3]: https://www.opengl.org/ +[4]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2021/02/fourier-graph-plots.png?resize=800%2C492&ssl=1 +[5]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2021/02/plots-app-linux-1.png?resize=800%2C518&ssl=1 +[6]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2021/02/plots-app-linux.png?resize=800%2C527&ssl=1 +[7]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2021/02/multiple-equations-plots.png?resize=800%2C492&ssl=1 +[8]: https://itsfoss.com/take-screenshot-linux/ +[10]: https://itsfoss.com/keenwrite/ +[11]: https://itsfoss.com/ppa-guide/ +[12]: https://itsfoss.com/install-deb-files-ubuntu/ +[13]: 
https://launchpad.net/~apandada1/+archive/ubuntu/plots/+packages +[14]: https://flathub.org/apps/details/com.github.alexhuntley.Plots diff --git a/translated/tech/20210220 Starship- Open-Source Customizable Prompt for Any Shell.md b/translated/tech/20210220 Starship- Open-Source Customizable Prompt for Any Shell.md new file mode 100644 index 0000000000..76b804b1e9 --- /dev/null +++ b/translated/tech/20210220 Starship- Open-Source Customizable Prompt for Any Shell.md @@ -0,0 +1,178 @@ +[#]: collector: (lujun9972) +[#]: translator: (wxy) +[#]: reviewer: (wxy) +[#]: publisher: ( ) +[#]: url: ( ) +[#]: subject: (Starship: Open-Source Customizable Prompt for Any Shell) +[#]: via: (https://itsfoss.com/starship/) +[#]: author: (Ankush Das https://itsfoss.com/author/ankush/) + +Starship: 跨 shell 的可定制的提示符 +====== + +> 如果你很在意你的终端的外观的话,一个跨 shell 的提示符可以让你轻松地定制和配置 Linux 终端提示符。 + +虽然我已经介绍了一些帮助你 [自定义终端外观][1] 的技巧,但我也发现了一些有趣的跨 shell 提示符的建议。 + +### Starship:轻松地调整你的 Linux Shell 提示符 + +![][2] + +[Starship][3] 是一个用 [Rust][4] 编写的开源项目,它可以帮助你建立一个精简、快速、可定制的 shell 提示符。 + +无论你是使用 bash、fish、还是 Windows 上的 PowerShell,抑或其他 shell,你都可以利用Starship 来定制外观。 + +请注意,你必须了解它的 [官方文档][5] 才能对所有你喜欢的东西进行高级配置,但在这里,我将包括一个简单的示例配置,以有一个良好的开端,以及一些关于 Startship 的关键信息。 + +Startship 专注于为你提供一个精简的、快速的、有用的默认 shell 提示符。它甚至会记录并显示执行一个命令所需的时间。例如,这里有一张截图: + +![][6] + +不仅如此,根据自己的喜好定制提示符也相当简单。下面是一张官方 GIF,展示了它的操作: + +![][7] + +让我帮你设置一下。我是在 Ubuntu 上使用 bash shell 来测试的。你可以参考我提到的步骤,或者你可以看看 [官方安装说明][8],以获得在你的系统上安装它的更多选择。 + +### Starship 的亮点 + + * 跨平台 + * 跨 shell 支持 + * 能够添加自定义命令 + * 定制 git 体验 + * 定制使用特定编程语言时的体验 + * 轻松定制提示符的每一个方面,而不会对性能造成实质影响 + +### 在 Linux 上安装 Starship + +> 安装 Starship 需要下载一个 bash 脚本,然后用 root 权限运行该脚本。 +> +> 如果你不习惯这样做,你可以使用 snap。 +> +> ``` +> sudo snap install starship +> ``` + +**注意**:你需要安装 [Nerd 字体][9] 才能获得完整的体验。 + +要开始使用,请确保你安装了 [curl][10]。你可以通过键入如下命令来轻松安装它: + +``` +sudo apt install curl +``` + +完成后,输入以下内容安装 Starship: + +``` +curl -fsSL https://starship.rs/install.sh | bash +``` + +这应该会以 root 身份将 Starship 安装到 
`/usr/local/bin`。你可能会被提示输入密码。
https://i2.wp.com/itsfoss.com/wp-content/uploads/2021/02/starship-time.jpg?resize=800%2C281&ssl=1 +[7]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2021/02/starship-demo.gif?resize=800%2C501&ssl=1 +[8]: https://starship.rs/guide/#%F0%9F%9A%80-installation +[9]: https://www.nerdfonts.com +[10]: https://curl.se/ +[11]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2021/02/install-starship.png?resize=800%2C534&ssl=1 +[12]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2021/02/startship-bashrc-file.png?resize=800%2C545&ssl=1 +[13]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2021/02/starship-prompt.png?resize=800%2C552&ssl=1 +[14]: https://en.wikipedia.org/wiki/TOML +[15]: https://itsfoss.com/hide-folders-and-show-hidden-files-in-ubuntu-beginner-trick/ +[16]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2021/02/starship-custom.png?resize=800%2C289&ssl=1 +[17]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2021/02/starship-custom-config.png?resize=800%2C320&ssl=1 +[18]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2021/02/starship-different-symbol.png?resize=800%2C224&ssl=1 +[19]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2021/02/starship-symbol-change.jpg?resize=800%2C167&ssl=1 +[20]: https://github.com/starship/starship