mirror of https://github.com/LCTT/TranslateProject.git
synced 2025-03-21 02:10:11 +08:00
commit 8e397d846a

published/20180710 Building a Messenger App- Messages.md (new file, 315 lines)
@@ -0,0 +1,315 @@
[#]: collector: (lujun9972)
[#]: translator: (gxlct008)
[#]: reviewer: (wxy)
[#]: publisher: (wxy)
[#]: url: (https://linux.cn/article-12680-1.html)
[#]: subject: (Building a Messenger App: Messages)
[#]: via: (https://nicolasparada.netlify.com/posts/go-messenger-messages/)
[#]: author: (Nicolás Parada https://nicolasparada.netlify.com/)

构建一个即时消息应用(四):消息
======



本文是该系列的第四篇。

* [第一篇: 模式][1]
* [第二篇: OAuth][2]
* [第三篇: 对话][3]

在这篇文章中,我们将编写用于创建消息和列出消息的端点,同时还有一个用于更新参与者上次读取消息时间的端点。首先,在 `main()` 函数中添加这些路由。

```
router.HandleFunc("POST", "/api/conversations/:conversationID/messages", requireJSON(guard(createMessage)))
router.HandleFunc("GET", "/api/conversations/:conversationID/messages", guard(getMessages))
router.HandleFunc("POST", "/api/conversations/:conversationID/read_messages", guard(readMessages))
```

消息属于对话,因此端点中包含对话 ID。

### 创建消息

该端点处理对 `/api/conversations/{conversationID}/messages` 的 POST 请求,其 JSON 主体仅包含消息内容,并返回新创建的消息。它有两个副作用:更新对话的 `last_message_id`,以及更新参与者的 `messages_read_at`。
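作为参考,下面是一个假设的调用示例(补充内容,非原文;其中的主机、端口、对话 ID 和令牌都是虚构的,实际取决于你的部署以及 `guard()` 接受的认证方式):

```
curl -X POST "http://localhost:3000/api/conversations/<conversationID>/messages" \
  -H "Authorization: Bearer <token>" \
  -H "Content-Type: application/json" \
  -d '{"content": "Hello there!"}'
```

下面是该端点的处理程序实现: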
```
func createMessage(w http.ResponseWriter, r *http.Request) {
    var input struct {
        Content string `json:"content"`
    }
    defer r.Body.Close()
    if err := json.NewDecoder(r.Body).Decode(&input); err != nil {
        http.Error(w, err.Error(), http.StatusBadRequest)
        return
    }

    errs := make(map[string]string)
    input.Content = removeSpaces(input.Content)
    if input.Content == "" {
        errs["content"] = "Message content required"
    } else if len([]rune(input.Content)) > 480 {
        errs["content"] = "Message too long. 480 max"
    }
    if len(errs) != 0 {
        respond(w, Errors{errs}, http.StatusUnprocessableEntity)
        return
    }

    ctx := r.Context()
    authUserID := ctx.Value(keyAuthUserID).(string)
    conversationID := way.Param(ctx, "conversationID")

    tx, err := db.BeginTx(ctx, nil)
    if err != nil {
        respondError(w, fmt.Errorf("could not begin tx: %v", err))
        return
    }
    defer tx.Rollback()

    isParticipant, err := queryParticipantExistance(ctx, tx, authUserID, conversationID)
    if err != nil {
        respondError(w, fmt.Errorf("could not query participant existance: %v", err))
        return
    }

    if !isParticipant {
        http.Error(w, "Conversation not found", http.StatusNotFound)
        return
    }

    var message Message
    if err := tx.QueryRowContext(ctx, `
        INSERT INTO messages (content, user_id, conversation_id) VALUES
        ($1, $2, $3)
        RETURNING id, created_at
    `, input.Content, authUserID, conversationID).Scan(
        &message.ID,
        &message.CreatedAt,
    ); err != nil {
        respondError(w, fmt.Errorf("could not insert message: %v", err))
        return
    }

    if _, err := tx.ExecContext(ctx, `
        UPDATE conversations SET last_message_id = $1
        WHERE id = $2
    `, message.ID, conversationID); err != nil {
        respondError(w, fmt.Errorf("could not update conversation last message ID: %v", err))
        return
    }

    if err = tx.Commit(); err != nil {
        respondError(w, fmt.Errorf("could not commit tx to create a message: %v", err))
        return
    }

    go func() {
        if err = updateMessagesReadAt(nil, authUserID, conversationID); err != nil {
            log.Printf("could not update messages read at: %v\n", err)
        }
    }()

    message.Content = input.Content
    message.UserID = authUserID
    message.ConversationID = conversationID
    // TODO: notify about new message.
    message.Mine = true

    respond(w, message, http.StatusCreated)
}
```

首先,它将请求正文解码为一个只包含消息内容的结构体。然后验证内容不为空,且不超过 480 个字符。

```
var rxSpaces = regexp.MustCompile("\\s+")

func removeSpaces(s string) string {
    if s == "" {
        return s
    }

    lines := make([]string, 0)
    for _, line := range strings.Split(s, "\n") {
        line = rxSpaces.ReplaceAllLiteralString(line, " ")
        line = strings.TrimSpace(line)
        if line != "" {
            lines = append(lines, line)
        }
    }
    return strings.Join(lines, "\n")
}
```

这是删除多余空白的函数。它逐行处理:把连续的空白压缩为一个空格,去掉行首行尾的空白,然后只返回非空行。
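举个例子(补充示例,非原文内容;假设调用的就是上面的 `removeSpaces`):

```
fmt.Println(removeSpaces("  hello   world \n\n  foo  "))
// 输出:
// hello world
// foo
```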
验证之后,它会开启一个 SQL 事务。首先,它查询当前用户是不是该对话的参与者。

```
func queryParticipantExistance(ctx context.Context, tx *sql.Tx, userID, conversationID string) (bool, error) {
    if ctx == nil {
        ctx = context.Background()
    }
    var exists bool
    if err := tx.QueryRowContext(ctx, `SELECT EXISTS (
        SELECT 1 FROM participants
        WHERE user_id = $1 AND conversation_id = $2
    )`, userID, conversationID).Scan(&exists); err != nil {
        return false, err
    }
    return exists, nil
}
```

我把它提取成了一个函数,因为稍后还会重用。

如果用户不是对话的参与者,我们将返回一个 `404 Not Found` 错误。

然后,它插入消息并更新对话的 `last_message_id`。由于我们不允许删除消息,从这时起 `last_message_id` 就不会再是 `NULL` 了。

接下来提交事务,并在一个 goroutine 中更新参与者的 `messages_read_at`。

```
func updateMessagesReadAt(ctx context.Context, userID, conversationID string) error {
    if ctx == nil {
        ctx = context.Background()
    }

    if _, err := db.ExecContext(ctx, `
        UPDATE participants SET messages_read_at = now()
        WHERE user_id = $1 AND conversation_id = $2
    `, userID, conversationID); err != nil {
        return err
    }
    return nil
}
```

在用这条新消息响应之前,我们还必须发出通知。这属于实时部分,将在下一篇文章中编写,因此我在那里留了一个 “TODO” 注释。

### 获取消息

这个端点处理对 `/api/conversations/{conversationID}/messages` 的 GET 请求,它用一个包含该对话全部消息的 JSON 数组进行响应。它同样有更新参与者 `messages_read_at` 的副作用。

```
func getMessages(w http.ResponseWriter, r *http.Request) {
    ctx := r.Context()
    authUserID := ctx.Value(keyAuthUserID).(string)
    conversationID := way.Param(ctx, "conversationID")

    tx, err := db.BeginTx(ctx, &sql.TxOptions{ReadOnly: true})
    if err != nil {
        respondError(w, fmt.Errorf("could not begin tx: %v", err))
        return
    }
    defer tx.Rollback()

    isParticipant, err := queryParticipantExistance(ctx, tx, authUserID, conversationID)
    if err != nil {
        respondError(w, fmt.Errorf("could not query participant existance: %v", err))
        return
    }

    if !isParticipant {
        http.Error(w, "Conversation not found", http.StatusNotFound)
        return
    }

    rows, err := tx.QueryContext(ctx, `
        SELECT
            id,
            content,
            created_at,
            user_id = $1 AS mine
        FROM messages
        WHERE messages.conversation_id = $2
        ORDER BY messages.created_at DESC
    `, authUserID, conversationID)
    if err != nil {
        respondError(w, fmt.Errorf("could not query messages: %v", err))
        return
    }
    defer rows.Close()

    messages := make([]Message, 0)
    for rows.Next() {
        var message Message
        if err = rows.Scan(
            &message.ID,
            &message.Content,
            &message.CreatedAt,
            &message.Mine,
        ); err != nil {
            respondError(w, fmt.Errorf("could not scan message: %v", err))
            return
        }

        messages = append(messages, message)
    }

    if err = rows.Err(); err != nil {
        respondError(w, fmt.Errorf("could not iterate over messages: %v", err))
        return
    }

    if err = tx.Commit(); err != nil {
        respondError(w, fmt.Errorf("could not commit tx to get messages: %v", err))
        return
    }

    go func() {
        if err = updateMessagesReadAt(nil, authUserID, conversationID); err != nil {
            log.Printf("could not update messages read at: %v\n", err)
        }
    }()

    respond(w, messages, http.StatusOK)
}
```

首先,它以只读模式开启一个 SQL 事务,检查参与者是否存在,然后查询所有消息。查询每条消息时,我们用当前已认证用户的 ID 来判断该消息是否属于这个用户(`mine`)。然后它提交事务,在 goroutine 中更新参与者的 `messages_read_at`,并以消息列表作为响应。
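同样给出一个假设的调用示例(补充内容,非原文;主机、端口与令牌均为虚构):

```
curl "http://localhost:3000/api/conversations/<conversationID>/messages" \
  -H "Authorization: Bearer <token>"
```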
### 读取消息

该端点处理对 `/api/conversations/{conversationID}/read_messages` 的 POST 请求,没有任何请求体和响应体。在前端,每当实时流中有新消息到达时,我们都会发出这个请求。

```
func readMessages(w http.ResponseWriter, r *http.Request) {
    ctx := r.Context()
    authUserID := ctx.Value(keyAuthUserID).(string)
    conversationID := way.Param(ctx, "conversationID")

    if err := updateMessagesReadAt(ctx, authUserID, conversationID); err != nil {
        respondError(w, fmt.Errorf("could not update messages read at: %v", err))
        return
    }

    w.WriteHeader(http.StatusNoContent)
}
```

它复用了前面那个更新参与者 `messages_read_at` 的函数。

* * *

到此为止。实时消息是后端仅剩的部分了,请期待下一篇文章。

- [源代码][4]

--------------------------------------------------------------------------------

via: https://nicolasparada.netlify.com/posts/go-messenger-messages/

作者:[Nicolás Parada][a]
选题:[lujun9972][b]
译者:[gxlct008](https://github.com/gxlct008)
校对:[wxy](https://github.com/wxy)

本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出

[a]: https://nicolasparada.netlify.com/
[b]: https://github.com/lujun9972
[1]: https://linux.cn/article-11396-1.html
[2]: https://linux.cn/article-11510-1.html
[3]: https://linux.cn/article-12056-1.html
[4]: https://github.com/nicolasparada/go-messenger-demo
@@ -0,0 +1,175 @@
[#]: collector: (lujun9972)
[#]: translator: (gxlct008)
[#]: reviewer: (wxy)
[#]: publisher: (wxy)
[#]: url: (https://linux.cn/article-12685-1.html)
[#]: subject: (Building a Messenger App: Realtime Messages)
[#]: via: (https://nicolasparada.netlify.com/posts/go-messenger-realtime-messages/)
[#]: author: (Nicolás Parada https://nicolasparada.netlify.com/)

构建一个即时消息应用(五):实时消息
======



本文是该系列的第五篇。

* [第一篇: 模式][1]
* [第二篇: OAuth][2]
* [第三篇: 对话][3]
* [第四篇: 消息][4]

对于实时消息,我们将使用<ruby>[服务器发送事件][5]<rt>Server-Sent Events</rt></ruby>。这是一个保持打开的连接,我们可以在其中传输数据流。我们会提供一个端点,用户在这里订阅所有发送给他的消息。

### 消息客户端

在进入 HTTP 部分之前,我们先编写一个<ruby>映射<rt>map</rt></ruby>,用来记录所有正在监听消息的客户端。像这样全局初始化:

```go
type MessageClient struct {
    Messages chan Message
    UserID   string
}

var messageClients sync.Map
```

### 创建了新消息

还记得在 [上一篇文章][4] 中,我们创建消息时留下的那个 “TODO” 注释吗?在那里,我们将用下面这行代码调度一个 goroutine:

```go
go messageCreated(message)
```

把这行代码插入到我们留注释的位置。

```go
func messageCreated(message Message) error {
    if err := db.QueryRow(`
        SELECT user_id FROM participants
        WHERE user_id != $1 and conversation_id = $2
    `, message.UserID, message.ConversationID).
        Scan(&message.ReceiverID); err != nil {
        return err
    }

    go broadcastMessage(message)

    return nil
}

func broadcastMessage(message Message) {
    messageClients.Range(func(key, _ interface{}) bool {
        client := key.(*MessageClient)
        if client.UserID == message.ReceiverID {
            client.Messages <- message
        }
        return true
    })
}
```

该函数查询接收者 ID(对话中另一位参与者的 ID),然后把消息发送给该用户的所有客户端。
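注意 `client.Messages <- message` 是阻塞发送:如果某个客户端迟迟不从通道中读取,整个广播都会被卡住。下面是一个可能的改进草图(非原文实现,仅作示意):收不下就丢弃该条推送。

```go
// 非阻塞广播的一个草图(非原文代码):
// 若客户端的通道暂时无法接收,则跳过该客户端,避免阻塞其他客户端。
func broadcastMessage(message Message) {
    messageClients.Range(func(key, _ interface{}) bool {
        client := key.(*MessageClient)
        if client.UserID == message.ReceiverID {
            select {
            case client.Messages <- message:
            default: // 通道无人接收或已满,直接丢弃这次推送
            }
        }
        return true
    })
}
```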
### 订阅消息

让我们转到 `main()` 函数,添加以下路由:

```go
router.HandleFunc("GET", "/api/messages", guard(subscribeToMessages))
```

此端点处理 `/api/messages` 上的 GET 请求。请求方应该是一个 [EventSource][6] 连接,它以事件流作为响应,其中的数据是 JSON 格式的。

```go
func subscribeToMessages(w http.ResponseWriter, r *http.Request) {
    if a := r.Header.Get("Accept"); !strings.Contains(a, "text/event-stream") {
        http.Error(w, "This endpoint requires an EventSource connection", http.StatusNotAcceptable)
        return
    }

    f, ok := w.(http.Flusher)
    if !ok {
        respondError(w, errors.New("streaming unsupported"))
        return
    }

    ctx := r.Context()
    authUserID := ctx.Value(keyAuthUserID).(string)

    h := w.Header()
    h.Set("Cache-Control", "no-cache")
    h.Set("Connection", "keep-alive")
    h.Set("Content-Type", "text/event-stream")

    messages := make(chan Message)
    defer close(messages)

    client := &MessageClient{Messages: messages, UserID: authUserID}
    messageClients.Store(client, nil)
    defer messageClients.Delete(client)

    for {
        select {
        case <-ctx.Done():
            return
        case message := <-messages:
            if b, err := json.Marshal(message); err != nil {
                log.Printf("could not marshall message: %v\n", err)
                fmt.Fprintf(w, "event: error\ndata: %v\n\n", err)
            } else {
                fmt.Fprintf(w, "data: %s\n\n", b)
            }
            f.Flush()
        }
    }
}
```

首先,它检查请求头是否正确,并检查服务器是否支持流式传输。我们创建一个消息通道,用它构建一个客户端,并存入客户端映射中。之后每当有新消息创建,都会进入这个通道,因此我们可以在 `for-select` 循环中从中读取。

<ruby>服务器发送事件<rt>Server-Sent Events</rt></ruby>使用以下格式发送数据:

```
data: some data here\n\n
```

我们以 JSON 格式发送:

```
data: {"foo":"bar"}\n\n
```

我们使用 `fmt.Fprintf()` 以这种格式写入响应<ruby>写入器<rt>writer</rt></ruby>,并在循环的每次迭代中刷新数据。

这个循环会一直运行,直到连接随请求上下文一起被关闭为止。我们延迟执行了通道的关闭和客户端的删除,因此当循环结束时,通道会被关闭,该客户端也不会再收到消息。

注意,<ruby>服务器发送事件<rt>Server-Sent Events</rt></ruby>(EventSource)的 JavaScript API 不支持设置自定义请求头😒,所以我们不能设置 `Authorization: Bearer <token>`。这就是为什么 `guard()` 中间件也要能从 URL 查询字符串中读取令牌。
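作为示意,这类中间件内部提取令牌的逻辑可能类似下面的草图(非原文代码;查询参数名 `token` 是假设,以本系列源代码为准):

```go
// 先尝试 Authorization 请求头,失败则回退到 URL 查询字符串。
func tokenFromRequest(r *http.Request) string {
    if a := r.Header.Get("Authorization"); strings.HasPrefix(a, "Bearer ") {
        return strings.TrimPrefix(a, "Bearer ")
    }
    return r.URL.Query().Get("token")
}
```

前端则可以用形如 `new EventSource('/api/messages?token=' + token)` 的方式建立连接(同样是示意)。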
* * *

实时消息部分到此结束。可以说,后端的全部内容就到这里了。但是为了编写前端代码,我还会再增加一个登录端点:一个仅用于开发的登录。

- [源代码][7]

--------------------------------------------------------------------------------

via: https://nicolasparada.netlify.com/posts/go-messenger-realtime-messages/

作者:[Nicolás Parada][a]
选题:[lujun9972][b]
译者:[gxlct008](https://github.com/gxlct008)
校对:[wxy](https://github.com/wxy)

本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出

[a]: https://nicolasparada.netlify.com/
[b]: https://github.com/lujun9972
[1]: https://linux.cn/article-11396-1.html
[2]: https://linux.cn/article-11510-1.html
[3]: https://linux.cn/article-12056-1.html
[4]: https://linux.cn/article-12680-1.html
[5]: https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events
[6]: https://developer.mozilla.org/en-US/docs/Web/API/EventSource
[7]: https://github.com/nicolasparada/go-messenger-demo
@@ -1,38 +1,38 @@
[#]: collector: (lujun9972)
[#]: translator: ( )
[#]: reviewer: ( )
[#]: publisher: ( )
[#]: url: ( )
[#]: translator: (gxlct008)
[#]: reviewer: (wxy)
[#]: publisher: (wxy)
[#]: url: (https://linux.cn/article-12692-1.html)
[#]: subject: (Building a Messenger App: Development Login)
[#]: via: (https://nicolasparada.netlify.com/posts/go-messenger-dev-login/)
[#]: author: (Nicolás Parada https://nicolasparada.netlify.com/)

Building a Messenger App: Development Login
构建一个即时消息应用(六):仅用于开发的登录
======

This post is the 6th on a series:


* [Part 1: Schema][1]
* [Part 2: OAuth][2]
* [Part 3: Conversations][3]
* [Part 4: Messages][4]
* [Part 5: Realtime Messages][5]
本文是该系列的第六篇。

* [第一篇: 模式][1]
* [第二篇: OAuth][2]
* [第三篇: 对话][3]
* [第四篇: 消息][4]
* [第五篇: 实时消息][5]

我们已经实现了通过 GitHub 登录,但是如果想把玩一下这个 app,我们需要几个用户来测试它。在这篇文章中,我们将添加一个只需提供用户名、即可以任何用户身份登录的端点。该端点仅用于开发。

We already implemented login through GitHub, but if we want to play around with the app, we need a couple of users to test it. In this post we’ll add an endpoint to login as any user just giving an username. This endpoint will be just for development.
首先在 `main()` 函数中添加此路由。

Start by adding this route in the `main()` function.

```
```go
router.HandleFunc("POST", "/api/login", requireJSON(login))
```

### Login
### 登录

This function handles POST requests to `/api/login` with a JSON body with just an username and returns the authenticated user, a token and expiration date of it in JSON format.
此函数处理对 `/api/login` 的 POST 请求,其中 JSON 请求体只包含用户名,并以 JSON 格式返回通过认证的用户、令牌和过期日期。
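作为参考,一个假设的调用示例(补充内容,非原文;主机与端口为虚构,用户名可以用下文的种子用户):

```
curl -X POST "http://localhost:3000/api/login" \
  -H "Content-Type: application/json" \
  -d '{"username": "john"}'
```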
```
```go
func login(w http.ResponseWriter, r *http.Request) {
    if origin.Hostname() != "localhost" {
        http.NotFound(w, r)
@@ -81,9 +81,9 @@ func login(w http.ResponseWriter, r *http.Request) {
}
```

First it checks we are on localhost or it responds with `404 Not Found`. It decodes the body skipping validation since this is just for development. Then it queries to the database for a user with the given username, if none is found, it returns with `404 Not Found`. Then it issues a new JSON web token using the user ID as Subject.
首先,它检查我们是否在本地主机上,否则响应 `404 Not Found`。它解码请求体并跳过验证,因为这只是用于开发。然后在数据库中查询给定用户名的用户,如果没有找到,则返回 `404 Not Found`。接着,它使用用户 ID 作为主题(Subject)签发一个新的 JSON Web 令牌。

```
```go
func issueToken(subject string, exp time.Time) (string, error) {
    token, err := jwtSigner.Encode(jwt.Claims{
        Subject: subject,
@@ -96,33 +96,33 @@ func issueToken(subject string, exp time.Time) (string, error) {
}
```

The function does the same we did [previously][2]. I just moved it to reuse code.
该函数执行的操作与 [前文][2] 相同。我只是将其移过来以重用代码。

After creating the token, it responds with the user, token and expiration date.
创建令牌后,它将使用用户、令牌和到期日期进行响应。

### Seed Users
### 种子用户

Now you can add users to play with to the database.
现在,你可以把用来测试的用户添加到数据库中。

```
```sql
INSERT INTO users (id, username) VALUES
    (1, 'john'),
    (2, 'jane');
```

You can save it to a file and pipe it to the Cockroach CLI.
你可以将其保存到文件中,并通过管道将其传送给 Cockroach CLI。

```
```bash
cat seed_users.sql | cockroach sql --insecure -d messenger
```

* * *

That’s it. Once you deploy the code to production and use your own domain this login function won’t be available.
就是这样。一旦将代码部署到生产环境并使用自己的域名后,该登录功能将不可用。

This post concludes the backend.
本文也结束了所有的后端开发部分。

[Souce Code][6]
- [源代码][6]

--------------------------------------------------------------------------------

@@ -130,16 +130,16 @@ via: https://nicolasparada.netlify.com/posts/go-messenger-dev-login/

作者:[Nicolás Parada][a]
选题:[lujun9972][b]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
译者:[gxlct008](https://github.com/gxlct008)
校对:[wxy](https://github.com/wxy)

本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出

[a]: https://nicolasparada.netlify.com/
[b]: https://github.com/lujun9972
[1]: https://nicolasparada.netlify.com/posts/go-messenger-schema/
[2]: https://nicolasparada.netlify.com/posts/go-messenger-oauth/
[3]: https://nicolasparada.netlify.com/posts/go-messenger-conversations/
[4]: https://nicolasparada.netlify.com/posts/go-messenger-messages/
[5]: https://nicolasparada.netlify.com/posts/go-messenger-realtime-messages/
[1]: https://linux.cn/article-11396-1.html
[2]: https://linux.cn/article-11510-1.html
[3]: https://linux.cn/article-12056-1.html
[4]: https://linux.cn/article-12680-1.html
[5]: https://linux.cn/article-12685-1.html
[6]: https://github.com/nicolasparada/go-messenger-demo
@@ -1,31 +1,31 @@
[#]: collector: (lujun9972)
[#]: translator: ( )
[#]: reviewer: ( )
[#]: publisher: ( )
[#]: url: ( )
[#]: translator: (gxlct008)
[#]: reviewer: (wxy)
[#]: publisher: (wxy)
[#]: url: (https://linux.cn/article-12704-1.html)
[#]: subject: (Building a Messenger App: Access Page)
[#]: via: (https://nicolasparada.netlify.com/posts/go-messenger-access-page/)
[#]: author: (Nicolás Parada https://nicolasparada.netlify.com/)

Building a Messenger App: Access Page
构建一个即时消息应用(七):Access 页面
======

This post is the 7th on a series:


* [Part 1: Schema][1]
* [Part 2: OAuth][2]
* [Part 3: Conversations][3]
* [Part 4: Messages][4]
* [Part 5: Realtime Messages][5]
* [Part 6: Development Login][6]
本文是该系列的第七篇。

* [第一篇: 模式][1]
* [第二篇: OAuth][2]
* [第三篇: 对话][3]
* [第四篇: 消息][4]
* [第五篇: 实时消息][5]
* [第六篇: 仅用于开发的登录][6]

现在我们已经完成了后端,让我们转到前端。我将采用单页应用程序的方案。

Now that we’re done with the backend, lets move to the frontend. I will go with a single-page application.
首先,我们创建一个 `static/index.html` 文件,内容如下。

Lets start by creating a file `static/index.html` with the following content.

```
```html
<!DOCTYPE html>
<html lang="en">
<head>
@@ -40,11 +40,11 @@ Lets start by creating a file `static/index.html` with the following content.
</html>
```

This HTML file must be server for every URL and JavaScript will take care of rendering the correct page.
这个 HTML 文件必须对每个 URL 都提供服务,由 JavaScript 负责渲染正确的页面。

So lets go the the `main.go` for a moment and in the `main()` function add the following route:
因此,让我们把注意力转到 `main.go` 片刻,在 `main()` 函数中添加以下路由:

```
```go
router.Handle("GET", "/...", http.FileServer(SPAFileSystem{http.Dir("static")}))

type SPAFileSystem struct {
@@ -60,15 +60,15 @@ func (spa SPAFileSystem) Open(name string) (http.File, error) {
}
```

We use a custom file system so instead of returning `404 Not Found` for unknown URLs, it serves the `index.html`.
我们使用了一个自定义的文件系统,这样它不会为未知的 URL 返回 `404 Not Found`,而是返回 `index.html`。

### Router
### 路由器

In the `index.html` we loaded two files: `styles.css` and `main.js`. I leave styling to your taste.
在 `index.html` 中我们加载了两个文件:`styles.css` 和 `main.js`。我把样式留给你自由发挥。

Lets move to `main.js`. Create a `static/main.js` file with the following content:
让我们来编写 `main.js`。创建一个包含以下内容的 `static/main.js` 文件:

```
```javascript
import { guard } from './auth.js'
import Router from './router.js'

@@ -98,19 +98,19 @@ function view(pageName) {
}
```

If you are follower of this blog, you already know how this works. That router is the one showed [here][7]. Just download it from [@nicolasparada/router][8] and save it to `static/router.js`.
如果你是这个博客的关注者,你已经知道它是如何工作的了。该路由器就是 [这里][7] 展示过的那个。只需从 [@nicolasparada/router][8] 下载并保存到 `static/router.js` 即可。

We registered four routes. At the root `/` we show the home or access page whether the user is authenticated. At `/callback` we show the callback page. On `/conversations/{conversationID}` we show the conversation or access page whether the user is authenticated and for every other URL, we show a not found page.
我们注册了四条路由。在根路由 `/` 处,我们根据用户是否通过了身份验证来展示 `home` 或 `access` 页面。在 `/callback` 中,我们展示 `callback` 页面。在 `/conversations/{conversationID}` 上,我们同样根据用户是否通过验证来展示对话或 `access` 页面。对于其他所有 URL,我们展示一个 `not-found` 页面。

We tell the router to render the result to the document body and dispatch a `disconnect` event to each page before leaving.
我们告诉路由器将结果渲染到文档主体中,并在离开之前向每个页面派发一个 `disconnect` 事件。

We have each page in a different file and we import them with the new dynamic `import()`.
我们将每个页面放在不同的文件中,并使用新的动态 `import()` 函数导入它们。

### Auth
### 身份验证

`guard()` is a function that given two functions, executes the first one if the user is authenticated, or the sencond one if not. It comes from `auth.js` so lets create a `static/auth.js` file with the following content:
`guard()` 是一个接受两个函数作为参数的函数:如果用户通过了身份验证,则执行第一个,否则执行第二个。它来自 `auth.js`,所以我们创建一个包含以下内容的 `static/auth.js` 文件:

```
```javascript
export function isAuthenticated() {
    const token = localStorage.getItem('token')
    const expiresAtItem = localStorage.getItem('expires_at')
@@ -150,17 +150,17 @@ export function getAuthUser() {
}
```

`isAuthenticated()` checks for `token` and `expires_at` from localStorage to tell if the user is authenticated. `getAuthUser()` gets the authenticated user from localStorage.
`isAuthenticated()` 检查 `localStorage` 中的 `token` 和 `expires_at`,以判断用户是否已通过身份验证。`getAuthUser()` 从 `localStorage` 中获取经过身份验证的用户。

When we login, we’ll save all the data to localStorage so it will make sense.
当我们登录时,我们会把所有这些数据保存到 `localStorage`,这样这些函数才有意义。
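登录成功后的保存动作大致如下(补充示意,非原文代码;其中 `auth_user` 这个键名与 `payload` 的字段名均为假设,具体以本系列源代码为准):

```javascript
localStorage.setItem('token', payload.token)
localStorage.setItem('expires_at', payload.expiresAt)
localStorage.setItem('auth_user', JSON.stringify(payload.authUser))
```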
### Access Page
### Access 页面

![access page screenshot][9]

Lets start with the access page. Create a file `static/pages/access-page.js` with the following content:
让我们从 `access` 页面开始。创建一个包含以下内容的文件 `static/pages/access-page.js`:

```
```javascript
const template = document.createElement('template')
template.innerHTML = `
    <h1>Messenger</h1>
@@ -172,15 +172,15 @@ export default function accessPage() {
}
```

Because the router intercepts all the link clicks to do its navigation, we must prevent the event propagation for this link in particular.
因为路由器会拦截所有链接点击来进行导航,所以我们必须特别阻止此链接的事件传播。

Clicking on that link will redirect us to the backend, then to GitHub, then to the backend and then to the frontend again; to the callback page.
单击该链接会将我们重定向到后端,然后重定向到 GitHub,再重定向到后端,最后再次重定向到前端的 `callback` 页面。

### Callback Page
### Callback 页面

Create the file `static/pages/callback-page.js` with the following content:
创建包含以下内容的 `static/pages/callback-page.js` 文件:

```
```javascript
import http from '../http.js'
import { navigate } from '../router.js'

@@ -211,13 +211,13 @@ function getAuthUser(token) {
}
```

The callback page doesn’t render anything. It’s an async function that does a GET request to `/api/auth_user` using the token from the URL query string and saves all the data to localStorage. Then it redirects to `/`.
`callback` 页面不渲染任何内容。它是一个异步函数,使用 URL 查询字符串中的 token 向 `/api/auth_user` 发起 GET 请求,并将所有数据保存到 `localStorage`,然后重定向到 `/`。

### HTTP

There is an HTTP module. Create a `static/http.js` file with the following content:
这里有一个 HTTP 模块。创建一个包含以下内容的 `static/http.js` 文件:

```
```javascript
import { isAuthenticated } from './auth.js'

async function handleResponse(res) {
@@ -297,15 +297,15 @@ export default {
}
```

This module is a wrapper around the [fetch][10] and [EventSource][11] APIs. The most important part is that it adds the JSON web token to the requests.
这个模块是 [fetch][10] 和 [EventSource][11] API 的包装器。最重要的部分是它会把 JSON Web 令牌添加到请求中。

### Home Page
### Home 页面

![home page screenshot][12]

So, when the user login, the home page will be shown. Create a `static/pages/home-page.js` file with the following content:
当用户登录后,将显示 `home` 页。创建一个具有以下内容的 `static/pages/home-page.js` 文件:

```
```javascript
import { getAuthUser } from '../auth.js'
import { avatar } from '../shared.js'

@@ -334,15 +334,15 @@ function onLogoutClick() {
}
```

For this post, this is the only content we render on the home page. We show the current authenticated user and a logout button.
在这篇文章中,这是我们在 `home` 页上渲染的唯一内容:显示当前通过身份验证的用户,以及一个注销按钮。

When the user clicks to logout, we clear all inside localStorage and do a reload of the page.
当用户点击注销时,我们清除 `localStorage` 中的所有内容并重新加载页面。

### Avatar

That `avatar()` function is to show the user’s avatar. Because it’s used in more than one place, I moved it to a `shared.js` file. Create the file `static/shared.js` with the following content:
那个 `avatar()` 函数用于显示用户的头像。由于它在多个地方使用,因此我把它移到了 `shared.js` 文件中。创建具有以下内容的文件 `static/shared.js`:

```
```javascript
export function avatar(user) {
    return user.avatarUrl === null
        ? `<figure class="avatar" data-initial="${user.username[0]}"></figure>`
@@ -350,23 +350,23 @@ export function avatar(user) {
}
```

We use a small figure with the user’s initial in case the avatar URL is null.
如果头像网址为 `null`,我们会显示一个小占位图,上面带有用户名的首字母。

You can show the initial with a little of CSS using the `attr()` function.
你可以用一点 CSS,借助 `attr()` 函数来显示这个首字母。

```
```css
.avatar[data-initial]::after {
    content: attr(data-initial);
}
```

### Development Login
### 仅开发使用的登录

![access page with login form screenshot][13]

In the previous post we coded a login for development. Lets add a form for that in the access page. Go to `static/pages/access-page.js` and modify it a little.
在上一篇文章中,我们编写了一个仅用于开发的登录。现在在 `access` 页面中为它添加一个表单。进入 `static/pages/access-page.js`,稍微修改一下。

```
```javascript
import http from '../http.js'

const template = document.createElement('template')
@@ -420,15 +420,15 @@ function login(username) {
}
```

I added a login form. When the user submits the form. It does a POST requets to `/api/login` with the username. Saves all the data to localStorage and reloads the page.
我添加了一个登录表单。当用户提交表单时,它会带上用户名向 `/api/login` 发起 POST 请求,把所有数据保存到 `localStorage`,然后重新加载页面。

Remember to remove this form once you are done with the frontend.
记住在前端完成后删除此表单。

* * *

That’s all for this post. In the next one, we’ll continue with the home page to add a form to start conversations and display a list with the latest ones.
这就是这篇文章的全部内容。在下一篇文章中,我们将继续完善主页,添加一个用于发起对话的表单,并显示包含最新对话的列表。

[Souce Code][14]
- [源代码][14]

--------------------------------------------------------------------------------

@@ -436,19 +436,19 @@ via: https://nicolasparada.netlify.com/posts/go-messenger-access-page/

作者:[Nicolás Parada][a]
选题:[lujun9972][b]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
译者:[gxlct008](https://github.com/gxlct008)
校对:[wxy](https://github.com/wxy)

本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出

[a]: https://nicolasparada.netlify.com/
[b]: https://github.com/lujun9972
[1]: https://nicolasparada.netlify.com/posts/go-messenger-schema/
[2]: https://nicolasparada.netlify.com/posts/go-messenger-oauth/
[3]: https://nicolasparada.netlify.com/posts/go-messenger-conversations/
[4]: https://nicolasparada.netlify.com/posts/go-messenger-messages/
[5]: https://nicolasparada.netlify.com/posts/go-messenger-realtime-messages/
[6]: https://nicolasparada.netlify.com/posts/go-messenger-dev-login/
[1]: https://linux.cn/article-11396-1.html
[2]: https://linux.cn/article-11510-1.html
[3]: https://linux.cn/article-12056-1.html
[4]: https://linux.cn/article-12680-1.html
[5]: https://linux.cn/article-12685-1.html
[6]: https://linux.cn/article-12692-1.html
[7]: https://nicolasparada.netlify.com/posts/js-router/
[8]: https://unpkg.com/@nicolasparada/router
[9]: https://nicolasparada.netlify.com/img/go-messenger-access-page/access-page.png
@@ -1,8 +1,8 @@
[#]: collector: (lujun9972)
[#]: translator: (rakino)
[#]: reviewer: ( )
[#]: publisher: ( )
[#]: url: ( )
[#]: reviewer: (wxy)
[#]: publisher: (wxy)
[#]: url: (https://linux.cn/article-12689-1.html)
[#]: subject: (How to Disable IPv6 on Ubuntu Linux)
[#]: via: (https://itsfoss.com/disable-ipv6-ubuntu-linux/)
[#]: author: (Sergiu https://itsfoss.com/author/sergiu/)
@@ -14,31 +14,33 @@

### 什么是 IPv6?为什么会想要禁用它?

<ruby>**[网际协议第6版][1]**<rt>Internet Protocol version 6</rt></ruby>[(][1] **[IPv6][1]**[)][1]是网际协议(IP)的最新版本。网际协议是一种通信协议,它为网络上的计算机提供识别和定位系统,并在互联网上进行通信路由。IPv6 是在 1998 年以取代 **IPv4** 协议为目的被设计出来的。
<ruby>[互联网协议第 6 版][1]<rt>Internet Protocol version 6</rt></ruby>(IPv6)是互联网协议(IP)的最新版本。互联网协议是一种通信协议,它为网络上的计算机提供识别和定位系统,并在互联网上进行通信路由。IPv6 于 1998 年设计,以取代 IPv4 协议。

**IPv6** 意在提高安全性与性能的同时保证地址不被用尽;它可以在全球范围内为每台设备分配唯一的以 **128 位元**存储的地址,而 IPv4 只使用了 32 位元。
**IPv6** 意在提高安全性与性能的同时保证地址不被用尽;它可以在全球范围内为每台设备分配唯一的以 **128 位比特**存储的地址,而 IPv4 只使用了 32 位比特。

![Disable IPv6 Ubuntu][2]

尽管 IPv6 的目标是取代 IPv4,但目前还有很长的路要走;互联网上只有少于 **30%** 的网站支持 IPv6([这里][3] 是谷歌的统计),IPv6 有时也会导致 [一些程序出现问题][4]。
尽管 IPv6 的目标是取代 IPv4,但目前还有很长的路要走;互联网上只有不到 **30%** 的网站支持 IPv6([这里][3] 是谷歌的统计),IPv6 有时也给 [一些应用带来问题][4]。

由于 IPv6 使用全球(唯一分配的)路由地址,以及(仍然)有<ruby>互联网服务供应商<rt>Internet Service Provider</rt></ruby>(ISP)不提供 IPv6 支持的事实,IPv6 这一功能在提供全球服务的<ruby>**虚拟私人网络**<rt>Virtual Private Network</rt></ruby>(VPN)供应商的优先级列表中处于较低的位置,这样一来,他们就可以专注于对 VPN 用户最重要的事情:安全。

不想让自己暴露在各种威胁之下可能是另一个让你想在系统上禁用 IPv6 的原因。虽然 IPv6 本身比 IPv4 更安全,但我所指的风险是另一种性质上的。如果你不积极使用 IPv6 及其功能,[启用 IPv6 后,你会很容易受到各种攻击][5],因而为黑客提供另一种可能的利用工具。
不想让自己暴露在各种威胁之下可能是另一个让你想在系统上禁用 IPv6 的原因。虽然 IPv6 本身比 IPv4 更安全,但我所指的风险是另一种性质上的。如果你不实际使用 IPv6 及其功能,那么[启用 IPv6 后,你会很容易受到各种攻击][5],因而为黑客提供另一种可能的利用工具。

同样,配置基本的网络规则是不够的;就像对 IPv4 一样,你需要密切关注 IPv6 的配置,这可能会是一件相当麻烦的事情(维护也是)。并且随着 IPv6 而来的将会是一套不同于 IPv4 的问题(鉴于这个协议的年龄,许多问题已经可以在网上找到了),这又会使你的系统多了一层复杂性。
同样,只配置基本的网络规则是不够的;你必须像对 IPv4 一样,对调整 IPv6 的配置给予同样的关注,这可能会是一件相当麻烦的事情(维护也是)。并且随着 IPv6 而来的将会是一套不同于 IPv4 的问题(鉴于这个协议的年龄,许多问题已经可以在网上找到了),这又会使你的系统多了一层复杂性。

据观察,在某些情况下,禁用 IPv6 有助于提高 Ubuntu 的 WiFi 速度。

### 在 Ubuntu 上禁用 IPv6 [高级用户]

在本节中,我会详述如何在 Ubuntu 上禁用 IPv6 协议,请打开终端(**默认键:** CTRL+ALT+T),让我们开始吧!
在本节中,我会详述如何在 Ubuntu 上禁用 IPv6 协议,请打开终端(默认快捷键:`CTRL+ALT+T`),让我们开始吧!

**注意:**_接下来大部分输入终端的命令都需要 root 权限(**sudo**)。_
**注意:** 接下来大部分输入终端的命令都需要 root 权限(`sudo`)。

警告!
> 警告!
>
> 如果你是一个普通 Linux 桌面用户,并且偏好稳定的工作系统,请避开本教程,接下来的部分是为那些知道自己在做什么以及为什么要这么做的用户准备的。

如果你是普通 Linux 桌面用户,并且偏好稳定的工作系统,请避开本教程,接下来的部分是为那些知道自己在做什么以及为什么要这么做的用户准备的。

#### 1\. 使用 Sysctl 禁用 IPv6
#### 1、使用 sysctl 禁用 IPv6

首先,可以执行以下命令来**检查** IPv6 是否已经启用:

@@ -50,7 +52,7 @@ ip a

![IPv6 Address Ubuntu][7]

在教程 [在 Ubuntu 中重启网络][8] 中,你已经见过 sysctl 命令了,在这里我们也同样会用到它。要**禁用 IPv6**,只需要输入三条命令:
在教程《[在 Ubuntu 中重启网络][8]》(LCTT 译注:其实这篇文章并没有提到使用 sysctl 的方法……)中,你已经见过 `sysctl` 命令了,在这里我们也同样会用到它。要**禁用 IPv6**,只需要输入三条命令:

```
sudo sysctl -w net.ipv6.conf.all.disable_ipv6=1
@@ -58,8 +60,6 @@ sudo sysctl -w net.ipv6.conf.default.disable_ipv6=1
sudo sysctl -w net.ipv6.conf.lo.disable_ipv6=1
```

(译注:这篇文章 LCTT 有翻译,在 [这里:《Linux 初学者:如何在 Ubuntu 中重启网络》][patch-1];不过尴尬的是,并没有提到使用 sysctl 的方法……)

检查命令是否生效:

```
@@ -70,11 +70,11 @@ ip a

![IPv6 Disabled Ubuntu][9]

然而这种方法只能**临时禁用 IPv6**,因此在下次系统启动的时候, IPv6 仍然会被启用。
然而这种方法只能**临时禁用 IPv6**,因此在下次系统启动的时候,IPv6 仍然会被启用。

(译注:这里的临时禁用是指这次所做的改变直到此次关机之前都有效,因为相关的参数是存储在内存中的,可以改变值,但是在内存断电后就会丢失;这种意义上来讲,下文所述的两种方法都是临时的,只不过改变参数值的时机是在系统启动的早期,并且每次系统启动时都有应用而已。那么如何完成这种意义上的永久改变?答案是在编译内核的时候禁用相关功能,然后要后悔就只能重新编译内核了(悲)。)
(LCTT 译注:这里的临时禁用是指这次所做的改变直到此次关机之前都有效,因为相关的参数是存储在内存中的,可以改变值,但是在内存断电后就会丢失;这种意义上来讲,下文所述的两种方法都是临时的,只不过改变参数值的时机是在系统启动的早期,并且每次系统启动时都有应用而已。那么如何完成这种意义上的永久改变?答案是在编译内核的时候禁用相关功能,然后要后悔就只能重新编译内核了(悲)。)
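(补充一个快速确认当前参数值的方法,非原文内容:)

```
sysctl net.ipv6.conf.all.disable_ipv6
# 输出 "... = 1" 表示 IPv6 已被禁用
```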
一种让选项持续生效的方式是修改文件 **/etc/sysctl.conf**,在这里我用 vim 来编辑文件,不过你可以使用任何你想使用的编辑器,以及请确保你拥有**管理员权限**(用 **sudo**):
一种让选项持续生效的方式是修改文件 `/etc/sysctl.conf`,在这里我用 `vim` 来编辑文件,不过你可以使用任何你想使用的编辑器,以及请确保你拥有**管理员权限**(用 `sudo`):

![Sysctl Configuration][10]

@@ -92,7 +92,7 @@ net.ipv6.conf.lo.disable_ipv6=1
sudo sysctl -p
```

如果在重启之后 IPv6 仍然被启用了,而你还想继续这种方法的话,那么你必须(使用 root 权限)创建文件 **/etc/rc.local** 并加入以下内容:
如果在重启之后 IPv6 仍然被启用了,而你还想继续这种方法的话,那么你必须(使用 root 权限)创建文件 `/etc/rc.local` 并加入以下内容:

```
#!/bin/bash
@@ -112,20 +112,20 @@ sudo chmod 755 /etc/rc.local

这会让系统(在启动的时候)从之前编辑过的 sysctl 配置文件中读取内核参数。

#### 2\. 使用 GRUB 禁用 IPv6
#### 2、使用 GRUB 禁用 IPv6

另外一种方法是配置 **GRUB**,它会在系统启动时向内核传递参数。这样做需要编辑文件 **/etc/default/grub**(请确保拥有管理员权限)。
另外一种方法是配置 **GRUB**,它会在系统启动时向内核传递参数。这样做需要编辑文件 `/etc/default/grub`(请确保拥有管理员权限)。

![GRUB Configuration][13]

现在需要修改文件中分别以 **GRUB_CMDLINE_LINUX_DEFAULT** 和 **GRUB_CMDLINE_LINUX** 开头的两行来在启动时禁用 IPv6:
现在需要修改文件中分别以 `GRUB_CMDLINE_LINUX_DEFAULT` 和 `GRUB_CMDLINE_LINUX` 开头的两行来在启动时禁用 IPv6:

```
GRUB_CMDLINE_LINUX_DEFAULT="quiet splash ipv6.disable=1"
GRUB_CMDLINE_LINUX="ipv6.disable=1"
```

(译注:这里是指在上述两行内增加参数 ipv6.disable=1,不同的系统中这两行的默认值可能有所不同。)
(LCTT 译注:这里是指在上述两行内增加参数 `ipv6.disable=1`,不同的系统中这两行的默认值可能有所不同。)

保存文件,然后执行命令:

@@ -133,7 +133,7 @@ GRUB_CMDLINE_LINUX="ipv6.disable=1"
sudo update-grub
```

(译注:该命令用以更新 GRUB 的配置文件,在没有 update-grub 命令的系统中需要使用 `sudo grub-mkconfig -o /boot/grub/grub.cfg` )
(LCTT 译注:该命令用以更新 GRUB 的配置文件,在没有 `update-grub` 命令的系统中需要使用 `sudo grub-mkconfig -o /boot/grub/grub.cfg` )

设置会在重启后生效。

@@ -147,7 +147,7 @@ sudo sysctl -w net.ipv6.conf.default.disable_ipv6=0
sudo sysctl -w net.ipv6.conf.lo.disable_ipv6=0
```

否则想要持续启用的话,看看是否修改过 **/etc/sysctl.conf**,可以删除掉之前增加的部分,也可以将它们改为以下值(两种方法等效):
否则想要持续启用的话,看看是否修改过 `/etc/sysctl.conf`,可以删除掉之前增加的部分,也可以将它们改为以下值(两种方法等效):

```
net.ipv6.conf.all.disable_ipv6=0
@@ -161,19 +161,19 @@ net.ipv6.conf.lo.disable_ipv6=0
sudo sysctl -p
```

(译注:这里可选的意思可能是如果之前临时启用了 IPv6 就没必要再重新加载配置文件了)
(LCTT 译注:这里可选的意思可能是如果之前临时启用了 IPv6 就没必要再重新加载配置文件了)

这样应该可以再次看到 IPv6 地址了:

![IPv6 Reenabled in Ubuntu][14]

另外,你也可以删除之前创建的文件 **/etc/rc.local**(可选):
另外,你也可以删除之前创建的文件 `/etc/rc.local`(可选):

```
sudo rm /etc/rc.local
```

如果修改了文件 **/etc/default/grub** ,回去删掉你所增加的参数:
如果修改了文件 `/etc/default/grub`,回去删掉你所增加的参数:

```
GRUB_CMDLINE_LINUX_DEFAULT="quiet splash"
@@ -186,7 +186,7 @@ GRUB_CMDLINE_LINUX=""
sudo update-grub
```

**尾声**
### 尾声

在这篇文章中,我介绍了在 Linux 上**禁用 IPv6** 的方法,并简述了什么是 IPv6 以及可能想要禁用掉它的原因。

@@ -199,7 +199,7 @@ via: https://itsfoss.com/disable-ipv6-ubuntu-linux/

作者:[Sergiu][a]
选题:[lujun9972][b]
译者:[rakino](https://github.com/rakino)
校对:[校对者ID](https://github.com/校对者ID)
校对:[wxy](https://github.com/wxy)

本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出

@@ -212,11 +212,10 @@ via: https://itsfoss.com/disable-ipv6-ubuntu-linux/
[5]: https://www.internetsociety.org/blog/2015/01/ipv6-security-myth-1-im-not-running-ipv6-so-i-dont-have-to-worry/
[6]: https://itsfoss.com/remove-drive-icons-from-unity-launcher-in-ubuntu/
[7]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2019/05/ipv6_address_ubuntu.png?fit=800%2C517&ssl=1
[8]: https://itsfoss.com/restart-network-ubuntu/
[8]: https://linux.cn/article-10804-1.html
[9]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2019/05/ipv6_disabled_ubuntu.png?fit=800%2C442&ssl=1
[10]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2019/05/sysctl_configuration.jpg?fit=800%2C554&ssl=1
[11]: https://linuxhandbook.com/chmod-command/
[12]: https://itsfoss.com/find-which-kernel-version-is-running-in-ubuntu/
[13]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2019/05/grub_configuration-1.jpg?fit=800%2C565&ssl=1
[14]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2019/05/ipv6_address_ubuntu-1.png?fit=800%2C517&ssl=1
[patch-1]: https://github.com/LCTT/TranslateProject/blob/master/published/201905/20190307%20How%20to%20Restart%20a%20Network%20in%20Ubuntu%20-Beginner-s%20Tip.md
published/20190822 Things You Didn-t Know About GNU Readline.md (new file, 132 lines)
@@ -0,0 +1,132 @@
[#]: collector: (lujun9972)
[#]: translator: (rakino)
[#]: reviewer: (wxy)
[#]: publisher: (wxy)
[#]: url: (https://linux.cn/article-12706-1.html)
[#]: subject: (Things You Didn't Know About GNU Readline)
[#]: via: (https://twobithistory.org/2019/08/22/readline.html)
[#]: author: (Two-Bit History https://twobithistory.org)

你所不知的 GNU Readline
======



有时我会觉得自己的计算机是一栋非常大的房子,我每天都会访问这栋房子,也对一楼的大部分房间都了如指掌,但仍然还是有我没有去过的卧室,有我没有打开过的衣柜,有我没有探索过的犄角旮旯。我感到有必要更多地了解我的计算机了,就像任何人都会觉得有必要看看自己家里从未去过的房间一样。

GNU Readline 是个不起眼的小软件库,我依赖了它多年却没有意识到它的存在,也许有成千上万的人每天都在不经意间使用它。如果你用 Bash shell 的话,每当你自动补全一个文件名,或者在输入的一行文本中移动光标,以及搜索之前命令的历史记录时,你都在使用 GNU Readline;当你在 Postgres(`psql`)或是 Ruby REPL(`irb`)的命令行界面中进行同样的操作时,你依然在使用 GNU Readline。很多软件都依赖 GNU Readline 库来实现用户所期望的功能,不过这些功能是如此的辅助与不显眼,以至于在我看来很少有人会停下来去想它是从哪里来的。

GNU Readline 最初是自由软件基金会在 20 世纪 80 年代创建的。如今,它是每个人的基础计算设施中重要而又几乎隐形的组成部分,却只由一位志愿者维护。

### 充满特色

GNU Readline 库的存在,主要是为了增强各种命令行界面,它提供了一组通用的按键,使你可以在一个单行输入中移动和编辑。例如,在 Bash 提示符中按下 `Ctrl-A`,你的光标会跳到行首,而按下 `Ctrl-E` 则会跳到行末;另一个有用的命令是 `Ctrl-U`,它会删除该行中光标之前的所有内容。

有很长一段时间,我通过反复敲击方向键来在命令行上移动,如今看来这十分尴尬,也不知道为什么,当时的我从来没有想过可以有一种更快的方法。当然了,没有哪一个熟悉 Vim 或 Emacs 这种文本编辑器的程序员愿意长时间地击打方向键,所以像 Readline 这样的东西必然会被创造出来。在 Readline 上可以做的绝非仅仅跳来跳去,你可以像使用文本编辑器那样编辑单行文本——这里有删除单词、单词换位、大写单词、复制和粘贴字符等命令。Readline 的大部分按键/快捷键都是基于 Emacs 的,它基本上就是一个单行文本版的 Emacs 了,甚至还有录制和重放宏的功能。

我从来没有用过 Emacs,所以很难记住所有不同的 Readline 命令。不过 Readline 有着很巧妙的一点,那就是能够切换到基于 Vim 的模式,在 Bash 中可以使用内置的 `set` 命令来这样做。下面会让 Readline 在当前的 shell 中使用 Vim 风格的命令:

```
$ set -o vi
```

该选项启用后,就可以使用 `dw` 等命令来删除单词了,此时相当于 Emacs 模式下的 `Ctrl-U` 的命令是 `d0`。

我第一次知道有这个功能的时候很兴奋地想尝试一下,但它对我来说并不是那么好用。我很高兴知道有这种对 Vim 用户的让步,在使用这个功能上你可能会比我更幸运,尤其是如果你还没有习惯 Readline 的默认按键的话;我的问题在于,我听说有基于 Vim 的界面时已经学会了几种默认按键,因此即使启用了 Vim 的选项,也一直在错误地用着默认的按键;另外因为没有某种指示器,所以 Vim 的模态设计在这里会很尴尬——你很容易就忘记了自己处于哪个模式,就因为这样,我卡在了一种虽然使用 Vim 作为文本编辑器,但却在 Readline 上用着 Emacs 风格的命令的情况里,我猜其他很多人也是这样的。
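(补充一点,非原文内容:较新版本的 Readline 提供了一个变量,可以在提示符前显示当前所处的编辑模式,或许能缓解上面说的“忘记模式”的问题;如果你的版本支持,可以在 `~/.inputrc` 中加入:)

```
set show-mode-in-prompt on
```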
如果你觉得 Vim 和 Emacs 的键盘命令系统诡异而神秘(这并不是没有道理的),你可以按照喜欢的方式自定义 Readline 的键绑定。Readline 在启动时会读取文件 `~/.inputrc`,它可以用来配置各种选项与键绑定,我做的一件事是重新配置了 `Ctrl-K`:通常情况下该命令会从光标处删除到行末,但我很少这样做,所以我在 `~/.inputrc` 中添加了以下内容,把它绑定为直接删除整行:

```
Control-k: kill-whole-line
```

每个 Readline 命令(文档中称它们为 “函数” )都有一个名称,你可以用这种方式将其与一个键序列联系起来。如果你在 Vim 中编辑 `~/.inputrc`,就会发现 Vim 知道这种文件类型,还会帮你高亮显示有效的函数名,而不高亮无效的函数名。

`~/.inputrc` 可以做的另一件事是通过将键序列映射到输入字符串上来创建预制宏。[Readline 手册][1]给出了一个我认为特别有用的例子:我经常想把一个程序的输出保存到文件中,这意味着我得经常在 Bash 命令中追加类似 `> output.txt` 这样的东西,为了节省时间,可以把它做成一个 Readline 宏:

```
Control-o: "> output.txt"
```

这样每当你按下 `Ctrl-O` 时,你都会看到 `> output.txt` 被添加到了命令行光标的后面,这样很不错!

不过你可以用宏做的可不仅仅是为文本串创建快捷方式;在 `~/.inputrc` 中使用以下条目意味着每次按下 `Ctrl-J` 时,行内已有的文本都会被 `$(` 和 `)` 包裹住。该宏先用 `Ctrl-A` 移动到行首,添加 `$(` ,然后再用 `Ctrl-E` 移动到行尾,添加 `)`:

```
Control-j: "\C-a$(\C-e)"
```

如果你经常需要像下面这样把一个命令的输出用于另一个命令的话,这个宏可能会对你有帮助:

```
$ cd $(brew --prefix)
```

`~/.inputrc` 文件也允许你为 Readline 手册中所谓的 “变量” 设置不同的值,这些变量会启用或禁用某些 Readline 行为,你也可以使用这些变量来改变 Readline 中像是自动补全或者历史搜索这些行为的工作方式。我建议开启的一个变量是 `revert-all-at-newline`,它是默认关闭的,当这个变量关闭时,如果你使用反向搜索功能从命令历史记录中提取一行并编辑,但随后又决定搜索另一行,那么你所做的编辑会被保存在历史记录中。我觉得这样会很混乱,因为这会导致你的 Bash 命令历史中出现从未运行过的行。所以在你的 `~/.inputrc` 中加入这个:

```
set revert-all-at-newline on
```

在你用 `~/.inputrc` 设置了选项或键绑定以后,它们会适用于任何使用 Readline 库的地方,显然 Bash 也包括在内,不过你也会在其它像是 `irb` 和 `psql` 这样的程序中受益。如果你经常使用关系型数据库的命令行界面,一个用于插入 `SELECT * FROM` 的 Readline 宏可能会很有用。
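(比如像下面这样的一个假设示例——绑定的按键可按个人喜好调整,注意它会覆盖 `Ctrl-T` 默认的字符换位功能:)

```
Control-t: "SELECT * FROM "
```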
### Chet Ramey

GNU Readline 如今由凯斯西储大学的高级技术架构师 Chet Ramey 维护,Ramey 同时还负责维护 Bash shell;这两个项目都是由一位名叫 Brian Fox 的自由软件基金会员工在 1988 年开始编写的,但从 1994 年左右开始,Ramey 一直是它们唯一的维护者。

Ramey 通过电子邮件告诉我,Readline 远非一个原创的想法,它是为了实现 POSIX 规范所规定的功能而被创建的,而 POSIX 规范又是在 20 世纪 80 年代末被制定的。许多早期的 shell,包括 Korn shell 和至少一个版本的 Unix System V shell,都包含行编辑功能。1988 年版的 Korn shell(`ksh88`)提供了 Emacs 风格和 Vi/Vim 风格的编辑模式。据我从[手册页][2]中得知,Korn shell 会通过查看 `VISUAL` 和 `EDITOR` 环境变量来决定你使用的模式,这一点非常巧妙。POSIX 中指定 shell 功能的部分近似于 `ksh88` 的实现,所以 GNU Bash 也要实现一个类似的灵活的行编辑系统来保持兼容,因此就有了 Readline。

Ramey 第一次参与 Bash 开发时,Readline 还是 Bash 项目目录下的一个单一的源文件,它其实只是 Bash 的一部分;随着时间的推移,Readline 文件慢慢地成为了独立的项目,不过直到 1994 年(Readline 2.0 版本发布),Readline 才完全成为了一个独立的库。

Readline 与 Bash 密切相关,Ramey 也通常把 Readline 与 Bash 的发布配对,但正如我上面提到的,Readline 是一个可以被任何有命令行界面的软件使用的库,而且它真的很容易使用。下面是一个例子,虽然简单,但这就是在 C 程序中使用 Readline 的方法。向 `readline()` 函数传递的字符串参数就是你希望 Readline 向用户显示的提示符:

```
#include <stdio.h>
#include <stdlib.h>
#include "readline/readline.h"

int main(int argc, char** argv)
{
    char* line = readline("my-rl-example> ");
    printf("You entered: \"%s\"\n", line);

    free(line);

    return 0;
}
```

你的程序会把控制权交给 Readline,它会负责从用户那里获得一行输入(以这样的方式让用户可以做所有花哨的行编辑工作),一旦用户真正提交了这一行,Readline 就会把它返回给你。在我的库搜索路径中有 Readline 库,所以我可以通过调用以下内容来链接 Readline 库,从而编译上面的内容:

```
$ gcc main.c -lreadline
```

当然,Readline 的 API 比起那个单一的函数要丰富得多,任何使用它的人都可以对库的行为进行各种调整,库的用户(开发者)甚至可以添加新的函数,来让最终用户可以通过 `~/.inputrc` 来配置它们,这意味着 Readline 非常容易扩展。但是据我所知,即使是 Bash ,虽然事先有很多配置,最终也会像上面的例子一样调用简单的 `readline()` 函数来获取输入。(参见 GNU Bash 源代码中的[这一行][3],Bash 似乎在这里将获取输入的责任交给了 Readline)。

Ramey 现在已经在 Bash 和 Readline 上工作了二十多年,但他的工作却从来没有得到过报酬 —— 他一直都是一名志愿者。Bash 和 Readline 仍然在积极开发中,尽管 Ramey 说 Readline 的变化比 Bash 慢得多。我问 Ramey 作为这么多人使用的软件唯一的维护者是什么感觉,他说可能有几百万人在不知不觉中使用 Bash(因为每个苹果设备都运行 Bash),这让他担心一个破坏性的变化会造成多大的混乱,不过他已经慢慢习惯了所有这些人的想法。他还说他会继续在 Bash 和 Readline 上工作,因为在这一点上他已经深深地投入了,而且他也只是单纯地喜欢把有用的软件提供给世界。

_你可以在 [Chet Ramey 的网站][4]上找到更多关于他的信息。_

_喜欢这篇文章吗?我会每四周写出一篇像这样的文章。关注推特帐号 [@TwoBitHistory][5] 或者[订阅 RSS][6] 来获取更新吧!_

--------------------------------------------------------------------------------

via: https://twobithistory.org/2019/08/22/readline.html

作者:[Two-Bit History][a]
选题:[lujun9972][b]
译者:[rakino](https://github.com/rakino)
校对:[wxy](https://github.com/wxy)

本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出

[a]: https://twobithistory.org
[b]: https://github.com/lujun9972
[1]: https://tiswww.case.edu/php/chet/readline/readline.html
[2]: https://web.archive.org/web/20151105130220/http://www2.research.att.com/sw/download/man/man1/ksh88.html
[3]: https://github.com/bminor/bash/blob/9f597fd10993313262cab400bf3c46ffb3f6fd1e/parse.y#L1487
[4]: https://tiswww.case.edu/php/chet/
[5]: https://twitter.com/TwoBitHistory
[6]: https://twobithistory.org/feed.xml
[7]: https://twitter.com/TwoBitHistory/status/1112492084383092738?ref_src=twsrc%5Etfw
380
published/20200512 Scan your Linux security with Lynis.md
Normal file
380
published/20200512 Scan your Linux security with Lynis.md
Normal file
@ -0,0 +1,380 @@
|
||||
[#]: collector: (lujun9972)
|
||||
[#]: translator: (wxy)
|
||||
[#]: reviewer: (wxy)
|
||||
[#]: publisher: (wxy)
|
||||
[#]: url: (https://linux.cn/article-12696-1.html)
|
||||
[#]: subject: (Scan your Linux security with Lynis)
|
||||
[#]: via: (https://opensource.com/article/20/5/linux-security-lynis)
|
||||
[#]: author: (Gaurav Kamathe https://opensource.com/users/gkamathe)
|
||||
|
||||
使用 Lynis 扫描 Linux 安全性
|
||||
======
|
||||
|
||||
> 使用这个全面的开源安全审计工具检查你的 Linux 机器的安全性。
|
||||
|
||||

|
||||
|
||||
你有没有想过你的 Linux 机器到底安全不安全?Linux 发行版众多,每个发行版都有自己的默认设置,你在上面运行着几十个版本各异的软件包,还有众多的服务在后台运行,而我们几乎不知道或不关心这些。
|
||||
|
||||
要想确定安全态势(指你的 Linux 机器上运行的软件、网络和服务的整体安全状态),你可以运行几个命令,得到一些零碎的相关信息,但你需要解析的数据量是巨大的。
|
||||
|
||||
如果能运行一个工具,生成一份关于机器安全状况的报告,那就好得多了。而幸运的是,有一个这样的软件:[Lynis][2]。它是一个非常流行的开源安全审计工具,可以帮助强化基于 Linux 和 Unix 的系统。根据该项目的介绍:
|
||||
|
||||
> “它运行在系统本身,可以进行深入的安全扫描。主要目标是测试安全防御措施,并提供进一步强化系统的提示。它还将扫描一般系统信息、易受攻击的软件包和可能的配置问题。Lynis 常被系统管理员和审计人员用来评估其系统的安全防御。”
|
||||
|
||||
### 安装 Lynis
|
||||
|
||||
你的 Linux 软件仓库中可能有 Lynis。如果有的话,你可以用以下方法安装它:
|
||||
|
||||
```
|
||||
dnf install lynis
|
||||
```
|
||||
|
||||
或
|
||||
|
||||
```
|
||||
apt install lynis
|
||||
```
|
||||
|
||||
然而,如果你的仓库中的版本不是最新的,你最好从 GitHub 上安装它。(我使用的是 Red Hat Linux 系统,但你可以在任何 Linux 发行版上运行它)。就像所有的工具一样,先在虚拟机上试一试是有意义的。要从 GitHub 上安装它:
|
||||
|
||||
```
|
||||
$ cat /etc/redhat-release
|
||||
Red Hat Enterprise Linux Server release 7.8 (Maipo)
|
||||
$
|
||||
$ uname -r
|
||||
3.10.0-1127.el7.x86_64
|
||||
$
|
||||
$ git clone https://github.com/CISOfy/lynis.git
|
||||
Cloning into 'lynis'...
|
||||
remote: Enumerating objects: 30, done.
|
||||
remote: Counting objects: 100% (30/30), done.
|
||||
remote: Compressing objects: 100% (30/30), done.
|
||||
remote: Total 12566 (delta 15), reused 8 (delta 0), pack-reused 12536
|
||||
Receiving objects: 100% (12566/12566), 6.36 MiB | 911.00 KiB/s, done.
|
||||
Resolving deltas: 100% (9264/9264), done.
|
||||
$
|
||||
```
|
||||
|
||||
一旦你克隆了这个版本库,那么进入该目录,看看里面有什么可用的。主要的工具在一个叫 `lynis` 的文件里。它实际上是一个 shell 脚本,所以你可以打开它看看它在做什么。事实上,Lynis 主要是用 shell 脚本来实现的:
|
||||
|
||||
```
|
||||
$ cd lynis/
|
||||
$ ls
|
||||
CHANGELOG.md CONTRIBUTING.md db developer.prf FAQ include LICENSE lynis.8 README SECURITY.md
|
||||
CODE_OF_CONDUCT.md CONTRIBUTORS.md default.prf extras HAPPY_USERS.md INSTALL lynis plugins README.md
|
||||
$
|
||||
$ file lynis
|
||||
lynis: POSIX shell script, ASCII text executable, with very long lines
|
||||
$
|
||||
```
|
||||
|
||||
### 运行 Lynis
|
||||
|
||||
通过给 Lynis 一个 `-h` 选项来查看帮助部分,以便有个大概了解:
|
||||
|
||||
```
|
||||
$ ./lynis -h
|
||||
```
|
||||
|
||||
你会看到一个简短的信息屏幕,然后是 Lynis 支持的所有子命令。
|
||||
|
||||
接下来,尝试一些测试命令以大致熟悉一下。要查看你正在使用的 Lynis 版本,请运行:
|
||||
|
||||
```
|
||||
$ ./lynis show version
|
||||
3.0.0
|
||||
$
|
||||
```
|
||||
|
||||
要查看 Lynis 中所有可用的命令:
|
||||
|
||||
```
|
||||
$ ./lynis show commands
|
||||
|
||||
Commands:
|
||||
lynis audit
|
||||
lynis configure
|
||||
lynis generate
|
||||
lynis show
|
||||
lynis update
|
||||
lynis upload-only
|
||||
|
||||
$
|
||||
```
|
||||
|
||||
### 审计 Linux 系统
|
||||
|
||||
要审计你的系统的安全态势,运行以下命令:
|
||||
|
||||
```
|
||||
$ ./lynis audit system
|
||||
```
|
||||
|
||||
这个命令运行得很快,并会返回一份详细的报告,输出结果可能一开始看起来很吓人,但我将在下面引导你来阅读它。这个命令的输出也会被保存到一个日志文件中,所以你可以随时回过头来检查任何可能感兴趣的东西。
|
||||
|
||||
Lynis 将日志保存在这里:
|
||||
|
||||
```
|
||||
Files:
|
||||
- Test and debug information : /var/log/lynis.log
|
||||
- Report data : /var/log/lynis-report.dat
|
||||
```
|
||||
|
||||
你可以验证是否创建了日志文件。它确实创建了:
|
||||
|
||||
```
|
||||
$ ls -l /var/log/lynis.log
|
||||
-rw-r-----. 1 root root 341489 Apr 30 05:52 /var/log/lynis.log
|
||||
$
|
||||
$ ls -l /var/log/lynis-report.dat
|
||||
-rw-r-----. 1 root root 638 Apr 30 05:55 /var/log/lynis-report.dat
|
||||
$
|
||||
```
|
||||
|
||||
### 探索报告

Lynis 提供了相当全面的报告,所以我将介绍一些重要的部分。作为初始化的一部分,Lynis 做的第一件事就是找出机器上运行的操作系统的完整信息,之后是检查安装了哪些系统工具和插件:

```
[+] Initializing program
------------------------------------
- Detecting OS... [ DONE ]
- Checking profiles... [ DONE ]

---------------------------------------------------
Program version:           3.0.0
Operating system:          Linux
Operating system name:     Red Hat Enterprise Linux Server 7.8 (Maipo)
Operating system version:  7.8
Kernel version:            3.10.0
Hardware platform:         x86_64
Hostname:                  example
---------------------------------------------------
<<截断>>

[+] System Tools
------------------------------------
- Scanning available tools...
- Checking system binaries...

[+] Plugins (phase 1)
------------------------------------
Note: plugins have more extensive tests and may take several minutes to complete

- Plugin: pam
[..]
- Plugin: systemd
[................]
```

接下来,该报告被分为不同的部分,每个部分都以 `[+]` 符号开头。下面可以看到部分章节。(哇,要审核的地方有这么多,Lynis 是最合适的工具!)

```
[+] Boot and services
[+] Kernel
[+] Memory and Processes
[+] Users, Groups and Authentication
[+] Shells
[+] File systems
[+] USB Devices
[+] Storage
[+] NFS
[+] Name services
[+] Ports and packages
[+] Networking
[+] Printers and Spools
[+] Software: e-mail and messaging
[+] Software: firewalls
[+] Software: webserver
[+] SSH Support
[+] SNMP Support
[+] Databases
[+] LDAP Services
[+] PHP
[+] Squid Support
[+] Logging and files
[+] Insecure services
[+] Banners and identification
[+] Scheduled tasks
[+] Accounting
[+] Time and Synchronization
[+] Cryptography
[+] Virtualization
[+] Containers
[+] Security frameworks
[+] Software: file integrity
[+] Software: System tooling
[+] Software: Malware
[+] File Permissions
[+] Home directories
[+] Kernel Hardening
[+] Hardening
[+] Custom tests
```

Lynis 使用颜色编码,使报告更容易解读:

* 绿色:一切正常
* 黄色:跳过、未找到,可能有个建议
* 红色:你可能需要仔细看看这个

在我的案例中,大部分的红色标记都是在 “Kernel Hardening” 部分找到的。内核有各种可调整的设置,它们定义了内核的功能,其中一些设置与安全相关。发行版可能出于各种原因没有默认启用这些设置,但是你应该逐项检查,看看是否需要根据你的安全态势来修改它的值(清单之后给出了一个核对并修改这类参数的示例):

```
[+] Kernel Hardening
------------------------------------
- Comparing sysctl key pairs with scan profile
- fs.protected_hardlinks (exp: 1) [ OK ]
- fs.protected_symlinks (exp: 1) [ OK ]
- fs.suid_dumpable (exp: 0) [ OK ]
- kernel.core_uses_pid (exp: 1) [ OK ]
- kernel.ctrl-alt-del (exp: 0) [ OK ]
- kernel.dmesg_restrict (exp: 1) [ DIFFERENT ]
- kernel.kptr_restrict (exp: 2) [ DIFFERENT ]
- kernel.randomize_va_space (exp: 2) [ OK ]
- kernel.sysrq (exp: 0) [ DIFFERENT ]
- kernel.yama.ptrace_scope (exp: 1 2 3) [ DIFFERENT ]
- net.ipv4.conf.all.accept_redirects (exp: 0) [ DIFFERENT ]
- net.ipv4.conf.all.accept_source_route (exp: 0) [ OK ]
- net.ipv4.conf.all.bootp_relay (exp: 0) [ OK ]
- net.ipv4.conf.all.forwarding (exp: 0) [ OK ]
- net.ipv4.conf.all.log_martians (exp: 1) [ DIFFERENT ]
- net.ipv4.conf.all.mc_forwarding (exp: 0) [ OK ]
- net.ipv4.conf.all.proxy_arp (exp: 0) [ OK ]
- net.ipv4.conf.all.rp_filter (exp: 1) [ OK ]
- net.ipv4.conf.all.send_redirects (exp: 0) [ DIFFERENT ]
- net.ipv4.conf.default.accept_redirects (exp: 0) [ DIFFERENT ]
- net.ipv4.conf.default.accept_source_route (exp: 0) [ OK ]
- net.ipv4.conf.default.log_martians (exp: 1) [ DIFFERENT ]
- net.ipv4.icmp_echo_ignore_broadcasts (exp: 1) [ OK ]
- net.ipv4.icmp_ignore_bogus_error_responses (exp: 1) [ OK ]
- net.ipv4.tcp_syncookies (exp: 1) [ OK ]
- net.ipv4.tcp_timestamps (exp: 0 1) [ OK ]
- net.ipv6.conf.all.accept_redirects (exp: 0) [ DIFFERENT ]
- net.ipv6.conf.all.accept_source_route (exp: 0) [ OK ]
- net.ipv6.conf.default.accept_redirects (exp: 0) [ DIFFERENT ]
- net.ipv6.conf.default.accept_source_route (exp: 0) [ OK ]
```
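
下面是一个核对并持久化这类内核参数的最小示意,以上面标记为 DIFFERENT 的 `kernel.dmesg_restrict` 为例(`99-hardening.conf` 这个文件名只是示例;是否改为 Lynis 期望的取值,请先结合自己的环境评估):

```
# 查看当前值(Lynis 期望该值为 1)
sysctl kernel.dmesg_restrict

# 立即生效(重启后失效)
sudo sysctl -w kernel.dmesg_restrict=1

# 写入配置文件,使其在重启后仍然生效(文件名仅为示例)
echo 'kernel.dmesg_restrict = 1' | sudo tee /etc/sysctl.d/99-hardening.conf

# 重新加载所有 sysctl 配置
sudo sysctl --system
```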

看看 SSH 这个例子,因为它是一个需要保证安全的关键领域。这里没有什么红色的东西,但是 Lynis 对我的环境给出了很多强化 SSH 服务的建议(清单之后附有一个对应的加固示意):

```
[+] SSH Support
------------------------------------
- Checking running SSH daemon [ FOUND ]
- Searching SSH configuration [ FOUND ]
- OpenSSH option: AllowTcpForwarding [ SUGGESTION ]
- OpenSSH option: ClientAliveCountMax [ SUGGESTION ]
- OpenSSH option: ClientAliveInterval [ OK ]
- OpenSSH option: Compression [ SUGGESTION ]
- OpenSSH option: FingerprintHash [ OK ]
- OpenSSH option: GatewayPorts [ OK ]
- OpenSSH option: IgnoreRhosts [ OK ]
- OpenSSH option: LoginGraceTime [ OK ]
- OpenSSH option: LogLevel [ SUGGESTION ]
- OpenSSH option: MaxAuthTries [ SUGGESTION ]
- OpenSSH option: MaxSessions [ SUGGESTION ]
- OpenSSH option: PermitRootLogin [ SUGGESTION ]
- OpenSSH option: PermitUserEnvironment [ OK ]
- OpenSSH option: PermitTunnel [ OK ]
- OpenSSH option: Port [ SUGGESTION ]
- OpenSSH option: PrintLastLog [ OK ]
- OpenSSH option: StrictModes [ OK ]
- OpenSSH option: TCPKeepAlive [ SUGGESTION ]
- OpenSSH option: UseDNS [ SUGGESTION ]
- OpenSSH option: X11Forwarding [ SUGGESTION ]
- OpenSSH option: AllowAgentForwarding [ SUGGESTION ]
- OpenSSH option: UsePrivilegeSeparation [ OK ]
- OpenSSH option: AllowUsers [ NOT FOUND ]
- OpenSSH option: AllowGroups [ NOT FOUND ]
```
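
下面是一个按照这类建议加固 `sshd_config` 的简单示意(具体取值应结合自己的环境评估;这里的值参考 Lynis 的常见建议,例如后文会看到的 `MaxAuthTries (6 --> 3)`):

```
# 编辑 /etc/ssh/sshd_config,按建议调整选项,例如:
#   MaxAuthTries 3
#   AllowTcpForwarding no
#   X11Forwarding no

# 修改后先校验配置语法
sudo sshd -t

# 语法无误后重启服务使其生效
sudo systemctl restart sshd
```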

我的系统上没有运行虚拟机或容器,所以这些部分显示的结果是空的:

```
[+] Virtualization
------------------------------------

[+] Containers
------------------------------------
```

Lynis 会检查一些从安全角度看很重要的文件的文件权限(清单之后附有一个收紧权限的示意):

```
[+] File Permissions
------------------------------------
- Starting file permissions check
File: /boot/grub2/grub.cfg [ SUGGESTION ]
File: /etc/cron.deny [ OK ]
File: /etc/crontab [ SUGGESTION ]
File: /etc/group [ OK ]
File: /etc/group- [ OK ]
File: /etc/hosts.allow [ OK ]
File: /etc/hosts.deny [ OK ]
File: /etc/issue [ OK ]
File: /etc/issue.net [ OK ]
File: /etc/motd [ OK ]
File: /etc/passwd [ OK ]
File: /etc/passwd- [ OK ]
File: /etc/ssh/sshd_config [ OK ]
Directory: /root/.ssh [ SUGGESTION ]
Directory: /etc/cron.d [ SUGGESTION ]
Directory: /etc/cron.daily [ SUGGESTION ]
Directory: /etc/cron.hourly [ SUGGESTION ]
Directory: /etc/cron.weekly [ SUGGESTION ]
Directory: /etc/cron.monthly [ SUGGESTION ]
```
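
对于这些带有 SUGGESTION 标记的文件和目录,常见的处理方式是收紧权限。下面是一个最小示意(具体目标权限请以 Lynis 对应 TEST-ID 的建议为准):

```
# 让引导配置和 cron 相关文件只对 root 可读写
sudo chmod 600 /boot/grub2/grub.cfg
sudo chmod 600 /etc/crontab
sudo chmod 700 /root/.ssh /etc/cron.d /etc/cron.daily /etc/cron.hourly /etc/cron.weekly /etc/cron.monthly

# 确认修改结果
ls -ld /boot/grub2/grub.cfg /etc/crontab /root/.ssh
```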

在报告的底部,Lynis 根据报告的发现提出了建议。每项建议后面都有一个 “TEST-ID”(请把它记下来,下一部分会用到)。

```
Suggestions (47):
----------------------------
* If not required, consider explicit disabling of core dump in /etc/security/limits.conf file [KRNL-5820]
https://cisofy.com/lynis/controls/KRNL-5820/

* Check PAM configuration, add rounds if applicable and expire passwords to encrypt with new values [AUTH-9229]
https://cisofy.com/lynis/controls/AUTH-9229/
```

Lynis 提供了一个选项来查找关于每个建议的更多信息,你可以使用 `show details` 命令和 TEST-ID 号来访问:

```
./lynis show details TEST-ID
```

这将显示该测试的其他信息。例如,我检查了 SSH-7408 的详细信息:

```
$ ./lynis show details SSH-7408
2020-04-30 05:52:23 Performing test ID SSH-7408 (Check SSH specific defined options)
2020-04-30 05:52:23 Test: Checking specific defined options in /tmp/lynis.k8JwazmKc6
2020-04-30 05:52:23 Result: added additional options for OpenSSH < 7.5
2020-04-30 05:52:23 Test: Checking AllowTcpForwarding in /tmp/lynis.k8JwazmKc6
2020-04-30 05:52:23 Result: Option AllowTcpForwarding found
2020-04-30 05:52:23 Result: Option AllowTcpForwarding value is YES
2020-04-30 05:52:23 Result: OpenSSH option AllowTcpForwarding is in a weak configuration state and should be fixed
2020-04-30 05:52:23 Suggestion: Consider hardening SSH configuration [test:SSH-7408] [details:AllowTcpForwarding (set YES to NO)] [solution:-]
```

### 试试吧

如果你想更多地了解你的 Linux 机器的安全性,请试试 Lynis。如果你想了解 Lynis 是如何工作的,可以研究一下它的 shell 脚本,看看它是如何收集这些信息的。

--------------------------------------------------------------------------------

via: https://opensource.com/article/20/5/linux-security-lynis

作者:[Gaurav Kamathe][a]
选题:[lujun9972][b]
译者:[wxy](https://github.com/wxy)
校对:[wxy](https://github.com/wxy)

本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出

[a]: https://opensource.com/users/gkamathe
[b]: https://github.com/lujun9972
[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/yearbook-haff-rx-linux-file-lead_0.png?itok=-i0NNfDC (Hand putting a Linux file folder into a drawer)
[2]: https://github.com/CISOfy/lynis
142
published/20200629 Using Bash traps in your scripts.md
Normal file
142
published/20200629 Using Bash traps in your scripts.md
Normal file
@ -0,0 +1,142 @@
[#]: collector: (lujun9972)
[#]: translator: (HankChow)
[#]: reviewer: (wxy)
[#]: publisher: (wxy)
[#]: url: (https://linux.cn/article-12715-1.html)
[#]: subject: (Using Bash traps in your scripts)
[#]: via: (https://opensource.com/article/20/6/bash-trap)
[#]: author: (Seth Kenlon https://opensource.com/users/seth)

在脚本中使用 Bash 信号捕获
======

> 无论你的脚本是否成功运行,<ruby>信号捕获<rt>trap</rt></ruby>都能让它平稳结束。

Shell 脚本的启动并不难被检测到,但 Shell 脚本的终止检测却并不容易,因为我们无法确定脚本会按照预期正常结束,还是由于意外的错误导致失败。当脚本执行失败时,将正在处理的内容记录下来是非常有用的做法,但有时候这样做起来并不方便。而 [Bash][2] 中 `trap` 命令的存在正是为了解决这个问题,它可以捕获到脚本的终止信号,并以某种预设的方式作出应对。

### 响应失败

如果出现了一个错误,可能导致发生一连串错误。下面示例脚本中,首先在 `/tmp` 中创建一个临时目录,这样可以在临时目录中执行解包、文件处理等操作,然后再以另一种压缩格式进行打包:

```
#!/usr/bin/env bash
CWD=`pwd`
TMP=${TMP:-/tmp/tmpdir}

## create tmp dir
mkdir "${TMP}"

## extract files to tmp
tar xf "${1}" --directory "${TMP}"

## move to tmpdir and run commands
pushd "${TMP}"
for IMG in *.jpg; do
    mogrify -verbose -flip -flop "${IMG}"
done
tar --create --file "${1%.*}".tar *.jpg

## move back to origin
popd

## bundle with bzip2
bzip2 --compress "${TMP}"/"${1%.*}".tar \
      --stdout > "${1%.*}".tbz

## clean up
/usr/bin/rm -r /tmp/tmpdir
```

一般情况下,这个脚本都可以按照预期执行。但如果归档文件中的文件是 PNG 文件而不是期望的 JPEG 文件,脚本就会在中途失败,这时候另一个问题就出现了:最后一步删除临时目录的操作没有被正常执行。如果你手动把临时目录删掉,倒是不会造成什么影响;但如果没有删掉,在下一次执行这个脚本的时候,它就必须处理一个现有的临时目录,里面充满了不可预知的剩余文件。

其中一个解决方案是在脚本开头增加一个预防性删除逻辑用来处理这种情况。但这种做法显得有些暴力,而我们更应该从结构上解决这个问题。使用 `trap` 是一个优雅的方法。

### 使用 trap 捕获信号

我们可以通过 `trap` 捕捉程序运行时的信号。如果你使用过 `kill` 或者 `killall` 命令,那你就已经使用过名为 `SIGTERM` 的信号了。除此以外,还可以执行 `trap -l` 或 `trap --list` 命令列出其它更多的信号:

```
$ trap --list
 1) SIGHUP       2) SIGINT       3) SIGQUIT      4) SIGILL       5) SIGTRAP
 6) SIGABRT      7) SIGBUS       8) SIGFPE       9) SIGKILL     10) SIGUSR1
11) SIGSEGV     12) SIGUSR2     13) SIGPIPE     14) SIGALRM     15) SIGTERM
16) SIGSTKFLT   17) SIGCHLD     18) SIGCONT     19) SIGSTOP     20) SIGTSTP
21) SIGTTIN     22) SIGTTOU     23) SIGURG      24) SIGXCPU     25) SIGXFSZ
26) SIGVTALRM   27) SIGPROF     28) SIGWINCH    29) SIGIO       30) SIGPWR
31) SIGSYS      34) SIGRTMIN    35) SIGRTMIN+1  36) SIGRTMIN+2  37) SIGRTMIN+3
38) SIGRTMIN+4  39) SIGRTMIN+5  40) SIGRTMIN+6  41) SIGRTMIN+7  42) SIGRTMIN+8
43) SIGRTMIN+9  44) SIGRTMIN+10 45) SIGRTMIN+11 46) SIGRTMIN+12 47) SIGRTMIN+13
48) SIGRTMIN+14 49) SIGRTMIN+15 50) SIGRTMAX-14 51) SIGRTMAX-13 52) SIGRTMAX-12
53) SIGRTMAX-11 54) SIGRTMAX-10 55) SIGRTMAX-9  56) SIGRTMAX-8  57) SIGRTMAX-7
58) SIGRTMAX-6  59) SIGRTMAX-5  60) SIGRTMAX-4  61) SIGRTMAX-3  62) SIGRTMAX-2
63) SIGRTMAX-1  64) SIGRTMAX
```

可以被 `trap` 识别的信号除了以上这些,还包括:

* `EXIT`:进程退出时发出的信号
* `ERR`:进程以非 0 状态码退出时发出的信号
* `DEBUG`:表示调试模式的布尔值

如果要在 Bash 中实现信号捕获,只需要在 `trap` 后加上需要执行的命令,再加上需要捕获的信号列表就可以了。

例如,下面的这行语句可以捕获到在进程运行时用户按下 `Ctrl + C` 组合键发出的 `SIGINT` 信号:

```
trap "{ echo 'Terminated with Ctrl+C'; }" SIGINT
```

因此,上文中脚本的缺陷可以通过使用 `trap` 捕获 `SIGINT`、`SIGTERM`、进程错误退出、进程正常退出等信号,并正确处理临时目录的方式来修复:

```
#!/usr/bin/env bash
CWD=`pwd`
TMP=${TMP:-/tmp/tmpdir}

trap \
 "{ /usr/bin/rm -r "${TMP}" ; exit 255; }" \
 SIGINT SIGTERM ERR EXIT

## create tmp dir
mkdir "${TMP}"
tar xf "${1}" --directory "${TMP}"

## move to tmp and run commands
pushd "${TMP}"
for IMG in *.jpg; do
    mogrify -verbose -flip -flop "${IMG}"
done
tar --create --file "${1%.*}".tar *.jpg

## move back to origin
popd

## zip tar
bzip2 --compress $TMP/"${1%.*}".tar \
      --stdout > "${1%.*}".tbz
```

对于更复杂的功能,还可以用 [Bash 函数][3]来简化 `trap` 语句。
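
例如,下面这个简单的示意把清理逻辑放进一个函数,再交给 `trap` 调用(`cleanup` 这个函数名只是示例):

```
#!/usr/bin/env bash
TMP=${TMP:-/tmp/tmpdir}

cleanup() {
    # 无论脚本因何结束,都删除临时目录
    rm -rf "${TMP}"
}

trap cleanup SIGINT SIGTERM ERR EXIT

mkdir "${TMP}"
# ...在这里处理 "${TMP}" 中的文件...
```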

### Bash 中的信号捕获

信号捕获可以让脚本在无论是否成功执行所有任务的情况下都能够正确完成清理工作,能让你的脚本更加可靠,这是一个很好的习惯。尽管放手把信号捕获加入到你的脚本里,看看它能起到什么作用吧。

--------------------------------------------------------------------------------

via: https://opensource.com/article/20/6/bash-trap

作者:[Seth Kenlon][a]
选题:[lujun9972][b]
译者:[HankChow](https://github.com/HankChow)
校对:[wxy](https://github.com/wxy)

本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出

[a]: https://opensource.com/users/seth
[b]: https://github.com/lujun9972
[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/programming-code-keyboard-laptop.png?itok=pGfEfu2S (Hands programming)
[2]: https://opensource.com/resources/what-bash
[3]: https://opensource.com/article/20/6/how-write-functions-bash

@ -0,0 +1,140 @@
[#]: collector: "lujun9972"
[#]: translator: "lxbwolf"
[#]: reviewer: "wxy"
[#]: publisher: "wxy"
[#]: url: "https://linux.cn/article-12700-1.html"
[#]: subject: "Automate testing for website errors with this Python tool"
[#]: via: "https://opensource.com/article/20/7/seodeploy"
[#]: author: "JR Oakes https://opensource.com/users/jroakes"

使用这个 Python 工具对网站 SEO 问题进行自动化测试
======

> SEODeploy 可以帮助我们在网站部署之前识别出 SEO 问题。

作为一个技术性搜索引擎优化开发者,我经常被请来协助处理网站迁移、新网站发布、分析实施,以及其他影响网站在线可见性和可测量性的工作,以控制风险。许多公司每月经常性收入的很大一部分来自用户通过搜索引擎找到他们的产品和服务。虽然搜索引擎已经能妥善地处理没有被良好格式化的代码,但在开发过程中还是会出问题,对搜索引擎如何索引和为用户显示页面产生不利影响。

我曾经也尝试通过评审各阶段会破坏 SEO(<ruby>搜索引擎优化<rt>search engine optimization</rt></ruby>)的问题来手动降低这种风险。我的团队最终审查到的结果,决定了该项目是否可以上线。但这个过程通常很低效,只能用于有限的页面,而且很有可能出现人为错误。

长期以来,这个行业一直在寻找可用且值得信赖的方式来自动化这一过程,同时还能让开发人员和搜索引擎优化人员在必须测试的内容上获得有意义的发言权。这是非常重要的,因为这些团队在开发冲刺中优先级通常会发生冲突,搜索引擎优化者需要推动变化,而开发人员需要控制退化和预期之外的情况。

### 常见的破坏 SEO 的问题

我合作过的很多网站有成千上万的页面,甚至上百万。实在令人费解,为什么一个开发过程中的改动能影响这么多页面。在 SEO 的世界中,Google 或其他搜索引擎展示你的页面时,一个非常微小和看起来无关紧要的修改也可能导致全网站范围的变化。在部署到生产环境之前,必须要处理这类错误。

下面是我去年见过的几个例子。

#### 偶发的 noindex

在部署到生产环境之后,我们用的一个专用的第三方 SEO 监控工具 [ContentKing][2] 马上发现了这个问题。这个错误很隐蔽,因为它在 HTML 中是不可见的,确切地说,它隐藏在服务器响应头里,但它能很快导致搜索不可见。

```
HTTP/1.1 200 OK
Date: Tue May 25 2010 21:12:42 GMT
[...]
X-Robots-Tag: noindex
[...]
```

#### canonical 小写

上线时错误地把整个网站的 [canonical 链接元素][3]全改成小写了。这个改动影响了接近 30000 个 URL。在修改之前,所有的 URL 大小写都正常(例如 `URL-Path` 这样)。这之所以是个问题是因为 `canonical` 链接元素是用来给 Google 提示一个网页真实的规范 URL 版本的。这个改动导致很多 URL 被从 Google 的索引中移除并用小写的版本(`/url-path`)重新建立索引。影响范围是流量损失了 10% 到 15%,也污染了未来几个星期的网页监控数据。

#### 源站退化

有个网站的 React 实现复杂而奇特,它有个神奇的问题,`origin.domain.com` URL 退化显示为 CDN 服务器的源站。它会在网站元数据(如 `canonical` 链接元素、URL 和 Open Graph 链接)中间歇性地显示原始的主机而不是 CDN 边缘主机。这个问题在原始的 HTML 和渲染后的 HTML 中都存在。这个问题影响搜索的可见性和在社交媒体上的分享质量。

### SEODeploy 介绍

SEO 通常使用差异测试工具来检测渲染后和原始的 HTML 的差异。差异测试是很理想的,因为它避免了肉眼测试的不确定性。你希望检查 Google 对你的页面的渲染过程的差异,而不是检查用户对你页面的渲染。你希望查看下原始的 HTML 是什么样的,而不是渲染后的 HTML,因为 Google 的渲染过程是有独立的两个阶段的。

这促使我和我的同事创造了 [SEODeploy][4] 这个“在部署流水线中用于自动化 SEO 测试的 Python 库。”我们的使命是:

> 开发一个工具,让开发者能提供若干 URL 路径,并允许这些 URL 在生产环境和预演环境的主机上进行差异测试,尤其是对 SEO 相关数据的非预期的退化。

SEODeploy 的机制很简单:提供一个每行内容都是 URL 路径的文本文件,SEODeploy 对那些路径运行一系列模块,对比<ruby>生产环境<rt>production</rt></ruby>和<ruby>预演环境<rt>staging</rt></ruby>的 URL,把检测到的所有的错误和改动信息报告出来。
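
为了说明这种“按路径对比生产与预演”的思路,下面是一个非常简化的 Bash 示意(`example.com` 和 `staging.example.com` 都是假设的主机名;这里只对比每个页面的 `<title>`,并不是 SEODeploy 本身的实现):

```
#!/usr/bin/env bash
# paths.txt:每行一个 URL 路径,例如 /products/widget
while read -r path; do
    prod=$(curl -s "https://example.com${path}" | grep -o '<title>[^<]*' | head -n 1)
    stage=$(curl -s "https://staging.example.com${path}" | grep -o '<title>[^<]*' | head -n 1)
    if [ "${prod}" != "${stage}" ]; then
        echo "DIFF ${path}: '${prod}' != '${stage}'"
    fi
done < paths.txt
```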

![SEODeploy overview][5]

这个工具及其模块可以用一个 YAML 文件来配置,可以根据预期的变化进行定制。

![SEODeploy output][7]

最初的发布版本包含下面的核心功能和概念:

1. **开源**:我们坚信分享代码可以被大家批评、改进、扩展、分享和复用。
2. **模块化**:Web 开发中有许多不同的堆栈和边缘案例。SEODeploy 工具在概念上很简单,因此采用模块化用来控制复杂性。我们提供了两个建好的模块和一个实例模块来简述基本结构。
3. **URL 抽样**:由于它不是对所有 URL 都是可行和有效的,因此我们引入了一种随机抽取 XML 网站地图 URL 或被 ContentKing 监控的 URL 作为样本的方法。
4. **灵活的差异检测**:Web 数据是凌乱的。无论被检测的数据是什么类型(如文本、数组或列表、JSON 对象或字典、整数、浮点数等等),差异检测功能都会尝试将这些数据转换为差异信息。
5. **自动化**:你可以在命令行来调用抽样和运行方法,将 SEODeploy 融合到已有的流水线也很简单。

### 模块

虽然核心功能很简单,但在设计上,SEODeploy 的强大功能和复杂度体现在模块上。模块用来处理更难的任务:获取、清理和组织预演服务器和生产服务器上的数据来作对比。

#### Headless 模块

[Headless 模块][8] 是为那些从库里获取数据时不想为第三方服务付费的开发者准备的。它可以运行任意版本的 Chrome,会从每组用来比较的 URL 中提取渲染的数据。

Headless 模块会提取下面的核心数据用来比较:

1. SEO 内容,如标题、H1-H6、链接等等。
2. 从 Chrome <ruby>计时器<rt>Timings</rt></ruby>和 CDP(<ruby>Chrome 开发工具协议<rt>Chrome DevTools Protocol</rt></ruby>)性能 API 中提取性能数据
3. 计算出的性能指标,包括 CLS(<ruby>累积布局偏移<rt>Cumulative Layout Shift</rt></ruby>),这是 Google 最近发布的一个很受欢迎的 [Web 核心数据][9]
4. 从上述 CDP 的覆盖率 API 获取的 CSS 和 JavaScript 的覆盖率数据

这个模块引入了处理预演环境、网络速度预设(为了让对比更规范化)等功能,也引入了一个处理在预演对比数据中替换预演主机的方法。开发者也能很容易地扩展这个模块,以收集他们想要在每个页面上进行比较的任何其他数据。

#### 其他模块

我们为开发者创建了一个[示例模块][10],开发者可以参照它来使用框架创建一个自定义的提取模块。另一个示例模块是与 ContentKing 结合的。ContentKing 模块需要有 ContentKing 订阅,而 Headless 可以在所有能运行 Chrome 的机器上运行。

### 需要解决的问题

我们有扩展和强化工具库的[计划][11],但正在寻求开发人员的[反馈][12],了解哪些是可行的,哪些是不符合他们的需求。我们正在解决的问题和条目有:

1. 对于某些对比元素(尤其是 schema),动态时间戳会产生误报。
2. 把测试数据保存到数据库,以便查看部署历史以及与上次的预演推送进行差异测试。
3. 通过云基础设施的渲染,强化提取的规模和速度。
4. 把测试覆盖率从现在的 46% 提高到 99% 以上。
5. 目前,我们依赖 [Poetry][13] 进行部署管理,但我们希望发布一个 PyPI 库,这样就可以用 `pip install` 轻松安装。
6. 我们还在关注更多使用时的问题和相关数据。

### 开始使用

这个项目在 [GitHub][4] 上,我们对大部分功能都提供了 [文档][14]。

我们希望你能克隆 SEODeploy 并试试它。我们的目标是通过这个由技术性搜索引擎优化开发者开发的、经过开发者和工程师们验证的工具来支持开源社区。我们都见过验证复杂的预演问题需要多长时间,也都见过大量 URL 的微小改动能有什么样的业务影响。我们认为这个库可以为开发团队节省时间、降低部署过程中的风险。

如果你有问题或者想提交代码,请查看项目的[关于][15]页面。

--------------------------------------------------------------------------------

via: https://opensource.com/article/20/7/seodeploy

作者:[JR Oakes][a]
选题:[lujun9972][b]
译者:[lxbwolf](https://github.com/lxbwolf)
校对:[wxy](https://github.com/wxy)

本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出

[a]: https://opensource.com/users/jroakes
[b]: https://github.com/lujun9972
[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/browser_screen_windows_files.png?itok=kLTeQUbY "Computer screen with files or windows open"
[2]: https://www.contentkingapp.com/
[3]: https://en.wikipedia.org/wiki/Canonical_link_element
[4]: https://github.com/locomotive-agency/SEODeploy
[5]: https://opensource.com/sites/default/files/uploads/seodeploy.png "SEODeploy overview"
[6]: https://creativecommons.org/licenses/by-sa/4.0/
[7]: https://opensource.com/sites/default/files/uploads/seodeploy_output.png "SEODeploy output"
[8]: https://locomotive-agency.github.io/SEODeploy/modules/headless/
[9]: https://web.dev/vitals/
[10]: https://locomotive-agency.github.io/SEODeploy/modules/creating/
[11]: https://locomotive-agency.github.io/SEODeploy/todo/
[12]: https://locomotive-agency.github.io/SEODeploy/about/#contact
[13]: https://python-poetry.org/
[14]: https://locomotive-agency.github.io/SEODeploy/
[15]: https://locomotive-agency.github.io/SEODeploy/about/
@ -1,8 +1,8 @@
[#]: collector: (lujun9972)
[#]: translator: (wxy)
[#]: reviewer: ( )
[#]: publisher: ( )
[#]: url: ( )
[#]: reviewer: (wxy)
[#]: publisher: (wxy)
[#]: url: (https://linux.cn/article-12697-1.html)
[#]: subject: (How to read Lynis reports to improve Linux security)
[#]: via: (https://opensource.com/article/20/8/linux-lynis-security)
[#]: author: (Alan Formy-Duval https://opensource.com/users/alanfdoss)
@ -12,13 +12,13 @@

> 使用 Lynis 的扫描和报告来发现和修复 Linux 安全问题。

![锁定][1]

当我读到 Gaurav Kamathe 的文章《[用 Lynis 扫描你的 Linux 安全性][2]>时,让我想起了我在美国劳工部担任系统管理员的日子。我的职责之一是保证我们的 Unix 服务器的安全。每个季度,都会有一个独立的核查员来审查我们服务器的安全状态。每次在核查员预定到达的那一天,我都会运行 Security Readiness Review(SRR),这是一个扫描工具,它使用一大套脚本来识别和报告任何安全线索。SRR 是开源的,因此我可以查看所有源码脚本及其功能。这使我能够查看代码,确定具体是什么问题,并迅速修复它发现的每个问题。
当我读到 Gaurav Kamathe 的文章《[使用 Lynis 扫描 Linux 安全性][2]》时,让我想起了我在美国劳工部担任系统管理员的日子。我那时的职责之一是保证我们的 Unix 服务器的安全。每个季度,都会有一个独立的核查员来审查我们服务器的安全状态。每次在核查员预定到达的那一天,我都会运行 Security Readiness Review(SRR),这是一个扫描工具,它使用一大套脚本来识别和报告任何安全线索。SRR 是开源的,因此我可以查看所有源码脚本及其功能。这使我能够查看其代码,确定具体是什么问题,并迅速修复它发现的每个问题。

### 什么是 Lynis?

[Lynis][3] 是一个开源的安全审计工具,它的工作原理和 SRR 很像,它会扫描 Linux 系统,并提供关于它发现的任何弱点的详细报告。同样和 SRR 一样,它也是由一大套脚本组成的,每个脚本都会检查一个特定的项目,例如,最小和最大密码时间要求。
[Lynis][3] 是一个开源的安全审计工具,它的工作原理和 SRR 很像,它会扫描 Linux 系统,并提供它发现的任何弱点的详细报告。同样和 SRR 一样,它也是由一大套脚本组成的,每个脚本都会检查一个特定的项目,例如,最小和最大密码时间要求。

运行 Lynis 后,你可以使用它的报告来定位每个项目的脚本,并了解 Lynis 是如何检查和报告每个问题的。你也可以使用相同的脚本代码来创建新的代码来自动解决。

@ -61,14 +61,14 @@
2020-06-16 20:54:33 ====
```

这些细节表明 Lynis 无法找到各种文件。这个情况非常清楚。我可以运行 `updatedb` 命令,重新检查这个测试。
这些细节表明 Lynis 无法找到各种文件。这个情况描述的非常清楚。我可以运行 `updatedb` 命令,然后重新检查这个测试。

```
# updatedb
# lynis --tests FILE-6410
```

然后,重新检查细节时,会显示它发现哪个文件满足了测试:
重新检查细节时,会显示它发现哪个文件满足了测试:

```
# lynis show details FILE-6410
@ -89,8 +89,8 @@ Lynis 的许多建议并不像这个建议那样直接。如果你不确定某

```
* Consider hardening SSH configuration [SSH-7408]
- Details : MaxAuthTries (6 --> 3)
<https://cisofy.com/lynis/controls/SSH-7408/>
- Details : MaxAuthTries (6 --> 3)
https://cisofy.com/lynis/controls/SSH-7408/
```

要解决这个问题,你需要知道 SSH 配置文件的位置。一个经验丰富的 Linux 管理员可能已经知道在哪里找到它们,但如果你不知道,有一个方法可以看到 Lynis 在哪里找到它们。
@ -112,7 +112,7 @@ Lynis 支持多种操作系统,因此你的安装位置可能有所不同。

#### 查找 SSH 问题

名为 `tests_ssh` 的文件中包含了 TEST-ID,在这里可以找到与 SSH 相关的扫描函数。看看这个文件,就可以看到 Lynis 扫描器调用的各种函数。第一部分在一个名为 `SSH_DAEMON_CONFIG_LOCS` 的变量中定义了一个目录列表。下面几节负责检查 SSH 守护进程的状态、定位它的配置文件,并识别它的版本。我在 SSH-7404 测试中找到了查找配置文件的代码,描述为 “确定 SSH 守护进程配置文件位置”。这段代码包含一个 `for` 循环,在列表中的项目中搜索一个名为 `sshd_config` 的文件。我可以用这个逻辑来做自己的搜索:
名为 `tests_ssh` 的文件中包含了 TEST-ID,在这里可以找到与 SSH 相关的扫描函数。看看这个文件,就可以看到 Lynis 扫描器调用的各种函数。第一部分在一个名为 `SSH_DAEMON_CONFIG_LOCS` 的变量中定义了一个目录列表。下面几节负责检查 SSH 守护进程的状态、定位它的配置文件,并识别它的版本。我在 SSH-7404 测试中找到了查找配置文件的代码,描述为 “确定 SSH 守护进程配置文件位置”。这段代码包含一个 `for` 循环,在列表中的项目中搜索一个名为 `sshd_config` 的文件。我可以用这个逻辑来自己进行搜索:

```
# find /etc /etc/ssh /usr/local/etc/ssh /opt/csw/etc/ssh -name sshd_config
@ -122,7 +122,7 @@ find: ‘/usr/local/etc/ssh’: No such file or directory
find: ‘/opt/csw/etc/ssh’: No such file or directory
```

进一步探索这个文件,就会发现寻找 SSH-7408 的相关代码。这个测试涵盖了 `MaxAuthTries` 和其他一些设置。现在我可以在 SSH 配置文件中找到该变量:
进一步探索这个文件,就会看到寻找 SSH-7408 的相关代码。这个测试涵盖了 `MaxAuthTries` 和其他一些设置。现在我可以在 SSH 配置文件中找到该变量:

```
# grep MaxAuthTries /etc/ssh/sshd_config
@ -131,7 +131,7 @@ find: ‘/opt/csw/etc/ssh’: No such file or directory

#### 修复法律横幅问题

Lynis 还报告了一个与登录系统时显示的法律横幅有关的发现。在我的家庭桌面系统上(我不希望有很多其他人登录),我没有去改变默认的 `issue` 文件。企业或政府的系统很可能被要求包含一个法律横幅,以警告用户他们的登录和活动可能被记录和监控。Lynis 用 BANN-7126 测试和 BANN-7130 测试报告了这一点:
Lynis 还报告了一个与登录系统时显示的法律横幅有关的发现。在我的家庭桌面系统上(我并不希望有很多其他人登录),我没有去改变默认的 `issue` 文件。企业或政府的系统很可能被要求包含一个法律横幅,以警告用户他们的登录和活动可能被记录和监控。Lynis 用 BANN-7126 测试和 BANN-7130 测试报告了这一点:

```
* Add a legal banner to /etc/issue, to warn unauthorized users [BANN-7126]
@ -168,7 +168,7 @@ Kernel \r on an \m (\l)
for ITEM in ${LEGAL_BANNER_STRINGS}; do
```

这些法律术语存储在文件顶部定义的变量 `LEGAL_BANNER_STRINGS` 中。向后滚动到顶部可以看到完整的清单:”
这些法律术语存储在文件顶部定义的变量 `LEGAL_BANNER_STRINGS` 中。向后滚动到顶部可以看到完整的清单:

```
LEGAL_BANNER_STRINGS="audit access authori condition connect consent continu criminal enforce evidence forbidden intrusion law legal legislat log monitor owner penal policy policies privacy private prohibited record restricted secure subject system terms warning"
@ -205,12 +205,12 @@ via: https://opensource.com/article/20/8/linux-lynis-security
作者:[Alan Formy-Duval][a]
选题:[lujun9972][b]
译者:[wxy](https://github.com/wxy)
校对:[校对者ID](https://github.com/校对者ID)
校对:[wxy](https://github.com/wxy)

本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出

[a]: https://opensource.com/users/alanfdoss
[b]: https://github.com/lujun9972
[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/security-lock-password.jpg?itok=KJMdkKum (Lock)
[2]: https://opensource.com/article/20/5/linux-security-lynis
[2]: https://linux.cn/article-12696-1.html
[3]: https://github.com/CISOfy/lynis
304
published/20200811 TCP window scaling, timestamps and SACK.md
Normal file
304
published/20200811 TCP window scaling, timestamps and SACK.md
Normal file
@ -0,0 +1,304 @@
[#]: collector: (lujun9972)
[#]: translator: (gxlct008)
[#]: reviewer: (wxy)
[#]: publisher: (wxy)
[#]: url: (https://linux.cn/article-12710-1.html)
[#]: subject: (TCP window scaling, timestamps and SACK)
[#]: via: (https://fedoramagazine.org/tcp-window-scaling-timestamps-and-sack/)
[#]: author: (Florian Westphal https://fedoramagazine.org/author/strlen/)

TCP 窗口缩放、时间戳和 SACK
======

Linux TCP 协议栈具有无数个可以更改其行为的 `sysctl` 旋钮。这包括可用于接收或发送操作的内存量、套接字的最大数量、可选的特性和协议扩展。

有很多文章出于各种“性能调优”或“安全性”原因,建议禁用 TCP 扩展,比如时间戳或<ruby>选择性确认<rt>Selective ACKnowledgments</rt></ruby>(SACK)。

本文提供了这些扩展功能的背景,为什么会默认启用,它们之间是如何关联的,以及为什么通常情况下将它们关闭是个坏主意。

### TCP 窗口缩放

TCP 可以承受的数据传输速率受到几个因素的限制。其中包括:

* <ruby>往返时间<rt>Round trip time</rt></ruby>(RTT)。这是数据包到达目的地并返回回复所花费的时间。越低越好。
* 所涉及的网络路径的最低链路速度。
* 丢包频率。
* 新数据可用于传输的速度。例如,CPU 需要能够以足够快的速度将数据传递到网络适配器。如果 CPU 需要首先加密数据,则适配器可能必须等待新数据。同样地,如果磁盘存储不能足够快地读取数据,则磁盘存储可能会成为瓶颈。
* TCP 接收窗口的最大可能大小。接收窗口决定了 TCP 在必须等待接收方报告接收到该数据之前可以传输多少数据(以字节为单位)。这是由接收方宣布的。接收方将在读取并确认接收到传入数据时不断更新此值。接收窗口的当前值包含在 [TCP 报头][2] 中,它是 TCP 发送的每个数据段的一部分。因此,只要发送方接收到来自对等方的确认,它就知道当前的接收窗口。这意味着往返时间(RTT)越长,发送方获得接收窗口更新所需的时间就越长。

TCP 的未确认(正在传输)数据被限制为最多 64KB。在大多数网络场景中,这甚至还不足以维持一个像样的数据速率。让我们看看一些例子。

**理论数据速率**

在往返时间(RTT)为 100 毫秒的情况下,TCP 每秒最多可以传输 640KB。在延迟为 1 秒的情况下,最大理论数据速率降至只有 64KB/s。

这是因为接收窗口的原因。一旦发送了 64KB 的数据,接收窗口就已经满了。发送方必须等待,直到对等方通知它应用程序已经读取了至少一部分数据。

发送的第一个段会把 TCP 窗口缩减去该段的大小。在接收窗口值的更新信息可用之前,需要往返一次。当更新以 1 秒的延迟到达时,即使链路有足够的可用带宽,也会导致 64KB 的限制。

为了充分利用一个具有几毫秒延迟的快速网络,必须有一个比传统 TCP 支持的窗口更大的窗口。“64KB 限制”是协议规范的产物:TCP 头只为接收窗口大小保留了 16 个位。这允许接收窗口最大为 64KB。在 TCP 协议最初设计时,这个大小并没有被视为一个限制。

不幸的是,想通过仅仅更改 TCP 头来支持更大的最大窗口值是不可能的。如果这样做就意味着 TCP 的所有实现都必须同时更新,否则它们将无法相互理解。为了解决这个问题,我们改变了对接收窗口值的解释。

“窗口缩放选项”允许你改变这个解释,同时保持与现有实现的兼容性。

#### TCP 选项:向后兼容的协议扩展

TCP 支持可选扩展。这允许使用新特性增强协议,而无需立即更新所有实现。当 TCP 发起方连接到对等方时,它还会发送一个支持的扩展列表。所有扩展都遵循相同的格式:一个唯一的选项号,后跟选项的长度以及选项数据本身。

TCP 响应方检查连接请求中包含的所有选项号。如果它遇到一个不能理解的选项号,则会跳过该选项号附带的“长度”字节的数据,并检查下一个选项号。响应方会在应答中略去它无法理解的选项。这使发送方和接收方都能理解所支持的公共选项集。

使用窗口缩放时,选项数据总是由单个数字组成。

### 窗口缩放选项

```
Window Scale option (WSopt): Kind: 3, Length: 3
    +---------+---------+---------+
    | Kind=3  |Length=3 |shift.cnt|
    +---------+---------+---------+
         1         1         1
```

[窗口缩放][3] 选项告诉对等方,应该使用给定的数字缩放 TCP 标头中的接收窗口值,以获取实际大小。

例如,一个宣告窗口缩放因子为 7 的 TCP 发起方试图指示响应方,任何将来携带接收窗口值为 512 的数据包实际上都会宣告 65536 字节的窗口。增加了 128 倍(2^7)。这将允许最大为 8MB 的 TCP 窗口。

不能理解此选项的 TCP 响应方将会忽略它,为响应连接请求而发送的 TCP 数据包(SYN-ACK)不会包含该窗口缩放选项。在这种情况下,双方只能使用 64k 的窗口大小。幸运的是,默认情况下,几乎每个 TCP 栈都支持并默认启用了此选项,包括 Linux。
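
在 Linux 上,可以用 `sysctl` 确认这一点(输出为 1 表示启用):

```
$ sysctl net.ipv4.tcp_window_scaling
net.ipv4.tcp_window_scaling = 1
```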

响应方包括了它自己所需的缩放因子。两个对等方可以使用不同的因子。宣布缩放因子为 0 也是合法的。这意味着对等方应该如实处理它接收到的接收窗口值,但它允许应答方向上的缩放值,然后接收方可以使用更大的接收窗口。

与 SACK 或 TCP 时间戳不同,窗口缩放选项仅出现在 TCP 连接的前两个数据包中,之后无法更改。也不可能通过查看不包含初始连接三次握手的连接的数据包捕获来确定缩放因子。

支持的最大缩放因子为 14。这将允许 TCP 窗口的大小高达 1GB。

**窗口缩放的缺点**

在非常特殊的情况下,它可能导致数据损坏。但在你禁用该选项之前,要知道通常情况下是不可能损坏的。还有一种解决方案可以防止这种情况。不幸的是,有些人在没有意识到它与窗口缩放的关系的情况下禁用了该解决方案。首先,让我们看一下需要解决的实际问题。想象以下事件序列:

1. 发送方发送段:s_1、s_2、s_3、... s_n。
2. 接收方看到:s_1、s_3、... s_n,并发送对 s_1 的确认。
3. 发送方认为 s_2 丢失,然后再次发送。它还发送了段 s_n+1 中包含的新数据。
4. 接收方然后看到:s_2、s_n+1,s_2:数据包 s_2 被接收两次。

当发送方过早触发重新传输时,可能会发生这种情况。在正常情况下,即使使用窗口缩放,这种错误的重传也绝不会成为问题。接收方将只丢弃重复项。

#### 从旧数据到新数据

TCP 序列号最多可以为 4GB。如果它变得大于此值,则该序列会回绕到 0,然后再次增加。这本身不是问题,但是如果这种问题发生得足够快,则上述情况可能会造成歧义。

如果在正确的时刻发生回绕,则序列号 s_2(重新发送的数据包)可能已经大于 s_n+1。因此,在最后的步骤(4)中,接收方可以将其解释为:s_2、s_n+1、s_n+m,即它可以将 “旧” 数据包 s_2 视为包含新数据。

通常,这不会发生,因为即使在高带宽链接上,“回绕”也只会每隔几秒钟或几分钟发生一次。原始数据包和不需要的重传的数据包之间的间隔将小得多。

例如,对于 50MB/s 的传输速度,重复项要迟到一分钟以上才会成为问题。序列号的回绕速度没有快到让小的延迟会导致这个问题。

一旦 TCP 达到 “GB/s” 的吞吐率,序列号的回绕速度就会非常快,以至于即使只有几毫秒的延迟也可能会造成 TCP 无法检测出的重复项。通过解决接收窗口太小的问题,TCP 现在可以用于以前无法实现的网络速度,这会产生一个新的,尽管很少见的问题。为了在 RTT 非常低的环境中安全使用 GB/s 的速度,接收方必须能够检测到这些旧的重复项,而不必仅依赖序列号。

### TCP 时间戳

#### 最佳截止日期

用最简单的术语来说,[TCP 时间戳][3]只是在数据包上添加时间戳,以解决由非常快速的序列号回绕引起的歧义。如果一个段看起来包含新数据,但其时间戳早于上一个在接收窗口内的数据包,则该序列号已被重新回绕,而“新”数据包实际上是一个较旧的重复项。这解决了即使在极端情况下重传的歧义。

但是,该扩展不仅仅是检测旧数据包。TCP 时间戳的另一个主要功能是更精确的往返时间测量(RTTm)。

#### 需要准确的 RTT 估算

当两个对等方都支持时间戳时,每个 TCP 段都携带两个附加数字:时间戳值和回显时间戳。

```
TCP Timestamp option (TSopt): Kind: 8, Length: 10
+-------+----+----------------+-----------------+
|Kind=8 | 10 |TS Value (TSval)|EchoReply (TSecr)|
+-------+----+----------------+-----------------+
    1     1          4                 4
```

准确的 RTT 估算对于 TCP 性能至关重要。TCP 会自动重新发送未确认的数据。重传由计时器触发:如果超时,则 TCP 会将尚未收到确认的一个或多个数据包视为丢失。然后再发送一次。

但是,“尚未得到确认” 并不意味着该段已丢失。也有可能是接收方到目前为止没有发送确认,或者确认仍在传输中。这就造成了一个两难的困境:TCP 必须等待足够长的时间,才能让这种轻微的延迟变得无关紧要,但它也不能等待太久。

**低网络延迟 VS 高网络延迟**

在延迟较高的网络中,如果计时器触发过快,TCP 经常会将时间和带宽浪费在不必要的重发上。

然而,在延迟较低的网络中,等待太长时间会导致真正发生数据包丢失时吞吐量降低。因此,在低延迟网络中,计时器应该比高延迟网络中更早到期。所以,TCP 重传超时不能使用固定常量值作为超时。它需要根据其在网络中所经历的延迟来调整该值。

**往返时间的测量**

TCP 选择基于预期的往返时间(RTT)的重传超时。RTT 事先是未知的。它是通过测量发送的段与 TCP 接收到该段所承载数据的确认之间的增量来估算的。

这一测量因为多种因素而变得复杂。

* 出于性能原因,TCP 不会为收到的每个数据包生成新的确认。它等待的时间非常短:如果有更多的数据段到达,则可以通过单个 ACK 数据包确认其接收。这称为<ruby>“累积确认”<rt>cumulative ACK</rt></ruby>。
* 往返时间并不恒定。这是有多种因素造成的。例如,客户端可能是一部移动电话,随其移动而切换到不同的基站。也可能是当链路或 CPU 的利用率提高时,数据包交换花费了更长的时间。
* 必须重新发送的数据包在计算过程中必须被忽略。这是因为发送方无法判断重传数据段的 ACK 是在确认原来的传输数据(毕竟已到达)还是在确认重传数据。

最后一点很重要:当 TCP 忙于从丢失中恢复时,它可能仅接收到重传段的 ACK。这样,它就无法在此恢复阶段测量(更新)RTT。所以,它无法调整重传超时,然后超时将以指数级增长。那是一种非常具体的情况(它假设其他机制,如快速重传或 SACK 不起作用)。但是,使用 TCP 时间戳,即使在这种情况下也会进行 RTT 评估。

如果使用了扩展,则对等方将从 TCP 段的扩展空间中读取时间戳值并将其存储在本地。然后,它将该值作为 “回显时间戳” 放入发回的所有数据段中。

因此,该选项带有两个时间戳:它的发送方自己的时间戳和它从对等方收到的最新时间戳。原始发送方使用 “回显时间戳” 来计算 RTT。它是当前时间戳时钟与 “回显时间戳” 中所反映的值之间的增量。

**时间戳的其他用途**

TCP 时间戳甚至还有除 PAWS(<ruby>防止序列号回绕<rt>Protection Against Wrapped Sequences</rt></ruby>) 和 RTT 测量以外的其他用途。例如,可以检测是否不需要重发。如果该确认携带较旧的回显时间戳,则该确认针对的是初始数据包,而不是重新发送的数据包。

TCP 时间戳的另一个更晦涩的用例与 TCP [syn cookie][4] 功能有关。

**在服务器端建立 TCP 连接**

当连接请求到达的速度快于服务器应用程序可以接受新的传入连接的速度时,连接积压最终将达到其极限。这可能是由于系统配置错误或应用程序中的错误引起的。当一个或多个客户端发送连接请求而不对 “SYN ACK” 响应做出反应时,也会发生这种情况。这将用不完整的连接填充连接队列。这些条目需要几秒钟才会超时。这被称为<ruby>“同步泛洪攻击”<rt>syn flood attack</rt></ruby>。

**TCP 时间戳和 TCP Syn Cookie**

即使队列已满,某些 TCP 协议栈也允许继续接受新连接。发生这种情况时,Linux 内核将在系统日志中打印一条突出的消息:

> 端口 P 上可能发生 SYN 泛洪。正在发送 Cookie。检查 SNMP 计数器。

此机制将完全绕过连接队列。通常存储在连接队列中的信息被编码到 SYN/ACK 响应 TCP 序列号中。当 ACK 返回时,可以根据序列号重建队列条目。

序列号只有有限的空间来存储信息。因此,使用 “TCP Syn Cookie” 机制建立的连接不能支持 TCP 选项。

但是,对两个对等点都通用的 TCP 选项可以存储在时间戳中。ACK 数据包在回显时间戳字段中反映了该值,这也允许恢复已达成共识的 TCP 选项。否则,cookie 连接受标准的 64KB 接收窗口限制。

**常见误区 —— 时间戳不利于性能**

不幸的是,一些指南建议禁用 TCP 时间戳,以减少内核访问时间戳时钟来获取当前时间所需的次数。这是不正确的。如前所述,RTT 估算是 TCP 的必要部分。因此,内核在接收/发送数据包时总是采用微秒级的时间戳。

在包处理步骤的其余部分中,Linux 会重用 RTT 估算所需的时钟时间戳。这还避免了将时间戳添加到传出 TCP 数据包的额外时钟访问。

整个时间戳选项在每个数据包中仅需要 10 个字节的 TCP 选项空间,这不会显著减少可用于数据包有效负载的空间。

**常见误区 —— 时间戳是个安全问题**

一些安全审计工具和(较旧的)博客文章建议禁用 TCP 时间戳,因为据称它们泄露了系统正常运行时间:这样一来,便可以估算系统/内核的补丁级别。这在过去是正确的:时间戳时钟基于不断增加的值,该值在每次系统引导时都以固定值开始。时间戳值可以估计机器已经运行了多长时间(正常运行时间 `uptime`)。

从 Linux 4.12 开始,TCP 时间戳不再显示正常运行时间。发送的所有时间戳值都使用对等设备特定的偏移量。时间戳值也每 49 天回绕一次。

换句话说,从地址 “A” 出发,或者终到地址 “A” 的连接看到的时间戳与到远程地址 “B” 的连接看到的时间戳不同。

运行 `sysctl net.ipv4.tcp_timestamps=2` 以禁用随机化偏移。这使得分析由诸如 `wireshark` 或 `tcpdump` 之类的工具记录的数据包跟踪变得更容易 —— 从主机发送的数据包在其 TCP 选项时间戳中都具有相同的时钟基准。因此,对于正常操作,默认设置应保持不变。

### 选择性确认

如果同一数据窗口中的多个数据包丢失了,TCP 将会出现问题。这是因为 TCP 确认是累积的,但仅适用于按顺序到达的数据包。例如:

* 发送方发送段 s_1、s_2、s_3、... s_n
* 发送方收到 s_2 的 ACK
* 这意味着 s_1 和 s_2 都已收到,并且发送方不再需要保留这些段。
* s_3 是否应该重新发送? s_4 呢? s_n?

发送方等待 “重传超时” 或 “重复 ACK” 以使 s_2 到达。如果发生重传超时或到达了 s_2 的多个重复 ACK,则发送方再次发送 s_3。

如果发送方收到对 s_n 的确认,则 s_3 是唯一丢失的数据包。这是理想的情况。仅发送单个丢失的数据包。

如果发送方收到的确认段小于 s_n,例如 s_4,则意味着丢失了多个数据包。发送方也需要重传下一个数据段。

**重传策略**

可能只是重复相同的序列:重新发送下一个数据包,直到接收方指示它已处理了直至 s_n 的所有数据包为止。这种方法的问题在于,它需要一个 RTT,直到发送方知道接下来必须重新发送的数据包为止。尽管这种策略可以避免不必要的重传,但要等到 TCP 重新发送整个数据窗口后,它可能要花几秒钟甚至更长的时间。

另一种方法是一次重新发送几个数据包。当丢失了几个数据包时,此方法可使 TCP 恢复更快。在上面的示例中,TCP 重新发送了 s_3、s_4、s_5、...,但是只能确保已丢失 s_3。

从延迟的角度来看,这两种策略都不是最佳的。如果只有一个数据包需要重新发送,第一种策略是快速的,但是当多个数据包丢失时,它花费的时间太长。

即使必须重新发送多个数据包,第二个也是快速的,但是以浪费带宽为代价。此外,这样的 TCP 发送方在进行不必要的重传时可能已经发送了新数据。

通过可用信息,TCP 无法知道丢失了哪些数据包。这就是 TCP [选择性确认][5](SACK)的用武之地了。就像窗口缩放和时间戳一样,它是另一个可选的但非常有用的 TCP 特性。

**SACK 选项**

```
TCP Sack-Permitted Option: Kind: 4, Length 2
+---------+---------+
| Kind=4  | Length=2|
+---------+---------+
```

支持此扩展的发送方在连接请求中包括 “允许 SACK” 选项。如果两个端点都支持该扩展,则检测到数据流中丢失数据包的对等方可以将此信息通知发送方。

```
TCP SACK Option: Kind: 5, Length: Variable
+--------+--------+
| Kind=5 | Length |
+--------+--------+--------+--------+
| Left Edge of 1st Block |
+--------+--------+--------+--------+
| Right Edge of 1st Block |
+--------+--------+--------+--------+
| |
/ . . . /
| |
+--------+--------+--------+--------+
| Left Edge of nth Block |
+--------+--------+--------+--------+
| Right Edge of nth Block |
+--------+--------+--------+--------+
```

接收方遇到 s_2 后跟 s_5 ... s_n,则在发送对 s_2 的确认时将包括一个 SACK 块:

```
+--------+-------+
| Kind=5 |   10  |
+--------+------+--------+-------+
| Left edge: s_5 |
+--------+--------+-------+------+
| Right edge: s_n |
+--------+-------+-------+-------+
```

这告诉发送方到 s_2 的段都是按顺序到达的,但也让发送方知道段 s_5 至 s_n 也已收到。然后,发送方可以重新发送那两个数据包(s_3、s_4),并继续发送新数据。

**神话般的无损网络**

从理论上讲,如果连接不会丢包,那么 SACK 就没有任何优势。或者连接具有如此低的延迟,甚至等待一个完整的 RTT 都无关紧要。

在实践中,无损行为几乎是不可能保证的。即使网络及其所有交换机和路由器具有足够的带宽和缓冲区空间,数据包仍然可能丢失:

* 主机操作系统可能面临内存压力并丢弃数据包。请记住,一台主机可能同时处理数万个数据包流。
* CPU 可能无法足够快地消耗掉来自网络接口的传入数据包。这会导致网络适配器本身中的数据包丢失。
* 如果 TCP 时间戳不可用,即使一个非常小的 RTT 的连接也可能在丢失恢复期间暂时停止。

使用 SACK 不会增加 TCP 数据包的大小,除非连接遇到数据包丢失。因此,几乎没有理由禁用此功能。几乎所有的 TCP 协议栈都支持 SACK —— 它通常只在不进行 TCP 批量数据传输的低功耗 IoT 类的设备上才不存在。

当 Linux 系统接受来自此类设备的连接时,TCP 会自动为受影响的连接禁用 SACK。

### 总结

本文中研究的三个 TCP 扩展都与 TCP 性能有关,最好都保留其默认设置:启用。
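
如果想确认自己的系统确实保持了这些默认值,可以一次查询这三个开关(在常见的 Linux 发行版上,三者默认都为 1,表示启用):

```
$ sysctl net.ipv4.tcp_window_scaling net.ipv4.tcp_timestamps net.ipv4.tcp_sack
net.ipv4.tcp_window_scaling = 1
net.ipv4.tcp_timestamps = 1
net.ipv4.tcp_sack = 1
```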

TCP 握手可确保仅使用双方都可以理解的扩展,因此,永远不需要因为对等方可能不支持而全局禁用某个扩展。

关闭这些扩展会导致严重的性能损失,尤其是 TCP 窗口缩放和 SACK。可以禁用 TCP 时间戳而不会立即造成不利影响,但是现在没有令人信服的理由这样做了。启用它们还可以支持 TCP 选项,即使在 SYN cookie 生效时也是如此。

--------------------------------------------------------------------------------

via: https://fedoramagazine.org/tcp-window-scaling-timestamps-and-sack/

作者:[Florian Westphal][a]
选题:[lujun9972][b]
译者:[gxlct008](https://github.com/gxlct008)
校对:[wxy](https://github.com/wxy)

本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出

[a]: https://fedoramagazine.org/author/strlen/
[b]: https://github.com/lujun9972
[1]: https://fedoramagazine.org/wp-content/uploads/2020/08/tcp-window-scaling-816x346.png
[2]: https://en.wikipedia.org/wiki/Transmission_Control_Protocol#TCP_segment_structure
[3]: https://www.rfc-editor.org/info/rfc7323
[4]: https://en.wikipedia.org/wiki/SYN_cookies
[5]: https://www.rfc-editor.org/info/rfc2018
@ -1,46 +1,44 @@
[#]: collector: (lujun9972)
[#]: translator: (wxy)
[#]: reviewer: ( )
[#]: publisher: ( )
[#]: url: ( )
[#]: reviewer: (wxy)
[#]: publisher: (wxy)
[#]: url: (https://linux.cn/article-12688-1.html)
[#]: subject: (What is IPv6, and why aren’t we there yet?)
[#]: via: (https://www.networkworld.com/article/3254575/what-is-ipv6-and-why-aren-t-we-there-yet.html)
[#]: author: (Keith Shaw, Josh Fruhlinger )

什么是 IPv6,为什么我们还普及?
什么是 IPv6,为什么我们还未普及?
======

自 1998 年以来,IPv6 一直在努力解决 IPv4 可用 IP 地址的不足的问题,然而尽管 IPv6 在效率和安全方面具有优势,但其采用速度仍然缓慢。
> 自 1998 年以来,IPv6 一直在努力解决 IPv4 可用 IP 地址的不足的问题,然而尽管 IPv6 在效率和安全方面具有优势,但其采用速度仍然缓慢。

在大多数情况下,已经没人一再警告互联网地址耗尽的可怕境况,因为,虽然缓慢,但坚定地,从互联网协议版本 4(IPv4)的世界到 IPv6 的迁移已经开始,并且相关软件已经到位,以防止许多人预测的地址耗竭。
在大多数情况下,已经没有人一再对互联网地址耗尽的可怕境况发出警告,因为,从互联网协议版本 4(IPv4)的世界到 IPv6 的迁移,虽然缓慢,但已经坚定地开始了,并且相关软件已经到位,以防止许多人预测的地址耗竭。

但在我们看到 IPv6 的现状和发展方向之前,让我们先回到互联网寻址的早期。

### 什么是 IPv6,为什么它很重要?

IPv6 是最新版本的<ruby>互联网协议<rt>Internet Protocol</rt></ruby>(IP),它可以识别互联网上的设备,从而确定它们的位置。每一个使用互联网的设备都要通过自己的 IP 地址来识别,以便互联网通信工作。在这方面,它就像你需要知道的街道地址和邮政编码一样,以便邮寄信件。
IPv6 是最新版本的<ruby>互联网协议<rt>Internet Protocol</rt></ruby>(IP),它可以跨互联网识别设备,从而确定它们的位置。每一个使用互联网的设备都要通过自己的 IP 地址来识别,以便可以通过互联网通信。在这方面,它就像你需要知道街道地址和邮政编码一样,以便邮寄信件。

之前的版本 IPv4 采用 32 位寻址方案,可以支持 43 亿台设备,本以为已经足够。然而,互联网、个人电脑、智能手机以及现在物联网设备的发展证明,这个世界需要更多的地址。

幸运的是,<ruby>互联网工程任务组<rt>Internet Engineering Task Force</rt></ruby>(IETF)在 20 年前就认识到了这一点。1998 年,它创建了 IPv6,使用 128 位寻址方式来支持大约 340 <ruby>亿亿亿<rt>trillion trillion</rt></ruby>(或者 2 的 128 次幂,如果你喜欢的话)。IPv4 的地址可表示为四组一至三位十进制数,IPv6 则使用八组四位十六进制数字,用冒号隔开。
幸运的是,<ruby>互联网工程任务组<rt>Internet Engineering Task Force</rt></ruby>(IETF)在 20 年前就认识到了这一点。1998 年,它创建了 IPv6,使用 128 位寻址方式来支持大约 340 <ruby>亿亿亿<rt>trillion trillion</rt></ruby>(或者 2 的 128 次幂,如果你喜欢用这种表示方式的话)。IPv4 的地址可表示为四组一至三位十进制数,IPv6 则使用八组四位十六进制数字,用冒号隔开。
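
直观起见,下面用文档保留地址各举一个例子(仅为示意格式,并非真实分配的地址):

```
IPv4:192.0.2.146
IPv6:2001:0db8:85a3:0000:0000:8a2e:0370:7334(可缩写为 2001:db8:85a3::8a2e:370:7334)
```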

### IPv6 的好处是什么?

IETF 在其工作中加入了 IPv6 对 IPv4 增强的功能。IPv6 协议可以更有效地处理数据包,提高性能和增加安全性。它使互联网服务提供商(ISP)能够通过使他们的路由表更有层次性来减少其大小。
IETF 在其工作中为 IPv6 加入了对 IPv4 增强的功能。IPv6 协议可以更有效地处理数据包,提高性能和增加安全性。它使互联网服务提供商(ISP)能够通过使他们的路由表更有层次性来减少其大小。

### 网络地址转换(NAT)和 IPv6

IPv6 的采用被推迟,部分原因是<ruby>网络地址转换<rt>network address translation</rt></ruby>(NAT)导致的,它将私有 IP 地址转化为公共 IP 地址。这样一来,拥有私也 IP 地址的企业的机器就可以向位于私人网络之外拥有公共 IP 地址的机器发送和接收数据包。
IPv6 的采用被推迟,部分原因是<ruby>网络地址转换<rt>network address translation</rt></ruby>(NAT)导致的,NAT 可以将私有 IP 地址转化为公共 IP 地址。这样一来,拥有私有 IP 地址的企业的机器就可以向位于私有网络之外拥有公共 IP 地址的机器发送和接收数据包。

如果没有 NAT,拥有数千台或数万台计算机的大公司如果要与外界通信,就会吞噬大量的公有 IPv4 地址。但是这些 IPv4 地址是有限的,而且接近枯竭,以至于不得不限制分配。

NAT 有助于缓解这个问题。有了 NAT,成千上万的私有地址计算机可以通过防火墙或路由器等 NAT 机器呈现在公共互联网上。
NAT 有助于缓解这个问题。有了 NAT,成千上万的私有地址计算机可以通过防火墙或路由器等 NAT 设备呈现在公共互联网上。

NAT 的工作方式是,当一台拥有私有 IP 地址的企业计算机向企业网络外的公共 IP 地址发送数据包时,首先会进入 NAT 设备。NAT 在翻译表中记下数据包的源地址和目的地址。

NAT 将数据包的源地址改为 NAT 设备面向公众的地址,并将数据包一起发送到外部目的地。当数据包回复时,NAT 将目的地址翻译成发起通信的计算机的私有 IP 地址。这样一来,一个公网 IP 地址可以代表多台私有地址的计算机。
NAT 的工作方式是,当一台拥有私有 IP 地址的企业计算机向企业网络外的公共 IP 地址发送数据包时,首先会进入 NAT 设备。NAT 在翻译表中记下数据包的源地址和目的地址。NAT 将数据包的源地址改为 NAT 设备面向公众的地址,并将数据包一起发送到外部目的地。当数据包回复时,NAT 将目的地址翻译成发起通信的计算机的私有 IP 地址。这样一来,一个公网 IP 地址可以代表多台私有地址的计算机。

### 谁在部署 IPv6?

@ -48,27 +46,27 @@ NAT 将数据包的源地址改为 NAT 设备面向公众的地址,并将数

主要网站则排在其后 —— World IPv6 Launch 称,目前 Alexa 前 1000 的网站中只有不到 30% 可以通过 IPv6 到达。

企业在部署方面比较落后,根据<ruby>互联网协会<rt>Internet Society</rt></ruby>的[《2017年 IPv6 部署状况》报告][4],只有不到四分之一的企业宣传其 IPv6 前缀。复杂性、成本和完成迁移所需时间都是给出的理由。此外,一些项目由于软件兼容性的问题而被推迟。例如,一份 [2017 年 1 月的报告][5]称,Windows 10 中的一个 bug “破坏了微软在其西雅图总部推出纯 IPv6 网络的努力”。
企业在部署方面比较落后,根据<ruby>互联网协会<rt>Internet Society</rt></ruby>的[《2017年 IPv6 部署状况》报告][4],只有不到四分之一的企业宣传其 IPv6 前缀。复杂性、成本和完成迁移所需时间都是他们给出的理由。此外,一些项目由于软件兼容性的问题而被推迟。例如,一份 [2017 年 1 月的报告][5]称,Windows 10 中的一个 bug “破坏了微软在其西雅图总部推出纯 IPv6 网络的努力”。

### 何时会有更多部署?

互联网协会表示,IPv4 地址的价格将在 2018 年达到顶峰,然后在 IPv6 部署通过 50% 大关后,价格会下降。目前,[根据 Google][6],全球的 IPv6 采用率为 20% 到 22%,但在美国约为 32%。

随着 IPv4 地址的价格开始下降,互联网协会建议企业出售现有的 IPv4 地址,以帮助资助其 IPv6 的部署。根据[一个发布在 GitHub 上的说明][7],麻省理工学院已经这样做了。这所大学得出的结论是,其 800 万个 IPv4 地址是“过剩”的,可以在不影响当前或未来需求的情况下出售,因为它还持有 20 个<ruby>非亿级<rt>nonillion</rt></ruby> IPv6 地址。(非亿级地址是指数字 1 后面跟着 30 个零)。
随着 IPv4 地址的价格开始下降,互联网协会建议企业出售现有的 IPv4 地址,以帮助资助其 IPv6 的部署。根据[一个发布在 GitHub 上的说明][7],麻省理工学院已经这样做了。这所大学得出的结论是,其有 800 万个 IPv4 地址是“过剩”的,可以在不影响当前或未来需求的情况下出售,因为它还持有 20 个<ruby>非亿级<rt>nonillion</rt></ruby> IPv6 地址。(非亿级地址是指数字 1 后面跟着 30 个零)。

此外,随着部署的增多,更多的公司将开始对 IPv4 地址的使用收费,而免费提供 IPv6 服务。[英国的 ISP Mythic Beasts][8] 表示,“IPv6 连接是标配”,而 “IPv4 连接是可选的额外服务”。

### IPv4 何时会被“关闭”?

在 2011 年至 2018 年期间,世界上大部分地区[“用完”了新的 IPv4 地址][9] —— 但我们不会完全没有了这些地址,因为 IPv4 地址会被出售和重新使用(如前所述),而任何剩余的地址将用于 IPv6 过渡。
在 2011 年至 2018 年期间,世界上大部分地区[“用完”了新的 IPv4 地址][9] —— 但我们不会完全没有 IPv4 地址,因为 IPv4 地址会被出售和重新使用(如前所述),而剩余的地址将用于 IPv6 过渡。

目前还没有正式的关闭日期,所以人们不应该担心有一天他们的互联网接入会突然消失。随着越来越多的网络过渡,越来越多的内容网站支持 IPv6,以及越来越多的终端用户为 IPv6 功能升级设备,世界将慢慢远离 IPv4。
目前还没有正式的 IPv4 关闭日期,所以人们不用担心有一天他们的互联网接入会突然消失。随着越来越多的网络过渡,越来越多的内容网站支持 IPv6,以及越来越多的终端用户为 IPv6 功能升级设备,世界将慢慢远离 IPv4。

### 为什么没有 IPv5?

曾经有一个 IPv5,也被称为<ruby>互联网流协议<rt>Internet Stream Protocol</rt></ruby>,简称 ST。它被设计用于跨 IP 网络的面向连接的通信,目的是支持语音和视频。

它在这个任务上是成功的,并被实验性地使用。它的一个缺点是它的 32 位地址方案 —— 与 IPv4 使用的方案相同,从而影响了它的普及。因此,它存在着与 IPv4 相同的问题 —— 可用的 IP 地址数量有限。这导致了 IPv6 的发展和最终采用。尽管 IPv5 从未被公开采用,但它已经用掉了 IPv5 这个名字。
它在这个任务上是成功的,并被实验性地使用。它的一个缺点是它的 32 位地址方案 —— 与 IPv4 使用的方案相同,从而影响了它的普及。因此,它存在着与 IPv4 相同的问题 —— 可用的 IP 地址数量有限。这导致了 IPv6 的发展并最终得到采用。尽管 IPv5 从未被公开采用,但它已经用掉了 IPv5 这个名字。

--------------------------------------------------------------------------------

@ -77,7 +75,7 @@ via: https://www.networkworld.com/article/3254575/what-is-ipv6-and-why-aren-t-we

作者:[Keith Shaw][a],[Josh Fruhlinger][c]
选题:[lujun9972][b]
译者:[wxy](https://github.com/wxy)
校对:[校对者ID](https://github.com/校对者ID)
校对:[wxy](https://github.com/wxy)

本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
@ -1,8 +1,8 @@
[#]: collector: (lujun9972)
[#]: translator: (gxlct008)
[#]: reviewer: ( )
[#]: publisher: ( )
[#]: url: ( )
[#]: reviewer: (wxy)
[#]: publisher: (wxy)
[#]: url: (https://linux.cn/article-12693-1.html)
[#]: subject: (Create a mobile app with Flutter)
[#]: via: (https://opensource.com/article/20/9/mobile-app-flutter)
[#]: author: (Vitaly Kuprenko https://opensource.com/users/kooper)
@ -10,14 +10,15 @@
使用 Flutter 创建 App
======

使用流行的 Flutter 框架开始您的跨平台开发之旅。
![A person looking at a phone][1]
> 使用流行的 Flutter 框架开始你的跨平台开发之旅。

[Flutter][2] 是一个深受全球移动开发者欢迎的项目。该框架有一个庞大的、友好的发烧友社区,随着 Flutter 帮助程序员将他们的项目带入移动领域,这个社区还在继续增长。

本教程旨在帮助您开始使用 Flutter 进行移动开发。阅读之后,您将了解如何快速安装和设置框架,以便开始为智能手机、平板电脑和其他平台编码。
[Flutter][2] 是一个深受全球移动开发者欢迎的项目。该框架有一个庞大的、友好的爱好者社区,随着 Flutter 帮助程序员将他们的项目带入移动领域,这个社区还在继续增长。

本操作指南假定您已在计算机上安装了 [Android Studio][3],并且具有一定的使用经验。
本教程旨在帮助你开始使用 Flutter 进行移动开发。阅读之后,你将了解如何快速安装和设置框架,以便开始为智能手机、平板电脑和其他平台开发。

本操作指南假定你已在计算机上安装了 [Android Studio][3],并且具有一定的使用经验。

### 什么是 Flutter ?

@ -25,15 +26,15 @@ Flutter 使得开发人员能够为多个平台构建应用程序,包括:

* Android
* iOS
* Web (测试版)
* macOS (正在开发中)
* Linux (正在开发中)
* Web(测试版)
* macOS(正在开发中)
* Linux(正在开发中)

对 macOS 和 Linux 的支持还处于早期开发阶段,而 Web 支持预计很快就会发布。这意味着您可以立即试用其功能(如下所述)。
对 macOS 和 Linux 的支持还处于早期开发阶段,而 Web 支持预计很快就会发布。这意味着你可以立即试用其功能(如下所述)。

### 安装 Flutter

我使用的是 Ubuntu18.04,但安装过程与其他 Linux 发行版类似,比如 Arch 或 Mint。
我使用的是 Ubuntu 18.04,但其他 Linux 发行版安装过程与之类似,比如 Arch 或 Mint。

#### 使用 snapd 安装

@ -46,31 +47,27 @@
$ sudo snap install flutter –classic
flutter 0+git.142868f from flutter Team/ installed
```

然后使用 `flutter` 命令启动它。 首次启动时,该框架会下载到您的计算机上:
然后使用 `flutter` 命令启动它。 首次启动时,该框架会下载到你的计算机上:

```
$ flutter
Initializing Flutter
Downloading <https://storage.googleapis.com/flutter\_infra\[...\]>
Downloading https://storage.googleapis.com/flutter_infra[...]
```

下载完成后,您会看到一条消息,告诉您 Flutter 已初始化:
下载完成后,你会看到一条消息,告诉你 Flutter 已初始化:

![Flutter initialized][5]

(Vitaly Kuprenko, [CC BY-SA 4.0][6])

#### 手动安装

如果您没有安装 Snapd,或者您的发行版不是 Ubuntu,那么安装过程会略有不同。 在这种情况下,请[下载] [7] 为您的操作系统推荐的 Flutter 版本。
如果你没有安装 Snapd,或者你的发行版不是 Ubuntu,那么安装过程会略有不同。在这种情况下,请[下载][7] 为你的操作系统推荐的 Flutter 版本。

![Install Flutter manually][8]

(Vitaly Kuprenko, [CC BY-SA 4.0][6])
然后将其解压缩到你的主目录。

然后将其解压缩到您的主目录。

在您喜欢的文本编辑器中打开主目录中的 `.bashrc` 文件 (如果您使用 [Z shell][9],则打开 `.zshc`)。因为它是隐藏文件,所以您必须首先在文件管理器中启用显示隐藏文件,或者使用以下命令从终端打开它:
在你喜欢的文本编辑器中打开主目录中的 `.bashrc` 文件(如果你使用 [Z shell][9],则打开 `.zshc`)。因为它是隐藏文件,所以你必须首先在文件管理器中启用显示隐藏文件,或者使用以下命令从终端打开它:

```
$ gedit ~/.bashrc &
@ -82,48 +79,43 @@ $ gedit ~/.bashrc &
export PATH="$PATH:~/flutter/bin"
```

保存并关闭文件。 请记住,如果将 Flutter 提取到您的主目录之外的其他位置,则 [Flutter SDK 的路径][10] 将有所不同。
保存并关闭文件。 请记住,如果在你的主目录之外的其他位置解压 Flutter,则 [Flutter SDK 的路径][10] 将有所不同。

关闭您的终端,然后再次打开,以便加载新配置。 或者,您可以通过以下命令使配置立即生效:
关闭你的终端,然后再次打开,以便加载新配置。 或者,你可以通过以下命令使配置立即生效:

```
$ . ~/.bashrc
```

如果您没有看到错误,那说明一切都是正常的。

这种安装方法比使用 `snap`命令稍微困难一些,但是它非常通用,可以让您在几乎所有的发行版上安装框架。
如果你没有看到错误,那说明一切都是正常的。

这种安装方法比使用 `snap` 命令稍微困难一些,但是它非常通用,可以让你在几乎所有的发行版上安装该框架。

#### 检查安装结果

要检查结果,请在终端中输入以下内容:
要检查安装结果,请在终端中输入以下内容:

```
flutter doctor -v
```

您将看到有关已安装组件的信息。 如果看到错误,请不要担心。 您尚未安装任何用于 Flutter SDK 的 IDE 插件。
你将看到有关已安装组件的信息。 如果看到错误,请不要担心。 你尚未安装任何用于 Flutter SDK 的 IDE 插件。

![Checking Flutter installation with the doctor command][11]

(Vitaly Kuprenko, [CC BY-SA 4.0][6])

### 安装 IDE 插件

您应该在您的 [集成开发环境 (IDE)][12] 中安装插件,以帮助它与 Flutter SDK 接口、与设备交互并构建代码。
你应该在你的 [集成开发环境(IDE)][12] 中安装插件,以帮助它与 Flutter SDK 接口、与设备交互并构建代码。

Flutter 开发中常用的三个主要 IDE 工具是 IntelliJ IDEA (社区版)、Android Studio 和 VS Code (或 [VSCodium][13])。我在本教程中使用的是 Android Studio,但步骤与它们在 IntelliJ Idea (社区版)上的工作方式相似,因为它们构建在相同的平台上。
Flutter 开发中常用的三个主要 IDE 工具是 IntelliJ IDEA(社区版)、Android Studio 和 VS Code(或 [VSCodium][13])。我在本教程中使用的是 Android Studio,但步骤与它们在 IntelliJ Idea(社区版)上的工作方式相似,因为它们构建在相同的平台上。

首先,启动 **Android Studio**。打开 **Settings**,进入 **Plugins** 窗格,选择 **Marketplace** 选项卡。在搜索行中输入 **Flutter**,然后单击 **Install**。
首先,启动 Android Studio。打开 “Settings”,进入 “Plugins” 窗格,选择 “Marketplace” 选项卡。在搜索行中输入 “Flutter”,然后单击 “Install”。

![Flutter plugins][14]

(Vitaly Kuprenko, [CC BY-SA 4.0][6])
你可能会看到一个安装 “Dart” 插件的选项;同意它。如果看不到 Dart 选项,请通过重复上述步骤手动安装它。我还建议使用 “Rainbow Brackets” 插件,它可以让代码导航更简单。

您可能会看到一个安装 **Dart** 插件的选项;同意它。如果看不到 Dart 选项,请通过重复上述步骤手动安装。我还建议使用 **Rainbow Brackets** 插件,它可以让代码导航更简单。

就这样!您已经安装了所需的所有插件。您可以在终端中输入一个熟悉的命令进行检查:
就这样!你已经安装了所需的所有插件。你可以在终端中输入一个熟悉的命令进行检查:

```
flutter doctor -v
@ -131,47 +123,37 @@ flutter doctor -v

![Checking Flutter plugins with the doctor command][15]

(Vitaly Kuprenko, [CC BY-SA 4.0][6])

### 构建您的 “Hello World” 应用程序
### 构建你的 “Hello World” 应用程序

要启动新项目,请创建一个 Flutter 项目:

1. 选择 **New -> New Flutter project**.
1、选择 “New -> New Flutter project”。

![Creating a new Flutter plugin][16]

(Vitaly Kuprenko, [CC BY-SA 4.0][6])
2、在窗口中,选择所需的项目类型。 在这种情况下,你需要选择 “Flutter Application”。

2. 在窗口中,选择所需的项目类型。 在这种情况下,您需要选择 **Flutter Application**。

3. 命名您的项目 **hello_world**。 请注意,您应该使用合并的名称,因此请使用下划线而不是空格。 您可能还需要指定 SDK 的路径。
3、命名你的项目为 `hello_world`。 请注意,你应该使用合并的名称,因此请使用下划线而不是空格。 你可能还需要指定 SDK 的路径。

![Naming a new Flutter plugin][17]

(Vitaly Kuprenko, [CC BY-SA 4.0][6])
4、输入软件包名称。

4. 输入软件包名称。

您已经创建了一个项目!现在,您可以在设备上或使用模拟器启动它。
你已经创建了一个项目!现在,你可以在设备上或使用模拟器启动它。

![Device options in Flutter][18]

(Vitaly Kuprenko, [CC BY-SA 4.0][6])

选择您想要的设备,然后按 **运行**。稍后,您将看到结果。
选择你想要的设备,然后按 “Run”。稍后,你将看到结果。

![Flutter demo on mobile device][19]

(Vitaly Kuprenko, [CC BY-SA 4.0][6])

现在你可以在一个 [中间项目][20] 上开始工作了。

### 尝试 Flutter for web

在安装 Flutter 的 Web 组件之前,您应该知道 Flutter 目前对 Web 应用程序的支持还很原始。 因此,将其用于复杂的项目并不是一个好主意。
在安装 Flutter 的 Web 组件之前,你应该知道 Flutter 目前对 Web 应用程序的支持还很原始。 因此,将其用于复杂的项目并不是一个好主意。

默认情况下,基本 SDK 中不启用 Flutter for web。 要打开它,请转到 beta 频道。 为此,请在终端中输入以下命令:
默认情况下,基本 SDK 中不启用 “Flutter for web”。 要打开它,请转到 beta 通道。 为此,请在终端中输入以下命令:

```
flutter channel beta
@ -179,8 +161,6 @@ flutter channel beta

![flutter channel beta output][21]

(Vitaly Kuprenko, [CC BY-SA 4.0][6])

接下来,使用以下命令根据 beta 分支升级 Flutter:

```
@ -189,23 +169,19 @@ flutter upgrade

![flutter upgrade output][22]

(Vitaly Kuprenko, [CC BY-SA 4.0][6])

要使 Flutter for web 工作,请输入:
要使 “Flutter for web” 工作,请输入:

```
flutter config --enable-web
```

重新启动 IDE;这有助于 Android Studio 索引新的 IDE 并重新加载设备列表。您应该会看到几个新设备:
重新启动 IDE;这有助于 Android Studio 索引新的 IDE 并重新加载设备列表。你应该会看到几个新设备:

![Flutter for web device options][23]

(Vitaly Kuprenko, [CC BY-SA 4.0][6])
选择 “Chrome” 会在浏览器中启动一个应用程序, “Web Server” 会提供指向你的 Web 应用程序的链接,你可以在任何浏览器中打开它。

选择 **Chrome** 会在浏览器中启动一个应用程序, **Web Server** 会提供指向您的 Web 应用程序的链接,您可以在任何浏览器中打开它。

不过,现在还不是急于开发的时候,因为您当前的项目不支持Web。要改进它,请打开项目根目录下的终端,然后输入:
不过,现在还不是急于开发的时候,因为你当前的项目不支持 Web。要改进它,请打开项目根目录下的终端,然后输入:

```
flutter create
@ -213,25 +189,21 @@ flutter create

此命令重新创建项目,并添加 Web 支持。 现有代码不会被删除。

请注意,目录树已更改,现在有了一个 "web" 目录:
请注意,目录树已更改,现在有了一个 `web` 目录:

![File tree with web directory][24]

(Vitaly Kuprenko, [CC BY-SA 4.0][6])

现在您可以开始工作了。 选择 **Chrome**,然后按 **Run**。 稍后,您会看到带有应用程序的浏览器窗口。
现在你可以开始工作了。 选择 “Chrome”,然后按 “Run”。 稍后,你会看到带有应用程序的浏览器窗口。

![Flutter web app demo][25]

(Vitaly Kuprenko, [CC BY-SA 4.0][6])

恭喜你! 您刚刚为浏览器启动了一个项目,并且可以像其他任何网站一样继续使用它。
恭喜你! 你刚刚为浏览器启动了一个项目,并且可以像其他任何网站一样继续使用它。

所有这些都来自同一代码库,因为 Flutter 使得几乎无需更改就可以为移动平台和 Web 编写代码。

### 用 Flutter 做更多的事情

Flutter 是用于移动开发的强大工具,而且它也是迈向跨平台开发的重要一步。 了解它,使用它,并将您的应用程序交付到所有平台!
Flutter 是用于移动开发的强大工具,而且它也是迈向跨平台开发的重要一步。 了解它,使用它,并将你的应用程序交付到所有平台!

--------------------------------------------------------------------------------

@ -239,8 +211,8 @@ via: https://opensource.com/article/20/9/mobile-app-flutter

作者:[Vitaly Kuprenko][a]
选题:[lujun9972][b]
译者:[译者ID](https://github.com/gxlct008)
校对:[校对者ID](https://github.com/校对者ID)
译者:[gxlct008](https://github.com/gxlct008)
校对:[wxy](https://github.com/wxy)

本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
@ -1,28 +1,29 @@
[#]: collector: (lujun9972)
[#]: translator: (HankChow)
[#]: reviewer: ( )
[#]: publisher: ( )
[#]: url: ( )
[#]: reviewer: (wxy)
[#]: publisher: (wxy)
[#]: url: (https://linux.cn/article-12686-1.html)
[#]: subject: (A practical guide to learning awk)
[#]: via: (https://opensource.com/article/20/9/awk-ebook)
[#]: author: (Seth Kenlon https://opensource.com/users/seth)

awk 实用学习指南
======
下载我们的电子书,学习如何更好地使用 `awk`。
![Person programming on a laptop on a building][1]

> 下载我们的电子书,学习如何更好地使用 `awk`。

在众多 [Linux][2] 命令中,`sed`、`awk` 和 `grep` 恐怕是其中最经典的三个命令了。它们引人注目或许是由于名字发音与众不同,也可能是它们无处不在,甚至是因为它们存在已久,但无论如何,如果要问哪些命令很有 Linux 风格,这三个命令是当之无愧的。其中 `sed` 和 `grep` 已经有很多简洁的标准用法了,但 `awk` 的使用难度却相对突出。

在日常使用中,通过 `sed` 实现字符串替换、通过 `grep` 实现过滤,这些都是司空见惯的操作了,但 `awk` 命令相对来说是用得比较少的。在我看来,可能的原因是大多数人都只使用 `sed` 或者 `grep` 的一些变体实现某些功能,例如:
在日常使用中,通过 `sed` 实现字符串替换、通过 `grep` 实现过滤,这些都是司空见惯的操作了,但 `awk` 命令相对来说是用得比较少的。在我看来,可能的原因是大多数人都只使用 `sed` 或者 `grep` 的一些变化实现某些功能,例如:

```
$ sed -e 's/foo/bar/g' file.txt
$ grep foo file.txt
```

因此,尽管你可能会觉得 `sed` 和 `grep` 使用起来更加顺手,但实际上它们还有更多更强大的作用没有发挥出来。当然,我们没有必要在这两个命令上钻研得很深入,但我还是想理解自己是如何学习一个命令的。很多时候我会把一整串命令记住,但不会去了解其中的运行过程,这就让我产生了一种很熟悉命令的错觉,我可以随口说出某个命令的好几个选项参数,但这些参数具体有什么作用,以及它们的相关语法,我都并不明确。
因此,尽管你可能会觉得 `sed` 和 `grep` 使用起来更加顺手,但实际上它们还有更多更强大的作用没有发挥出来。当然,我们没有必要在这两个命令上钻研得很深入,但我有时会好奇自己“学习”命令的方式。很多时候我会记住一整串命令“咒语”,而不会去了解其中的运作过程,这就让我产生了一种很熟悉命令的错觉,我可以随口说出某个命令的好几个选项参数,但这些参数具体有什么作用,以及它们的相关语法,我都并不明确。

这大概就是很多人对 `awk` 缺乏了解的原因了。

@ -34,7 +35,6 @@ $ grep foo file.txt

`awk` 的本质是将输入的内容看作是一个数组。当 `awk` 扫描一个文本文件时,会把每一行作为一条<ruby>记录<rt>record</rt></ruby>,每一条记录中又分割为多个<ruby>字段<rt>field</rt></ruby>。`awk` 记录了各条记录各个字段的信息,并通过内置变量 `NR`(记录数) 和 `NF`(字段数) 来调用相关信息。例如以下这个命令可以查看文件的行数:

```
$ awk 'END { print NR;}' example.txt
36
```

@ -42,15 +42,13 @@ $ awk 'END { print NR;}' example.txt

从上面的命令可以看出 `awk` 的基本语法,无论是一个单行命令还是一整个脚本,语法都是这样的:

```
`样式或关键字 { 操作 }`
模式或关键字 { 操作 }
```

在上面的例子中,`END` 是一个关键字而不是样式,与此类似的另一个关键字是 `BEGIN`。使用 `BEGIN` 或 `END` 可以让 `awk` 在解析内容前或解析内容后执行大括号中指定的操作。

你可以使用<ruby>样式<rt>pattern</rt></ruby>作为过滤器或限定符,这样 `awk` 只会对匹配样式的对应记录执行指定的操作。以下这个例子就是使用 `awk` 实现 `grep` 命令在文件中查找“Linux”字符串的功能:
在上面的例子中,`END` 是一个关键字而不是模式,与此类似的另一个关键字是 `BEGIN`。使用 `BEGIN` 或 `END` 可以让 `awk` 在解析内容前或解析内容后执行大括号中指定的操作。

你可以使用<ruby>模式<rt>pattern</rt></ruby>作为过滤器或限定符,这样 `awk` 只会对匹配模式的对应记录执行指定的操作。以下这个例子就是使用 `awk` 实现 `grep` 命令在文件中查找“Linux”字符串的功能:

```
$ awk '/Linux/ { print $0; }' os.txt
@ -62,8 +60,7 @@ OS: Elementary Linux (10.1.2.5)
OS: Elementary Linux (10.1.2.6)
```

`awk` 会将文件中的每一行作为一条记录,将一条记录中的每个单词作为一个字段,默认情况下会按照空格作为<ruby>分隔符<rt>field separator</rt></ruby>(`FS`)切割出记录中的字段。如果想要使用其它内容作为分隔符,可以使用 `--field-separator` 选项指定分隔符:
`awk` 会将文件中的每一行作为一条记录,将一条记录中的每个单词作为一个字段,默认情况下会以空格作为<ruby>字段分隔符<rt>field separator</rt></ruby>(`FS`)切割出记录中的字段。如果想要使用其它内容作为分隔符,可以使用 `--field-separator` 选项指定分隔符:

```
$ awk --field-separator ':' '/Linux/ { print $2; }' os.txt
@ -77,7 +74,6 @@ $ awk --field-separator ':' '/Linux/ { print $2; }' os.txt

在上面的例子中,可以看到在 `awk` 处理后每一行的行首都有一个空格,那是因为在源文件中每个冒号(`:`)后面都带有一个空格。和 `cut` 有所不同的是,`awk` 可以指定一个字符串作为分隔符,就像这样:

```
$ awk --field-separator ': ' '/Linux/ { print $2; }' os.txt
CentOS Linux (10.1.1.8)
@ -92,26 +88,23 @@ Elementary Linux (10.1.2.6)

可以通过这样的语法在 `awk` 中自定义函数:

```
`函数名称(参数) { 操作 }`
函数名称(参数) { 操作 }
```

函数的好处在于只需要编写一次就可以多次复用,因此函数在脚本中起到的作用会比在构造单行命令时大。同时 `awk` 自身也带有很多预定义的函数,并且工作原理和其它编程语言或电子表格保持一致。你只需要了解函数需要接受什么参数,就可以放心使用了。
函数的好处在于只需要编写一次就可以多次复用,因此函数在脚本中起到的作用会比在构造单行命令时大。同时 `awk` 自身也带有很多预定义的函数,并且工作原理和其它编程语言或电子表格一样。你只需要了解函数需要接受什么参数,就可以放心使用了。
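
举个简单的例子,下面这个假设的 `area()` 函数用来计算圆的面积(自定义函数的定义要放在规则之外):

```
$ awk 'function area(r) { return 3.14159 * r * r } BEGIN { print area(2); }'
12.5664
```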

`awk` 中提供了数学运算和字符串处理的相关函数。数学运算函数通常比较简单,传入一个数字,它就会传出一个结果:

```
$ awk 'BEGIN { print sqrt(1764); }'
42
```

而字符串处理函数则稍微复杂一点,但 [GNU awk 手册][3]中也有充足的文档。例如 `split()` 函数需要传入一个待分割的单一字段、一个数组用于存放分割结果,以及用于分割的<ruby>定界符<rt>delimiter</rt></ruby>。
而字符串处理函数则稍微复杂一点,但 [GNU awk 手册][3]中也有充足的文档。例如 `split()` 函数需要传入一个待分割的单一字段、一个用于存放分割结果的数组,以及用于分割的<ruby>定界符<rt>delimiter</rt></ruby>。

例如前面示例中的输出内容,每条记录的末尾都包含了一个 IP 地址。由于变量 `NF` 代表的是每条记录的字段数量,刚好对应的是每条记录中最后一个字段的序号,因此可以通过引用 `NF` 将每条记录的最后一个字段传入 `split()` 函数:

```
$ awk --field-separator ': ' '/Linux/ { split($NF, IP, "."); print "subnet: " IP[3]; }' os.txt
subnet: 1
@ -122,13 +115,13 @@ subnet: 2
subnet: 2
```

实际上 `awk` 的功能还远远不止于此,你还可以跳出 `awk` 本身,通过命令管道和脚本来自定义更多功能。
还有更多的函数,没有理由将自己限制在每个 `awk` 代码块中。你可以在终端中使用 `awk` 构建复杂的管道,也可以编写 `awk` 脚本来定义和使用你自己的函数。

### 下载电子书

使用 `awk` 本身就是一个学习 `awk` 的过程,即使某些操作使用 `sed`、`grep`、`cut`、`tr` 命令已经完全足够了,也可以尝试使用 `awk` 来实现。只要熟悉了 `awk`,就可以在 Bash 中自定义一些 `awk` 函数,进而解析复杂的数据。

[下载我们的电子书][4]学习并开始使用 `awk` 吧!
[下载我们的这本电子书][4](需注册)学习并开始使用 `awk` 吧!

--------------------------------------------------------------------------------

via: https://opensource.com/article/20/9/awk-ebook

作者:[Seth Kenlon][a]
选题:[lujun9972][b]
译者:[HankChow](https://github.com/hankchow)
校对:[校对者ID](https://github.com/校对者ID)
校对:[wxy](https://github.com/wxy)

本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
215
published/20200908 How to install software with Ansible.md
Normal file
@@ -0,0 +1,215 @@
[#]: collector: (lujun9972)
[#]: translator: (MjSeven)
[#]: reviewer: (wxy)
[#]: publisher: (wxy)
[#]: url: (https://linux.cn/article-12703-1.html)
[#]: subject: (How to install software with Ansible)
[#]: via: (https://opensource.com/article/20/9/install-packages-ansible)
[#]: author: (Seth Kenlon https://opensource.com/users/seth)

如何使用 Ansible 安装软件
======

> 使用 Ansible 剧本自动安装和更新设备上的软件。



Ansible 是系统管理员和开发人员用来保持计算机系统处于最佳状态的一种流行的自动化工具。与可扩展框架一样,[Ansible][2] 本身功能有限,它真正的功能体现在许多模块中。在某种程度上,Ansible 模块就是 [Linux][3] 系统的命令。它们针对特定问题提供解决方案,而维护计算机时的一项常见任务是使所有计算机的更新和一致。

我曾经使用软件包的文本列表来保持系统或多或少的同步:我会列出笔记本电脑上安装的软件包,然后将其与台式机或另一台服务器之间进行交叉参考,手动弥补差异。当然,在 Linux 机器上安装和维护应用程序是 Ansible 的一项基本功能,这意味着你可以在自己关心的计算机上列出所需的内容。

### 寻找正确的 Ansible 模块

Ansible 模块的数量非常庞大,如何找到能完成你任务的模块?在 Linux 中,你可以在应用程序菜单或 `/usr/bin` 中查找要运行的应用程序。使用 Ansible 时,你可以参考 [Ansible 模块索引][4]。

这个索引按照类别列出。稍加搜索,你就很可能找到所需的模块。对于包管理,[Packaging 模块][5]几乎适用于所有带包管理器的系统。

### 动手写一个 Ansible 剧本

首先,选择本地计算机上的包管理器。例如,如果你打算在运行 Fedora 的笔记本电脑上编写 Ansible 指令(在 Ansible 中称为“<ruby>剧本<rt>playbook</rt></ruby>”),那么从 `dnf` 模块开始。如果你在 Elementary OS 上编写,使用 `apt` 模块,以此类推。这样你就可以开始进行测试和验证,并可以在以后扩展到其它计算机。

第一步是创建一个代表你的剧本的目录。这不是绝对必要的,但这是一个好习惯。Ansible 只需要一个配置文件就可以运行在 YAML 中,但是如果你以后想要扩展剧本,你就可以通过改变目录和文件的方式来控制 Ansible。现在,只需创建一个名为 `install_packages` 或类似的目录:

```
$ mkdir ~/install_packages
```

你可以根据自己的喜好来命名 Ansible 的剧本,但通常将其命名为 `site.yml`:

```
$ touch ~/install_packages/site.yml
```

在你最喜欢的文本编辑器中打开 `site.yml`,添加以下内容:

```
---
- hosts: localhost
  tasks:
    - name: install packages
      become: true
      become_user: root
      dnf:
        state: present
        name:
          - tcsh
          - htop
```

你必须调整使用的模块名称以匹配你使用的发行版。在此示例中,我使用 `dnf` 是因为我在 Fedora Linux 上编写剧本。

就像 Linux 终端中的命令一样,知道 **如何** 来调用 Ansible 模块就已经成功了一半。这个示例剧本遵循标准剧本格式:

* `hosts` 是一台或多台计算机。在本示例中,目标计算机是 `localhost`,即你当前正在使用的计算机(而不是你希望 Ansible 连接的远程系统)。
* `tasks` 是你要在主机上执行的任务列表。
* `name` 是任务的人性化名称。在这种情况下,我使用 `install packages`,因为这就是该任务正在做的事情。
* `become` 允许 Ansible 更改运行此任务的用户。
* `become_user` 允许 Ansible 成为 `root` 用户来运行此任务。这是必须的,因为只有 root 用户才能使用 `dnf` 安装应用程序。
* `dnf` 是模块名称,你可以在 Ansible 网站上的模块索引中找到。

`dnf` 下的节点是 `dnf` 模块专用的。这是模块文档的关键所在。就像 Linux 命令的手册页一样,模块文档会告诉你可用的选项和所需的参数。

![Ansible 文档][6]

安装软件包是一个相对简单的任务,仅需要两个元素。`state` 选项指示 Ansible 检查系统上是否存在 **软件包**,而 `name` 选项列出要查找的软件包。Ansible 会针对机器的 **状态** 进行调整,因此模块指令始终意味着更改。假如 Ansible 扫描了系统状态,发现剧本里描述的系统(在本例中,`tcsh` 和 `htop` 存在)与实际状态存在冲突,那么 Ansible 的任务是进行必要的更改来使系统与剧本匹配。Ansible 可以通过 `dnf`(或 `apt` 或者其它任何包管理器)模块进行更改。

每个模块可能都有一组不同的选项,所以在编写剧本时,要经常参考模块文档。除非你对模块非常熟悉,否则这是期望模块完成工作的唯一合理方法。
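顺带补充一点:除了网页上的模块索引,Ansible 通常也自带 `ansible-doc` 命令,可以直接在终端里查看模块文档(具体输出以你安装的 Ansible 版本为准):

```
$ ansible-doc -l | grep dnf    # 列出名字里带 dnf 的模块
$ ansible-doc dnf              # 查看 dnf 模块的选项说明和示例
```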
### 验证 YAML

剧本是用 YAML 编写的。因为 YAML 遵循严格的语法,所以安装 `yamllint` 来检查剧本是很有帮助的。更妙的是,有一个专门针对 Ansible 的检查工具称为 `ansible-lint`,它专门为剧本而生。在继续之前,安装它。

在 Fedora 或 CentOS 上:

```
$ sudo dnf install yamllint python3-ansible-lint
```

在 Debian、Elementary 或 Ubuntu 上,同样的:

```
$ sudo apt install yamllint ansible-lint
```

使用 `ansible-lint` 来验证你的剧本。如果你无法使用 `ansible-lint`,你可以使用 `yamllint`。

```
$ ansible-lint ~/install_packages/site.yml
```

成功则不返回任何内容,但如果文件中有错误,则必须先修复它们,然后再继续。复制和粘贴过程中的常见错误包括在最后一行的末尾省略换行符、使用制表符而不是空格来缩进。在文本编辑器中修复它们,重新运行 `ansible-lint`,重复这个过程,直到 `ansible-lint` 或 `yamllint` 不再有任何输出为止。

### 使用 Ansible 安装一个应用

现在你有了一个可验证的有效剧本,你终于可以在本地计算机上运行它了。由于该剧本定义的任务需要 root 权限,所以在调用 Ansible 时必须使用 `--ask-become-pass` 选项,系统会提示你输入管理员密码。

开始安装:

```
$ ansible-playbook --ask-become-pass ~/install_packages/site.yml
BECOME password:
PLAY [localhost] ******************************

TASK [Gathering Facts] ******************************
ok: [localhost]

TASK [install packages] ******************************
ok: [localhost]

PLAY RECAP ******************************
localhost: ok=0 changed=2 unreachable=0 failed=0 [...]
```

这些命令被执行后,目标系统将处于与剧本中描述的相同的状态。
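如果你想在真正改动系统之前先“彩排”一遍,可以加上 Ansible 标准的 `--check` 选项做一次干跑,只对比状态而不实际安装(此处仅作补充示例):

```
$ ansible-playbook --check --ask-become-pass ~/install_packages/site.yml
```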
### 在远程系统上安装应用程序

通过这么多操作来替换一个简单的命令可能会适得其反,但是 Ansible 的优势是它可以在你的所有系统中实现自动化。你可以使用条件语句使 Ansible 在不同的系统上使用特定的模块,但是现在,假定所有计算机都使用相同的包管理器。

要连接到远程系统,你必须在 `/etc/ansible/hosts` 文件中定义远程系统。该文件随 Ansible 一起安装,所以它已经存在,不过除了一些解释性注释之外,它可能是空的。使用 `sudo` 在你喜欢的文本编辑器中打开它。

你可以通过其 IP 地址或主机名(只要主机名可以解析)定义主机。例如,如果你已经在 `/etc/hosts` 中定义了 `liavara` 并可以成功 `ping` 通,那么你可以在 `/etc/ansible/hosts` 中将 `liavara` 设置为主机。或者,如果你正在运行一个域名服务器或 Avahi 服务器并且可以 `ping` 通 `liavara`,那么你就可以在 `/etc/ansible/hosts` 中定义它。否则,你必须使用它的 IP 地址。
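作为示意,一个最小的 `/etc/ansible/hosts` 大致长这样(其中的主机名和 IP 都是假设的例子):

```
# /etc/ansible/hosts —— 每行一台主机,可以写主机名或 IP
liavara
10.0.1.17
```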
你还必须成功地建立与目标主机的安全 shell(SSH)连接。最简单的方法是使用 `ssh-copy-id` 命令,但是如果你以前从未与主机建立 SSH 连接,[阅读我关于如何创建自动 SSH 连接的文章][8]。

一旦你在 `/etc/ansible/hosts` 文件中输入了主机名或 IP 地址后,你就可以在剧本中更改 `hosts` 定义:

```
---
- hosts: all
  tasks:
    - name: install packages
      become: true
      become_user: root
      dnf:
        state: present
        name:
          - tcsh
          - htop
```

再次运行 `ansible-playbook`:

```
$ ansible-playbook --ask-become-pass ~/install_packages/site.yml
```

这次,剧本会在你的远程系统上运行。

如果你添加更多主机,则有许多方法可以过滤哪个主机执行哪个任务。例如,你可以创建主机组(服务器的 `webservers`、台式机的 `workstations` 等),下面给出一个简单的示意。
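一个带分组的主机清单大致如下(组名与主机名均为虚构的示例);在剧本中把 `hosts: all` 换成 `hosts: webservers`,任务就只会作用于该组:

```
# /etc/ansible/hosts
[webservers]
web01.example.com
web02.example.com

[workstations]
liavara
```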
### 适用于混合环境的 Ansible

到目前为止,我们一直假定 Ansible 配置的所有主机都运行相同的操作系统(都是使用 `dnf` 命令进行程序包管理的操作系统)。那么,如果你要管理不同发行版的主机,例如 Ubuntu(使用 `apt`)或 Arch(使用 `pacman`),或者其它的操作系统时,该怎么办?

只要目标操作系统具有程序包管理器([macOS 有 Homebrew][9],[Windows 有 Chocolatey][10]),Ansible 就能派上用场。

这就是 Ansible 优势最明显的地方。在 shell 脚本中,你必须检查目标主机上有哪些可用的包管理器,即使使用纯 Python,也必须检查操作系统。Ansible 不仅内置了这些功能,而且还具有在剧本中使用命令结果的机制。你可以使用 `action` 关键字来执行由 Ansible 事实收集子系统提供的变量定义的任务,而不是使用 `dnf` 模块。

```
---
- hosts: all
  tasks:
    - name: install packages
      become: true
      become_user: root
      action: >
        {{ ansible_pkg_mgr }} name=htop,transmission state=present update_cache=yes
```

`action` 关键字会加载目标插件。在本例中,它使用了 `ansible_pkg_mgr` 变量,该变量由 Ansible 在初始 **收集信息** 期间填充。你不需要告诉 Ansible 收集有关其运行操作系统的事实,所以很容易忽略这一点,但是当你运行一个剧本时,你会在默认输出中看到它:

```
TASK [Gathering Facts] *****************************************
ok: [localhost]
```

`action` 插件使用来自这个探针的信息,使用相关的包管理器命令填充 `ansible_pkg_mgr`,以安装在 `name` 参数之后列出的程序包。使用 8 行代码,你可以克服在其它脚本选项中很少允许的复杂跨平台难题。

### 使用 Ansible

现在是 21 世纪,我们都希望我们的计算机设备能够互联并且相对一致。无论你维护的是两台还是 200 台计算机,你都不必一次又一次地执行相同的维护任务。使用 Ansible 来同步生活中的计算机设备,看看 Ansible 还能为你做些什么。

--------------------------------------------------------------------------------

via: https://opensource.com/article/20/9/install-packages-ansible

作者:[Seth Kenlon][a]
选题:[lujun9972][b]
译者:[MjSeven](https://github.com/MjSeven)
校对:[wxy](https://github.com/wxy)

本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出

[a]: https://opensource.com/users/seth
[b]: https://github.com/lujun9972
[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/puzzle_computer_solve_fix_tool.png?itok=U0pH1uwj (Puzzle pieces coming together to form a computer screen)
[2]: https://opensource.com/resources/what-ansible
[3]: https://opensource.com/resources/linux
[4]: https://docs.ansible.com/ansible/latest/modules/modules_by_category.html
[5]: https://docs.ansible.com/ansible/latest/modules/list_of_packaging_modules.html
[6]: https://opensource.com/sites/default/files/uploads/ansible-module.png (Ansible documentation)
[7]: https://creativecommons.org/licenses/by-sa/4.0/
[8]: https://opensource.com/article/20/8/how-ssh
[9]: https://opensource.com/article/20/6/homebrew-mac
[10]: https://opensource.com/article/20/3/chocolatey
@@ -1,8 +1,8 @@
[#]: collector: (lujun9972)
[#]: translator: (geekpi)
[#]: reviewer: ( )
[#]: publisher: ( )
[#]: url: ( )
[#]: reviewer: (wxy)
[#]: publisher: (wxy)
[#]: url: (https://linux.cn/article-12676-1.html)
[#]: subject: (Installing and running Vagrant using qemu-kvm)
[#]: via: (https://fedoramagazine.org/vagrant-qemukvm-fedora-devops-sysadmin/)
[#]: author: (Andy Mott https://fedoramagazine.org/author/amott/)
@@ -12,47 +12,50 @@

![][1]

Vagrant 是一个出色的工具,被 DevOps 专业人员、程序员、系统管理员和普通极客来使用它来建立可复制的基础架构来进行开发和测试。来自它们的网站:
Vagrant 是一个出色的工具,DevOps 专业人员、程序员、系统管理员和普通极客来使用它来建立可重复的基础架构来进行开发和测试。引用自它的网站:

> Vagrant 是用于在单工作流程中构建和管理虚拟机环境的工具。凭借简单易用的工作流程和对自动化的关注,Vagrant 降低了开发环境的设置时间,提高了生产效率,并使”在我的机器上工作“的借口成为过去。
> Vagrant 是用于在单工作流程中构建和管理虚拟机环境的工具。凭借简单易用的工作流程并专注于自动化,Vagrant 降低了开发环境的设置时间,提高了生产效率,并使“在我的机器上可以工作”的借口成为过去。
>
> 如果你已经熟悉 Vagrant 的基础知识,那么文档为所有的功能和内部结构提供了更好的参考。
> 如果你已经熟悉 Vagrant 的基础知识,那么该文档为所有的功能和内部结构提供了更好的参考。
>
> Vagrant 提供了易于配置、可复制、可移植的工作环境,它建立在行业标准技术之上,并由一个统一的工作流程控制,帮助你和你的团队最大限度地提高生产力和灵活性。
> Vagrant 提供了基于行业标准技术构建的、易于配置、可复制、可移植的工作环境,并由一个一致的工作流程控制,帮助你和你的团队最大限度地提高生产力和灵活性。
>
> <https://www.vagrantup.com/intro>

本指南将通过必要的步骤,让 Vagrant 在基于 Fedora 的机器上工作。
本指南将逐步介绍使 Vagrant 在基于 Fedora 的计算机上工作所需的步骤。

我从最小化安装 Fedora Server 开始,因为这样可以减少主机操作系统的内存占用,但如果你已经有一台可以使用的 Fedora 机器,无论是服务器还是工作站,那么也没问题。
我从最小化安装 Fedora 服务器开始,因为这样可以减少宿主机操作系统的内存占用,但如果你已经有一台可以使用的 Fedora 机器,无论是服务器还是工作站版本,那么也没问题。

### 检查机器是否支持虚拟化:
### 检查机器是否支持虚拟化

```
$ sudo lscpu | grep Virtualization
```

```
Virtualization: VT-x
Virtualization type: full
```

### 安装 qemu-kvm:
### 安装 qemu-kvm

```
sudo dnf install qemu-kvm libvirt libguestfs-tools virt-install rsync
```

### 启用并启动 libvirt 守护进程:
### 启用并启动 libvirt 守护进程

```
sudo systemctl enable --now libvirtd
```

### 安装 Vagrant:
### 安装 Vagrant

```
sudo dnf install vagrant
```

### 安装 Vagrant libvirtd 插件:
### 安装 Vagrant libvirtd 插件

```
sudo vagrant plugin install vagrant-libvirt
@@ -64,48 +67,58 @@ sudo vagrant plugin install vagrant-libvirt
vagrant box add fedora/32-cloud-base --provider=libvirt
```

### 创建一个最小的 Vagrantfile 来测试:
(LCTT 译注:以防你不知道,box 是 Vagrant 中的一种包格式,Vagrant 支持的任何平台上的任何人都可以使用盒子来建立相同的工作环境。)

### 创建一个最小化的 Vagrantfile 来测试

```
$ mkdir vagrant-test
$ cd vagrant-test
$ vi VagrantfileVagrant.configure("2") do |config|
$ vi Vagrantfile
```

```
Vagrant.configure("2") do |config|
  config.vm.box = "fedora/32-cloud-base"
end
```

**注意文件名和文件内容的大写。**
**注意文件名和文件内容的大小写。**

### 检查文件:
### 检查文件

```
vagrant statusCurrent machine states:
vagrant status
```

```
Current machine states:

default not created (libvirt)

The Libvirt domain is not created. Run 'vagrant up' to create it.
```

### 启动 box:
### 启动 box

```
vagrant up
```

### 连接到你的新机器:
### 连接到你的新机器

```
vagrant ssh
```

完成了。现在你的 Fedora 机器上有 Vagrant 在工作。
完成了。现在你的 Fedora 机器上 Vagrant 可以工作了。

要停止机器,请使用 _vagrant halt_。这只是简单地停止机器,但保留虚拟机和磁盘。
要关闭并删除它,请使用 _vagrant destroy_。这将删除整个机器和你在其中所做的任何更改。
要停止该机器,请使用 `vagrant halt`。这只是简单地停止机器,但保留虚拟机和磁盘。
要关闭并删除它,请使用 `vagrant destroy`。这将删除整个机器和你在其中所做的任何更改。
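把本文用到的几个生命周期命令放在一起,方便对照(仅为速查示意):

```
$ vagrant up        # 创建并启动虚拟机
$ vagrant ssh       # 通过 SSH 连接进去
$ vagrant halt      # 停止虚拟机,但保留磁盘
$ vagrant destroy   # 删除整个虚拟机及其中的更改
```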
### 接下来的步骤

在运行 _vagrant up_ 命令之前,你不需要下载 box。你可以直接在 Vagrantfile 中指定 box 和提供者,如果还没有的话,Vagrant 会下载它。下面是一个例子,它还设置了内存量和 CPU 数量:
在运行 `vagrant up` 命令之前,你不需要下载 box。你可以直接在 Vagrantfile 中指定 box 和提供者,如果还没有的话,Vagrant 会下载它。下面是一个例子,它还设置了内存量和 CPU 数量:

```
# -*- mode: ruby -*-
@@ -131,7 +144,7 @@ via: https://fedoramagazine.org/vagrant-qemukvm-fedora-devops-sysadmin/

作者:[Andy Mott][a]
选题:[lujun9972][b]
译者:[geekpi](https://github.com/geekpi)
校对:[校对者ID](https://github.com/校对者ID)
校对:[wxy](https://github.com/wxy)

本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
@@ -1,8 +1,8 @@
[#]: collector: "lujun9972"
[#]: translator: "lxbwolf"
[#]: reviewer: " "
[#]: publisher: " "
[#]: url: " "
[#]: reviewer: "wxy"
[#]: publisher: "wxy"
[#]: url: "https://linux.cn/article-12681-1.html"
[#]: subject: "Find security issues in Go code using gosec"
[#]: via: "https://opensource.com/article/20/9/gosec"
[#]: author: "Gaurav Kamathe https://opensource.com/users/gkamathe"
@@ -10,20 +10,21 @@
使用 gosec 检查 Go 代码中的安全问题
======

来学习下 Golang 的安全检查工具 gosec。
![A lock on the side of a building][1]
> 来学习下 Go 语言的安全检查工具 gosec。



[Go 语言][2]写的代码越来越常见,尤其是在容器、Kubernetes 或云生态相关的开发中。Docker 是最早采用 Golang 的项目之一,随后是 Kubernetes,之后大量的新项目在众多编程语言中选择了 Go。

像其他语言一样,Go 也有它的长处和短处(如安全缺陷)。这些缺陷可能会因为语言本身的限制在程序员编码不当时出现,例如,C 代码中的内存安全问题。
像其他语言一样,Go 也有它的长处和短处(如安全缺陷)。这些缺陷可能会因为语言本身的缺陷加上程序员编码不当而产生,例如,C 代码中的内存安全问题。

无论它们出现的原因是什么,安全问题都应该在开发过程中尽早修复,以免在封装好的软件中出现。幸运的是,静态分析工具可以帮你批量地处理这些问题。静态分析工具通过解析用某种编程语言写的代码来找到问题。
无论它们出现的原因是什么,安全问题都应该在开发过程的早期修复,以免在封装好的软件中出现。幸运的是,静态分析工具可以帮你以更可重复的方式处理这些问题。静态分析工具通过解析用某种编程语言写的代码来找到问题。

这类工具中很多被称为 linter。传统意义上,linter 更注重的是检查代码中编码问题、bug、代码风格之类的问题,不会检查安全问题。例如,[Coverity][3] 是很受欢迎的用来检查 C/C++ 代码问题的工具。然而,有工具专门用来检查源码中的安全问题。例如,[Bandit][4] 用来检查 Python 代码中的安全缺陷。[gosec][5] 用来搜寻 Go 源码中的安全缺陷。gosec 通过扫描 Go 的 AST(<ruby>抽象语法树<rt>abstract syntax tree</rt></ruby>)来检查源码中的安全问题。
这类工具中很多被称为 linter。传统意义上,linter 更注重的是检查代码中编码问题、bug、代码风格之类的问题,它们可能不会发现代码中的安全问题。例如,[Coverity][3] 是一个很流行的工具,它可以帮助寻找 C/C++ 代码中的问题。然而,也有一些工具专门用来检查源码中的安全问题。例如,[Bandit][4] 可以检查 Python 代码中的安全缺陷。而 [gosec][5] 则用来搜寻 Go 源码中的安全缺陷。`gosec` 通过扫描 Go 的 AST(<ruby>抽象语法树<rt>abstract syntax tree</rt></ruby>)来检查源码中的安全问题。

### 开始使用 gosec

在开始学习和使用 gosec 之前,你需要准备一个 Go 语言写的项目。有这么多开源软件,我相信这不是问题。你可以在 GitHub 的 [Golang 库排行榜]][6]中找一个。
在开始学习和使用 `gosec` 之前,你需要准备一个 Go 语言写的项目。有这么多开源软件,我相信这不是问题。你可以在 GitHub 的 [热门 Golang 仓库][6]中找一个。

本文中,我随机选了 [Docker CE][7] 项目,但你可以选择任意的 Go 项目。
@@ -31,57 +32,45 @@

如果你还没安装 Go,你可以先从仓库中拉取下来。如果你用的是 Fedora 或其他基于 RPM 的 Linux 发行版本:

```
`$ dnf install golang.x86_64`
$ dnf install golang.x86_64
```

如果你用的是其他操作系统,请参照 [Golang 安装][8]页面。

使用 `version` 参数来验证 Go 是否安装成功:

```
$ go version
go version go1.14.6 linux/amd64
$
```

运行 `go get` 命令就可以轻松地安装 gosec:
运行 `go get` 命令就可以轻松地安装 `gosec`:

```
$ go get github.com/securego/gosec/cmd/gosec
$
```

上面这行命令会从 GitHub 下载 gosec 的源码、编译并安装到指定位置。在仓库的 README 中你还可以看到[安装工具的其他方法][9]。

gosec 的源码会被下载到 `$GOPATH` 的位置,编译出的二进制文件会被安装到你系统上设置的 `bin` 目录下。你可以运行下面的命令来查看 `$GOPATH` 和 `$GOBIN` 目录:
上面这行命令会从 GitHub 下载 `gosec` 的源码,编译并安装到指定位置。在仓库的 `README` 中你还可以看到[安装该工具的其他方法][9]。

`gosec` 的源码会被下载到 `$GOPATH` 的位置,编译出的二进制文件会被安装到你系统上设置的 `bin` 目录下。你可以运行下面的命令来查看 `$GOPATH` 和 `$GOBIN` 目录:

```
$ go env | grep GOBIN
GOBIN="/root/go/gobin"
$
$ go env | grep GOPATH
GOPATH="/root/go"
$
```

如果 `go get` 命令执行成功,那么 gosec 二进制应该就可以使用了:
如果 `go get` 命令执行成功,那么 `gosec` 二进制应该就可以使用了:

```
$
$ ls -l ~/go/bin/
total 9260
-rwxr-xr-x. 1 root root 9482175 Aug 20 04:17 gosec
$
```

你可以把 `$GOPATH` 下的 `bin` 目录添加到 `$PATH` 中。这样你就可以像使用系统上的其他命令一样来使用 gosec 命令行工具(CLI)了。
你可以把 `$GOPATH` 下的 `bin` 目录添加到 `$PATH` 中。这样你就可以像使用系统上的其他命令一样来使用 `gosec` 命令行工具(CLI)了。

```
$ which gosec
@@ -89,8 +78,7 @@ $ which gosec
$
```

使用 gosec 命令行工具的 `-help` 选项来看看运行是否符合预期:
使用 `gosec` 命令行工具的 `-help` 选项来看看运行是否符合预期:

```
$ gosec -help
@@ -109,17 +97,12 @@ USAGE:

之后,创建一个目录,把源码下载到这个目录作为实例项目(本例中,我用的是 Docker CE):

```
$ mkdir gosec-demo
$
$ cd gosec-demo/
$
$ pwd
/root/gosec-demo
$

$ git clone <https://github.com/docker/docker-ce.git>
$ git clone https://github.com/docker/docker-ce.git
Cloning into 'docker-ce'...
remote: Enumerating objects: 1271, done.
remote: Counting objects: 100% (1271/1271), done.
@@ -128,10 +111,9 @@ remote: Total 431003 (delta 384), reused 981 (delta 318), pack-reused 429732
Receiving objects: 100% (431003/431003), 166.84 MiB | 28.94 MiB/s, done.
Resolving deltas: 100% (221338/221338), done.
Updating files: 100% (10861/10861), done.
$
```

代码统计工具(本例中用的是 cloc)显示这个项目大部分是用 Go 写的,恰好迎合了 gosec 的功能。
代码统计工具(本例中用的是 `cloc`)显示这个项目大部分是用 Go 写的,恰好迎合了 `gosec` 的功能。

```
@@ -140,9 +122,10 @@ $ ./cloc /root/gosec-demo/docker-ce/
8724 unique files.
2560 files ignored.

\-----------------------------------------------------------------------------------
-----------------------------------------------------------------------------------
Language files blank comment code
\-----------------------------------------------------------------------------------
-----------------------------------------------------------------------------------
Go 7222 190785 230478 1574580
YAML 37 4831 817 156762
Markdown 529 21422 0 67893
@@ -151,13 +134,11 @@ Protocol Buffers 149 5014 16562 10

### 使用默认选项运行 gosec

在 Docker CE 项目中使用默认选项运行 gosec,执行 `gosec ./...` 命令。屏幕上会有很多输出内容。在末尾你会看到一个简短的 `Summary`,列出了浏览的文件数、所有文件的总行数,以及源码中发现的问题数。
在 Docker CE 项目中使用默认选项运行 `gosec`,执行 `gosec ./...` 命令。屏幕上会有很多输出内容。在末尾你会看到一个简短的 “Summary”,列出了浏览的文件数、所有文件的总行数,以及源码中发现的问题数。

```
$ pwd
/root/gosec-demo/docker-ce
$
$ time gosec ./...
[gosec] 2020/08/20 04:44:15 Including rules: default
[gosec] 2020/08/20 04:44:15 Excluding rules: default
@@ -183,180 +164,166 @@ $

滚动屏幕你会看到不同颜色高亮的行:红色表示需要尽快查看的高优先级问题,黄色表示中优先级的问题。

#### 关于“假阳性”
#### 关于误判

在开始检查代码之前,我想先分享几条基本原则。默认情况下,静态检查工具会基于一系列的规则对测试代码进行分析并报告出检查出来的*所有*问题。这表示工具报出来的每一个问题都需要修复吗?非也。这个问题最好的解答者是设计和开发这个软件的人。他们最熟悉代码,更重要的是,他们了解软件会在什么环境下部署以及会被怎样使用。
在开始检查代码之前,我想先分享几条基本原则。默认情况下,静态检查工具会基于一系列的规则对测试代码进行分析,并报告出它们发现的*所有*问题。这是否意味着工具报出来的每一个问题都需要修复?非也。这个问题最好的解答者是设计和开发这个软件的人。他们最熟悉代码,更重要的是,他们了解软件会在什么环境下部署以及会被怎样使用。

这个知识点对于判定工具标记出来的某段代码到底是不是安全缺陷至关重要。随着工作时间和经验的积累,你会慢慢学会怎样让静态分析工具忽略非安全缺陷,使报告内容的可执行性更高。因此,要判定 gosec 报出来的某个问题是否需要修复,让一名有经验的开发者对源码做人工审计会是比较好的办法。
这个知识点对于判定工具标记出来的某段代码到底是不是安全缺陷至关重要。随着工作时间和经验的积累,你会慢慢学会怎样让静态分析工具忽略非安全缺陷,使报告内容的可执行性更高。因此,要判定 `gosec` 报出来的某个问题是否需要修复,让一名有经验的开发者对源码做人工审计会是比较好的办法。

#### 高优先级问题

从输出内容看,gosec 发现了 Docker CE 的一个高优先级问题,它使用的是低版本的 TLS(<ruby>传输层安全<rt>Transport Layer Security</rt></ruby>)。无论什么时候,使用软件和库的最新版本都是确保它更新及时、没有安全问题的最好的方法。
从输出内容看,`gosec` 发现了 Docker CE 的一个高优先级问题,它使用的是低版本的 TLS(<ruby>传输层安全<rt>Transport Layer Security</rt></ruby>)。无论什么时候,使用软件和库的最新版本都是确保它更新及时、没有安全问题的最好的方法。

```
[/root/gosec-demo/docker-ce/components/engine/daemon/logger/splunk/splunk.go:173] - G402 (CWE-295): TLS MinVersion too low. (Confidence: HIGH, Severity: HIGH)
172:
> 173: tlsConfig := &tls.Config{}
174:
```

它还发现了一个伪随机数生成器。它是不是一个安全缺陷,取决于生成的随机数的使用方式。
它还发现了一个弱随机数生成器。它是不是一个安全缺陷,取决于生成的随机数的使用方式。

```
[/root/gosec-demo/docker-ce/components/engine/pkg/namesgenerator/names-generator.go:843] - G404 (CWE-338): Use of weak random number generator (math/rand instead of crypto/rand) (Confidence: MEDIUM, Severity: HIGH)
842: begin:
> 843: name := fmt.Sprintf("%s_%s", left[rand.Intn(len(left))], right[rand.Intn(len(right))])
844: if name == "boring_wozniak" /* Steve Wozniak is not boring */ {
```

#### 中优先级问题

这个工具还发现了一些中优先级问题。它标记了一个通过与 tar 相关的解压炸弹这种方式实现的潜在的 DoS 威胁,这种方式可能会被恶意的攻击者利用。
这个工具还发现了一些中优先级问题。它标记了一个通过与 `tar` 相关的解压炸弹这种方式实现的潜在的 DoS 威胁,这种方式可能会被恶意的攻击者利用。

```
[/root/gosec-demo/docker-ce/components/engine/pkg/archive/copy.go:357] - G110 (CWE-409): Potential DoS vulnerability via decompression bomb (Confidence: MEDIUM, Severity: MEDIUM)
356:
> 357: if _, err = io.Copy(rebasedTar, srcTar); err != nil {
358: w.CloseWithError(err)
```

它还发现了一个通过变量访问文件的问题。如果恶意使用者能访问这个变量,那么他们就可以改变变量的值去读其他文件。

```
[/root/gosec-demo/docker-ce/components/cli/cli/context/tlsdata.go:80] - G304 (CWE-22): Potential file inclusion via variable (Confidence: HIGH, Severity: MEDIUM)
79: if caPath != "" {
> 80: if ca, err = ioutil.ReadFile(caPath); err != nil {
81: return nil, err
```

文件和目录通常是操作系统安全的最基础的元素。这里,gosec 报出了一个可能需要你检查目录的权限是否安全的问题。
文件和目录通常是操作系统安全的最基础的元素。这里,`gosec` 报出了一个可能需要你检查目录的权限是否安全的问题。

```
[/root/gosec-demo/docker-ce/components/engine/contrib/apparmor/main.go:41] - G301 (CWE-276): Expect directory permissions to be 0750 or less (Confidence: HIGH, Severity: MEDIUM)
40: // make sure /etc/apparmor.d exists
> 41: if err := os.MkdirAll(path.Dir(apparmorProfilePath), 0755); err != nil {
42: log.Fatal(err)
```

你经常需要在源码中启动命令行工具。Go 使用内建的 exec 库来实现。仔细地分析用来调用这些工具的变量,就能发现安全缺陷。

```
[/root/gosec-demo/docker-ce/components/engine/testutil/fakestorage/fixtures.go:59] - G204 (CWE-78): Subprocess launched with variable (Confidence: HIGH, Severity: MEDIUM)
58:
> 59: cmd := exec.Command(goCmd, "build", "-o", filepath.Join(tmp, "httpserver"), "github.com/docker/docker/contrib/httpserver")
60: cmd.Env = append(os.Environ(), []string{
```

#### 低优先级问题

在这个输出中,gosec 报出了一个 “unsafe” 调用相关的低优先级问题,这个调用会绕开 Go 提供的内存保护。再仔细分析下你调用 “unsafe” 的方式,看看是否有被别人利用的可能性。
在这个输出中,gosec 报出了一个 `unsafe` 调用相关的低优先级问题,这个调用会绕开 Go 提供的内存保护。再仔细分析下你调用 `unsafe` 的方式,看看是否有被别人利用的可能性。

```
[/root/gosec-demo/docker-ce/components/engine/pkg/archive/changes_linux.go:264] - G103 (CWE-242): Use of unsafe calls should be audited (Confidence: HIGH, Severity: LOW)
263: for len(buf) > 0 {
> 264: dirent := (*unix.Dirent)(unsafe.Pointer(&buf[0]))
265: buf = buf[dirent.Reclen:]

[/root/gosec-demo/docker-ce/components/engine/pkg/devicemapper/devmapper_wrapper.go:88] - G103 (CWE-242): Use of unsafe calls should be audited (Confidence: HIGH, Severity: LOW)
87: func free(p *C.char) {
> 88: C.free(unsafe.Pointer(p))
89: }
```

它还标记了源码中未处理的错误。源码中出现的错误你都应该处理。

```
[/root/gosec-demo/docker-ce/components/cli/cli/command/image/build/context.go:172] - G104 (CWE-703): Errors unhandled. (Confidence: HIGH, Severity: LOW)
171: err := tar.Close()
> 172: os.RemoveAll(dockerfileDir)
173: return err
```

### 自定义 gosec 扫描

使用 gosec 的默认选项带来了很多的问题。然而,经过人工审计和随着时间推移,你会掌握哪些问题是不需要标记的。你可以自己指定排除和包含哪些测试。
使用 `gosec` 的默认选项会带来很多的问题。然而,经过人工审计,随着时间推移你会掌握哪些问题是不需要标记的。你可以自己指定排除和包含哪些测试。

我上面提到过,gosec 是基于一系列的规则从 Go 源码中查找问题的。下面是它使用的完整的[规则][10]列表:
我上面提到过,`gosec` 是基于一系列的规则从 Go 源码中查找问题的。下面是它使用的完整的[规则][10]列表:

- G101:查找硬编码凭证
- G102:绑定到所有接口
- G103:审计不安全区块的使用
- G103:审计 `unsafe` 块的使用
- G104:审计未检查的错误
- G106:审计 ssh.InsecureIgnoreHostKey 的使用
- G106:审计 `ssh.InsecureIgnoreHostKey` 的使用
- G107: 提供给 HTTP 请求的 url 作为污点输入
- G108: 统计端点自动暴露到 /debug/pprof
- G108: `/debug/pprof` 上自动暴露的剖析端点
- G109: strconv.Atoi 转换到 int16 或 int32 时潜在的整数溢出
- G109: `strconv.Atoi` 转换到 int16 或 int32 时潜在的整数溢出
- G110: 潜在的通过解压炸弹实现的 DoS
- G201:SQL 查询构造使用格式字符串
- G202:SQL 查询构造使用字符串连接
- G203:在 HTML 模板中使用未转义的数据
- G204:审计命令执行情况
- G301:创建目录时文件权限分配不合理
- G302:chmod 文件权限分配不合理
- G302:使用 `chmod` 时文件权限分配不合理
- G303:使用可预测的路径创建临时文件
- G304:作为污点输入提供的文件路径
- G304:通过污点输入提供的文件路径
- G305:提取 zip/tar 文档时遍历文件
- G306: 写到新文件时文件权限分配不合理
- G307: 把返回错误的函数放到 defer 内
- G307: 把返回错误的函数放到 `defer` 内
- G401:检测 DES、RC4、MD5 或 SHA1 的使用情况
- G401:检测 DES、RC4、MD5 或 SHA1 的使用
- G402:查找错误的 TLS 连接设置
- G403:确保最小 RSA 密钥长度为 2048 位
- G404:不安全的随机数源(rand)
- G404:不安全的随机数源(`rand`)
- G501:导入黑名单列表:crypto/md5
- G502:导入黑名单列表:crypto/des
- G503:导入黑名单列表:crypto/rc4
- G504:导入黑名单列表:net/http/cgi
- G505:导入黑名单列表:crypto/sha1
- G601: 在 range 语句中使用隐式的元素别名
- G601: 在 `range` 语句中使用隐式的元素别名

#### 排除指定的测试

你可以自定义 gosec 来避免对已知为安全的问题进行扫描和报告。你可以使用 `-exclude` 选项和上面的规则编号来忽略指定的问题。

例如,如果你不想让 gosec 检查源码中硬编码凭证相关的未处理的错误,那么你可以运行下面的命令来忽略这些错误:
你可以自定义 `gosec` 来避免对已知为安全的问题进行扫描和报告。你可以使用 `-exclude` 选项和上面的规则编号来忽略指定的问题。

例如,如果你不想让 `gosec` 检查源码中硬编码凭证相关的未处理的错误,那么你可以运行下面的命令来忽略这些错误:

```
$ gosec -exclude=G104 ./...
$ gosec -exclude=G104,G101 ./...
```

有时候你知道某段代码是安全的,但是 gosec 还是会报出问题。然而,你又不想完全排除掉整个检查,因为你想让 gosec 检查新增的代码。通过在你已知为安全的代码块添加 `#nosec` 标记可以避免 gosec 扫描。这样 gosec 会继续扫描新增代码,而忽略掉 `#nosec` 标记的代码块。
有时候你知道某段代码是安全的,但是 `gosec` 还是会报出问题。然而,你又不想完全排除掉整个检查,因为你想让 `gosec` 检查新增的代码。通过在你已知为安全的代码块添加 `#nosec` 标记可以避免 `gosec` 扫描。这样 `gosec` 会继续扫描新增代码,而忽略掉 `#nosec` 标记的代码块。
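`#nosec` 注释直接写在触发告警的那一行 Go 代码上,也可以附上规则编号只豁免特定规则。下面用一段假设的输出来示意它的样子(文件名、行号和代码均为虚构,具体写法以 gosec 文档为准):

```
$ grep -n "#nosec" splunk.go
173:    tlsConfig := &tls.Config{}  // #nosec G402 兼容旧设备,已人工评估
```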
#### 运行指定的检查

另一方面,如果你只想检查指定的问题,你可以通过 `-include` 选项和规则编号来告诉 gosec 运行哪些检查:
另一方面,如果你只想检查指定的问题,你可以通过 `-include` 选项和规则编号来告诉 `gosec` 运行哪些检查:

```
`$ gosec -include=G201,G202 ./...`
$ gosec -include=G201,G202 ./...
```

#### 扫描测试文件

Go 语言自带对测试的支持,通过单元测试来检验一个元素是否符合预期。在默认模式下,gosec 会忽略测试文件,你可以使用 `-tests` 选项把它们包含进来:
Go 语言自带对测试的支持,通过单元测试来检验一个元素是否符合预期。在默认模式下,`gosec` 会忽略测试文件,你可以使用 `-tests` 选项把它们包含进来:

```
`gosec -tests ./...`
gosec -tests ./...
```

#### 修改输出的格式

找出问题只是它的一半功能;另一半功能是把它检查到的问题以用户友好同时又方便工具处理的方式报告出来。幸运的是,gosec 可以用不同的方式输出。例如,如果你想看 JSON 格式的报告,那么就使用 `-fmt` 选项指定 JSON 格式并把结果保存到 `results.json` 文件中:
找出问题只是它的一半功能;另一半功能是把它检查到的问题以用户友好同时又方便工具处理的方式报告出来。幸运的是,`gosec` 可以用不同的方式输出。例如,如果你想看 JSON 格式的报告,那么就使用 `-fmt` 选项指定 JSON 格式并把结果保存到 `results.json` 文件中:

```
$ gosec -fmt=json -out=results.json ./...
@@ -370,7 +337,7 @@ $
          "confidence": "HIGH",
          "cwe": {
            "ID": "242",
            "URL": "<https://cwe.mitre.org/data/definitions/242.html>"
            "URL": "https://cwe.mitre.org/data/definitions/242.html"
          },
          "rule_id": "G103",
          "details": "Use of unsafe calls should be audited",
@@ -381,9 +348,9 @@ $
},
```

### 用 gosec 检查容易暴露出来的问题
### 用 gosec 检查容易被发现的问题

静态检查工具不能完全代替人工代码审计。然而,当代码量变大、有众多开发者时,这样的工具通常能用批量的方式帮忙找出容易暴露的问题。它对于帮助新开发者识别和在编码时避免引入这些安全缺陷很有用。
静态检查工具不能完全代替人工代码审计。然而,当代码量变大、有众多开发者时,这样的工具往往有助于以可重复的方式找出容易被发现的问题。它对于帮助新开发者识别和在编码时避免引入这些安全缺陷很有用。

--------------------------------------------------------------------------------

@@ -392,7 +359,7 @@ via: https://opensource.com/article/20/9/gosec

作者:[Gaurav Kamathe][a]
选题:[lujun9972][b]
译者:[lxbowlf](https://github.com/lxbwolf)
校对:[校对者ID](https://github.com/校对者ID)
校对:[wxy](https://github.com/wxy)

本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
@@ -1,52 +1,52 @@
[#]: collector: (lujun9972)
[#]: translator: (geekpi)
[#]: reviewer: ( )
[#]: publisher: ( )
[#]: url: ( )
[#]: reviewer: (wxy)
[#]: publisher: (wxy)
[#]: url: (https://linux.cn/article-12691-1.html)
[#]: subject: (5 questions to ask yourself when writing project documentation)
[#]: via: (https://opensource.com/article/20/9/project-documentation)
[#]: author: (Alexei Leontief https://opensource.com/users/alexeileontief)

编写项目文档时要问自己 5 个问题
编写项目文档时要问自己的 5 个问题
======
使用一些有效沟通的基本原则可以帮助你创建与你的品牌一致的,编写良好,内容丰富的项目文档。
![A person writing.][1]

在开始另一个开源项目文档的实际写作部分之前,甚至在采访专家之前,最好回答一些有关新文档的高级问题。
> 使用有效沟通的一些基本原则可以帮助你创建与你的品牌一致的、编写良好、内容丰富的项目文档。

著名的传播理论家 Harold Lasswell 在他 1948 年的文章《社会中的传播结构和功能》(_The Structure and Function of Communication in Society_)中写道:



> (一个)描述沟通行为的方便方法是回答以下问题:
在开始实际撰写又一个开源项目的文档之前,甚至在采访专家之前,最好回答一些有关新文档的高级问题。

著名的传播理论家 Harold Lasswell 在他 1948 年的文章《<ruby>社会中的传播结构和功能<rt>The Structure and Function of Communication in Society</rt></ruby>》中写道:

> (一种)描述沟通行为的方便方法是回答以下问题:
>
> * 谁
> * 说什么
> * 在哪个渠道
> * 对谁
> * 有什么效果?
>

作为一名技术交流者,你可以运用 Lasswell 的理论,回答关于你文档的类似问题,以更好地传达你的信息,达到预期的效果。
作为一名技术沟通者,你可以运用 Lasswell 的理论,回答关于你文档的类似问题,以更好地传达你的信息,达到预期的效果。

### 谁:谁是文档的所有者?
### 谁—谁是文档的所有者?

或者说,文档背后是什么公司?它想向受众传达什么品牌形象?这个问题的答案将极大地影响你的写作风格。公司可能有自己的风格指南,或者至少有正式的使命声明,在这种情况下,你应该从这开始。
或者说,文档背后是什么公司?它想向受众传达什么品牌形象?这个问题的答案将大大影响你的写作风格。公司也可能有自己的风格指南,或者至少有正式的使命声明,在这种情况下,你应该从这开始。

如果公司刚刚起步,你可以向文件的主人提出上述问题。作为作者,将你为公司创造的声音和角色与你自己的世界观和信念结合起来是很重要的。这将使你的写作看起来更自然,而不像公司的行话。
如果公司刚刚起步,你可以向文件的主人提出上述问题。作为作者,将你为公司创造的声音和角色与你自己的世界观和信仰结合起来是很重要的。这将使你的写作看起来更自然,而不像公司的行话。

### 说什么:文件类型是什么?
### 说什么—文件类型是什么?

你需要传达什么信息?它是什么类型的文档:用户指南、API 参考、发布说明等?许多文档类型有模板或普遍认可的结构,这些结构为你提供一个开始的地方,并帮助确保包括所有必要的信息。
你需要传达什么信息?它是什么类型的文档:用户指南、API 参考、发布说明等?许多文档类型将有模板或普遍认可的结构,它将让你从这开始,并帮助确保包括所有必要的信息。

### 在哪个渠道—文档的格式是什么?
### 在哪个渠道:文档的格式是什么?

对于技术文档,沟通的渠道通常会告诉你文档的最终格式,也就是 PDF、HTML、文本文件等。这很可能也决定了你应该使用什么工具来编写你的文档。

### 对谁—目标受众是谁?
### 对谁:目标受众是谁?

谁会阅读这份文档?他们的知识水平如何?他们的工作职责和主要挑战是什么?这些问题将帮助你确定你应该覆盖什么,是否应该进入细节,是否可以使用任何特定的术语,等等。在某些情况下,这些问题的答案甚至可以影响你使用的语法的复杂性。
谁会阅读这份文档?他们的知识水平如何?他们的工作职责和主要挑战是什么?这些问题将帮助你确定你应该覆盖什么内容,是否应该涉及细节,是否可以使用特定的术语,等等。在某些情况下,这些问题的答案甚至可以影响你使用的语法的复杂性。

### 有什么效果-文档的目的是什么?
### 有什么效果:文档的目的是什么?

在这里,你应该定义这个文档要为它的潜在读者解决什么问题,或者它应该为他们回答什么问题。例如,你的文档的目的可以是教你的客户如何使用你的产品。

@@ -56,7 +56,7 @@

### 总结

上面的问题旨在帮助你形成有效沟通的基础,并确保你的文件涵盖了所有应该涵盖的内容。你可以把它们分解成你自己的问题清单,并把它们放在身边,以便在你有文件要创建的时候使用。当你面对空白页时,这份清单也可能会派上用场。希望它能激发你的灵感,帮助你产生想法。
上面的问题旨在帮助你形成有效沟通的基础,并确保你的文件涵盖了所有应该涵盖的内容。你可以把它们分解成你自己的问题清单,并把它们放在身边,以便在你有文件要创建的时候使用。当你面对空白页无从着笔时,这份清单也可能会派上用场。希望它能激发你的灵感,帮助你产生想法。

--------------------------------------------------------------------------------

@@ -65,7 +65,7 @@ via: https://opensource.com/article/20/9/project-documentation

作者:[Alexei Leontief][a]
选题:[lujun9972][b]
译者:[geekpi](https://github.com/geekpi)
校对:[校对者ID](https://github.com/校对者ID)
校对:[wxy](https://github.com/wxy)

本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
@@ -1,8 +1,8 @@
[#]: collector: (lujun9972)
[#]: translator: (rakino)
[#]: reviewer: ( )
[#]: publisher: ( )
[#]: url: ( )
[#]: reviewer: (wxy)
[#]: publisher: (wxy)
[#]: url: (https://linux.cn/article-12699-1.html)
[#]: subject: (Create template files in GNOME)
[#]: via: (https://opensource.com/article/20/9/gnome-templates)
[#]: author: (Alan Formy-Duval https://opensource.com/users/alanfdoss)
@@ -10,27 +10,23 @@
在 GNOME 中创建文档模板
======

![Digital images of a computer desktop][1]
> 制作模板可以让你更快地开始写作新的文档。

制作模板可以让你更快地开始写作新的文档。


我偶然发现了 [GNOME][2] 的一个新功能(对我来说是的):创建文档模版。模版(template)也被称作样版文件(boilerplate),一般是有着特定格式的空文档,例如律师事务所的信笺,在其顶部有着律所的名称和地址;另一个例子是银行以及保险公司的保函,在其底部页脚包含着某些免责声明。由于这类信息很少改变,你可以把它们添加到空文档中作为模板使用。
我只是偶然发现了 [GNOME][2] 的一个新功能(对我来说是的):创建文档模版。<ruby>模版<rt>template</rt></ruby>也被称作<ruby>样版文件<rt>boilerplate</rt></ruby>,一般是有着特定格式的空文档,例如律师事务所的信笺,在其顶部有着律所的名称和地址;另一个例子是银行以及保险公司的保函,在其底部页脚包含着某些免责声明。由于这类信息很少改变,你可以把它们添加到空文档中作为模板使用。

一天,在浏览我的 Linux 系统文件的时候,我点击了**模板**(Templates)文件夹,然后刚好发现窗口的上方有一条消息写着:“将文件放入此文件夹并用作新文档的模板”,以及一个**获取详情……** 的链接,指向了 [GNOME 指南(GNOME help)][3]中的模板页面。
一天,在浏览我的 Linux 系统文件的时候,我点击了<ruby>模板<rt>Templates</rt></ruby>文件夹,然后刚好发现窗口的上方有一条消息写着:“将文件放入此文件夹并用作新文档的模板”,以及一个“获取详情……” 的链接,打开了模板的 [GNOME 帮助页面][3]。

![Message at top of Templates folder in GNOME Desktop][4]

(Alan Formy-Duval, [CC BY-SA 4.0][5])

### 创建模板

在 GNOME 中创建模板非常简单。有几种方法可以把文件放进模板文件夹里:你既可以通过图形用户界面(GUI)或是命令行界面(CLI)从另一个位置复制或移动文件,也可以创建一个全新的文件;我选择了后者,实际上我也创建了两个文件。
在 GNOME 中创建模板非常简单。有几种方法可以把文件放进模板文件夹里:你既可以通过图形用户界面(GUI)或是命令行界面(CLI)从另一个位置复制或移动文件,也可以创建一个全新的文件;我选择了后者,实际上,我创建了两个文件。
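如果你偏好命令行,也可以直接把现成的文件复制进模板目录。目录的实际名称随系统语言环境而不同,可以用 `xdg-user-dir` 查询(下面的路径和文件名只是假设的示例):

```
$ xdg-user-dir TEMPLATES
/home/alan/Templates
$ cp ~/Documents/"Opensource.com Article.md" "$(xdg-user-dir TEMPLATES)/"
```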
|
||||
![My first two GNOME templates][6]
|
||||
|
||||
(Alan Formy-Duval, [CC BY-SA 4.0][5])
|
||||
|
||||
我的第一份模板是为 Opensource.com 的文章准备的,它有一个输入标题的位置以及关于我的名字和文章使用的许可证的几行。我的文章使用 Markdown 格式,所以我将模板创建为了一个新的 Markdown 文档——**Opensource.com Article.md**:
|
||||
我的第一份模板是为 Opensource.com 的文章准备的,它有一个输入标题的位置以及关于我的名字和文章使用的许可证的几行。我的文章使用 Markdown 格式,所以我将模板创建为了一个新的 Markdown 文档——`Opensource.com Article.md`:
|
||||
|
||||
````
|
||||
# Title
|
||||
@ -46,12 +42,10 @@ Creative Commons BY-SA 4.0
|
||||
|
||||
### 使用模板
|
||||
|
||||
每当我有了新文章的灵感的时候,我只需要在我计划用来组织内容的文件夹里单击右键,然后从**新建文档**(New Document)列表中选择我想要的模板就可以开始了。
|
||||
每当我有了新文章的灵感的时候,我只需要在我计划用来组织内容的文件夹里单击右键,然后从<ruby>新建文档<rt>New Document</rt></ruby>列表中选择我想要的模板就可以开始了。
|
||||
|
||||
![Select the template by name][7]
|
||||
|
||||
(Alan Formy-Duval, [CC BY-SA 4.0][5])
|
||||
|
||||
你可以为各种文档或文件制作模板。我写这篇文章时使用了我为 Opensource.com 的文章创建的模板。程序员可能会把模板用于软件代码,这样的话也许你想要只包含 `main()` 的模板。
|
||||
|
||||
GNOME 桌面环境为 Linux 及相关操作系统的用户提供了一个非常实用、功能丰富的界面。你最喜欢的 GNOME 功能是什么,你又是怎样使用它们的呢?请在评论中分享~
|
||||
@ -63,7 +57,7 @@ via: https://opensource.com/article/20/9/gnome-templates
|
||||
作者:[Alan Formy-Duval][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[rakino](https://github.com/rakino)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
校对:[wxy](https://github.com/wxy)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
@@ -0,0 +1,91 @@
[#]: collector: (lujun9972)
[#]: translator: (geekpi)
[#]: reviewer: (wxy)
[#]: publisher: (wxy)
[#]: url: (https://linux.cn/article-12708-1.html)
[#]: subject: (How to Use the Firefox Task Manager \(to Find and Kill RAM and CPU Eating Tabs and Extensions\))
[#]: via: (https://itsfoss.com/firefox-task-manager/)
[#]: author: (Ankush Das https://itsfoss.com/author/ankush/)

如何使用 Firefox 任务管理器
======



> 查找并杀死占用内存和 CPU 的标签页和扩展程序

Firefox 在 Linux 用户中很受欢迎。它是几个 Linux 发行版上的默认 Web 浏览器。

在它所提供的许多功能之中,Firefox 也提供了一个自己的任务管理器。

不过,在 Linux 中既然你有[任务管理器][1]这种形式的[系统监控工具][2],为什么还要使用 Firefox 的呢?这里有个很好的理由。

假设你的系统占用了太多的内存或 CPU。如果你使用 `top` 或其他一些系统[资源监控工具,如 Glances][3],你会发现这些工具无法区分是哪个打开的标签或扩展占用了资源。

通常情况下,每个 Firefox 标签页都显示为 “<ruby>Web 内容<rt>Web Content</rt></ruby>”。你可以看到是某个 Firefox 进程导致了这个问题,但这无法准确判断是哪个标签页或扩展。

这时你可以使用 Firefox 任务管理器。让我来告诉你怎么做!

### Firefox 任务管理器

有了 Firefox 任务管理器,你就可以列出所有消耗系统资源的标签页、跟踪器和附加组件。

![][4]

正如你在上面的截图中所看到的,你会看到标签页的名称、类型(标签或附加组件)、能源影响和消耗的内存。

其它的都不言自明,但**“能源影响”指的是 CPU 的使用**,如果你使用的是笔记本电脑,它是一个很好的指标,可以告诉你什么东西会更快耗尽电池电量。

#### 在 Firefox 中访问任务管理器

令人意外的是,任务管理器没有 [Firefox 键盘快捷键][5]。

要快速启动 Firefox 任务管理器,可以在地址栏中输入 `about:performance`,如下图所示。

![Quickly access task manager in Firefox][6]

另外,你也可以点击“菜单”图标,然后进入“更多”选项,如下截图所示。

![Accessing task manager in Firefox][7]

接下来,你会发现选择“任务管理器”的选项,只需点击它就行。

![][8]

#### 使用 Firefox 任务管理器

进入任务管理器后,你可以检查资源的使用情况,展开标签页来查看跟踪器和它的使用情况,也可以选择关闭标签,如下截图高亮所示。

![][9]

以下是你应该知道的:

* “能源影响”指的是 CPU 消耗。
* 子框架或子任务通常是与需要在后台运行的标签相关联的跟踪器/脚本。

通过这个任务管理器,你可以发现网站上的流氓脚本,以及它是否导致你的浏览器变慢。

这并不是什么高科技,但并不是所有人都知道 Firefox 任务管理器。现在你知道了,它应该很方便,你觉得呢?

--------------------------------------------------------------------------------

via: https://itsfoss.com/firefox-task-manager/

作者:[Ankush Das][a]
选题:[lujun9972][b]
译者:[geekpi](https://github.com/geekpi)
校对:[wxy](https://github.com/wxy)

本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出

[a]: https://itsfoss.com/author/ankush/
[b]: https://github.com/lujun9972
[1]: https://itsfoss.com/task-manager-linux/
[2]: https://itsfoss.com/linux-system-monitoring-tools/
[3]: https://itsfoss.com/glances/
[4]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2020/09/firefox-task-manager-shot.png?resize=800%2C519&ssl=1
[5]: https://itsfoss.com/firefox-keyboard-shortcuts/
[6]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2020/09/firefox-url-performance.jpg?resize=800%2C357&ssl=1
[7]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2020/09/firefox-task-manager-steps.jpg?resize=800%2C779&ssl=1
[8]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2020/09/firefox-task-manager-menu.jpg?resize=800%2C465&ssl=1
[9]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2020/09/firefox-task-manager-close-tab.png?resize=800%2C496&ssl=1
@@ -1,22 +1,24 @@
[#]: collector: (lujun9972)
[#]: translator: ( )
[#]: reviewer: ( )
[#]: publisher: ( )
[#]: url: ( )
[#]: translator: (rakino)
[#]: reviewer: (wxy)
[#]: publisher: (wxy)
[#]: url: (https://linux.cn/article-12702-1.html)
[#]: subject: (How to view information on your Linux devices with lshw)
[#]: via: (https://www.networkworld.com/article/3583598/how-to-view-information-on-your-linux-devices-with-lshw.html)
[#]: author: (Sandra Henry-Stocker https://www.networkworld.com/author/Sandra-Henry_Stocker/)

How to view information on your Linux devices with lshw
如何使用 lshw 查看 Linux 设备信息
======
The lshw (list hardware) command on Linux systems provides a lot more information on system devices than most of us might imagine is available.
Kali Linux / nevarpp / Getty Images

While far from being one of the first 50 Linux commands anyone learns, the **lshw** command (read as “ls hardware”) can provide a lot of useful details on your system’s hardware.
> Linux 系统上的 lshw 命令提供的系统设备信息比我们大多数人想象的要多得多。

It extracts details—maybe quite a few more than you knew were available—in a format that is reasonably easy to digest. Given descriptions, logical (device) names, sizes, etc., you are likely to appreciate how much detail you can access.
![Kali Linux logo / gears / binary data][1]

This post examines the information that **lshw** provides with a particular focus on disk and related hardware. Here is some sample **lshw** output:
虽然 `lshw` 命令(<ruby>列出硬件<rt>list hardware</rt></ruby>,读作 “ls hardware”)远不是每个人最先学会的 50 个 Linux 命令之一,但它可以提供很多系统硬件的有用信息。

它以一种相当易于理解的格式提取出可能比你知道的更多的信息。在看到描述、(设备)逻辑名称、大小等以后,你可能会理解到自己能获得多少信息。

这篇文章会研究 `lshw` 给出的信息,但侧重于磁盘及相关硬件。下面是 `lshw` 的输出示例:

```
$ sudo lshw -C disk
@@ -35,11 +37,11 @@ $ sudo lshw -C disk
       logical name: /dev/sdc
```

Note that you should run the **lshw** command with **sudo** to ensure that you get all of the available details.
请注意,你需要使用 `sudo` 运行 `lshw` 命令以确保能得到所有可用的信息。

While we asked for “disk” in the above command (the output included shows only the first of five entries displayed), this particular output shows not a hard disk, but a card reader—another member of the disk class. Note that the system knows this device as **/dev/sdc**.
虽然我们在上面的命令中要求了输出“磁盘(`disk`)”(上面只包含了原始输出里五个条目中的一个),这里的输出却不是一个硬盘,而是读卡器——磁盘的一种。注意系统将这个设备命名为了 `/dev/sdc`。

Similar details are provided on the primary disk on the system:
系统的主磁盘上也有相似的信息:

```
  *-disk
@@ -47,7 +49,7 @@ Similar details are provided on the primary disk on the system:
       product: SSD2SC120G1CS175
       physical id: 0
       bus info: scsi@0:0.0.0
       logical name: /dev/sda <==
       logical name: /dev/sda <==这里
       version: 1101
       serial: PNY20150000778410606
       size: 111GiB (120GB)
@@ -56,9 +58,9 @@ Similar details are provided on the primary disk on the system:
f63b5929
```

This disk is **/dev/sda**. The hard disks on this system both show up as **ATA** disks. **ATA** is a disk-drive implementation that integrates the controller on the disk drive itself.
这块硬盘是 `/dev/sda`。这个系统上的硬盘都显示为 `ATA` 磁盘,`ATA` 是一种把控制器与盘体集成在一起的磁盘驱动器实现。

To get an abbreviated list of devices in the “disk” class, you can run a command like this one. Notice that two of the devices are listed twice, so we are still seeing five disk devices.
要获得“磁盘”类设备的简略列表,可以运行下面这条命令。注意其中有两个设备被列出了两次,所以我们看到的仍然是五个磁盘设备。

```
$ sudo lshw -short -C disk
@@ -73,7 +75,7 @@ H/W path           Device      Class          Description
/0/100/1f.5/0.0.0  /dev/sdb    disk           500GB SAMSUNG HE502HJ
```

Hold onto your seat if you decide you want to see _**all**_ of the devices on a system. You will get a list that includes a lot more things than you probably normally think of as “devices”. Here’s an example—and this is the “short” (few details) list:
如果你决定要查看系统上的 **所有** 设备,请坐稳了;你会得到一个包含的东西比你通常认为的“设备”要多得多的列表,下面是一个例子,这是一个“简短(`short`)”(信息很少)的列表:

```
$ sudo lshw -short
@@ -152,7 +154,7 @@ H/W path           Device      Class          Description
/0/9                           system         PnP device PNP0c01
```

Run a command like this to list device classes and count how many devices are in each class.
运行下面的命令来列出设备类别,并统计每个类别中的设备数量。

```
$ sudo lshw -short | awk '{print substr($0,36,13)}' | tail -n +3 | sort | uniq -c
@@ -172,16 +174,18 @@ $ sudo lshw -short | awk '{print substr($0,36,13)}' | tail -n +3 | sort | uniq -c
      2 volume
```

**NOTE:** The **awk** command selects the Class column from the **lshw** output using $0 (complete lines), but taking only the substrings that start in the correct place (column 36). None of the class entries have more than 13 letters so the substring ends there. The **tail -n +3** part of the command drops the heading and the “=====” line beneath it, so only the 14 device classes are included in the final listing.
**注意:** 上面使用 `awk` 命令从 `lshw` 的输出中选择 Class(类别)栏是这样实现的:使用 `$0`(选取完整行),但只取从正确位置(第 36 个字符)开始的子串,而因为“类别”中并没有条目的长度超过 13 个字符,所以子串就在那里结束。命令中 `tail -n +3` 的部分移除了标题和下面的`=====`,所以最终的列表中只包含了那 14 种设备类型。

(LCTT 译注:上面的命令中 `awk` 的部分在选取子串时是从第 36 个字符开始的,这个数字基本上取决于最长的设备逻辑名称的长度,因而在不同的系统环境中可能有所不同,一个例子是,当你的系统上有 NVMe SSD 时,可能需要将其改为 41。)
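顺带一提,如果你想用脚本进一步处理这些信息,`lshw` 还支持结构化输出(具体选项以你系统上的手册页为准):

```
$ sudo lshw -json -C disk | head    # JSON 格式输出
$ sudo lshw -xml -C disk | head     # XML 格式输出
```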
你会发现在没有使用 `-short` 选项的时候,每一个磁盘类设备都会有大约 12 行的输出,包括像是 `/dev/sda` 这样的逻辑名称,磁盘大小和种类等等。

```
$ sudo lshw -C disk
[sudo] password for shs:
  *-disk:0
       description: SCSI Disk
       product: Card Reader-1 card reader?
       product: Card Reader-1 <== 读卡器?
       vendor: JIE LI
       physical id: 0.0.0
       bus info: scsi@4:0.0.0
@@ -209,13 +213,13 @@ $ sudo lshw -C disk
       product: SSD2SC120G1CS175
       physical id: 0
       bus info: scsi@0:0.0.0
       logical name: /dev/sda main system disk
       logical name: /dev/sda <== 主要磁盘
       version: 1101
       serial: PNY20150000778410606
       size: 111GiB (120GB)
       capabilities: partitioned partitioned:dos
       configuration: ansiversion=5 logicalsectorsize=512 sectorsize=512 signature=f63b5929
  *-cdrom aka /dev/sr0
  *-cdrom <== 也叫 /dev/sr0
       description: DVD writer
       product: DVD+-RW GSA-H73N
       vendor: HL-DT-ST
@@ -235,7 +239,7 @@ $ sudo lshw -C disk
       product: SAMSUNG HE502HJ
       physical id: 0.0.0
       bus info: scsi@3:0.0.0
       logical name: /dev/sdb secondary disk
       logical name: /dev/sdb <== 次要磁盘
       version: 0002
       serial: S2B6J90B501053
       size: 465GiB (500GB)
@@ -243,11 +247,9 @@ $ sudo lshw -C disk
       configuration: ansiversion=5 logicalsectorsize=512 sectorsize=512 signature=7e67ccf3
```

### Wrap-up
### 总结

The **lshw** command provides details that many of us won’t normally deal with. Still, it’s nice to know how much information is available even if you only use a portion of it.

Join the Network World communities on [Facebook][1] and [LinkedIn][2] to comment on topics that are top of mind.
`lshw` 命令提供了一些我们许多人通常不会处理的信息,不过即使你只用了其中的一部分,知道有多少信息可用还是很不错的。

--------------------------------------------------------------------------------

@@ -255,12 +257,13 @@ via: https://www.networkworld.com/article/3583598/how-to-view-information-on-you

作者:[Sandra Henry-Stocker][a]
选题:[lujun9972][b]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)
译者:[rakino](https://github.com/rakino)
校对:[wxy](https://github.com/wxy)

本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出

[a]: https://www.networkworld.com/author/Sandra-Henry_Stocker/
[b]: https://github.com/lujun9972
[1]: https://www.facebook.com/NetworkWorld/
[2]: https://www.linkedin.com/company/network-world
[1]: https://images.idgesg.net/images/article/2020/02/kali_linux_tools_abstract_gears_binary_data_by_nevarpp_gettyimages-688718788_2400x1600-100832674-large.jpg
[2]: https://www.facebook.com/NetworkWorld/
[3]: https://www.linkedin.com/company/network-world
@@ -0,0 +1,119 @@
[#]: collector: (lujun9972)
[#]: translator: (geekpi)
[#]: reviewer: (wxy)
[#]: publisher: (wxy)
[#]: url: (https://linux.cn/article-12712-1.html)
[#]: subject: (Drawing is an Open Source MS-Paint Type of App for Linux Desktop)
[#]: via: (https://itsfoss.com/drawing-app/)
[#]: author: (Abhishek Prakash https://itsfoss.com/author/abhishek/)

Drawing:一款开源的类似微软画图的 Linux 桌面应用
======



> Drawing 是一个基本的图像编辑器,就像微软画图一样。有了这个开源的应用,你可以画箭头、线条、几何图形、添加颜色和其他你期望在普通绘图应用程序中做的事情。

### Drawing: 一个简单的 Linux 绘图应用

![][1]

对于从 Windows XP (或更早版本)开始使用电脑的人来说,微软<ruby>画图<rt>Paint</rt></ruby>是一个有趣的应用,是个可以随便画一些草图的应用。在这个被 Photoshop 和 GIMP 主导的世界里,画图应用仍然具有一定的现实意义。

有几个[可用于 Linux 的绘画应用][2],我打算在这个列表中再添加一个。

这款应用不出意外地叫做 [Drawing][3],你可以在 Linux 桌面和 Linux 智能手机上使用它。

### Drawing 应用的功能

![][4]

Drawing 拥有你所期待的绘图应用的所有功能。你可以:

* 从头开始创建新的绘图
* 编辑现有的 PNG、JPEG 或 BMP 图像文件
* 添加几何图形、线条、箭头等
* 虚线
* 使用铅笔工具进行自由手绘
* 使用曲线和形状工具
* 裁剪图像
* 缩放图像到不同的像素大小
* 添加文本
* 选择图像的一部分(矩形、自由选择和颜色选择)
* 旋转图像
* 添加复制到剪贴板的图像
* 可在偏好中使用橡皮擦、荧光笔、油漆桶、颜色选择、颜色选择器工具
* 无限撤销
* 滤镜可以增加模糊、像素化、透明度等

### 我使用 Drawing 的经验

![][5]

这个应用是新出现的,并且有不错的用户界面。它具有你期望在标准的绘画应用中找到的所有基本功能。

它有一些额外的工具,如颜色选择和拾色器,但在使用时可能会混淆。没有什么文档描述这些工具的使用,要全靠你自己摸索。

它的体验很流畅,作为图像编辑工具,我觉得这个工具很有潜力取代 Shutter (是的,我[用 Shutter 编辑截图][6])。

我觉得最麻烦的是,添加元素后无法编辑/修改。你有撤消和重做选项,但如果你想修改一个你在 12 步前添加的文本,你就必须重做所有的步骤。这是未来的版本中开发者可能要做的一些改进。

### 在 Linux 上安装 Drawing

这是一款 Linux 专属应用。它也适用于基于 Linux 的智能手机,如 [PinePhone][7]。

有多种方式可以安装 Drawing。它在许多主要的 Linux 发行版的仓库中都有。

#### 基于 Ubuntu 的发行版

Drawing 包含在 Ubuntu 的 universe 仓库中,这意味着你可以从 Ubuntu 软件中心安装它。

但是,如果你想要最新的版本,有一个 [PPA][8] 可以轻松地在 Ubuntu、Linux Mint 和其他基于 Ubuntu 的发行版上安装 Drawing。

使用下面的命令:

```
sudo add-apt-repository ppa:cartes/drawing
sudo apt update
sudo apt install drawing
```

如果你想删除它,你可以使用以下命令:

```
sudo apt remove drawing
sudo add-apt-repository -r ppa:cartes/drawing
```

#### 其他 Linux 发行版

检查你的发行版的包管理器中是否有 Drawing,然后在那里安装。如果你想要最新的版本,你可以使用 Flatpak 版本的应用。

- [Drawing Flatpak][9]
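如果你的系统已经配置好 Flatpak 和 Flathub 源,安装和运行命令大致如下(应用 ID 取自上面的 Flathub 页面链接):

```
flatpak install flathub com.github.maoschanz.drawing
flatpak run com.github.maoschanz.drawing
```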
### 总结

你还在用画图应用么?你用的是哪一款?如果你已经尝试过 Drawing,你的体验如何?

--------------------------------------------------------------------------------

via: https://itsfoss.com/drawing-app/

作者:[Abhishek Prakash][a]
选题:[lujun9972][b]
译者:[geekpi](https://github.com/geekpi)
校对:[wxy](https://github.com/wxy)

本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出

[a]: https://itsfoss.com/author/abhishek/
[b]: https://github.com/lujun9972
[1]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2020/09/drawing-app-interface.jpg?resize=789%2C449&ssl=1
[2]: https://itsfoss.com/open-source-paint-apps/
[3]: https://maoschanz.github.io/drawing/
[4]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2020/09/drawing-screenshot.jpg?resize=800%2C489&ssl=1
[5]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2020/09/using-drawing-app-linux.png?resize=787%2C473&ssl=1
[6]: https://itsfoss.com/install-shutter-ubuntu/
[7]: https://itsfoss.com/pinephone/
[8]: https://launchpad.net/~cartes/+archive/ubuntu/drawing
[9]: https://flathub.org/apps/details/com.github.maoschanz.drawing
@@ -0,0 +1,101 @@
[#]: collector: (lujun9972)
[#]: translator: (geekpi)
[#]: reviewer: (wxy)
[#]: publisher: (wxy)
[#]: url: (https://linux.cn/article-12719-1.html)
[#]: subject: (Present Slides in Linux Terminal With This Nifty Python Tool)
[#]: via: (https://itsfoss.com/presentation-linux-terminal/)
[#]: author: (Abhishek Prakash https://itsfoss.com/author/abhishek/)

在 Linux 终端中展示幻灯片
======



演示文稿往往是枯燥的。这就是为什么有些人会添加动画或漫画/meme 来增加一些幽默和风格来打破单调。

如果你需要在你的大学或公司的演示文稿中加入一些独特的风格,那么使用 Linux 终端怎么样?想象一下,这将是多么酷的事情啊!

### Present:Linux 终端中进行演示

在终端中可以做很多[有趣好玩的事情][1]。制作和展示幻灯片只是其中之一。

这个基于 Python 的应用名为 [Present][2],它可以让你创建基于 Markdown 和 YML 的幻灯片,你可以在你的大学或公司里演讲,并以真正的极客风格取悦人们。

我制作了一个视频,展示了在 Linux 终端中用 Present 演示一些东西的样子。

- [VIDEO](https://img.linux.net.cn/static/video/Makes%20ASCII%20Based%20Presentation%20Slides%20in%20Linux%20terminal-462902030.mp4)

#### Present 的功能

你可以用 Present 做以下事情:

* 使用 Markdown 语法在幻灯片中添加文本
* 用箭头或 `PgUp`/`Down` 键控制幻灯片
* 改变前景和背景颜色
* 在幻灯片中添加图像
* 增加代码块
* 播放模拟代码,并用 codio YML 文件输出

#### 在 Linux 上安装 Present

Present 是一个基于 Python 的工具,你可以使用 PIP 来安装它。你应该确保用这个命令[在 Ubuntu 上安装 Pip][4]:

```
sudo apt install python3-pip
```

如果你使用的是其他发行版,请检查你的包管理器来安装 PIP3。

安装 PIP 后,你就可以以这种方式全局安装 Present:

```
sudo pip3 install present
```

你也可以只为当前用户安装,但你也必须将 `~/.local/bin` 添加到你的 `PATH` 环境变量。

#### 在 Linux 终端中使用 Present 来创建和展示幻灯片

![][5]

由于 Present 使用了 Markdown 语法,你应该用它来创建自己的幻灯片。在这里使用 [Markdown 编辑器][6]会有帮助。

Present 需要一个 Markdown 文件来读取和播放幻灯片。你可以[下载这个示例幻灯片][7],但你需要单独下载嵌入的图像,并将它放在图像文件夹内。

* 在 Markdown 文件中使用 `---` 来分隔幻灯片。
* 使用 Markdown 语法在幻灯片中添加文本。
* 使用以下语法添加图片 `![RC](images/name.png)`。
* 通过添加像 `<!-- fg=white bg=red -->` 这样的语法来改变幻灯片的颜色。
* 使用像 `<!-- effect=fireworks -->` 这样的语法来添加带有效果的幻灯片。
* 使用 [codio 语法][8] 添加代码运行模拟。
* 使用 `q` 退出演示,并用左/右箭头或 `PgUp`/`Down` 键控制幻灯片。

把这些语法组合起来的一个最小示例见下面的代码块。
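下面的示例把上述几条语法放进一个最小的幻灯片文件里(幻灯片内容纯属演示,`present` 的具体调用方式以其文档为准):

```
$ cat > demo.md <<'EOF'
# 第一页

用 Markdown 写的终端幻灯片。

---

<!-- fg=white bg=red -->
# 第二页
EOF
$ present demo.md
```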
请记住,在演示时调整终端窗口的大小会把东西搞乱,按回车键也是如此。

### 总结

如果你熟悉 Markdown 和终端,使用 Present 对你来说并不困难。

你不能把它和常规的用 Impress、MS Office 等制作的幻灯片相比,但偶尔使用,它是一个很酷的工具。如果你是计算机科学/网络专业的学生,或者是开发人员或系统管理员,你的同事一定会觉得很有趣。

--------------------------------------------------------------------------------

via: https://itsfoss.com/presentation-linux-terminal/

作者:[Abhishek Prakash][a]
选题:[lujun9972][b]
译者:[geekpi](https://github.com/geekpi)
校对:[wxy](https://github.com/wxy)

本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出

[a]: https://itsfoss.com/author/abhishek/
[b]: https://github.com/lujun9972
[1]: https://itsfoss.com/funny-linux-commands/
[2]: https://github.com/vinayak-mehta/present
[4]: https://itsfoss.com/install-pip-ubuntu/
[5]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2020/09/presentation-in-linux-terminal.png?resize=800%2C494&ssl=1
[6]: https://itsfoss.com/best-markdown-editors-linux/
[7]: https://github.com/vinayak-mehta/present/blob/master/examples/sample.md
[8]: https://present.readthedocs.io/en/latest/codio.html
@ -0,0 +1,112 @@
|
||||
[#]: collector: (lujun9972)
|
||||
[#]: translator: (wxy)
|
||||
[#]: reviewer: (wxy)
|
||||
[#]: publisher: (wxy)
|
||||
[#]: url: (https://linux.cn/article-12713-1.html)
|
||||
[#]: subject: (Linux Jargon Buster: What is a Package Manager in Linux? How Does it Work?)
|
||||
[#]: via: (https://itsfoss.com/package-manager/)
|
||||
[#]: author: (Abhishek Prakash https://itsfoss.com/author/abhishek/)
|
||||
|
||||
Linux 黑话解释:什么是包管理器?它是如何工作的?
|
||||
======
|
||||
|
||||

|
||||
|
||||
[Linux 发行版之间有什么不同][1]的要点之一是包管理。在这篇 Linux 黑话解释中,你将了解 Linux 中的打包和包管理器。你将了解什么是包,什么是包管理器,它们是如何工作的,以及有什么包管理器。

### 什么是包管理器?

简单来说,“<ruby>包管理器<rt>package manager</rt></ruby>”(或“软件包管理器”)是一种工具,它允许用户在操作系统上安装、删除、升级、配置和管理软件包。软件包管理器可以是像“软件中心”这样的图形化应用,也可以是像 [apt-get][2] 或 [pacman][3] 这样的命令行工具。

你会发现我经常在教程和文章中使用“包”这个词。要了解包管理器,你必须先了解什么是包。

### 什么是包?

一个“<ruby>包<rt>package</rt></ruby>”(或“软件包”)通常指的是一个应用程序,它可以是一个 GUI 应用程序、命令行工具或(其他软件程序需要的)软件库。包本质上是一个存档文件,包含二进制可执行文件、配置文件,有时还包含依赖关系的信息。

在旧时代,[软件曾经是从它的源代码安装的][4]。你会参考一个文件(通常命名为 `README`),看看它需要哪些软件组件、二进制文件要放在什么位置。它通常包括一个配置脚本或 `Makefile`。你必须自己编译该软件,或者自己处理所有的依赖关系(有些软件需要先安装其他软件)。

为了摆脱这种复杂性,Linux 发行版创建了自己的打包格式,为终端用户提供随时可用的二进制文件(预编译软件),以便安装软件,同时提供一些[元数据][5](版本号、描述)和依赖关系。

这就像烤蛋糕与买蛋糕的区别一样。

![][6]

大约在上世纪 90 年代中期,Debian 创建了 DEB 打包格式(`.deb`),Red Hat Linux 创建了 RPM(Red Hat Package Manager 的缩写)打包系统(`.rpm`)。编译源代码的方式仍然存在,但现在是可选的。

要与打包系统交互或使用打包系统,你需要一个包管理器。

### 包管理器是如何工作的?

请记住,包管理器是一个通用的概念,并不是 Linux 独有的。你会经常发现各种软件或编程语言的包管理器,比如[只用于 Python 包的 PIP 包管理器][7],甚至 [Atom 编辑器也有自己的包管理器][8]。

由于本文的重点是 Linux,所以我会从 Linux 的角度出发。不过,这里的大部分解释也可以应用于一般的包管理器。

我创建了这个图(基于 SUSE Wiki),这样你就可以很容易理解包管理器是如何工作的。

![][9]

几乎所有的 Linux 发行版都有“<ruby>软件仓库<rt>software repository</rt></ruby>”,它基本上是软件包的集合。是的,一个发行版可以有不止一个软件仓库,软件仓库里也包含不同种类的软件包。

软件仓库也有元数据文件,其中包含了软件包的信息,如包的名称、版本号、描述和所属仓库名称等。这就是你在 Ubuntu/Debian 中使用 [apt show 命令][10]所看到的。

你的系统上的包管理器首先会与元数据进行交互。包管理器会在你的系统上创建一个元数据的本地缓存。当你运行包管理器的更新选项(例如 `apt update`)时,它会通过引用仓库中的元数据来更新本地元数据缓存。

当你运行软件包管理器的安装命令(例如 `apt install package_name`)时,软件包管理器会引用这个缓存。如果它在缓存中找到了包的信息,它就会使用互联网连接到相应的仓库,在安装之前先把包下载到你的系统上。

一个包可能有依赖关系。也就是说,它可能需要先安装其他软件包。软件包管理器通常会处理这些依赖关系,并将其与你正在安装的软件包一起自动安装。

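以基于 APT 的系统为例,典型的流程大致如下(命令均为真实的 `apt` 用法,包名 `htop` 仅作示意):

```
sudo apt update          # 刷新本地元数据缓存
apt show htop            # 从元数据中查看包的名称、版本、描述等信息
sudo apt install htop    # 解析依赖关系,下载并安装软件包
```
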
![Linux 中包管理器会处理依赖关系][11]

同样,当你使用包管理器删除一个包时,它要么自动删除不再需要的依赖包,要么通知你系统中有未使用的包可以清理。

除了安装、删除这些显而易见的任务外,你还可以使用包管理器对包进行配置,并根据自己的需要进行管理。例如,你可以在常规的系统更新中[防止升级某个包的版本][12]。你的包管理器可能还能做很多事情。

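在 APT 上,固定一个包的版本通常用 `apt-mark` 完成(包名同样仅作示意):

```
sudo apt-mark hold htop      # 在系统升级中保持该包的当前版本
sudo apt-mark unhold htop    # 恢复正常升级
```
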
### 不同种类的包管理器

包管理器因打包系统而异,但同一打包系统却可能有多个包管理器。

例如,RPM 有 [Yum][13] 和 [DNF][14] 包管理器。对于 DEB,你有 `apt-get`、[aptitude][15] 等基于命令行的包管理器。

![Synaptic 包管理器][16]

软件包管理器不一定是基于命令行的,也有图形化的软件包管理工具,比如 [Synaptic][17]。你的发行版的“软件中心”也是一个软件包管理器,即使它在底层运行的是 `apt-get` 或 DNF。

### 结论

我不想进一步详细介绍这个话题,虽然我可以继续说下去,但这将偏离本主题的目标 —— 即让你对 Linux 中的包管理器有一个基本的了解。

我暂时忽略了新的通用打包格式,比如 Snap 和 Flatpak。

我希望你对 Linux 中的包管理系统有更好的理解。如果你还有困惑,或者对这个主题有一些问题,请发表评论。我会尽量回答你的问题,如果需要的话,会在本文中补充新的内容。

--------------------------------------------------------------------------------

via: https://itsfoss.com/package-manager/

作者:[Abhishek Prakash][a]
选题:[lujun9972][b]
译者:[wxy](https://github.com/wxy)
校对:[wxy](https://github.com/wxy)

本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出

[a]: https://itsfoss.com/author/abhishek/
[b]: https://github.com/lujun9972
[1]: https://itsfoss.com/what-is-linux/
[2]: https://itsfoss.com/apt-vs-apt-get-difference/
[3]: https://itsfoss.com/pacman-command/
[4]: https://itsfoss.com/install-software-from-source-code/
[5]: https://www.computerhope.com/jargon/m/metadata.htm
[6]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2020/10/source-code-comilation-vs-packaging.png?resize=800%2C450&ssl=1
[7]: https://itsfoss.com/install-pip-ubuntu/
[8]: https://itsfoss.com/install-packages-in-atom/
[9]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2020/10/linux-package-manager-explanation.png?resize=800%2C450&ssl=1
[10]: https://itsfoss.com/apt-search-command/
[11]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2020/10/package-manager-handling-dependencies-in-linux.png?resize=800%2C450&ssl=1
[12]: https://itsfoss.com/prevent-package-update-ubuntu/
[13]: https://fedoraproject.org/wiki/Yum
[14]: https://fedoraproject.org/wiki/DNF
[15]: https://wiki.debian.org/Aptitude
[16]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2020/06/see-packages-by-repositories-synaptic.png?resize=799%2C548&ssl=1
[17]: https://itsfoss.com/synaptic-package-manager/
@ -0,0 +1,88 @@
[#]: collector: (wxy)
[#]: translator: (gxlct008)
[#]: reviewer: (wxy)
[#]: publisher: (wxy)
[#]: url: (https://linux.cn/article-12718-1.html)
[#]: subject: (Could Microsoft be en route to dumping Windows in favor of Linux?)
[#]: via: (https://www.techrepublic.com/article/could-microsoft-be-en-route-to-dumping-windows-in-favor-of-linux/)
[#]: author: (Jack Wallen https://www.techrepublic.com/meet-the-team/us/jack-wallen/)

微软能否放弃 Windows 转向 Linux?
======

> Jack Wallen 认为,Microsoft Linux 是微软桌面操作系统的下一个演进方向。他解释了为什么这对微软、IT 专业人士、用户和 Linux 社区来说都是双赢的。

我尊敬的同事 Steven J. Vaughan-Nichols 在姊妹网站 ZDNet 上发表了一篇出色的文章,名为《[基于 Linux 的 Windows 非常有意义][1]》,他在文中讨论了 Eric S. Raymond 的[观点](https://linux.cn/article-12664-1.html),即我们正接近桌面战争的最后阶段。Vaughan-Nichols 猜测,下一个合乎逻辑的步骤是在 Linux 内核上运行的 Windows 界面。

这是有道理的,尤其是考虑到微软在 [Windows 的 Linux 子系统(WSL)][2] 上的努力。然而,从我过去几年所目睹的一切来看,我认为可以得出一个对微软更有意义的结论。

### Microsoft Linux:为什么它是最好的解决方案

曾经,微软最大的摇钱树是软件,确切地说是 Windows 和 Microsoft Office。但是,就像科技行业中的所有事物一样,进化也在发生,而拒绝进化的科技公司都失败了。

微软明白这一点,并且它已经进化了。一个恰当的例子是:[Microsoft Azure][4]。微软的云计算服务,以及 [AWS][5] 和 [Google Cloud][6],已经成为这个不断变化的行业的巨大推动力。Azure 已成为微软新世界的摇钱树,以至于这家在桌面电脑市场上享有垄断地位的公司已经开始意识到,或许还有更好的方式来利用桌面计算机。

这种优势很容易通过 Linux 来实现,但不是你可能想到的那种 Linux。Vaughan-Nichols 所建议的 Microsoft Linux 对于微软来说可能是一个很好的垫脚石,但我相信该公司需要做出一个更大的飞跃。我说的是登月规模的飞跃 —— 这将使所有参与者的生活变得更加轻松。

我说的是深入 Linux 领域。忘掉在 Linux 内核上运行 [Windows 10][7] 界面的桌面版本吧,最后承认 Microsoft Linux 可能是当今世界的最佳解决方案。

微软发布一个完整的 Linux 发行版,对所有参与者来说都意味着更少的挫败感。微软可以将其在 Windows 10 桌面系统上的开发工作转移到一个更稳定、更可靠、更灵活、久经考验的桌面系统上来。微软可以从任意数量的桌面系统中选择自己的官方风格:GNOME、KDE、Pantheon、Xfce、Mint、Cinnamon…… 不胜枚举。微软可以按原样使用这些桌面,也可以为它们做出贡献,创造一些更符合用户习惯的东西。

### 开发:微软并没有摆脱困境

这并不意味着微软在开发方面就可以高枕无忧。微软还希望对 Wine 做出重大贡献,以确保其所有产品均可在这个兼容层上顺畅运行,并将其默认集成到操作系统中,这样最终用户就不必为安装 Windows 应用程序做任何额外的工作。

### Windows 用户需要 Defender

微软开发团队还希望将 Windows Defender 移植到这个新的发行版中。等一等,什么?我真的是在暗示 Microsoft Linux 需要 Windows Defender 吗?是的,我确定。为什么?

最终用户仍然需要防范<ruby>[网络钓鱼][8]诈骗<rt>phishing scams</rt></ruby>、恶意 URL 和其他类型的攻击。普通的 Windows 用户可能不会意识到,Linux 加上安全的使用实践,远比 Windows 10 加上 Windows Defender 安全得多。所以,是的,将 Windows Defender 移植到 Microsoft Linux 将是让用户群体安心的很好的一步。

这些用户将很快了解在台式计算机上工作的感觉,而不必处理 Windows 操作系统带来的日常困扰。更新更流畅、更值得信赖、更安全,桌面更有意义。

### 微软、用户和 IT 专业人士的双赢

微软一直在尽其所能将用户从标准的基于客户端的软件迁移到云和其他托管解决方案上,其软件摇钱树也已经变成了以网络为中心和基于订阅的模式。所有这些 Linux 用户仍然可以使用 [Microsoft 365][10] 以及微软提供的任何其他<ruby>[软件即服务][11]<rt>Software as a Service</rt></ruby>(SaaS)解决方案 —— 所有这些都可以在 Linux 操作系统的舒适与安全中进行。

这对微软和消费者而言是双赢的:微软在 Windows 上要处理的麻烦事(为其专有解决方案捕捉漏洞、打安全补丁)少了很多,而消费者得到了一个更可靠的解决方案,又不会错过任何东西。

如果微软打对了牌,它可以对 KDE 或几乎任何 Linux 桌面环境重新设置主题,使其与 Windows 10 界面没有太大区别。

如果布局得当,消费者甚至可能都不知道其中的区别 —— “Windows 11” 将仅仅是微软桌面操作系统的下一个演进版本。

说到胜利,IT 专业人员将花费更少的时间来处理病毒、恶意软件和操作系统问题,而把更多的时间用于保持网络(以及为该网络提供动力的服务器)的运行和安全上。

### 大卖场怎么办?

这是个关键的地方。为了让这一做法真正发挥作用,微软将不得不完全放弃 Windows,转而使用自己风格的 Linux。基于同样的思路,微软需要确保大卖场里的 PC 都安装了 Microsoft Linux 系统。没有半途而废的余地 —— 微软必须全力以赴,以确保这次转型的成功。

一旦大卖场开始销售安装了 Microsoft Linux 的 PC 和笔记本电脑,我预测这一举措对所有相关人员来说将会是一个巨大的成功。微软会被视为终于推出了一款值得消费者信赖的操作系统;消费者将拥有一个不会带来太多令人头疼的事情、而是带来真正的生产力和乐趣的桌面操作系统;Linux 社区最终将主导桌面计算机。

### Microsoft Linux:时机已到

你可能会认为这个想法很疯狂,但如果你真的仔细想想,微软 Windows 的演进就是朝着这个方向发展的。为什么不绕过这个时间线的中间部分,直接跳到一个让所有参与者都获得成功的终局呢?Microsoft Linux 正当其时。

--------------------------------------------------------------------------------

via: https://www.techrepublic.com/article/could-microsoft-be-en-route-to-dumping-windows-in-favor-of-linux/

作者:[Jack Wallen][a]
选题:[wxy][b]
译者:[gxlct008](https://github.com/gxlct008)
校对:[wxy](https://github.com/wxy)

本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出

[a]: https://www.techrepublic.com/meet-the-team/us/jack-wallen/
[b]: https://github.com/wxy
[1]: https://www.zdnet.com/article/linux-based-windows-makes-perfect-sense/
[2]: https://www.techrepublic.com/article/microsoft-older-windows-10-versions-now-get-to-run-windows-subsystem-for-linux-2/
[3]: https://www.techrepublic.com/resource-library/whitepapers/microsoft-build-2020-highlights/
[4]: https://www.techrepublic.com/article/microsoft-azure-the-smart-persons-guide/
[5]: https://www.techrepublic.com/article/amazon-web-services-the-smart-persons-guide/
[6]: https://www.techrepublic.com/article/google-cloud-platform-the-smart-persons-guide/
[7]: https://www.techrepublic.com/article/windows-10-the-smart-persons-guide/
[8]: https://www.techrepublic.com/article/phishing-and-spearphishing-a-cheat-sheet/
[9]: https://www.techrepublic.com/article/everything-a-linux-admin-needs-to-know-about-working-from-the-command-line/
[10]: https://www.techrepublic.com/article/microsoft-365-a-cheat-sheet/
[11]: https://www.techrepublic.com/article/software-as-a-service-saas-a-cheat-sheet/
@ -0,0 +1,122 @@
[#]: collector: (lujun9972)
[#]: translator: ( )
[#]: reviewer: ( )
[#]: publisher: ( )
[#]: url: ( )
[#]: subject: (KDE Plasma 5.20 is Here With Exciting Improvements)
[#]: via: (https://itsfoss.com/kde-plasma-5-20/)
[#]: author: (Ankush Das https://itsfoss.com/author/ankush/)

KDE Plasma 5.20 is Here With Exciting Improvements
======

KDE Plasma 5.20 is finally here, and there are a lot of things to be excited about, including the new wallpaper ‘**Shell**’ by Lucas Andrade.

It is worth noting that, unlike [KDE Plasma 5.18][1], this is not an LTS release and will be maintained for the next 4 months or so. So, if you want the latest and greatest, you can surely go ahead and give it a try.

In this article, I shall mention the key highlights of KDE Plasma 5.20 from [my experience with it on KDE Neon][2] (Testing Edition).

![][3]

### Plasma 5.20 Features

If you like to see things in action, we made a feature overview video for you.

[Subscribe to our YouTube channel for more Linux videos][4]

#### Icon-only Taskbar

![][5]

You must already be comfortable with a taskbar that shows the title of the window along with the icon. However, that takes a lot of space in the taskbar, which looks bad when you want a clean look with multiple applications/windows open.

Not just that: if you launch several windows of the same application, it will group them together and let you cycle through them from a single icon on the taskbar.

So, with this update, you get an icon-only taskbar by default, which makes it look a lot cleaner and lets you see more things in the taskbar at a glance.

#### Digital Clock Applet with Date

![][6]

If you’ve used any KDE-powered distro, you must have noticed that the digital clock applet (in the bottom-right corner) displays the time but not the date by default.

It’s always a good choice to have the date along with the time (at least I prefer that). So, with KDE Plasma 5.20, the applet will show both time and date.

#### Get Notified When your System almost Runs out of Space

I know this is not a big addition, but a necessary one. Even if your home directory is on a different partition, you will be notified when you’re about to run out of space.

#### Set the Charge Limit Below 100%

You are in for a treat if you are a laptop user. To help you preserve battery health, you can now set a charge limit below 100%. I couldn’t show it to you because I use a desktop.

#### Workspace Improvements

Working with workspaces on the KDE desktop was already an impressive experience; now, with the latest update, several tweaks have been made to take the user experience up a notch.

To start with, the system tray has been overhauled, with a grid-like layout replacing the list view.

The default shortcut for moving/resizing windows has been reassigned from Alt+drag to Meta+drag to avoid conflicts with productivity apps that rely on the Alt+drag keybind. You can also use key binds like Meta + up/left/down arrow to corner-tile windows.

![][7]

It is also easier to list all the disks using the old “**Device Notifier**” applet, which has been renamed to “**Disks & Devices**”.

If that wasn’t enough, you will also find improvements to [KRunner][8], the essential application launcher and search utility. It now remembers the search text history, and you can also have it centered on the screen instead of on top of the screen.

#### System Settings Improvements

The look and feel of the System Settings is the same, but it is more useful now. You will notice a new “**Highlight changed settings**” option, which shows you the recently modified settings compared to the default values.

In that way, you can spot any changes that you made accidentally, or that someone else made.

![][9]

In addition to that, you also get S.M.A.R.T. monitoring and disk failure notifications.

#### Wayland Support Improvements

If you prefer a Wayland session, you will be happy to know that it now supports [Klipper][10] and that you can also middle-click to paste (on KDE apps only, for the time being).

The much-needed screencasting support has also been added.

#### Other Improvements

Of course, you will notice some subtle visual improvements and adjustments to the look and feel. You may notice a smooth transition effect when changing the brightness. Similarly, the on-screen display that pops up when changing the brightness or volume is now less obtrusive.

Options like controlling the scroll speed of the mouse/touchpad have been added to give you finer control.

You can find the detailed list of changes in the [official changelog][11], if you’re curious.

### Wrapping Up

The changes are definitely impressive and should make the KDE experience better than ever before.

If you’re running KDE Neon, you should get the update soon. But if you are on Kubuntu, you will have to try the 20.10 ISO to get your hands on Plasma 5.20.

What do you like the most among the list of changes? Have you tried it yet? Let me know your thoughts in the comments below.

--------------------------------------------------------------------------------

via: https://itsfoss.com/kde-plasma-5-20/

作者:[Ankush Das][a]
选题:[lujun9972][b]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)

本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出

[a]: https://itsfoss.com/author/ankush/
[b]: https://github.com/lujun9972
[1]: https://itsfoss.com/kde-plasma-5-18-release/
[2]: https://itsfoss.com/kde-neon-review/
[3]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2020/10/kde-plasma-5-20-feat.png?resize=800%2C394&ssl=1
[4]: https://www.youtube.com/c/itsfoss?sub_confirmation=1
[5]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2020/10/kde-plasma-5-20-taskbar.jpg?resize=472%2C290&ssl=1
[6]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2020/10/kde-plasma-5-20-clock.jpg?resize=372%2C224&ssl=1
[7]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2020/10/kde-plasma-5-20-notify.jpg?resize=800%2C692&ssl=1
[8]: https://docs.kde.org/trunk5/en/kde-workspace/plasma-desktop/krunner.html
[9]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2020/10/plasma-disks-smart.png?resize=800%2C539&ssl=1
[10]: https://userbase.kde.org/Klipper
[11]: https://kde.org/announcements/plasma-5.20.0
@ -1,5 +1,5 @@
[#]: collector: (lujun9972)
[#]: translator: ( )
[#]: translator: (beamrolling)
[#]: reviewer: ( )
[#]: publisher: ( )
[#]: url: ( )
@ -1,136 +0,0 @@
[#]: collector: (lujun9972)
[#]: translator: (rakino)
[#]: reviewer: ( )
[#]: publisher: ( )
[#]: url: ( )
[#]: subject: (Things You Didn't Know About GNU Readline)
[#]: via: (https://twobithistory.org/2019/08/22/readline.html)
[#]: author: (Two-Bit History https://twobithistory.org)

Things You Didn't Know About GNU Readline
======

I sometimes think of my computer as a very large house. I visit this house every day and know most of the rooms on the ground floor, but there are bedrooms I’ve never been in, closets I haven’t opened, nooks and crannies that I’ve never explored. I feel compelled to learn more about my computer the same way anyone would feel compelled to see a room they had never visited in their own home.

GNU Readline is an unassuming little software library that I relied on for years without realizing that it was there. Tens of thousands of people probably use it every day without thinking about it. If you use the Bash shell, every time you auto-complete a filename, or move the cursor around within a single line of input text, or search through the history of your previous commands, you are using GNU Readline. When you do those same things while using the command-line interface to Postgres (`psql`), say, or the Ruby REPL (`irb`), you are again using GNU Readline. Lots of software depends on the GNU Readline library to implement functionality that users expect, but the functionality is so auxiliary and unobtrusive that I imagine few people stop to wonder where it comes from.

GNU Readline was originally created in the 1980s by the Free Software Foundation. Today, it is an important if invisible part of everyone’s computing infrastructure, maintained by a single volunteer.

### Feature Replete

The GNU Readline library exists primarily to augment any command-line interface with a common set of keystrokes that allow you to move around within and edit a single line of input. If you press `Ctrl-A` at a Bash prompt, for example, that will jump your cursor to the very beginning of the line, while pressing `Ctrl-E` will jump it to the end. Another useful command is `Ctrl-U`, which will delete everything in the line before the cursor.

For an embarrassingly long time, I moved around on the command line by repeatedly tapping arrow keys. For some reason, I never imagined that there was a faster way to do it. Of course, no programmer familiar with a text editor like Vim or Emacs would deign to punch arrow keys for long, so something like Readline was bound to be created. Using Readline, you can do much more than just jump around—you can edit your single line of text as if you were using a text editor. There are commands to delete words, transpose words, upcase words, copy and paste characters, etc. In fact, most of Readline’s keystrokes/shortcuts are based on Emacs. Readline is essentially Emacs for a single line of text. You can even record and replay macros.

I have never used Emacs, so I find it hard to remember what all the different Readline commands are. But one thing about Readline that is really neat is that you can switch to using a Vim-based mode instead. To do this for Bash, you can use the `set` builtin. The following will tell Readline to use Vim-style commands for the current shell:

```
$ set -o vi
```

With this option enabled, you can delete words using `dw` and so on. The equivalent to `Ctrl-U` in the Emacs mode would be `d0`.

I was excited to try this when I first learned about it, but I’ve found that it doesn’t work so well for me. I’m happy that this concession to Vim users exists, and you might have more luck with it than me, particularly if you haven’t already used Readline’s default command keystrokes. My problem is that, by the time I heard about the Vim-based interface, I had already learned several Readline keystrokes. Even with the Vim option enabled, I keep using the default keystrokes by mistake. Also, without some sort of indicator, Vim’s modal design is awkward here—it’s very easy to forget which mode you’re in. So I’m stuck at a local maximum using Vim as my text editor but Emacs-style Readline commands. I suspect a lot of other people are in the same position.

If you feel, not unreasonably, that both Vim and Emacs’ keyboard command systems are bizarre and arcane, you can customize Readline’s key bindings and make them whatever you like. This is not hard to do. Readline reads a `~/.inputrc` file on startup that can be used to configure various options and key bindings. One thing I’ve done is reconfigured `Ctrl-K`. Normally it deletes from the cursor to the end of the line, but I rarely do that. So I’ve instead bound it so that pressing `Ctrl-K` deletes the whole line, regardless of where the cursor is. I’ve done that by adding the following to `~/.inputrc`:

```
Control-k: kill-whole-line
```

Each Readline command (the documentation refers to them as _functions_) has a name that you can associate with a key sequence this way. If you edit `~/.inputrc` in Vim, it turns out that Vim knows the filetype and will help you by highlighting valid function names but not invalid ones!

Another thing you can do with `~/.inputrc` is create canned macros by mapping key sequences to input strings. [The Readline manual][1] gives one example that I think is especially useful. I often find myself wanting to save the output of a program to a file, which means that I often append something like `> output.txt` to Bash commands. To save some time, you could make this a Readline macro:

```
Control-o: "> output.txt"
```

Now, whenever you press `Ctrl-O`, you’ll see that `> output.txt` gets added after your cursor on the command line. Neat!

But with macros you can do more than just create shortcuts for strings of text. The following entry in `~/.inputrc` means that, every time I press `Ctrl-J`, any text I already have on the line is surrounded by `$(` and `)`. The macro moves to the beginning of the line with `Ctrl-A`, adds `$(`, then moves to the end of the line with `Ctrl-E` and adds `)`:

```
Control-j: "\C-a$(\C-e)"
```

This might be useful if you often need the output of one command to use for another, such as in:

```
$ cd $(brew --prefix)
```

The `~/.inputrc` file also allows you to set different values for what the Readline manual calls _variables_. These enable or disable certain Readline behaviors. You can use these variables to change, for example, how Readline auto-completion works or how the Readline history search works. One variable I’d recommend turning on is the `revert-all-at-newline` variable, which by default is off. When the variable is off, if you pull a line from your command history using the reverse search feature, edit it, but then decide to search instead for another line, the edit you made is preserved in the history. I find this confusing because it leads to lines showing up in your Bash command history that you never actually ran. So add this to your `~/.inputrc`:

```
set revert-all-at-newline on
```

When you set options or key bindings using `~/.inputrc`, they apply wherever the Readline library is used. This includes Bash most obviously, but you’ll also get the benefit of your changes in other programs like `irb` and `psql` too! A Readline macro that inserts `SELECT * FROM` could be useful if you often use command-line interfaces to relational databases.

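A minimal sketch of such a macro, using the same `~/.inputrc` macro syntax as above (the choice of `Ctrl-V` is arbitrary and would override its default `quoted-insert` binding, so pick any key you don't otherwise use):

```
Control-v: "SELECT * FROM "
```
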
### Chet Ramey

GNU Readline is today maintained by Chet Ramey, a Senior Technology Architect at Case Western Reserve University. Ramey also maintains the Bash shell. Both projects were first authored by a Free Software Foundation employee named Brian Fox beginning in 1988. But Ramey has been the sole maintainer since around 1994.

Ramey told me via email that Readline, far from being an original idea, was created to implement functionality prescribed by the POSIX specification, which in the late 1980s had just been created. Many earlier shells, including the Korn shell and at least one version of the Unix System V shell, included line editing functionality. The 1988 version of the Korn shell (`ksh88`) provided both Emacs-style and Vi/Vim-style editing modes. As far as I can tell from [the manual page][2], the Korn shell would decide which mode you wanted to use by looking at the `VISUAL` and `EDITOR` environment variables, which is pretty neat. The parts of POSIX that specified shell functionality were closely modeled on `ksh88`, so GNU Bash was going to have to implement a similarly flexible line-editing system to stay compliant. Hence Readline.

When Ramey first got involved in Bash development, Readline was a single source file in the Bash project directory. It was really just a part of Bash. Over time, the Readline file slowly moved toward becoming an independent project, though it was not until 1994 (with the 2.0 release of Readline) that Readline became a separate library entirely.

Readline is closely associated with Bash, and Ramey usually pairs Readline releases with Bash releases. But as I mentioned above, Readline is a library that can be used by any software implementing a command-line interface. And it’s really easy to use. This is a simple example, but here’s how you would use Readline in your own C program. The string argument to the `readline()` function is the prompt that you want Readline to display to the user:

```
#include <stdio.h>
#include <stdlib.h>
#include "readline/readline.h"

int main(int argc, char** argv)
{
    /* Readline handles all the fancy line editing and returns the final input. */
    char* line = readline("my-rl-example> ");

    /* readline() returns NULL on EOF (e.g. Ctrl-D), so check before using. */
    if (line == NULL)
        return 0;

    printf("You entered: \"%s\"\n", line);

    /* The returned buffer is allocated by Readline; the caller frees it. */
    free(line);

    return 0;
}
```

Your program hands off control to Readline, which is responsible for getting a line of input from the user (in such a way that allows the user to do all the fancy line-editing things). Once the user has actually submitted the line, Readline returns it to you. I was able to compile the above by linking against the Readline library, which I apparently have somewhere in my library search path, by invoking the following:

```
$ gcc main.c -lreadline
```

The Readline API is much more extensive than that single function of course, and anyone using it can tweak all sorts of things about the library’s behavior. Library users can even add new functions that end users can configure via `~/.inputrc`, meaning that Readline is very easy to extend. But, as far as I can tell, even Bash ultimately calls the simple `readline()` function to get input just as in the example above, though there is a lot of configuration beforehand. (See [this line][3] in the source for GNU Bash, which seems to be where Bash hands off responsibility for getting input to Readline.)

Ramey has now worked on Bash and Readline for well over a decade. He has never once been compensated for his work—he is and has always been a volunteer. Bash and Readline continue to be actively developed, though Ramey said that Readline changes much more slowly than Bash does. I asked Ramey what it was like being the sole maintainer of software that so many people use. He said that millions of people probably use Bash without realizing it (because every Apple device runs Bash), which makes him worry about how much disruption a breaking change might cause. But he’s slowly gotten used to the idea of all those people out there. He said that he continues to work on Bash and Readline because at this point he is deeply invested and because he simply likes to make useful software available to the world.

_You can find more information about Chet Ramey at [his website][4]._

_If you enjoyed this post, more like it come out every four weeks! Follow [@TwoBitHistory][5] on Twitter or subscribe to the [RSS feed][6] to make sure you know when a new post is out._

_Previously on TwoBitHistory…_

> Please enjoy my long overdue new post, in which I use the story of the BBC Micro and the Computer Literacy Project as a springboard to complain about Codecademy. <https://t.co/PiWlKljDjK>
>
> — TwoBitHistory (@TwoBitHistory) [March 31, 2019][7]

--------------------------------------------------------------------------------

via: https://twobithistory.org/2019/08/22/readline.html

作者:[Two-Bit History][a]
选题:[lujun9972][b]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)

本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出

[a]: https://twobithistory.org
[b]: https://github.com/lujun9972
[1]: https://tiswww.case.edu/php/chet/readline/readline.html
[2]: https://web.archive.org/web/20151105130220/http://www2.research.att.com/sw/download/man/man1/ksh88.html
[3]: https://github.com/bminor/bash/blob/9f597fd10993313262cab400bf3c46ffb3f6fd1e/parse.y#L1487
[4]: https://tiswww.case.edu/php/chet/
[5]: https://twitter.com/TwoBitHistory
[6]: https://twobithistory.org/feed.xml
[7]: https://twitter.com/TwoBitHistory/status/1112492084383092738?ref_src=twsrc%5Etfw
@ -0,0 +1,133 @@
[#]: collector: (lujun9972)
[#]: translator: ( )
[#]: reviewer: ( )
[#]: publisher: ( )
[#]: url: ( )
[#]: subject: (Linux Jargon Buster: What is FOSS (Free and Open Source Software)? What is Open Source?)
[#]: via: (https://itsfoss.com/what-is-foss/)
[#]: author: (Abhishek Prakash https://itsfoss.com/author/abhishek/)

Linux Jargon Buster: What is FOSS (Free and Open Source Software)? What is Open Source?
======

What does FOSS in It’s FOSS mean? What is FOSS?

I have been asked this question numerous times in the past. It was about time that I explained what FOSS is in Linux and the software world.

The distinction is important because FOSS is a generic term, and it could mean different things depending on the context. Here, I am discussing the FOSS principle in software.

### What is FOSS?

FOSS means Free and Open Source Software. It doesn’t mean software is free of cost. It means that the source code of the software is open for all and anyone is free to use, study and modify the code. This principle allows other people to contribute to the development and improvement of the software like a community.

#### The origin of FOSS

In the 60s and 70s, computers were hardware focused and the hardware was expensive. Computers were mainly used by academics in universities or researchers in labs. The limited amount of software used to come for free or with its source code, and users were allowed to modify the source code to suit their needs.

In the late 70s and early 80s, manufacturers stopped distributing source code in an attempt to keep their software from running on their competitors’ computers.

This restrictive licensing led to the inconvenience and dislike of people who were used to and fond of modifying software. In the mid 80s, Richard Stallman started the Free Software Movement.

[Stallman specified four essential freedoms][1] for a software to be Free and Open Source Software.

![Free Software Freedoms][2]

I am rephrasing them for easier understanding:

* Any user should be able to run the software for any purpose.
* Users should be free to see the source code of the software and, if need be, should be allowed to modify the code as well.
* Users should be free to distribute copies of the software to others.
* If a user modified the code, she/he should be free to distribute the modified code to others. The modified code must have the source code open.

If interested, I would advise reading this article on the [history of FOSS][3].

### Free in Free and Open Source Software DOES NOT mean free of cost

![][4]

As you may have noticed, the ‘free’ in Free and Open Source Software doesn’t mean it is free of cost. It means freedom to run, modify and distribute the software.

People often wrongly think that FOSS or open source software cannot have a price tag. This is not correct.

Most Free and Open Source Software is available free of cost for a number of reasons:

* The source code is already available to the public, so some developers see no point in putting a price tag on the downloads.
* Some projects are contributed to by a number of volunteers for free. So, the main developer(s) find it unethical to charge for something that has been contributed freely by so many people.
* Some projects are supported and/or developed by bigger corporate or non-profit organizations who employ developers to work on their open source projects.
* Some developers create open source projects as a hobby or out of their passion for contributing to the world with their code. Things like the number of downloads, contributions and words of appreciation matter more than money for them.

To avoid the emphasis on ‘free’, some people use the term FLOSS. FLOSS stands for Free and Libre Open Source Software. The word libre (meaning freedom) is different from gratuit/gratis (free of cost).

> Free as in free speech, not free as in free beer.

### How do FOSS projects make money?

It is a myth that open source projects don’t make money. Red Hat was the first open source company to reach the billion dollar mark. [IBM bought Red Hat for $34 billion][5]. There are many such examples.

Many open source projects, especially the ones in the enterprise sector, offer support and enterprise-oriented features for a fee. This is the main business model for Red Hat, SUSE Linux and other such projects.

Some open source projects like Discourse and WordPress offer hosted instances of their software for a premium fee.

Many open source projects, especially desktop applications, rely on donations. VLC, GIMP, Inkscape and other such open source software fall in this category. There are [ways to fund open-source programs][6], but usually you’ll find donation links on project websites.

Making money with open source software may be difficult, but it is not entirely impossible.

### But I am not a programmer. Why should I care if a software is open source or not?

This is a valid question. You are not a software developer, just a regular computer user. Even if the source code of the software is available, you won’t understand how the program works.

That’s fine. You won’t understand it, but someone with the necessary skill set will, and that’s what matters.

Think of it this way: perhaps you won’t understand a complicated legal document. But if you have the freedom to look at the document and keep a copy of it, you can consult someone who can check the document for legal pitfalls.

In other words, open source software has transparency.

### What is the difference between FOSS and Open Source?

![][7]

You’ll often come across terms FOSS and open source. They are often used interchangeably.

Are they the same thing? It is difficult to answer with a simple yes or no.

You see, the term ‘free’ in FOSS is confusing for many, as people incorrectly assume that it means free of cost. Enterprise executives, higher-ups and decision makers tend to focus on the ‘free’ in Free and Open Source. Since they are business people focused on making money for their company, the term ‘free’ acts as a deterrent to adopting the FOSS principles.

This is why a new organization named the [Open Source Initiative][8] was created in the mid 90s. They removed the ‘Free’ from Free and Open Source Software and created their own [definition of open source][9], and their own set of licenses.

The term ‘open source’ got quite popular, especially in the software industry. The executives are more comfortable with open source. The adoption of open source grew rapidly, and I believe the removal of the term ‘free’ did play a role here.

**Got questions?**

As I explained in the article [What is a Linux Distribution][10], the FOSS/open source concept played a big role in the development and popularity of Linux.

I tried to explain the concept of FOSS and open source in simpler terms in this jargon buster article. I have tried to avoid going into too much detail or technical minutiae.

I do hope you have a better understanding of this topic now. If you have got questions or suggestions, feel free to leave a comment and continue the discussion there.

--------------------------------------------------------------------------------

via: https://itsfoss.com/what-is-foss/

作者:[Abhishek Prakash][a]
选题:[lujun9972][b]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)

本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出

[a]: https://itsfoss.com/author/abhishek/
[b]: https://github.com/lujun9972
[1]: https://www.gnu.org/philosophy/free-sw.html
[2]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2020/09/foss-freedoms.jpg?resize=800%2C671&ssl=1
[3]: https://itsfoss.com/history-of-foss/
[4]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2020/09/think-free-speech-not-free-beer.jpg?resize=800%2C800&ssl=1
[5]: https://itsfoss.com/ibm-red-hat-acquisition/
[6]: https://itsfoss.com/open-source-funding-platforms/
[7]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2020/09/what-is-foss.png?resize=800%2C450&ssl=1
[8]: https://opensource.org/
[9]: https://opensource.org/osd
[10]: https://itsfoss.com/what-is-linux-distribution/
@ -0,0 +1,92 @@
[#]: collector: (lujun9972)
[#]: translator: ( )
[#]: reviewer: ( )
[#]: publisher: ( )
[#]: url: ( )
[#]: subject: (Telcos Move from Black boxes to Open Source)
[#]: via: (https://www.linux.com/interviews/telcos-move-from-black-boxes-to-open-source/)
[#]: author: (Swapnil Bhartiya https://www.linux.com/author/swapnil/)

Telcos Move from Black boxes to Open Source
======

*Linux Foundation Networking (LFN) organized its first virtual event last week, and we sat down with Arpit Joshipura, the General Manager of Networking, IoT and Edge at the Linux Foundation, to talk about the key points of the event and how LFN is leading the adoption of open source within the telco space.*

**Swapnil Bhartiya: Today, we have with us Arpit Joshipura, General Manager of Networking, IoT and Edge, at the Linux Foundation. Arpit, what were some of the highlights of this event? Some big announcements that you can talk about?**

**Arpit Joshipura:** This was a global event with more than 80 sessions and was attended by people from over 75 countries. The sessions were very diverse. A lot of the sessions were end-user driven and operator driven, as well as from our vendors and partners. If you take LF Networking and LFH as the two umbrellas that are leading the Networking and Edge implementations here, we had very significant announcements. I would probably group them into 5 main things:

Number one, we released a white paper at the Linux Foundation level where we had a bunch of vertical industries transformed using open source. These are over 100-year-old industries like telecom, automotive, finance, energy, healthcare, etc. So, that’s kind of one big announcement where vertical industries have taken advantage of open source.

The second announcement was easy enough: Google Cloud joins Linux Foundation Networking as a partner. That announcement comes on the basis of the telecom market and the cloud market converging together and building on each other.

The third major announcement was a project under LF Networking. If you remember, two years ago, a project collaboration with GSMA was started. It was called CNTT, which really defined and narrowed the scope of interoperability and compliance. And we have OPNFV under LFN. What we announced at Open Networking and Edge Summit is that the two projects are going to come together. This would be fantastic for a global community of operators who are simplifying the deployment and interoperability of implementations of NFVI, VNFs and CNFs.

The next announcement was around a research study that we released on open source code that was created by Linux Foundation Networking, using LFN analytics and COCOMO estimation. We’re talking $7.2 billion worth of IP investment, right? This is the power of shared technology.

And finally, we released a survey of the Edge community asking them, “Why are you contributing to open source?” And the answer was fascinating. It was all around innovation, speed to deployment, market creation. Yes, cost was important, but not initially.

So those were the 5 big highlights of the show from an LFN and LFH perspective.

**Swapnil Bhartiya: There are two things that I’m interested in. One is the consolidation that you talk about, and the second is survey. The fact is that everybody is using open source. There is no doubt about it. But the problem that is happening is since everybody’s using it, there seems to be some gap between the awareness of how to be a good open source citizen as well. What have you seen in the telco space?**

**Arpit Joshipura:** First of all, 5 years ago, they were all using black box and proprietary technologies. Then, we launched a project called OpenDaylight. And of course, OpenDaylight announced its 13th release today, roughly on its 6-year anniversary; from being proprietary then to, today, one of the more active projects called ONAP. The telcos are 4 of the Top 10 contributors of source code and open source, right? Who would have imagined that AT&T, Verizon, Amdocs, DT, Vodafone, and a China Mobile and a China Telecom, you name it, are all actively contributing code? So that’s a paradigm shift in terms of not only consuming it, but also contributing towards it.

**Swapnil Bhartiya: And since you mentioned ONAP, if I’m not wrong, I think AT&T released its own work as ECOMP. And then the projects within the Foundation were merged to create ONAP. And then you mentioned actually CNTT. So, what I want to understand from you is how many projects are there that you see within the Foundation? The thing is that the Linux Foundation and all those other foundations are open; it’s a very good place for those projects to come in. It’s obvious that there will be some projects that will overlap. So what is the situation right now? Where do you see some overlap happening and, at the same time, are there still gaps that you need to fill?**

**Arpit Joshipura:** So that’s a question of the philosophies of a foundation, right? I’ll start off with the most loose situation, which is GitHub. Millions and millions of projects on GitHub. Any PhD student can throw his code on GitHub and say that’s open source and, at the end of the day, if there’s no community around it, that project is dead. Okay. That’s the most extreme scenario. Then, there are foundations like CNCF who have a process of accepting projects that could have competing solutions. May the best project win.

From an LF Networking and LFH perspective, the process is a little bit more restrictive: there is a formal project life cycle document and a process available on the Wiki that looks at the complementary nature of the project, that looks at the ecosystem, that looks at how it will enable and foster innovation. Then based on that, the governing board and the neutral governance that we have set up under the Linux Foundation would approve it.

Overall, it depends on the philosophy for LFN and LFH. We have 8 projects each in the umbrella, and most of these projects are quite complementary when it comes to solving different use cases in different parts of the network.

**Swapnil Bhartiya: Awesome. Now, I want to talk about 5G a bit. I did not hear any announcements, but can you talk a bit about what work is going on to help the further deployment of 5G technologies?**

**Arpit Joshipura:** Yeah. I’m happy and sad to say that 5G is old news, right? The reality is all of the infrastructure work on 5G was already released earlier this year. The ONAP Frankfurt release, for example, has a blueprint on 5G slicing, right? All the work has been done, lots of blueprints, and Akraino using 5G and MEC. So, that work is done. The cities are getting lit up by the carriers. You see announcements from global carriers on 5G deployments. I think there are 2 missing pieces of work remaining for 5G.

One is obviously the O-RAN support, right? The O-RAN Software Community, which we also host at the Linux Foundation, is coming up with a second release. And all the support for 5G is in there.

The second part of 5G is really the compliance and verification testing. A lot of work is going into CNTT and OPNFV. Remember that merged project we talked about, where 5G is in the context of not just OpenStack, but also Kubernetes? So the cloud-native aspects of 5G are all being worked on this year. I think we’ll see a lot more cloud-native 5G deployments next year, primarily because projects like ONAP integrate with cloud-native platforms like Anthos or Azure Stack and things like that.

**Swapnil Bhartiya: What are some of the biggest challenges that the telco industry is facing? I mean, technically, no externalization and all those things were there, but foundations have solved the problem. Some rough ideas are still there that you’re trying to resolve for them.**

**Arpit Joshipura:** Yeah. I think the recent pandemic caused a significant change in the telcos’ thinking, right? Fortunately, because they had already started on a virtualization and open-source route, you heard from Android, and you heard from Deutsche Telekom, and you heard from Achronix, all of the operators were able to handle the change in the network traffic, change in the network, traffic direction, SLS workloads, etc., right? All because of the _softwarization_, as we call it, of the network.

Given the pandemic, I think the first challenge for them was, can the network hold up? And the answer is, yes. Right? All the work-from-home and all these video meetings, we have to hang out on the web, that was number one.

Number two, it’s good to hold up the network, but did I end up spending millions and millions of dollars on operational expenditures? And the answer to that is no, especially for the telcos who have embraced an open-source ecosystem, right? So people who have deployed projects like SDN or ONAP or automation and orchestration or closed-loop controls, they automatically configure and reconfigure based on workloads and services and traffic, right? And that does not require manual labor, right? Tremendous amounts of costs were saved from an opex perspective, right?

Operators who are still in the old mindset have significantly increased their opex, and that has caused a real strain on their budget sheets.

So those were the 2 big things that we felt were challenges, but have been solved. Going forward, now it’s just a quick rollout/build-out of 5G, expanding 5G to the Edge, and then partnering with the public cloud providers, at least here in the US, to bring cloud-native solutions to market.

**Swapnil Bhartiya: Awesome. Now, Arpit, if I’m not wrong, LF Edge is, I think, going to celebrate its second anniversary in January. What do you feel the project has achieved so far? What are its accomplishments? And what are some challenges that the project still has to tackle?**

**Arpit Joshipura:** Let me start off with the most important accomplishment as a community, and that is terminology. We have a project called State of the Edge and we just issued a white paper which outlines terminology, terms and definitions of what Edge is, because, historically, people use terms like thin edge, thick edge, cloud edge, far edge, near edge and blah, blah, blah. They’re all relative terms. Okay. It’s an edge in relation to who I am.

Instead of that, the paper now defines absolute terms. If I give you a quick example, there are really 2 kinds of edges. There’s a device edge, and then there is a service provider edge. A device edge is really controlled by the operator, by the end user, I should say. The service provider edge is really shared as a service, and the last mile typically separates them.

Now, if you double-click on each of these categories, then you have several incarnations of an edge. You can have an extremely constrained edge, microcontrollers, etc., mostly manufacturing, IIoT type. You could have a smart device edge like gateways, etc. Or you could have an on-prem server type device edge. Either way, an end user controls that edge, versus the other edge: whether it’s on the radio base stations or in a smart central office, the operator controls it. So that’s kind of the first accomplishment, right? Standardizing on terminology.

The second big Edge accomplishment is around 2 projects: Akraino and EdgeX Foundry. These are stage 3 mature projects. They have come out with significant [results]. Akraino, for example, has come out with 20-plus blueprints. These are blueprints that can actually be deployed today, right? Just to refresh, a blueprint is a declarative configuration that has everything from end to end to solve a particular use case. So things like connected classrooms, AR/VR, connected cars, right? Network cloud, smart factories, smart cities, etc. So all these are available today.

EdgeX is the IoT framework for an industrial setup, and it’s kind of the most downloaded. Those 2 projects, along with Fledge, EVE, Baetyl, Home Edge, Open Horizon, security advanced onboarding, NSoT, right? Very, very strong growth: over 200% growth in terms of contributions. Huge growth in membership, huge growth in new projects and the community overall. We’re seeing that Edge is really picking up greatly. Remember, I told you Edge is 4 times the size of the cloud. So, everybody is in it.

**Swapnil Bhartiya: Now, the second part of the question was also some of the challenges that are still there. You talked about accomplishments. What are the problems that you see that you still think that the project has to solve for the industry and the community?**

**Arpit Joshipura:** The fundamental challenge that remains is we’re still working as a community in different markets. I think the vendor ecosystem is trying to figure out who is the customer and who is the provider, right? Think of it this way: a carrier, for example, AT&T, could be a provider to a manufacturing factory, which actually could consume something from a provider, and then ship it to an end user. So, there’s like a value shift, if you may, in the business world, on who gets the cut, if you may. That’s still a challenge. People are trying to figure it out. I think people who are going to be quick to define, solve and implement solutions using open technology will probably turn out to be winners.

People who will just do analysis after analysis will be left behind, like in any other industry. I think that is kind of fundamentally number one. And number two, I think the speed at which we want to solve things. The pandemic has just accelerated the need for Edge and 5G. I think people are just eager to get gaming with low latency, get manufacturing, predictive maintenance with low latency, home surveillance with low latency, connected cars, autonomous driving, all the classroom use cases. They should have been done next year, but because of the pandemic, it just got accelerated.

--------------------------------------------------------------------------------

via: https://www.linux.com/interviews/telcos-move-from-black-boxes-to-open-source/

作者:[Swapnil Bhartiya][a]
选题:[lujun9972][b]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)

本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出

[a]: https://www.linux.com/author/swapnil/
[b]: https://github.com/lujun9972
@ -0,0 +1,91 @@
[#]: collector: (lujun9972)
[#]: translator: ( )
[#]: reviewer: ( )
[#]: publisher: ( )
[#]: url: ( )
[#]: subject: (5 qualities of great open source developer advocates)
[#]: via: (https://opensource.com/article/20/10/open-source-developer-advocates)
[#]: author: (Jason Blais https://opensource.com/users/jasonblais)

5 qualities of great open source developer advocates
======

Whether you're looking to hire a developer advocate or become one, here are the qualities to aim for.

![Women talking][1]

The developer relations job category is less than 10 years old, and [the developer advocate role is even newer][2]. In essence, developer advocates represent the voice of the user—in this case, that's usually the developer—internally to the company and the voice of the company externally to the community.

[Mattermost][3] depends on its developer advocates to be the bridge between the community and the organization. At Mattermost, a developer advocate's three primary areas of responsibility are:

* Raising awareness among developers about the open source project, including educating users and helping them get the most out of the platform
* Building strong relationships with users across open source and developer communities
* Advocating for users internally with the product team by sharing the community's feedback and challenges

Not everyone is cut out to succeed as an open source developer advocate. With that in mind, here are the top five qualities we've identified in outstanding developer advocates.

### 1\. A genuine passion for helping others
|
||||
|
||||
Developer advocates often start as developers or in other highly technical roles. They excel at their job but get less pleasure from creating solutions than they do by helping others do the same.
|
||||
|
||||
Over time, they turn this enthusiasm into enabling and empowering other developers to be successful. They naturally evolve into developer advocates by educating and helping users get the most out of the platforms they're working with.
|
||||
|
||||
Passion is one of the most important qualities of a developer advocate. To advocate for the user both internally and externally, they must put the user and the community first. This does not happen without genuine motivation to help others succeed.
|
||||
|
||||
### 2\. An authentic communication style
|
||||
|
||||
An outstanding developer advocate has a passion for writing and talking about technology—whether they're solving technical challenges, providing knowledge about specific frameworks (e.g., Kubernetes), or sharing solutions built on top of a platform.
|
||||
|
||||
But when they share their knowledge, they must be careful not to be perceived as promoting a specific platform or product. If any group is turned off by inauthentic marketing, [it is developers][4]. Developer relations is [not the same as developer marketing][5]. That is why authenticity is critical when connecting with developer communities—particularly in open source.
|
||||
|
||||
Combining this passion with an authentic style creates a communicator who captures developers' attention and pulls people in. That engagement enables them to raise awareness, educate users to get the most out of the platform, and build strong relationships: the three main areas that developer advocates own.
|
||||
|
||||
### 3\. A natural flair for building relationships
|
||||
|
||||
Developer advocates are typically extroverts. Through their authenticity and passion for helping others, they can build strong relationships with users and members of the community.
|
||||
|
||||
The truly outstanding advocates have a natural flair for creating connections through their superb networking skills. They also know where the communities exist, whether it's Reddit, Twitter, meetup groups, forums, or chat channels.
|
||||
|
||||
Why is having a natural flair for creating relationships so important?
|
||||
|
||||
First, developer advocates bring their existing connections to developers and open source communities when they join a team. Second, they come in knowing the right social media and developer channels to reach developers and open source users. Finally, they create new relationships with community influencers and open source leaders that can give your platform an opportunity to grow rapidly.
|
||||
|
||||
### 4\. A personal investment in the community
|
||||
|
||||
As I mentioned, developer advocates are not only the advocates of users internally; they are also the voice of the company externally to the community. This allows them to develop their own personal brand and "street cred" that they will carry with them after they move on to another company or community.
|
||||
|
||||
When a developer advocate cares about their personal brand and has a personal investment in what they do, they are typically more motivated. Not only are they responsible for cultivating the company's brand, but they are also putting their own reputation on the line. This can be frightening to some. But those who are brave and willing to personally commit have the edge needed to excel as a developer advocate.
|
||||
|
||||
As a side note, developer advocates who are employed by the company they're advocating for should work as part of the community and put the community ahead of themselves. They must be willing to continuously learn from and with the community, be a team player, and never put their own brand ahead of the company or community.
|
||||
|
||||
### 5\. Technical sharpness
|
||||
|
||||
The final key attribute of great developer advocates is their technical sharpness. Are they knowledgeable about cutting-edge technologies, languages, and frameworks? Do they learn technologies and tools easily? Are they self-taught, self-resourced learners?
|
||||
|
||||
Since other developers will look to them for guidance, it is important for developer advocates to be highly technical with several years of relevant experience in software development or DevOps. Without already being a developer (or otherwise highly technical), it would be difficult to really understand the developer mindset and know what tickles their curiosity.
|
||||
|
||||
### Other characteristics
|
||||
|
||||
These five qualities—a genuine passion for helping others, authentic communication capabilities, a natural flair for building relationships, a personal investment, and technical sharpness—are Mattermost's core characteristics in outstanding open source developer advocates. Are there any other must-have qualities I've missed? Let me know in the comments!
|
||||
|
||||
_Thank you to Justin Reynolds for the valuable edits on this article._
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://opensource.com/article/20/10/open-source-developer-advocates
|
||||
|
||||
作者:[Jason Blais][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://opensource.com/users/jasonblais
|
||||
[b]: https://github.com/lujun9972
|
||||
[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/conversation-interview-mentor.png?itok=HjoOPcrB (Women talking)
|
||||
[2]: https://medium.com/@ashleymcnamara/what-is-developer-advocacy-3a92442b627c
|
||||
[3]: http://mattermost.com/
|
||||
[4]: https://hackernoon.com/developer-marketing-allergies-authenticity-622014fdebfb
|
||||
[5]: https://medium.com/@aspleenic/developer-relations-and-developer-marketing-they-arent-the-same-thing-35b896159825
|
@ -1,969 +0,0 @@
|
||||
Go on very small hardware (Part 2)
|
||||
============================================================
|
||||
|
||||
|
||||
[][1]
|
||||
|
||||
At the end of the [first part][2] of this article I promised to write something about _interfaces_. I don’t want to give a complete, or even brief, lecture about interfaces here. Instead, I’ll show a simple example of how to define and use an interface, and then how to take advantage of the ubiquitous _io.Writer_ interface. There will also be a few words about _reflection_ and _semihosting_.
|
||||
|
||||
Interfaces are a crucial part of the Go language. If you want to learn more about them, I suggest reading [Effective Go][3] and [Russ Cox’s article][4].
|
||||
|
||||
### Concurrent Blinky – revisited
|
||||
|
||||
When you read the code of the previous examples, you probably noticed the counterintuitive way of turning the LED on or off. The _Set_ method was used to turn the LED off and the _Clear_ method was used to turn the LED on. This is due to driving the LEDs in an open-drain configuration. What can we do to make the code less confusing? Let’s define the _LED_ type with _On_ and _Off_ methods:
|
||||
|
||||
```
|
||||
type LED struct {
|
||||
pin gpio.Pin
|
||||
}
|
||||
|
||||
func (led LED) On() {
|
||||
led.pin.Clear()
|
||||
}
|
||||
|
||||
func (led LED) Off() {
|
||||
led.pin.Set()
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
Now we can simply call `led.On()` and `led.Off()`, which no longer raises any doubts.
|
||||
|
||||
In all previous examples I tried to use the same open-drain configuration so as not to complicate the code. But in the last example, it would be easier for me to connect the third LED between the GND and PA3 pins and configure PA3 in push-pull mode. The next example will use an LED connected this way.
|
||||
|
||||
But our new _LED_ type doesn’t support the push-pull configuration. In fact, we should call it _OpenDrainLED_ and define another type, _PushPullLED_:
|
||||
|
||||
```
|
||||
type PushPullLED struct {
|
||||
pin gpio.Pin
|
||||
}
|
||||
|
||||
func (led PushPullLED) On() {
|
||||
led.pin.Set()
|
||||
}
|
||||
|
||||
func (led PushPullLED) Off() {
|
||||
led.pin.Clear()
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
Note that both types have the same methods, which work the same way. It would be nice if code that operates on LEDs could use both types, without paying attention to which one it uses at the moment. The _interface type_ comes to the rescue:
|
||||
|
||||
```
|
||||
package main
|
||||
|
||||
import (
|
||||
"delay"
|
||||
|
||||
"stm32/hal/gpio"
|
||||
"stm32/hal/system"
|
||||
"stm32/hal/system/timer/systick"
|
||||
)
|
||||
|
||||
type LED interface {
|
||||
On()
|
||||
Off()
|
||||
}
|
||||
|
||||
type PushPullLED struct{ pin gpio.Pin }
|
||||
|
||||
func (led PushPullLED) On() {
|
||||
led.pin.Set()
|
||||
}
|
||||
|
||||
func (led PushPullLED) Off() {
|
||||
led.pin.Clear()
|
||||
}
|
||||
|
||||
func MakePushPullLED(pin gpio.Pin) PushPullLED {
|
||||
pin.Setup(&gpio.Config{Mode: gpio.Out, Driver: gpio.PushPull})
|
||||
return PushPullLED{pin}
|
||||
}
|
||||
|
||||
type OpenDrainLED struct{ pin gpio.Pin }
|
||||
|
||||
func (led OpenDrainLED) On() {
|
||||
led.pin.Clear()
|
||||
}
|
||||
|
||||
func (led OpenDrainLED) Off() {
|
||||
led.pin.Set()
|
||||
}
|
||||
|
||||
func MakeOpenDrainLED(pin gpio.Pin) OpenDrainLED {
|
||||
pin.Setup(&gpio.Config{Mode: gpio.Out, Driver: gpio.OpenDrain})
|
||||
return OpenDrainLED{pin}
|
||||
}
|
||||
|
||||
var led1, led2 LED
|
||||
|
||||
func init() {
|
||||
system.SetupPLL(8, 1, 48/8)
|
||||
systick.Setup(2e6)
|
||||
|
||||
gpio.A.EnableClock(false)
|
||||
led1 = MakeOpenDrainLED(gpio.A.Pin(4))
|
||||
led2 = MakePushPullLED(gpio.A.Pin(3))
|
||||
}
|
||||
|
||||
func blinky(led LED, period int) {
|
||||
for {
|
||||
led.On()
|
||||
delay.Millisec(100)
|
||||
led.Off()
|
||||
delay.Millisec(period - 100)
|
||||
}
|
||||
}
|
||||
|
||||
func main() {
|
||||
go blinky(led1, 500)
|
||||
blinky(led2, 1000)
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
We’ve defined the _LED_ interface, which has two methods: _On_ and _Off_. The _PushPullLED_ and _OpenDrainLED_ types represent two ways of driving LEDs. We also defined two _Make*LED_ functions, which act as constructors. Both types implement the _LED_ interface, so values of these types can be assigned to variables of type _LED_:
|
||||
|
||||
```
|
||||
led1 = MakeOpenDrainLED(gpio.A.Pin(4))
|
||||
led2 = MakePushPullLED(gpio.A.Pin(3))
|
||||
|
||||
```
|
||||
|
||||
In this case the assignability is checked at compile time. After the assignment, the _led1_ variable contains `OpenDrainLED{gpio.A.Pin(4)}` and a pointer to the method set of the _OpenDrainLED_ type. The `led1.On()` call roughly corresponds to the following C code:
|
||||
|
||||
```
|
||||
led1.methods->On(led1.value)
|
||||
|
||||
```
|
||||
|
||||
As you can see, this is a quite inexpensive abstraction if we consider only the function call overhead.
|
||||
|
||||
But any assignment to an interface causes a lot of information about the assigned type to be included in the binary. There can be especially much information in the case of a complex type that consists of many other types:
|
||||
|
||||
```
|
||||
$ egc
|
||||
$ arm-none-eabi-size cortexm0.elf
|
||||
text data bss dec hex filename
|
||||
10356 196 212 10764 2a0c cortexm0.elf
|
||||
|
||||
```
|
||||
|
||||
If we don’t use [reflection][5], we can save some bytes by not including the names of types and struct fields:
|
||||
|
||||
```
|
||||
$ egc -nf -nt
|
||||
$ arm-none-eabi-size cortexm0.elf
|
||||
text data bss dec hex filename
|
||||
10312 196 212 10720 29e0 cortexm0.elf
|
||||
|
||||
```
|
||||
|
||||
The resulting binary still contains some necessary information about types and full information about all exported methods (with names). This information is needed for checking assignability at runtime, mainly when you assign a value stored in an interface variable to some other variable.
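This runtime check is what makes, for example, the ok-form type assertion work. Here is a minimal sketch reusing the types defined earlier (my own illustration, not from the original article):

```
func example() {
	// The assertion compares the type descriptor stored in the
	// interface variable with OpenDrainLED's descriptor at runtime.
	var l LED = MakeOpenDrainLED(gpio.A.Pin(4))
	if od, ok := l.(OpenDrainLED); ok {
		od.Off() // we recovered the concrete value
	}
}
```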
|
||||
|
||||
We can also remove type and field names from imported packages by recompiling them all:
|
||||
|
||||
```
|
||||
$ cd $HOME/emgo
|
||||
$ ./clean.sh
|
||||
$ cd $HOME/firstemgo
|
||||
$ egc -nf -nt
|
||||
$ arm-none-eabi-size cortexm0.elf
|
||||
text data bss dec hex filename
|
||||
10272 196 212 10680 29b8 cortexm0.elf
|
||||
|
||||
```
|
||||
|
||||
Let’s load this program to see whether it works as expected. This time we’ll use the [st-flash][6] command:
|
||||
|
||||
```
|
||||
$ arm-none-eabi-objcopy -O binary cortexm0.elf cortexm0.bin
|
||||
$ st-flash write cortexm0.bin 0x8000000
|
||||
st-flash 1.4.0-33-gd76e3c7
|
||||
2018-04-10T22:04:34 INFO usb.c: -- exit_dfu_mode
|
||||
2018-04-10T22:04:34 INFO common.c: Loading device parameters....
|
||||
2018-04-10T22:04:34 INFO common.c: Device connected is: F0 small device, id 0x10006444
|
||||
2018-04-10T22:04:34 INFO common.c: SRAM size: 0x1000 bytes (4 KiB), Flash: 0x4000 bytes (16 KiB) in pages of 1024 bytes
|
||||
2018-04-10T22:04:34 INFO common.c: Attempting to write 10468 (0x28e4) bytes to stm32 address: 134217728 (0x8000000)
|
||||
Flash page at addr: 0x08002800 erased
|
||||
2018-04-10T22:04:34 INFO common.c: Finished erasing 11 pages of 1024 (0x400) bytes
|
||||
2018-04-10T22:04:34 INFO common.c: Starting Flash write for VL/F0/F3/F1_XL core id
|
||||
2018-04-10T22:04:34 INFO flash_loader.c: Successfully loaded flash loader in sram
|
||||
11/11 pages written
|
||||
2018-04-10T22:04:35 INFO common.c: Starting verification of write complete
|
||||
2018-04-10T22:04:35 INFO common.c: Flash written and verified! jolly good!
|
||||
|
||||
```
|
||||
|
||||
I didn’t connect the NRST signal to the programmer, so the _--reset_ option can’t be used and the reset button has to be pressed to run the program.
|
||||
|
||||

|
||||
|
||||
It seems that _st-flash_ works a bit unreliably with this board (it often requires resetting the ST-LINK dongle). Additionally, the current version doesn’t issue the reset command over SWD (it uses only the NRST signal). The software reset isn’t reliable; however, it usually works, and its lack introduces inconvenience. For this board-programmer pair, _OpenOCD_ works much better.
|
||||
|
||||
### UART
|
||||
|
||||
UART (Universal Asynchronous Receiver-Transmitter) is still one of the most important peripherals of today’s microcontrollers. Its advantage is the unique combination of the following properties:
|
||||
|
||||
* relatively high speed,
|
||||
|
||||
* only two signal lines (even one in case of half-duplex communication),
|
||||
|
||||
* symmetry of roles,
|
||||
|
||||
* synchronous in-band signaling about new data (start bit),
|
||||
|
||||
* accurate timing inside transmitted word.
|
||||
|
||||
Because of this, UART, originally intended to transmit asynchronous messages consisting of 7-9 bit words, can also be used to efficiently implement various other physical protocols, such as those used by [WS28xx LEDs][7] or [1-wire][8] devices.
|
||||
|
||||
However, we will use the UART in its usual role: printing text messages from our program.
|
||||
|
||||
```
|
||||
package main
|
||||
|
||||
import (
|
||||
"io"
|
||||
"rtos"
|
||||
|
||||
"stm32/hal/dma"
|
||||
"stm32/hal/gpio"
|
||||
"stm32/hal/irq"
|
||||
"stm32/hal/system"
|
||||
"stm32/hal/system/timer/systick"
|
||||
"stm32/hal/usart"
|
||||
)
|
||||
|
||||
var tts *usart.Driver
|
||||
|
||||
func init() {
|
||||
system.SetupPLL(8, 1, 48/8)
|
||||
systick.Setup(2e6)
|
||||
|
||||
gpio.A.EnableClock(true)
|
||||
tx := gpio.A.Pin(9)
|
||||
|
||||
tx.Setup(&gpio.Config{Mode: gpio.Alt})
|
||||
tx.SetAltFunc(gpio.USART1_AF1)
|
||||
d := dma.DMA1
|
||||
d.EnableClock(true)
|
||||
tts = usart.NewDriver(usart.USART1, d.Channel(2, 0), nil, nil)
|
||||
tts.Periph().EnableClock(true)
|
||||
tts.Periph().SetBaudRate(115200)
|
||||
tts.Periph().Enable()
|
||||
tts.EnableTx()
|
||||
|
||||
rtos.IRQ(irq.USART1).Enable()
|
||||
rtos.IRQ(irq.DMA1_Channel2_3).Enable()
|
||||
}
|
||||
|
||||
func main() {
|
||||
io.WriteString(tts, "Hello, World!\r\n")
|
||||
}
|
||||
|
||||
func ttsISR() {
|
||||
tts.ISR()
|
||||
}
|
||||
|
||||
func ttsDMAISR() {
|
||||
tts.TxDMAISR()
|
||||
}
|
||||
|
||||
//c:__attribute__((section(".ISRs")))
|
||||
var ISRs = [...]func(){
|
||||
irq.USART1: ttsISR,
|
||||
irq.DMA1_Channel2_3: ttsDMAISR,
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
You may find this code slightly complicated, but for now there is no simpler UART driver in the STM32 HAL (a simple polling driver will probably be useful in some cases). The _usart.Driver_ is an efficient driver that uses DMA and interrupts to offload the CPU.
|
||||
|
||||
The STM32 USART peripheral provides a traditional UART and its synchronous version. To use it as output, we have to connect its Tx signal to the right GPIO pin:
|
||||
|
||||
```
|
||||
tx.Setup(&gpio.Config{Mode: gpio.Alt})
|
||||
tx.SetAltFunc(gpio.USART1_AF1)
|
||||
|
||||
```
|
||||
|
||||
The _usart.Driver_ is configured in Tx-only mode (rxdma and rxbuf are set to nil):
|
||||
|
||||
```
|
||||
tts = usart.NewDriver(usart.USART1, d.Channel(2, 0), nil, nil)
|
||||
|
||||
```
|
||||
|
||||
We use its _WriteString_ method to print the famous sentence. Let’s clean everything and compile this program:
|
||||
|
||||
```
|
||||
$ cd $HOME/emgo
|
||||
$ ./clean.sh
|
||||
$ cd $HOME/firstemgo
|
||||
$ egc
|
||||
$ arm-none-eabi-size cortexm0.elf
|
||||
text data bss dec hex filename
|
||||
12728 236 176 13140 3354 cortexm0.elf
|
||||
|
||||
```
|
||||
|
||||
To see something, you need a UART peripheral in your PC.
|
||||
|
||||
**Do not use RS232 port or USB to RS232 converter!**
|
||||
|
||||
The STM32 family uses 3.3 V logic, but RS232 can produce from -15 V to +15 V, which will probably damage your MCU. You need a USB-to-UART converter that uses 3.3 V logic. Popular converters are based on FT232 or CP2102 chips.
|
||||
|
||||

|
||||
|
||||
You also need a terminal emulator program (I prefer [picocom][9]). Flash the new image, run the terminal emulator, and press the reset button a few times:
|
||||
|
||||
```
|
||||
$ openocd -d0 -f interface/stlink.cfg -f target/stm32f0x.cfg -c 'init; program cortexm0.elf; reset run; exit'
|
||||
Open On-Chip Debugger 0.10.0+dev-00319-g8f1f912a (2018-03-07-19:20)
|
||||
Licensed under GNU GPL v2
|
||||
For bug reports, read
|
||||
http://openocd.org/doc/doxygen/bugs.html
|
||||
debug_level: 0
|
||||
adapter speed: 1000 kHz
|
||||
adapter_nsrst_delay: 100
|
||||
none separate
|
||||
adapter speed: 950 kHz
|
||||
target halted due to debug-request, current mode: Thread
|
||||
xPSR: 0xc1000000 pc: 0x080016f4 msp: 0x20000a20
|
||||
adapter speed: 4000 kHz
|
||||
** Programming Started **
|
||||
auto erase enabled
|
||||
target halted due to breakpoint, current mode: Thread
|
||||
xPSR: 0x61000000 pc: 0x2000003a msp: 0x20000a20
|
||||
wrote 13312 bytes from file cortexm0.elf in 1.020185s (12.743 KiB/s)
|
||||
** Programming Finished **
|
||||
adapter speed: 950 kHz
|
||||
$
|
||||
$ picocom -b 115200 /dev/ttyUSB0
|
||||
picocom v3.1
|
||||
|
||||
port is : /dev/ttyUSB0
|
||||
flowcontrol : none
|
||||
baudrate is : 115200
|
||||
parity is : none
|
||||
databits are : 8
|
||||
stopbits are : 1
|
||||
escape is : C-a
|
||||
local echo is : no
|
||||
noinit is : no
|
||||
noreset is : no
|
||||
hangup is : no
|
||||
nolock is : no
|
||||
send_cmd is : sz -vv
|
||||
receive_cmd is : rz -vv -E
|
||||
imap is :
|
||||
omap is :
|
||||
emap is : crcrlf,delbs,
|
||||
logfile is : none
|
||||
initstring : none
|
||||
exit_after is : not set
|
||||
exit is : no
|
||||
|
||||
Type [C-a] [C-h] to see available commands
|
||||
Terminal ready
|
||||
Hello, World!
|
||||
Hello, World!
|
||||
Hello, World!
|
||||
|
||||
```
|
||||
|
||||
Every press of the reset button produces a new “Hello, World!” line. Everything works as expected.
|
||||
|
||||
To see bi-directional UART code for this MCU check out [this example][10].
|
||||
|
||||
### io.Writer
|
||||
|
||||
The _io.Writer_ interface is probably the second most commonly used interface type in Go, right after the _error_ interface. Its definition looks like this:
|
||||
|
||||
```
|
||||
type Writer interface {
|
||||
Write(p []byte) (n int, err error)
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
_usart.Driver_ implements _io.Writer_ so we can replace:
|
||||
|
||||
```
|
||||
tts.WriteString("Hello, World!\r\n")
|
||||
|
||||
```
|
||||
|
||||
with
|
||||
|
||||
```
|
||||
io.WriteString(tts, "Hello, World!\r\n")
|
||||
|
||||
```
|
||||
|
||||
Additionally you need to add the _io_ package to the _import_ section.
|
||||
|
||||
The declaration of _io.WriteString_ function looks as follows:
|
||||
|
||||
```
|
||||
func WriteString(w Writer, s string) (n int, err error)
|
||||
|
||||
```
|
||||
|
||||
As you can see, _io.WriteString_ allows strings to be written using any type that implements the _io.Writer_ interface. Internally, it checks whether the underlying type has a _WriteString_ method and uses it instead of _Write_ if available.
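For intuition, here is a rough sketch of how such a function can be written. This is how the Go standard library does it, via a helper interface (exported as io.StringWriter since Go 1.12); Emgo’s actual source may differ in details. It assumes the _io_ package is imported:

```
// stringWriter matches any type that has its own WriteString method.
type stringWriter interface {
	WriteString(s string) (n int, err error)
}

// writeString mimics io.WriteString: fast path through WriteString,
// fallback through Write with a []byte conversion.
func writeString(w io.Writer, s string) (n int, err error) {
	if sw, ok := w.(stringWriter); ok {
		return sw.WriteString(s)
	}
	return w.Write([]byte(s))
}
```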
|
||||
|
||||
Let’s compile the modified program:
|
||||
|
||||
```
|
||||
$ egc
|
||||
$ arm-none-eabi-size cortexm0.elf
|
||||
text data bss dec hex filename
|
||||
15456 320 248 16024 3e98 cortexm0.elf
|
||||
|
||||
```
|
||||
|
||||
As you can see, _io.WriteString_ causes a significant increase in the size of the binary: 15776 - 12964 = 2812 bytes (counting text + data, which is what occupies the Flash). There isn’t much space left on the Flash. What caused such a drastic increase in size?
|
||||
|
||||
Using the command:
|
||||
|
||||
```
|
||||
arm-none-eabi-nm --print-size --size-sort --radix=d cortexm0.elf
|
||||
|
||||
```
|
||||
|
||||
we can print all symbols ordered by size for both cases. By filtering and analyzing the obtained data (awk, diff), we can find about 80 new symbols. The ten largest are:
|
||||
|
||||
```
|
||||
> 00000062 T stm32$hal$usart$Driver$DisableRx
|
||||
> 00000072 T stm32$hal$usart$Driver$RxDMAISR
|
||||
> 00000076 T internal$Type$Implements
|
||||
> 00000080 T stm32$hal$usart$Driver$EnableRx
|
||||
> 00000084 t errors$New
|
||||
> 00000096 R $8$stm32$hal$usart$Driver$$
|
||||
> 00000100 T stm32$hal$usart$Error$Error
|
||||
> 00000360 T io$WriteString
|
||||
> 00000660 T stm32$hal$usart$Driver$Read
|
||||
|
||||
```
|
||||
|
||||
So, even though we don’t use the _usart.Driver.Read_ method, it was compiled in, the same as _DisableRx_, _RxDMAISR_, _EnableRx_, and others not mentioned above. Unfortunately, if you assign something to an interface, its full method set is required (with all dependencies). This isn’t a problem for large programs that use most of the methods anyway. But for our simple one, it’s a huge burden.
|
||||
|
||||
We’re already close to the limits of our MCU, but let’s try to print some numbers (you need to replace the _io_ package with _strconv_ in the _import_ section):
|
||||
|
||||
```
|
||||
func main() {
|
||||
a := 12
|
||||
b := -123
|
||||
|
||||
tts.WriteString("a = ")
|
||||
strconv.WriteInt(tts, a, 10, 0, 0)
|
||||
tts.WriteString("\r\n")
|
||||
tts.WriteString("b = ")
|
||||
strconv.WriteInt(tts, b, 10, 0, 0)
|
||||
tts.WriteString("\r\n")
|
||||
|
||||
tts.WriteString("hex(a) = ")
|
||||
strconv.WriteInt(tts, a, 16, 0, 0)
|
||||
tts.WriteString("\r\n")
|
||||
tts.WriteString("hex(b) = ")
|
||||
strconv.WriteInt(tts, b, 16, 0, 0)
|
||||
tts.WriteString("\r\n")
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
As in the case of the _io.WriteString_ function, the first argument of _strconv.WriteInt_ is of type _io.Writer_.
|
||||
|
||||
```
|
||||
$ egc
|
||||
/usr/local/arm/bin/arm-none-eabi-ld: /home/michal/firstemgo/cortexm0.elf section `.rodata' will not fit in region `Flash'
|
||||
/usr/local/arm/bin/arm-none-eabi-ld: region `Flash' overflowed by 692 bytes
|
||||
exit status 1
|
||||
|
||||
```
|
||||
|
||||
This time we’ve run out of space. Let’s try to slim down the information about types:
|
||||
|
||||
```
|
||||
$ cd $HOME/emgo
|
||||
$ ./clean.sh
|
||||
$ cd $HOME/firstemgo
|
||||
$ egc -nf -nt
|
||||
$ arm-none-eabi-size cortexm0.elf
|
||||
text data bss dec hex filename
|
||||
15876 316 320 16512 4080 cortexm0.elf
|
||||
|
||||
```
|
||||
|
||||
It was close, but we fit. Let’s load and run this code:
|
||||
|
||||
```
|
||||
a = 12
|
||||
b = -123
|
||||
hex(a) = c
|
||||
hex(b) = -7b
|
||||
|
||||
```
|
||||
|
||||
The _strconv_ package in Emgo is quite different from its archetype in Go. It is intended for direct use in writing formatted numbers, and in many cases it can replace the heavy _fmt_ package. That’s why the function names start with _Write_ instead of _Format_ and have two additional parameters. Below is an example of their use:
|
||||
|
||||
```
|
||||
func main() {
|
||||
b := -123
|
||||
strconv.WriteInt(tts, b, 10, 0, 0)
|
||||
tts.WriteString("\r\n")
|
||||
strconv.WriteInt(tts, b, 10, 6, ' ')
|
||||
tts.WriteString("\r\n")
|
||||
strconv.WriteInt(tts, b, 10, 6, '0')
|
||||
tts.WriteString("\r\n")
|
||||
strconv.WriteInt(tts, b, 10, 6, '.')
|
||||
tts.WriteString("\r\n")
|
||||
strconv.WriteInt(tts, b, 10, -6, ' ')
|
||||
tts.WriteString("\r\n")
|
||||
strconv.WriteInt(tts, b, 10, -6, '0')
|
||||
tts.WriteString("\r\n")
|
||||
strconv.WriteInt(tts, b, 10, -6, '.')
|
||||
tts.WriteString("\r\n")
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
Here is its output:
|
||||
|
||||
```
|
||||
-123
|
||||
-123
|
||||
-00123
|
||||
..-123
|
||||
-123
|
||||
-123
|
||||
-123..
|
||||
|
||||
```
|
||||
|
||||
### Unix streams and Morse code
|
||||
|
||||
Thanks to the fact that most functions that write something use _io.Writer_ instead of a concrete type (e.g. _FILE_ in C), we get functionality similar to _Unix streams_. In Unix we can easily combine simple commands to perform larger tasks. For example, we can write text to a file this way:
|
||||
|
||||
```
|
||||
echo "Hello, World!" > file.txt
|
||||
|
||||
```
|
||||
|
||||
The `>` operator writes the output stream of the preceding command to the file. There is also the `|` operator, which connects the output and input streams of adjacent commands.
|
||||
|
||||
Thanks to streams, we can easily convert/filter the output of any command. For example, to convert all letters to uppercase, we can filter echo’s output through the _tr_ command:
|
||||
|
||||
```
|
||||
echo "Hello, World!" | tr a-z A-Z > file.txt
|
||||
|
||||
```
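The same filtering role in Go is played by a wrapping _io.Writer_. As a warm-up before the Morse encoder below, here is a hedged sketch of a tr-like uppercasing writer (the _upperWriter_ name is mine, made up for this example; it assumes the _io_ package is imported):

```
// upperWriter uppercases ASCII letters and forwards the result,
// playing the role of `tr a-z A-Z` in the pipeline analogy.
type upperWriter struct{ w io.Writer }

func (u upperWriter) Write(p []byte) (int, error) {
	buf := make([]byte, len(p))
	for i, c := range p {
		if 'a' <= c && c <= 'z' {
			c -= 'a' - 'A' // convert to upper case
		}
		buf[i] = c
	}
	return u.w.Write(buf)
}
```

With it, `io.WriteString(upperWriter{tts}, "Hello, World!\r\n")` would print “HELLO, WORLD!”.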
|
||||
|
||||
To show the analogy between _io.Writer_ and Unix streams let’s write our:
|
||||
|
||||
```
|
||||
io.WriteString(tts, "Hello, World!\r\n")
|
||||
|
||||
```
|
||||
|
||||
in the following pseudo-unix form:
|
||||
|
||||
```
|
||||
io.WriteString "Hello, World!" | usart.Driver usart.USART1
|
||||
|
||||
```
|
||||
|
||||
The next example will show how to do this:
|
||||
|
||||
```
|
||||
io.WriteString "Hello, World!" | MorseWriter | usart.Driver usart.USART1
|
||||
|
||||
```
|
||||
|
||||
Let’s create a simple encoder that encodes the text written to it using Morse code:
|
||||
|
||||
```
|
||||
type MorseWriter struct {
|
||||
W io.Writer
|
||||
}
|
||||
|
||||
func (w *MorseWriter) Write(s []byte) (int, error) {
|
||||
var buf [8]byte
|
||||
for n, c := range s {
|
||||
switch {
|
||||
case c == '\n':
|
||||
c = ' ' // Replace new lines with spaces.
|
||||
case 'a' <= c && c <= 'z':
|
||||
c -= 'a' - 'A' // Convert to upper case.
|
||||
}
|
||||
if c < ' ' || 'Z' < c {
|
||||
continue // c is outside ASCII [' ', 'Z']
|
||||
}
|
||||
var symbol morseSymbol
|
||||
if c == ' ' {
|
||||
symbol.length = 1
|
||||
buf[0] = ' '
|
||||
} else {
|
||||
symbol = morseSymbols[c-'!']
|
||||
for i := uint(0); i < uint(symbol.length); i++ {
|
||||
if (symbol.code>>i)&1 != 0 {
|
||||
buf[i] = '-'
|
||||
} else {
|
||||
buf[i] = '.'
|
||||
}
|
||||
}
|
||||
}
|
||||
buf[symbol.length] = ' '
|
||||
if _, err := w.W.Write(buf[:symbol.length+1]); err != nil {
|
||||
return n, err
|
||||
}
|
||||
}
|
||||
return len(s), nil
|
||||
}
|
||||
|
||||
type morseSymbol struct {
|
||||
code, length byte
|
||||
}
|
||||
|
||||
//emgo:const
|
||||
var morseSymbols = [...]morseSymbol{
|
||||
{1<<0 | 1<<1 | 1<<2, 4}, // ! ---.
|
||||
{1<<1 | 1<<4, 6}, // " .-..-.
|
||||
{}, // #
|
||||
{1<<3 | 1<<6, 7}, // $ ...-..-
|
||||
|
||||
// Some code omitted...
|
||||
|
||||
{1<<0 | 1<<3, 4}, // X -..-
|
||||
{1<<0 | 1<<2 | 1<<3, 4}, // Y -.--
|
||||
{1<<0 | 1<<1, 4}, // Z --..
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
You can find the full _morseSymbols_ array [here][11]. The `//emgo:const` directive ensures that the _morseSymbols_ array won’t be copied to RAM.
|
||||
|
||||
Now we can print our sentence in two ways:
|
||||
|
||||
```
|
||||
func main() {
|
||||
s := "Hello, World!\r\n"
|
||||
mw := &MorseWriter{tts}
|
||||
|
||||
io.WriteString(tts, s)
|
||||
io.WriteString(mw, s)
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
We use a pointer to the _MorseWriter_, `&MorseWriter{tts}`, instead of a simple `MorseWriter{tts}` value, because the _MorseWriter_ is too big to fit into an interface variable.
|
||||
|
||||
Emgo, unlike Go, doesn’t dynamically allocate memory for values stored in interface variables. The interface type has a limited size, equal to the size of three pointers (to fit a _slice_) or two _float64_ (to fit a _complex128_), whichever is bigger. It can directly store values of all basic types and small structs/arrays, but for bigger values you must use pointers.
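A tiny sketch of the consequence (illustrative only; the _Big_ type is made up, and the rejection behavior is my assumption based on the rule above):

```
// Big is clearly larger than the interface payload described above,
// so in Emgo only a pointer to it can be stored in an io.Writer.
type Big struct{ buf [64]byte }

func (b Big) Write(p []byte) (int, error) { return len(p), nil }

var w io.Writer = &Big{} // OK: a pointer always fits
// var w io.Writer = Big{} // plain Go allows this; Emgo should reject it (assumption)
```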
|
||||
|
||||
Let’s compile this code and see its output:
|
||||
|
||||
```
|
||||
$ egc
|
||||
$ arm-none-eabi-size cortexm0.elf
|
||||
text data bss dec hex filename
|
||||
15152 324 248 15724 3d6c cortexm0.elf
|
||||
|
||||
```
|
||||
|
||||
```
|
||||
Hello, World!
|
||||
.... . .-.. .-.. --- --..-- .-- --- .-. .-.. -.. ---.
|
||||
|
||||
```
|
||||
|
||||
### The Ultimate Blinky
|
||||
|
||||
The _Blinky_ is the hardware equivalent of the _Hello, World!_ program. Once we have a Morse encoder, we can easily combine the two to obtain the _Ultimate Blinky_ program:
|
||||
|
||||
```
|
||||
package main
|
||||
|
||||
import (
|
||||
"delay"
|
||||
"io"
|
||||
|
||||
"stm32/hal/gpio"
|
||||
"stm32/hal/system"
|
||||
"stm32/hal/system/timer/systick"
|
||||
)
|
||||
|
||||
var led gpio.Pin
|
||||
|
||||
func init() {
|
||||
system.SetupPLL(8, 1, 48/8)
|
||||
systick.Setup(2e6)
|
||||
|
||||
gpio.A.EnableClock(false)
|
||||
led = gpio.A.Pin(4)
|
||||
|
||||
cfg := gpio.Config{Mode: gpio.Out, Driver: gpio.OpenDrain, Speed: gpio.Low}
|
||||
led.Setup(&cfg)
|
||||
}
|
||||
|
||||
type Telegraph struct {
|
||||
Pin gpio.Pin
|
||||
Dotms int // Dot length [ms]
|
||||
}
|
||||
|
||||
func (t Telegraph) Write(s []byte) (int, error) {
|
||||
for _, c := range s {
|
||||
switch c {
|
||||
case '.':
|
||||
t.Pin.Clear()
|
||||
delay.Millisec(t.Dotms)
|
||||
t.Pin.Set()
|
||||
delay.Millisec(t.Dotms)
|
||||
case '-':
|
||||
t.Pin.Clear()
|
||||
delay.Millisec(3 * t.Dotms)
|
||||
t.Pin.Set()
|
||||
delay.Millisec(t.Dotms)
|
||||
case ' ':
|
||||
delay.Millisec(3 * t.Dotms)
|
||||
}
|
||||
}
|
||||
return len(s), nil
|
||||
}
|
||||
|
||||
func main() {
|
||||
telegraph := &MorseWriter{Telegraph{led, 100}}
|
||||
for {
|
||||
io.WriteString(telegraph, "Hello, World! ")
|
||||
}
|
||||
}
|
||||
|
||||
// Some code omitted...
|
||||
|
||||
```
|
||||
|
||||
In the above example I omitted the definition of the _MorseWriter_ type because it was shown earlier. The full version is available [here][12]. Let’s compile and run it:
|
||||
|
||||
```
|
||||
$ egc
|
||||
$ arm-none-eabi-size cortexm0.elf
|
||||
text data bss dec hex filename
|
||||
11772 244 244 12260 2fe4 cortexm0.elf
|
||||
|
||||
```
|
||||
|
||||

|
||||
|
||||
### Reflection
|
||||
|
||||
Yes, Emgo supports [reflection][13]. The _reflect_ package isn’t complete yet, but what is done is enough to implement the _fmt.Print_ family of functions. Let’s see what we can do on our small MCU.
|
||||
|
||||
To reduce memory usage, we will use [semihosting][14] as the standard output. For convenience, we also write a simple _println_ function, which to some extent mimics _fmt.Println_.
|
||||
|
||||
```
|
||||
package main
|
||||
|
||||
import (
|
||||
"debug/semihosting"
|
||||
"reflect"
|
||||
"strconv"
|
||||
|
||||
"stm32/hal/system"
|
||||
"stm32/hal/system/timer/systick"
|
||||
)
|
||||
|
||||
var stdout semihosting.File
|
||||
|
||||
func init() {
|
||||
system.SetupPLL(8, 1, 48/8)
|
||||
systick.Setup(2e6)
|
||||
|
||||
var err error
|
||||
stdout, err = semihosting.OpenFile(":tt", semihosting.W)
|
||||
for err != nil {
|
||||
}
|
||||
}
|
||||
|
||||
type stringer interface {
|
||||
String() string
|
||||
}
|
||||
|
||||
func println(args ...interface{}) {
|
||||
for i, a := range args {
|
||||
if i > 0 {
|
||||
stdout.WriteString(" ")
|
||||
}
|
||||
switch v := a.(type) {
|
||||
case string:
|
||||
stdout.WriteString(v)
|
||||
case int:
|
||||
strconv.WriteInt(stdout, v, 10, 0, 0)
|
||||
case bool:
|
||||
strconv.WriteBool(stdout, v, 't', 0, 0)
|
||||
case stringer:
|
||||
stdout.WriteString(v.String())
|
||||
default:
|
||||
stdout.WriteString("%unknown")
|
||||
}
|
||||
}
|
||||
stdout.WriteString("\r\n")
|
||||
}
|
||||
|
||||
type S struct {
|
||||
A int
|
||||
B bool
|
||||
}
|
||||
|
||||
func main() {
|
||||
p := &S{-123, true}
|
||||
|
||||
v := reflect.ValueOf(p)
|
||||
|
||||
println("kind(p) =", v.Kind())
|
||||
println("kind(*p) =", v.Elem().Kind())
|
||||
println("type(*p) =", v.Elem().Type())
|
||||
|
||||
v = v.Elem()
|
||||
|
||||
println("*p = {")
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
ft := v.Type().Field(i)
|
||||
fv := v.Field(i)
|
||||
println(" ", ft.Name(), ":", fv.Interface())
|
||||
}
|
||||
println("}")
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
The _semihosting.OpenFile_ function allows us to open/create a file on the host side. The special path _:tt_ corresponds to the host’s standard output.
|
||||
|
||||
The _println_ function accepts an arbitrary number of arguments, each of an arbitrary type:
|
||||
|
||||
```
|
||||
func println(args ...interface{})
|
||||
|
||||
```
|
||||
|
||||
It’s possible because any type implements the empty interface _interface{}_. The _println_ function uses a [type switch][15] to print strings, integers and booleans:
|
||||
|
||||
```
|
||||
switch v := a.(type) {
|
||||
case string:
|
||||
stdout.WriteString(v)
|
||||
case int:
|
||||
strconv.WriteInt(stdout, v, 10, 0, 0)
|
||||
case bool:
|
||||
strconv.WriteBool(stdout, v, 't', 0, 0)
|
||||
case stringer:
|
||||
stdout.WriteString(v.String())
|
||||
default:
|
||||
stdout.WriteString("%unknown")
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
Additionally, it supports any type that implements the _stringer_ interface, that is, any type that has a _String()_ method. In any _case_ clause, the _v_ variable has the right type, the same as listed after the _case_ keyword.
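For example, a minimal type of our own would be printed through that case (a sketch; the _level_ type is made up for this illustration):

```
// level implements the stringer interface: it has a String() method.
type level int

func (l level) String() string {
	if l > 0 {
		return "high"
	}
	return "low"
}
```

Now `println("PA4 is", level(1))` would print “PA4 is high” via the stringer branch.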
|
||||
|
||||
The `reflect.ValueOf(p)` call returns _p_ in a form that allows its type and content to be analyzed programmatically. As you can see, we can even dereference pointers using `v.Elem()` and print all struct fields with their names.
|
||||
|
||||
Let’s try to compile this code. For now, let’s see what comes out if it’s compiled without type and field names:
|
||||
|
||||
```
|
||||
$ egc -nt -nf
|
||||
$ arm-none-eabi-size cortexm0.elf
|
||||
text data bss dec hex filename
|
||||
16028 216 312 16556 40ac cortexm0.elf
|
||||
|
||||
```
|
||||
|
||||
Only 140 free bytes are left on the Flash. Let’s load it using OpenOCD with semihosting enabled:
|
||||
|
||||
```
|
||||
$ openocd -d0 -f interface/stlink.cfg -f target/stm32f0x.cfg -c 'init; program cortexm0.elf; arm semihosting enable; reset run'
|
||||
Open On-Chip Debugger 0.10.0+dev-00319-g8f1f912a (2018-03-07-19:20)
|
||||
Licensed under GNU GPL v2
|
||||
For bug reports, read
|
||||
http://openocd.org/doc/doxygen/bugs.html
|
||||
debug_level: 0
|
||||
adapter speed: 1000 kHz
|
||||
adapter_nsrst_delay: 100
|
||||
none separate
|
||||
adapter speed: 950 kHz
|
||||
target halted due to debug-request, current mode: Thread
|
||||
xPSR: 0xc1000000 pc: 0x08002338 msp: 0x20000a20
|
||||
adapter speed: 4000 kHz
|
||||
** Programming Started **
|
||||
auto erase enabled
|
||||
target halted due to breakpoint, current mode: Thread
|
||||
xPSR: 0x61000000 pc: 0x2000003a msp: 0x20000a20
|
||||
wrote 16384 bytes from file cortexm0.elf in 0.700133s (22.853 KiB/s)
|
||||
** Programming Finished **
|
||||
semihosting is enabled
|
||||
adapter speed: 950 kHz
|
||||
kind(p) = ptr
|
||||
kind(*p) = struct
|
||||
type(*p) =
|
||||
*p = {
|
||||
X. : -123
|
||||
X. : true
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
If you’ve actually run this code, you noticed that semihosting is slow, especially if you write byte after byte (buffering helps).
|
||||
|
||||
As you can see, there is no type name for `*p`, and all struct fields have the same _X._ name. Let’s compile this program again, this time without the _-nt -nf_ options:
|
||||
|
||||
```
|
||||
$ egc
|
||||
$ arm-none-eabi-size cortexm0.elf
|
||||
text data bss dec hex filename
|
||||
16052 216 312 16580 40c4 cortexm0.elf
|
||||
|
||||
```
|
||||
|
||||
Now the type and field names have been included, but only those defined in the ~~_main.go_ file~~ _main_ package. The output of our program looks as follows:
|
||||
|
||||
```
|
||||
kind(p) = ptr
|
||||
kind(*p) = struct
|
||||
type(*p) = S
|
||||
*p = {
|
||||
A : -123
|
||||
B : true
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
Reflection is a crucial part of any easy-to-use serialization library, and serialization ~~algorithms~~ formats like [JSON][16] gain in importance in the IoT era.
|
||||
|
||||
This is where I finish the second part of this article. I think there is a chance for a third, more entertaining part, where we connect various interesting devices to this board. If this board can’t handle them, we’ll replace it with something a little bigger.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://ziutek.github.io/2018/04/14/go_on_very_small_hardware2.html
|
||||
|
||||
作者:[Michał Derkacz][a]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://ziutek.github.io/
|
||||
[1]:https://ziutek.github.io/2018/04/14/go_on_very_small_hardware2.html
|
||||
[2]:https://ziutek.github.io/2018/03/30/go_on_very_small_hardware.html
|
||||
[3]:https://golang.org/doc/effective_go.html#interfaces
|
||||
[4]:https://research.swtch.com/interfaces
|
||||
[5]:https://blog.golang.org/laws-of-reflection
|
||||
[6]:https://github.com/texane/stlink
|
||||
[7]:http://www.world-semi.com/solution/list-4-1.html
|
||||
[8]:https://en.wikipedia.org/wiki/1-Wire
|
||||
[9]:https://github.com/npat-efault/picocom
|
||||
[10]:https://github.com/ziutek/emgo/blob/master/egpath/src/stm32/examples/f030-demo-board/usart/main.go
|
||||
[11]:https://github.com/ziutek/emgo/blob/master/egpath/src/stm32/examples/f030-demo-board/morseuart/main.go
|
||||
[12]:https://github.com/ziutek/emgo/blob/master/egpath/src/stm32/examples/f030-demo-board/morseled/main.go
|
||||
[13]:https://blog.golang.org/laws-of-reflection
|
||||
[14]:http://infocenter.arm.com/help/topic/com.arm.doc.dui0471g/Bgbjjgij.html
|
||||
[15]:https://golang.org/doc/effective_go.html#type_switch
|
||||
[16]:https://en.wikipedia.org/wiki/JSON
|
430
sources/tech/20180503 Go on very small hardware (Part 3).md
Normal file
430
sources/tech/20180503 Go on very small hardware (Part 3).md
Normal file
@ -0,0 +1,430 @@
|
||||
[#]: collector: (lujun9972)
|
||||
[#]: translator: ( )
|
||||
[#]: reviewer: ( )
|
||||
[#]: publisher: ( )
|
||||
[#]: url: ( )
|
||||
[#]: subject: (Go on very small hardware (Part 3))
|
||||
[#]: via: (https://ziutek.github.io/2018/05/03/go_on_very_small_hardware3.html)
|
||||
[#]: author: (Michał Derkacz )
|
||||
|
||||
Go on very small hardware (Part 3)
|
||||
======
|
||||
[![STM32F030F4P6][1]][2]
|
||||
|
||||
Most of the examples discussed in the [first][3] and [second][4] parts of this series blink LEDs in one way or another. It may have been interesting at first, but after a while it became a bit boring. Let’s do something more entertaining…
|
||||
|
||||
…let’s light more LEDs!
|
||||
|
||||
### WS281x LEDs
|
||||
|
||||
The [WS281x][5] RGB LEDs (and their clones) are very popular. You can buy them as single elements, chained into long strips or assembled into matrices, rings or other form-factors.
|
||||
|
||||
![WS2812B][6]
|
||||
|
||||
They can be connected in series, and thanks to this fact, you can control a long LED strip with only a single pin of your MCU. Unfortunately, the physical protocol used by their internal controller doesn’t fit straight into any peripheral you can find in an MCU. You have to use bit-banging or use the available peripherals in an unusual way.
|
||||
|
||||
Which of the available solutions is the most efficient depends on the number of LED strips controlled at the same time. If you have to drive 4 to 16 strips the most efficient way is to [use timers and DMA][7] (don’t overlook the links at the end of Martin’s article).
|
||||
|
||||
If you have to control only one or two strips, use the available SPI or UART peripherals. In the case of SPI, you can encode only two WS281x bits in one byte sent. UART allows denser coding thanks to clever use of the start and stop bits: 3 bits per byte sent.
|
||||
|
||||
The best explanation of how the UART protocol fits the WS281x protocol can be found on [this site][8]. If you don’t know Polish, here is the [English translation][9].
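To make the trick concrete, here is my own hedged sketch (not the wsuart implementation, and unverified against it) of packing three WS2812 bits into one UART data byte, assuming 3 UART bit-times per WS2812 bit and the hardware-inverted TXD configured later in this article:

```
// After inversion the line shows: start bit = high, data bit = its
// complement, stop bit = low. Each WS2812 bit occupies 3 bit-times as
// the line pattern 1,B,0 ("0" -> 100, "1" -> 110). The start bit
// supplies the leading high of the first symbol, so the 8 data bits
// carry the remaining levels:
// line: 1 a 0 1 b 0 1 c 0  =>  data bits d0..d7 = ^a,1,0,^b,1,0,^c,1
func encode3(a, b, c uint) byte {
	frame := byte(0x92) // 0b10010010: the fixed, complemented levels
	frame |= byte(1-a&1) << 0
	frame |= byte(1-b&1) << 3
	frame |= byte(1-c&1) << 6
	return frame
}
```

Bit a is sent first (UART transmits the LSB first); the stop bit and idle level simply extend the low tail of the last symbol, which the WS2812 tolerates.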
|
||||
|
||||
WS281x-based LEDs are still the most popular, but there are also SPI-controlled LEDs on the market: [APA102][10], [SK9822][11]. Three interesting articles about them: [1][12], [2][13], [3][14].
|
||||
|
||||
### LED ring
|
||||
|
||||
There are many WS2812-based rings on the market. I have this one:
|
||||
|
||||
![WS2812B][15]
|
||||
|
||||
It has 24 individually addressable RGB LEDs (WS2812B) and exposes four terminals: GND, 5V, DI and DO. You can chain more rings or other WS2812 based things by connecting DI (data in) terminal to the DO (data out) terminal of the previous one.
|
||||
|
||||
Let’s connect this ring to our STM32F030 board. We will use the UART-based driver, so DI should be connected to the TXD pin on the UART header. The WS2812B LED requires a power supply of at least 3.5V. 24 LEDs can consume quite a lot of current, so during programming/debugging it’s best to connect the GND and 5V terminals on the ring directly to the GND and 5V pins available on the ST-LINK programmer:
|
||||
|
||||
![WS2812B][16]
|
||||
|
||||
Our STM32F030F4P6 MCU and the whole STM32 F0, F3, F7, and L4 families have one important thing that the F1, F4, and L1 MCUs don’t have: they allow inverting the UART signals, and therefore we can connect the ring directly to the UART TXD pin. If you don’t know why we need such an inversion, you probably didn’t read the [article][9] I mentioned above.
|
||||
|
||||
So you can’t use the popular [Blue Pill][17] or the [STM32F4-DISCOVERY][18] this way. Use their SPI peripheral or an external inverter. See the [Christmas Tree Lights][19] project as an example of UART+inverter, or the [WS2812 example][20] for the NUCLEO-F411RE that uses SPI.
|
||||
|
||||
By the way, probably most of the DISCOVERY boards have one more problem: they work with VDD = 3V instead of 3.3V. The WS281x requires at least 0.7 × the supply voltage for DI high. This is 3.5V in the case of a 5V supply, and 3.3V in the case of the 4.7V you can find on the 5V pins of the DISCOVERY. As you can see, even in our case the first LED works 0.2V below spec. In the case of the DISCOVERY, it will work 0.3V below spec if powered from 4.7V and 0.5V below spec if powered from 5V.
|
||||
|
||||
Let’s finish this lengthy introduction and go to the code:
|
||||
|
||||
```
|
||||
package main
|
||||
|
||||
import (
|
||||
"delay"
|
||||
"math/rand"
|
||||
"rtos"
|
||||
|
||||
"led"
|
||||
"led/ws281x/wsuart"
|
||||
|
||||
"stm32/hal/dma"
|
||||
"stm32/hal/gpio"
|
||||
"stm32/hal/irq"
|
||||
"stm32/hal/system"
|
||||
"stm32/hal/system/timer/systick"
|
||||
"stm32/hal/usart"
|
||||
)
|
||||
|
||||
var tts *usart.Driver
|
||||
|
||||
func init() {
|
||||
system.SetupPLL(8, 1, 48/8)
|
||||
systick.Setup(2e6)
|
||||
|
||||
gpio.A.EnableClock(true)
|
||||
tx := gpio.A.Pin(9)
|
||||
|
||||
tx.Setup(&gpio.Config{Mode: gpio.Alt})
|
||||
tx.SetAltFunc(gpio.USART1_AF1)
|
||||
|
||||
d := dma.DMA1
|
||||
d.EnableClock(true)
|
||||
|
||||
tts = usart.NewDriver(usart.USART1, d.Channel(2, 0), nil, nil)
|
||||
tts.Periph().EnableClock(true)
|
||||
tts.Periph().SetBaudRate(3000000000 / 1390)
|
||||
tts.Periph().SetConf2(usart.TxInv)
|
||||
tts.Periph().Enable()
|
||||
tts.EnableTx()
|
||||
|
||||
rtos.IRQ(irq.USART1).Enable()
|
||||
rtos.IRQ(irq.DMA1_Channel2_3).Enable()
|
||||
}
|
||||
|
||||
func main() {
|
||||
var rnd rand.XorShift64
|
||||
rnd.Seed(1)
|
||||
rgb := wsuart.GRB
|
||||
strip := wsuart.Make(24)
|
||||
black := rgb.Pixel(0)
|
||||
for {
|
||||
c := led.Color(rnd.Uint32()).Scale(127)
|
||||
pixel := rgb.Pixel(c)
|
||||
for i := range strip {
|
||||
strip[i] = pixel
|
||||
tts.Write(strip.Bytes())
|
||||
delay.Millisec(40)
|
||||
}
|
||||
for i := range strip {
|
||||
strip[i] = black
|
||||
tts.Write(strip.Bytes())
|
||||
delay.Millisec(20)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func ttsISR() {
|
||||
tts.ISR()
|
||||
}
|
||||
|
||||
func ttsDMAISR() {
|
||||
tts.TxDMAISR()
|
||||
}
|
||||
|
||||
//c:__attribute__((section(".ISRs")))
|
||||
var ISRs = [...]func(){
|
||||
irq.USART1: ttsISR,
|
||||
irq.DMA1_Channel2_3: ttsDMAISR,
|
||||
}
|
||||
```
|
||||
|
||||
##### The import section
|
||||
|
||||
The new things in the import section compared to the previous examples are the math/rand package and the led package with its led/ws281x subtree. The led package itself contains the definition of the Color type. The led/ws281x/wsuart package defines the ColorOrder, Pixel and Strip types.
|
||||
|
||||
I was wondering about using the Color or RGBA type from image/color, and about defining Strip in a way that would implement the image.Image interface, but because of the gamma correction used and the big overhead of the image/draw package, I ended up with the simple:
|
||||
|
||||
```
|
||||
type Color uint32
|
||||
type Strip []Pixel
|
||||
```
|
||||
|
||||
with a few useful methods. However, this can change in the future.
|
||||
|
||||
##### The init function
|
||||
|
||||
There aren’t many novelties in the init function. The UART baud rate was changed from 115200 to 3000000000/1390 ≈ 2158273, which corresponds to 1390 nanoseconds per WS2812 bit (three UART bit-times of about 463 ns each). The TxInv bit in the CR2 register is set to invert the TXD signal.
|
||||
|
||||
##### The main function
|
||||
|
||||
The XorShift64 pseudorandom number generator is used to generate random colors. [XORSHIFT][22] is currently the only algorithm implemented by the math/rand package. You have to explicitly initialize it using its Seed method with a nonzero argument.
|
||||
|
||||
The rgb variable is of type wsuart.ColorOrder and is set to the GRB color order used by WS2812 (WS2811 uses RGB order). It’s then used to translate colors to pixels.
|
||||
|
||||
The `wsuart.Make(24)` call creates an initialized strip of 24 pixels. It is the equivalent of:
|
||||
|
||||
```
|
||||
strip := make(wsuart.Strip, 24)
|
||||
strip.Clear()
|
||||
```
|
||||
|
||||
The rest of the code uses random colors to draw something similar to a “Please Wait…” spinner.
|
||||
|
||||
The strip slice acts as a framebuffer. The `tts.Write(strip.Bytes())` sends the content of the framebuffer to the ring.
|
||||
|
||||
##### Interrupts
|
||||
|
||||
The program ends with the code that handles interrupts, the same as in the previous [UART example][23].
|
||||
|
||||
Let’s compile it and run:
|
||||
|
||||
```
|
||||
$ egc
|
||||
$ arm-none-eabi-size cortexm0.elf
|
||||
text data bss dec hex filename
|
||||
14088 240 204 14532 38c4 cortexm0.elf
|
||||
$ openocd -d0 -f interface/stlink.cfg -f target/stm32f0x.cfg -c 'init; program cortexm0.elf; reset run; exit'
|
||||
```
|
||||
|
||||
I’ve skipped the openocd output. The video below shows how this program works:
|
||||
|
||||
Sorry, your browser doesn't support embedded videos.
|
||||
|
||||
### Let’s do something useful…
|
||||
|
||||
At the beginning of the [first part][3] I asked: “How low can we Go and still do something useful?”. Our MCU is actually a low-end device (8-bitters will probably disagree with me), but we haven’t done anything useful so far.
|
||||
|
||||
So… Let’s do something useful… Let’s make a Clock!
|
||||
|
||||
There are many examples of clocks built of RGB LEDs on the Internet. Let’s make our own using our little board and RGB ring. We change the previous code as described below.
|
||||
|
||||
##### The import section
|
||||
|
||||
Remove the math/rand package and add stm32/hal/exti.
|
||||
|
||||
##### Global variables
|
||||
|
||||
Add two new global variables: btn and btnev:
|
||||
|
||||
```
|
||||
var (
|
||||
tts *usart.Driver
|
||||
btn gpio.Pin
|
||||
btnev rtos.EventFlag
|
||||
)
|
||||
```
|
||||
|
||||
They will be used to handle the “button” that will set our clock. Our board has no button other than reset, but somehow we can manage without one.
|
||||
|
||||
##### The init function
|
||||
|
||||
Add this code to the init function:
|
||||
|
||||
```
|
||||
btn = gpio.A.Pin(4)
|
||||
|
||||
btn.Setup(&gpio.Config{Mode: gpio.In, Pull: gpio.PullUp})
|
||||
ei := exti.Lines(btn.Mask())
|
||||
ei.Connect(btn.Port())
|
||||
ei.EnableFallTrig()
|
||||
ei.EnableRiseTrig()
|
||||
ei.EnableIRQ()
|
||||
|
||||
rtos.IRQ(irq.EXTI4_15).Enable()
|
||||
```
|
||||
|
||||
The PA4 pin is configured as an input with the internal pull-up resistor enabled. It’s connected to the onboard LED, but that doesn’t hinder anything. More importantly, it’s located next to the GND pin, so we can use any metal object to simulate the button and set the clock. As a bonus, we get additional feedback from the onboard LED.
|
||||
|
||||
We use the EXTI peripheral to track the PA4 state. It’s configured to generate an interrupt on any change.
|
||||
|
||||
##### The btnWait function
|
||||
|
||||
Define a new auxiliary function:
|
||||
|
||||
```
|
||||
func btnWait(state int, deadline int64) bool {
|
||||
for btn.Load() != state {
|
||||
if !btnev.Wait(1, deadline) {
|
||||
return false // timeout
|
||||
}
|
||||
btnev.Reset(0)
|
||||
}
|
||||
delay.Millisec(50) // debouncing
|
||||
return true
|
||||
}
|
||||
```
|
||||
|
||||
It waits for the specified state on the “button” pin, but only until the deadline passes. It is a slightly improved version of this polling code:
|
||||
|
||||
```
|
||||
for btn.Load() != state {
|
||||
if rtos.Nanosec() >= deadline {
|
||||
// timeout
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Our btnWait function, instead of busy-waiting for the state or deadline, uses the btnev variable of type rtos.EventFlag to sleep until something happens. You can of course use a channel instead of an rtos.EventFlag, but the latter is much cheaper.
|
||||
|
||||
##### The main function
|
||||
|
||||
We need a completely new main function:
|
||||
|
||||
```
|
||||
func main() {
|
||||
rgb := wsuart.GRB
|
||||
strip := wsuart.Make(24)
|
||||
ds := 4 * 60 / len(strip) // Interval between LEDs (quarter-seconds).
|
||||
adjust := 0
|
||||
adjspeed := ds
|
||||
for {
|
||||
qs := int(rtos.Nanosec() / 25e7) // Quarter-seconds since reset.
|
||||
qa := qs + adjust
|
||||
|
||||
qa %= 12 * 3600 * 4 // Quarter-seconds since 0:00 or 12:00.
|
||||
hi := len(strip) * qa / (12 * 3600 * 4)
|
||||
|
||||
qa %= 3600 * 4 // Quarter-seconds in the current hour.
|
||||
mi := len(strip) * qa / (3600 * 4)
|
||||
|
||||
qa %= 60 * 4 // Quarter-seconds in the current minute.
|
||||
si := len(strip) * qa / (60 * 4)
|
||||
|
||||
hc := led.Color(0x550000)
|
||||
mc := led.Color(0x005500)
|
||||
sc := led.Color(0x000055)
|
||||
|
||||
// Blend the colors if the hands of the clock overlap.
|
||||
if hi == mi {
|
||||
hc |= mc
|
||||
mc = hc
|
||||
}
|
||||
if mi == si {
|
||||
mc |= sc
|
||||
sc = mc
|
||||
}
|
||||
if si == hi {
|
||||
sc |= hc
|
||||
hc = sc
|
||||
}
|
||||
|
||||
// Draw the clock and write to the ring.
|
||||
strip.Clear()
|
||||
strip[hi] = rgb.Pixel(hc)
|
||||
strip[mi] = rgb.Pixel(mc)
|
||||
strip[si] = rgb.Pixel(sc)
|
||||
tts.Write(strip.Bytes())
|
||||
|
||||
// Sleep until the button pressed or the second hand should be moved.
|
||||
if btnWait(0, int64(qs+ds)*25e7) {
|
||||
adjust += adjspeed
|
||||
// Sleep until the button is released or timeout.
|
||||
if !btnWait(1, rtos.Nanosec()+100e6) {
|
||||
if adjspeed < 5*60*4 {
|
||||
adjspeed += 2 * ds
|
||||
}
|
||||
continue
|
||||
}
|
||||
adjspeed = ds
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
We use the rtos.Nanosec function instead of time.Now to obtain the current time. This saves a lot of Flash, but it also reduces our clock to an antique device that has no idea about days, months and years, and, worst of all, doesn’t handle daylight saving time changes.
|
||||
|
||||
Our ring has 24 LEDs, so the second hand can be presented with an accuracy of 2.5s. To avoid sacrificing this accuracy and to get smooth operation, we use a quarter-second as the base interval. A half-second would be enough, but a quarter-second is more accurate and also works well with 16 and 48 LEDs. For example, at 3:30 the clock has qa = 50400 quarter-seconds, so the hour-hand index is hi = 24 * 50400 / 172800 = 7: one LED past the 3 o’clock position (LED 6), because the hour hand advances continuously.
|
||||
|
||||
The red, green and blue colors are used for the hour, minute and second hands, respectively. This allows us to use a simple logical OR operation for color blending. We have the Color.Blend method, which can blend arbitrary colors, but we’re low on Flash, so we prefer the simplest possible solution.
|
||||
|
||||
We redraw the clock only when the second hand has moved. The:
|
||||
|
||||
```
|
||||
btnWait(0, int64(qs+ds)*25e7)
|
||||
```
|
||||
|
||||
is waiting for exactly that moment or for a press of the button.
|
||||
|
||||
Every press of the button adjusts the clock forward. The adjustment accelerates when the button is held down for some time.
|
||||
|
||||
##### Interrupts
|
||||
|
||||
Define a new interrupt handler:
|
||||
|
||||
```
|
||||
func exti4_15ISR() {
|
||||
pending := exti.Pending() & 0xFFF0
|
||||
pending.ClearPending()
|
||||
if pending&exti.Lines(btn.Mask()) != 0 {
|
||||
btnev.Signal(1)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
and add `irq.EXTI4_15: exti4_15ISR,` entry to the ISRs array.
|
||||
|
||||
This handler (or interrupt service routine) handles the EXTI4_15 IRQ. The Cortex-M0 CPU supports significantly fewer IRQs than its bigger brothers, so you can often see one IRQ shared by multiple interrupt sources. In our case, one IRQ is shared by 12 EXTI lines.
|
||||
|
||||
The exti4_15ISR reads all pending bits and selects the 12 most significant of them. Next, it clears the selected bits in EXTI and starts to handle them. In our case, only bit 4 is checked. The `btnev.Signal(1)` call causes `btnev.Wait(1, deadline)` to wake up and return true.
|
||||
|
||||
You can find the complete code on [Github][24]. Let’s compile it:
|
||||
|
||||
```
|
||||
$ egc
|
||||
$ arm-none-eabi-size cortexm0.elf
|
||||
text data bss dec hex filename
|
||||
15960 240 216 16416 4020 cortexm0.elf
|
||||
```
|
||||
|
||||
There are only 184 bytes left for any improvements. Let’s rebuild everything one more time, but this time without any type and field names in the typeinfo:
|
||||
|
||||
```
|
||||
$ cd $HOME/emgo
|
||||
$ ./clean.sh
|
||||
$ cd $HOME/firstemgo
|
||||
$ egc -nf -nt
|
||||
$ arm-none-eabi-size cortexm0.elf
|
||||
text data bss dec hex filename
|
||||
15120 240 216 15576 3cd8 cortexm0.elf
|
||||
```
|
||||
|
||||
Now, with a kilobyte of free space you can improve something. Let’s see how it works:
|
||||
|
||||
Sorry, your browser doesn't support embedded videos.
|
||||
|
||||
I don’t know how I managed to hit exactly 3:00 !?
|
||||
|
||||
That’s all, folks! In part 4 (ending this series) we’ll try to display something on an LCD.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://ziutek.github.io/2018/05/03/go_on_very_small_hardware3.html
|
||||
|
||||
作者:[Michał Derkacz][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://ziutek.github.io
|
||||
[b]: https://github.com/lujun9972
|
||||
[1]: https://ziutek.github.io/images/mcu/f030-demo-board/board.jpg
|
||||
[2]: https://ziutek.github.io/2018/05/03/go_on_very_small_hardware3.html
|
||||
[3]: https://ziutek.github.io/2018/03/30/go_on_very_small_hardware.html
|
||||
[4]: https://ziutek.github.io/2018/04/14/go_on_very_small_hardware2.html
|
||||
[5]: http://www.world-semi.com/solution/list-4-1.html
|
||||
[6]: https://ziutek.github.io/images/led/ws2812b.jpg
|
||||
[7]: http://www.martinhubacek.cz/arm/improved-stm32-ws2812b-library
|
||||
[8]: http://mikrokontrolery.blogspot.com/2011/03/Diody-WS2812B-sterowanie-XMega-cz-2.html
|
||||
[9]: https://translate.google.pl/translate?sl=pl&tl=en&u=http://mikrokontrolery.blogspot.com/2011/03/Diody-WS2812B-sterowanie-XMega-cz-2.html
|
||||
[10]: http://neon-world.com/en/product.php
|
||||
[11]: http://www.normandled.com/index.php/Product/view/id/800.html
|
||||
[12]: https://cpldcpu.wordpress.com/2014/08/27/apa102/
|
||||
[13]: https://cpldcpu.wordpress.com/2014/11/30/understanding-the-apa102-superled/
|
||||
[14]: https://cpldcpu.wordpress.com/2016/12/13/sk9822-a-clone-of-the-apa102/
|
||||
[15]: https://ziutek.github.io/images/led/rgbring.jpg
|
||||
[16]: https://ziutek.github.io/images/led/ring-stlink-f030.jpg
|
||||
[17]: https://jeelabs.org/article/1649a/
|
||||
[18]: http://www.st.com/en/evaluation-tools/stm32f4discovery.html
|
||||
[19]: https://github.com/ziutek/emgo/tree/master/egpath/src/stm32/examples/minidev/treelights
|
||||
[20]: https://github.com/ziutek/emgo/tree/master/egpath/src/stm32/examples/nucleo-f411re/ws2812
|
||||
[21]: https://en.wikipedia.org/wiki/Gamma_correction
|
||||
[22]: https://en.wikipedia.org/wiki/Xorshift
|
||||
[23]: https://ziutek.github.io/2018/04/14/go_on_very_small_hardware2.html#uart
|
||||
[24]: https://github.com/ziutek/emgo/tree/master/egpath/src/stm32/examples/f030-demo-board/ws2812-clock
|
@ -1,315 +0,0 @@
|
||||
[#]: collector: (lujun9972)
|
||||
[#]: translator: (gxlct008)
|
||||
[#]: reviewer: ( )
|
||||
[#]: publisher: ( )
|
||||
[#]: url: ( )
|
||||
[#]: subject: (Building a Messenger App: Messages)
|
||||
[#]: via: (https://nicolasparada.netlify.com/posts/go-messenger-messages/)
|
||||
[#]: author: (Nicolás Parada https://nicolasparada.netlify.com/)
|
||||
|
||||
Building a Messenger App: Messages
|
||||
======
|
||||
|
||||
This post is the 4th in a series:
|
||||
|
||||
* [Part 1: Schema][1]
|
||||
* [Part 2: OAuth][2]
|
||||
* [Part 3: Conversations][3]
|
||||
|
||||
|
||||
|
||||
In this post we’ll code the endpoints to create a message and to list them, along with an endpoint to update the last time a participant read messages. Start by adding these routes in the `main()` function.
|
||||
|
||||
```
|
||||
router.HandleFunc("POST", "/api/conversations/:conversationID/messages", requireJSON(guard(createMessage)))
|
||||
router.HandleFunc("GET", "/api/conversations/:conversationID/messages", guard(getMessages))
|
||||
router.HandleFunc("POST", "/api/conversations/:conversationID/read_messages", guard(readMessages))
|
||||
```
|
||||
|
||||
Messages go into conversations, so the endpoints include the conversation ID.
|
||||
|
||||
### Create Message
|
||||
|
||||
This endpoint handles POST requests to `/api/conversations/{conversationID}/messages` with a JSON body containing just the message content, and it returns the newly created message. It has two side effects: it updates the conversation’s `last_message_id` and the participant’s `messages_read_at`.
|
||||
|
||||
```
|
||||
func createMessage(w http.ResponseWriter, r *http.Request) {
|
||||
var input struct {
|
||||
Content string `json:"content"`
|
||||
}
|
||||
defer r.Body.Close()
|
||||
if err := json.NewDecoder(r.Body).Decode(&input); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
errs := make(map[string]string)
|
||||
input.Content = removeSpaces(input.Content)
|
||||
if input.Content == "" {
|
||||
errs["content"] = "Message content required"
|
||||
} else if len([]rune(input.Content)) > 480 {
|
||||
errs["content"] = "Message too long. 480 max"
|
||||
}
|
||||
if len(errs) != 0 {
|
||||
respond(w, Errors{errs}, http.StatusUnprocessableEntity)
|
||||
return
|
||||
}
|
||||
|
||||
ctx := r.Context()
|
||||
authUserID := ctx.Value(keyAuthUserID).(string)
|
||||
conversationID := way.Param(ctx, "conversationID")
|
||||
|
||||
tx, err := db.BeginTx(ctx, nil)
|
||||
if err != nil {
|
||||
respondError(w, fmt.Errorf("could not begin tx: %v", err))
|
||||
return
|
||||
}
|
||||
defer tx.Rollback()
|
||||
|
||||
isParticipant, err := queryParticipantExistance(ctx, tx, authUserID, conversationID)
|
||||
if err != nil {
|
||||
respondError(w, fmt.Errorf("could not query participant existance: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
if !isParticipant {
|
||||
http.Error(w, "Conversation not found", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
var message Message
|
||||
if err := tx.QueryRowContext(ctx, `
|
||||
INSERT INTO messages (content, user_id, conversation_id) VALUES
|
||||
($1, $2, $3)
|
||||
RETURNING id, created_at
|
||||
`, input.Content, authUserID, conversationID).Scan(
|
||||
&message.ID,
|
||||
&message.CreatedAt,
|
||||
); err != nil {
|
||||
respondError(w, fmt.Errorf("could not insert message: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
if _, err := tx.ExecContext(ctx, `
|
||||
UPDATE conversations SET last_message_id = $1
|
||||
WHERE id = $2
|
||||
`, message.ID, conversationID); err != nil {
|
||||
respondError(w, fmt.Errorf("could not update conversation last message ID: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
if err = tx.Commit(); err != nil {
|
||||
respondError(w, fmt.Errorf("could not commit tx to create a message: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
go func() {
|
||||
if err = updateMessagesReadAt(nil, authUserID, conversationID); err != nil {
|
||||
log.Printf("could not update messages read at: %v\n", err)
|
||||
}
|
||||
}()
|
||||
|
||||
message.Content = input.Content
|
||||
message.UserID = authUserID
|
||||
message.ConversationID = conversationID
|
||||
// TODO: notify about new message.
|
||||
message.Mine = true
|
||||
|
||||
respond(w, message, http.StatusCreated)
|
||||
}
|
||||
```
|
||||
|
||||
First, it decodes the request body into a struct with the message content. Then, it validates that the content is not empty and has no more than 480 characters.
|
||||
|
||||
```
|
||||
var rxSpaces = regexp.MustCompile("\\s+")
|
||||
|
||||
func removeSpaces(s string) string {
|
||||
if s == "" {
|
||||
return s
|
||||
}
|
||||
|
||||
lines := make([]string, 0)
|
||||
for _, line := range strings.Split(s, "\n") {
|
||||
line = rxSpaces.ReplaceAllLiteralString(line, " ")
|
||||
line = strings.TrimSpace(line)
|
||||
if line != "" {
|
||||
lines = append(lines, line)
|
||||
}
|
||||
}
|
||||
return strings.Join(lines, "\n")
|
||||
}
|
||||
```
|
||||
|
||||
This is the function that sanitizes whitespace. It iterates over each line, collapses runs of consecutive whitespace into a single space, trims the result, and joins the non-empty lines back together.
|
||||
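To make the behavior concrete, here’s a tiny standalone sketch (the function body is copied verbatim from above so the example compiles on its own):

```
package main

import (
    "fmt"
    "regexp"
    "strings"
)

var rxSpaces = regexp.MustCompile(`\s+`)

func removeSpaces(s string) string {
    if s == "" {
        return s
    }

    lines := make([]string, 0)
    for _, line := range strings.Split(s, "\n") {
        line = rxSpaces.ReplaceAllLiteralString(line, " ")
        line = strings.TrimSpace(line)
        if line != "" {
            lines = append(lines, line)
        }
    }
    return strings.Join(lines, "\n")
}

func main() {
    // Runs of spaces collapse, lines are trimmed, and blank lines disappear.
    fmt.Printf("%q\n", removeSpaces("  hello    world \n\n\t bye "))
    // Prints: "hello world\nbye"
}
```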
|
||||
After the validation, it starts an SQL transaction. First, it queries for the participant’s existence in the conversation.
|
||||
|
||||
```
|
||||
func queryParticipantExistance(ctx context.Context, tx *sql.Tx, userID, conversationID string) (bool, error) {
|
||||
if ctx == nil {
|
||||
ctx = context.Background()
|
||||
}
|
||||
var exists bool
|
||||
if err := tx.QueryRowContext(ctx, `SELECT EXISTS (
|
||||
SELECT 1 FROM participants
|
||||
WHERE user_id = $1 AND conversation_id = $2
|
||||
)`, userID, conversationID).Scan(&exists); err != nil {
|
||||
return false, err
|
||||
}
|
||||
return exists, nil
|
||||
}
|
||||
```
|
||||
|
||||
I extracted it into a function because it’s reused later.
|
||||
|
||||
If the user isn’t a participant of the conversation, we return a `404 Not Found` error.
|
||||
|
||||
Then, it inserts the message and updates the conversation’s `last_message_id`. From this point on, `last_message_id` cannot be `NULL` because we don’t allow removing messages.
|
||||
|
||||
Then it commits the transaction, and we update the participant’s `messages_read_at` in a goroutine.
|
||||
|
||||
```
|
||||
func updateMessagesReadAt(ctx context.Context, userID, conversationID string) error {
|
||||
if ctx == nil {
|
||||
ctx = context.Background()
|
||||
}
|
||||
|
||||
if _, err := db.ExecContext(ctx, `
|
||||
UPDATE participants SET messages_read_at = now()
|
||||
WHERE user_id = $1 AND conversation_id = $2
|
||||
`, userID, conversationID); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
```
|
||||
|
||||
Before responding with the new message, we must notify about it. That belongs to the realtime part we’ll code in the next post, so I left a TODO comment there.
|
||||
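If you want to exercise the endpoint by hand, the request looks roughly like this. Note that the host and port, and the way the access token is passed, depend on how you set things up in the previous posts; both are assumptions here:

```
curl -X POST http://localhost:3000/api/conversations/<conversationID>/messages \
  -H 'Authorization: Bearer <token>' \
  -H 'Content-Type: application/json' \
  -d '{"content":"Hello there!"}'
```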
|
||||
### Get Messages
|
||||
|
||||
This endpoint handles GET requests to `/api/conversations/{conversationID}/messages`. It responds with a JSON array of all the messages in the conversation. It also has the same side effect of updating the participant’s `messages_read_at`.
|
||||
|
||||
```
|
||||
func getMessages(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
authUserID := ctx.Value(keyAuthUserID).(string)
|
||||
conversationID := way.Param(ctx, "conversationID")
|
||||
|
||||
tx, err := db.BeginTx(ctx, &sql.TxOptions{ReadOnly: true})
|
||||
if err != nil {
|
||||
respondError(w, fmt.Errorf("could not begin tx: %v", err))
|
||||
return
|
||||
}
|
||||
defer tx.Rollback()
|
||||
|
||||
isParticipant, err := queryParticipantExistance(ctx, tx, authUserID, conversationID)
|
||||
if err != nil {
|
||||
respondError(w, fmt.Errorf("could not query participant existance: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
if !isParticipant {
|
||||
http.Error(w, "Conversation not found", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
rows, err := tx.QueryContext(ctx, `
|
||||
SELECT
|
||||
id,
|
||||
content,
|
||||
created_at,
|
||||
user_id = $1 AS mine
|
||||
FROM messages
|
||||
WHERE messages.conversation_id = $2
|
||||
ORDER BY messages.created_at DESC
|
||||
`, authUserID, conversationID)
|
||||
if err != nil {
|
||||
respondError(w, fmt.Errorf("could not query messages: %v", err))
|
||||
return
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
messages := make([]Message, 0)
|
||||
for rows.Next() {
|
||||
var message Message
|
||||
if err = rows.Scan(
|
||||
&message.ID,
|
||||
&message.Content,
|
||||
&message.CreatedAt,
|
||||
&message.Mine,
|
||||
); err != nil {
|
||||
respondError(w, fmt.Errorf("could not scan message: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
messages = append(messages, message)
|
||||
}
|
||||
|
||||
if err = rows.Err(); err != nil {
|
||||
respondError(w, fmt.Errorf("could not iterate over messages: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
if err = tx.Commit(); err != nil {
|
||||
respondError(w, fmt.Errorf("could not commit tx to get messages: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
go func() {
|
||||
if err = updateMessagesReadAt(nil, authUserID, conversationID); err != nil {
|
||||
log.Printf("could not update messages read at: %v\n", err)
|
||||
}
|
||||
}()
|
||||
|
||||
respond(w, messages, http.StatusOK)
|
||||
}
|
||||
```
|
||||
|
||||
First, it begins an SQL transaction in read-only mode, checks for the participant’s existence, and queries all the messages. For each message, we compare `user_id` against the current authenticated user ID to know whether the user owns the message (`mine`). Then it commits the transaction, updates the participant’s `messages_read_at` in a goroutine, and responds with the messages.
|
||||
|
||||
### Read Messages
|
||||
|
||||
This endpoint handles POST requests to `/api/conversations/{conversationID}/read_messages`, with no request or response body. In the frontend we’ll make this request each time a new message arrives in the realtime stream.
|
||||
|
||||
```
|
||||
func readMessages(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
authUserID := ctx.Value(keyAuthUserID).(string)
|
||||
conversationID := way.Param(ctx, "conversationID")
|
||||
|
||||
if err := updateMessagesReadAt(ctx, authUserID, conversationID); err != nil {
|
||||
respondError(w, fmt.Errorf("could not update messages read at: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
}
|
||||
```
|
||||
|
||||
It uses the same function we’ve been using to update the participant `messages_read_at`.
|
||||
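Calling it by hand looks something like this (the host/port and bearer-token header are the same assumptions as in the earlier example); on success the server replies with `204 No Content`:

```
curl -X POST http://localhost:3000/api/conversations/<conversationID>/read_messages \
  -H 'Authorization: Bearer <token>'
```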
|
||||
* * *
|
||||
|
||||
That concludes it. Realtime messages are the only part left in the backend. Wait for them in the next post.
|
||||
|
||||
[Source Code][4]
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://nicolasparada.netlify.com/posts/go-messenger-messages/
|
||||
|
||||
作者:[Nicolás Parada][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://nicolasparada.netlify.com/
|
||||
[b]: https://github.com/lujun9972
|
||||
[1]: https://nicolasparada.netlify.com/posts/go-messenger-schema/
|
||||
[2]: https://nicolasparada.netlify.com/posts/go-messenger-oauth/
|
||||
[3]: https://nicolasparada.netlify.com/posts/go-messenger-conversations/
|
||||
[4]: https://github.com/nicolasparada/go-messenger-demo
|
@ -1,175 +0,0 @@
|
||||
[#]: collector: (lujun9972)
|
||||
[#]: translator: ( )
|
||||
[#]: reviewer: ( )
|
||||
[#]: publisher: ( )
|
||||
[#]: url: ( )
|
||||
[#]: subject: (Building a Messenger App: Realtime Messages)
|
||||
[#]: via: (https://nicolasparada.netlify.com/posts/go-messenger-realtime-messages/)
|
||||
[#]: author: (Nicolás Parada https://nicolasparada.netlify.com/)
|
||||
|
||||
Building a Messenger App: Realtime Messages
|
||||
======
|
||||
|
||||
This post is the 5th in a series:
|
||||
|
||||
* [Part 1: Schema][1]
|
||||
* [Part 2: OAuth][2]
|
||||
* [Part 3: Conversations][3]
|
||||
* [Part 4: Messages][4]
|
||||
|
||||
|
||||
|
||||
For realtime messages we’ll use [Server-Sent Events][5]. This is an open connection over which we can stream data. We’ll have an endpoint to which the user subscribes to receive all the messages sent to them.
|
||||
|
||||
### Message Clients
|
||||
|
||||
Before the HTTP part, let’s code a map to hold all the clients listening for messages. Initialize this globally like so:
|
||||
|
||||
```
|
||||
type MessageClient struct {
|
||||
Messages chan Message
|
||||
UserID string
|
||||
}
|
||||
|
||||
var messageClients sync.Map
|
||||
```
|
||||
|
||||
### New Message Created
|
||||
|
||||
Remember that in the [last post][4], when we created the message, we left a “TODO” comment. There we’ll dispatch a goroutine with this function.
|
||||
|
||||
```
|
||||
go messageCreated(message)
|
||||
```
|
||||
|
||||
Insert that line just where we left the comment.
|
||||
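After that change, the tail of `createMessage` from the last post looks like this:

```
message.Content = input.Content
message.UserID = authUserID
message.ConversationID = conversationID
go messageCreated(message)
message.Mine = true

respond(w, message, http.StatusCreated)
```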
|
||||
```
|
||||
func messageCreated(message Message) error {
|
||||
if err := db.QueryRow(`
|
||||
SELECT user_id FROM participants
|
||||
WHERE user_id != $1 and conversation_id = $2
|
||||
`, message.UserID, message.ConversationID).
|
||||
Scan(&message.ReceiverID); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
go broadcastMessage(message)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func broadcastMessage(message Message) {
|
||||
messageClients.Range(func(key, _ interface{}) bool {
|
||||
client := key.(*MessageClient)
|
||||
if client.UserID == message.ReceiverID {
|
||||
client.Messages <- message
|
||||
}
|
||||
return true
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
The function queries for the recipient ID (the other participant’s ID) and sends the message to all of that user’s connected clients.
|
||||
|
||||
### Subscribe to Messages
|
||||
|
||||
Let’s go to the `main()` function and add this route:
|
||||
|
||||
```
|
||||
router.HandleFunc("GET", "/api/messages", guard(subscribeToMessages))
|
||||
```
|
||||
|
||||
This endpoint handles GET requests on `/api/messages`. The request should be an [EventSource][6] connection. It responds with an event stream in which the data is JSON formatted.
|
||||
|
||||
```
|
||||
func subscribeToMessages(w http.ResponseWriter, r *http.Request) {
|
||||
if a := r.Header.Get("Accept"); !strings.Contains(a, "text/event-stream") {
|
||||
http.Error(w, "This endpoint requires an EventSource connection", http.StatusNotAcceptable)
|
||||
return
|
||||
}
|
||||
|
||||
f, ok := w.(http.Flusher)
|
||||
if !ok {
|
||||
respondError(w, errors.New("streaming unsupported"))
|
||||
return
|
||||
}
|
||||
|
||||
ctx := r.Context()
|
||||
authUserID := ctx.Value(keyAuthUserID).(string)
|
||||
|
||||
h := w.Header()
|
||||
h.Set("Cache-Control", "no-cache")
|
||||
h.Set("Connection", "keep-alive")
|
||||
h.Set("Content-Type", "text/event-stream")
|
||||
|
||||
messages := make(chan Message)
|
||||
defer close(messages)
|
||||
|
||||
client := &MessageClient{Messages: messages, UserID: authUserID}
|
||||
messageClients.Store(client, nil)
|
||||
defer messageClients.Delete(client)
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case message := <-messages:
|
||||
if b, err := json.Marshal(message); err != nil {
|
||||
log.Printf("could not marshall message: %v\n", err)
|
||||
fmt.Fprintf(w, "event: error\ndata: %v\n\n", err)
|
||||
} else {
|
||||
fmt.Fprintf(w, "data: %s\n\n", b)
|
||||
}
|
||||
f.Flush()
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
First it checks for the correct request headers and that the server supports streaming. Then it creates a channel of messages, wraps it in a client, and stores the client in the clients map. Each time a new message is created, it will go into this channel, so we can read from it with a `for-select` loop.
|
||||
|
||||
Server-Sent Events uses this format to send data:
|
||||
|
||||
```
|
||||
data: some data here\n\n
|
||||
```
|
||||
|
||||
We are sending it in JSON format:
|
||||
|
||||
```
|
||||
data: {"foo":"bar"}\n\n
|
||||
```
|
||||
|
||||
We are using `fmt.Fprintf()` to write to the response writer in this format, flushing the data in each iteration of the loop.
|
||||
|
||||
This loops until the connection is closed, using the request context. We deferred closing the channel and deleting the client, so when the loop ends, the channel is closed and the client won’t receive any more messages.
|
||||
|
||||
As an aside: the JavaScript API to work with Server-Sent Events (EventSource) doesn’t support setting custom headers 😒 so we cannot set `Authorization: Bearer <token>`. That’s why the `guard()` middleware also reads the token from the URL query string.
|
||||
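On the client side, subscribing then looks something like the snippet below. The name of the query-string parameter is whatever your `guard()` middleware expects; `token` here is an assumption:

```
const eventSource = new EventSource(`/api/messages?token=${accessToken}`)
eventSource.onmessage = ev => {
    const message = JSON.parse(ev.data)
    console.log('new message:', message)
}
```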
|
||||
* * *
|
||||
|
||||
That concludes the realtime messages. I’d like to say that’s everything in the backend, but to code the frontend I’ll add one more endpoint to log in; a login that will be just for development.
|
||||
|
||||
[Source Code][7]
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://nicolasparada.netlify.com/posts/go-messenger-realtime-messages/
|
||||
|
||||
作者:[Nicolás Parada][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://nicolasparada.netlify.com/
|
||||
[b]: https://github.com/lujun9972
|
||||
[1]: https://nicolasparada.netlify.com/posts/go-messenger-schema/
|
||||
[2]: https://nicolasparada.netlify.com/posts/go-messenger-oauth/
|
||||
[3]: https://nicolasparada.netlify.com/posts/go-messenger-conversations/
|
||||
[4]: https://nicolasparada.netlify.com/posts/go-messenger-messages/
|
||||
[5]: https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events
|
||||
[6]: https://developer.mozilla.org/en-US/docs/Web/API/EventSource
|
||||
[7]: https://github.com/nicolasparada/go-messenger-demo
|
@ -1,265 +0,0 @@
|
||||
[#]: collector: (lujun9972)
|
||||
[#]: translator: ( )
|
||||
[#]: reviewer: ( )
|
||||
[#]: publisher: ( )
|
||||
[#]: url: ( )
|
||||
[#]: subject: (Using Yarn on Ubuntu and Other Linux Distributions)
|
||||
[#]: via: (https://itsfoss.com/install-yarn-ubuntu)
|
||||
[#]: author: (Abhishek Prakash https://itsfoss.com/author/abhishek/)
|
||||
|
||||
Using Yarn on Ubuntu and Other Linux Distributions
|
||||
======
|
||||
|
||||
**This quick tutorial shows you the official way of installing Yarn package manager on Ubuntu and Debian Linux. You’ll also learn some basic Yarn commands and the steps to remove Yarn completely.**
|
||||
|
||||
[Yarn][1] is an open source JavaScript package manager developed by Facebook. It is an alternative to (or should I say an improvement on) the popular npm package manager. [Facebook developers’ team][2] created Yarn to overcome the shortcomings of [npm][3]. Facebook claims that Yarn is faster, more reliable, and more secure than npm.
|
||||
|
||||
Like npm, Yarn provides you a way to automate the process of installing, updating, configuring, and removing packages retrieved from a global registry.
|
||||
|
||||
The advantage of Yarn is speed: it caches every package it downloads, so it never needs to download the same package again, and it parallelizes operations to maximize resource utilization. Yarn also uses [checksums to verify the integrity][4] of every installed package before its code is executed, and it guarantees that an install that worked on one system will work exactly the same way on any other system.
|
||||
|
||||
If you are [using nodejs on Ubuntu][5], you probably already have npm installed on your system. In that case, you can use npm to install Yarn globally in the following manner:
|
||||
|
||||
```
|
||||
sudo npm install yarn -g
|
||||
```
|
||||
|
||||
However, I would recommend using the official way to install Yarn on Ubuntu/Debian.
|
||||
|
||||
### Installing Yarn on Ubuntu and Debian [The Official Way]
|
||||
|
||||
![Yarn JS][6]
|
||||
|
||||
The instructions mentioned here should be applicable to all versions of Ubuntu, such as Ubuntu 18.04, 16.04 etc. The same set of instructions is also valid for Debian and other Debian-based distributions.
|
||||
|
||||
Since the tutorial uses Curl to add the GPG key of the Yarn project, it would be a good idea to verify whether you already have Curl installed or not.
|
||||
|
||||
```
|
||||
sudo apt install curl
|
||||
```
|
||||
|
||||
The above command will install Curl if it wasn’t installed already. Now that you have Curl, you can use it to add the GPG key of the Yarn project in the following fashion:
|
||||
|
||||
```
|
||||
curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | sudo apt-key add -
|
||||
```
|
||||
|
||||
After that, add the repository to your sources list so that you can easily upgrade the Yarn package in future with the rest of the system updates:
|
||||
|
||||
```
|
||||
sudo sh -c 'echo "deb https://dl.yarnpkg.com/debian/ stable main" >> /etc/apt/sources.list.d/yarn.list'
|
||||
```
|
||||
|
||||
You are set to go now. [Update Ubuntu][7] or Debian system to refresh the list of available packages and then install yarn:
|
||||
|
||||
```
|
||||
sudo apt update
|
||||
sudo apt install yarn
|
||||
```
|
||||
|
||||
This will install Yarn along with nodejs. Once the process completes, verify that Yarn has been installed successfully. You can do that by checking the Yarn version.
|
||||
|
||||
```
|
||||
yarn --version
|
||||
```
|
||||
|
||||
For me, it showed an output like this:
|
||||
|
||||
```
|
||||
yarn --version
|
||||
1.12.3
|
||||
```
|
||||
|
||||
This means that I have Yarn version 1.12.3 installed on my system.
|
||||
|
||||
### Using Yarn
|
||||
|
||||
I presume that you have some basic understanding of JavaScript programming and of how dependencies work. I am not going to go into detail here. I’ll show you some of the basic Yarn commands that will help you get started with it.
|
||||
|
||||
#### Creating a new project with Yarn
|
||||
|
||||
Like npm, Yarn also works with a package.json file. This is where you add your dependencies. All the packages of the dependencies are cached in the node_modules directory in the root directory of your project.
|
||||
|
||||
In the root directory of your project, run the following command to generate a fresh package.json file:
|
||||
|
||||
It will ask you a number of questions. You can skip the questions or go with the defaults by pressing enter.
|
||||
|
||||
```
|
||||
yarn init
|
||||
yarn init v1.12.3
|
||||
question name (test_yarn): test_yarn_proect
|
||||
question version (1.0.0): 0.1
|
||||
question description: Test Yarn
|
||||
question entry point (index.js):
|
||||
question repository url:
|
||||
question author: abhishek
|
||||
question license (MIT):
|
||||
question private:
|
||||
success Saved package.json
|
||||
Done in 82.42s.
|
||||
```
|
||||
|
||||
With this, you get a package.json file of this sort:
|
||||
|
||||
```
|
||||
{
|
||||
"name": "test_yarn_proect",
|
||||
"version": "0.1",
|
||||
"description": "Test Yarn",
|
||||
"main": "index.js",
|
||||
"author": "abhishek",
|
||||
"license": "MIT"
|
||||
}
|
||||
```
|
||||
|
||||
Now that you have the package.json, you can either manually edit it to add or remove package dependencies or use Yarn commands (preferred).
|
||||
|
||||
#### Adding dependencies with Yarn
|
||||
|
||||
You can add a dependency on a certain package in the following fashion:
|
||||
|
||||
```
|
||||
yarn add <package_name>
|
||||
```
|
||||
|
||||
For example, if you want to use [Lodash][8] in your project, you can add it using Yarn like this:
|
||||
|
||||
```
|
||||
yarn add lodash
|
||||
yarn add v1.12.3
|
||||
info No lockfile found.
|
||||
[1/4] Resolving packages…
|
||||
[2/4] Fetching packages…
|
||||
[3/4] Linking dependencies…
|
||||
[4/4] Building fresh packages…
|
||||
success Saved lockfile.
|
||||
success Saved 1 new dependency.
|
||||
info Direct dependencies
|
||||
└─ lodash@4.17.11
info All dependencies
└─ lodash@4.17.11
|
||||
Done in 2.67s.
|
||||
```
|
||||
|
||||
And you can see that this dependency has been added automatically in the package.json file:
|
||||
|
||||
```
|
||||
{
|
||||
"name": "test_yarn_proect",
|
||||
"version": "0.1",
|
||||
"description": "Test Yarn",
|
||||
"main": "index.js",
|
||||
"author": "abhishek",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"lodash": "^4.17.11"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
By default, Yarn will add the latest version of a package to the dependencies. If you want to use a specific version, you may specify it while adding, as shown below.
|
||||
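For example, to pin Lodash to the exact version shown above:

```
yarn add lodash@4.17.11
```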
|
||||
As always, you can also update the package.json file manually.
|
||||
|
||||
#### Upgrading dependencies with Yarn
|
||||
|
||||
You can upgrade a particular dependency to its latest version with the following command:
|
||||
|
||||
```
|
||||
yarn upgrade <package_name>
|
||||
```
|
||||
|
||||
It will see if the package in question has a newer version and will update it accordingly.
|
||||
|
||||
You can also change the version of an already added dependency in the following manner:
|
||||
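```
yarn upgrade <package_name>@<version_or_tag>
```

For instance, `yarn upgrade lodash@4.17.0` would move Lodash to that exact release.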
|
||||
You can also upgrade all the dependencies of your project to their latest version with one single command:
|
||||
|
||||
```
|
||||
yarn upgrade
|
||||
```
|
||||
|
||||
It will check the versions of all the dependencies and will update them if there are any newer versions.
|
||||
|
||||
#### Removing dependencies with Yarn
|
||||
|
||||
You can remove a package from the dependencies of your project in this way:
|
||||
|
||||
```
|
||||
yarn remove <package_name>
|
||||
```
|
||||
|
||||
#### Install all project dependencies
|
||||
|
||||
If you made any changes to the package.json file, you should run either
|
||||
|
||||
```
|
||||
yarn
|
||||
```
|
||||
|
||||
or
|
||||
|
||||
```
|
||||
yarn install
|
||||
```
|
||||
|
||||
to install all the dependencies at once.
|
||||
|
||||
### How to remove Yarn from Ubuntu or Debian
|
||||
|
||||
I’ll complete this tutorial by mentioning the steps to remove Yarn from your system if you used the above steps to install it. If you ever realize that you don’t need Yarn anymore, you’ll be able to remove it.
|
||||
|
||||
Use the following command to remove Yarn and its dependencies.
|
||||
|
||||
```
|
||||
sudo apt purge yarn
|
||||
```
|
||||
|
||||
You should also remove the Yarn repository from the repository list:
|
||||
|
||||
```
|
||||
sudo rm /etc/apt/sources.list.d/yarn.list
|
||||
```
|
||||
|
||||
The optional next step is to remove the GPG key you had added to the trusted keys. But for that, you need to know the key. You can get that using the apt-key command:
|
||||
|
||||
    Warning: apt-key output should not be parsed (stdout is not a terminal)
    pub   rsa4096 2016-10-05 [SC]
          72EC F46A 56B4 AD39 C907  BBB7 1646 B01B 86E5 0310
    uid           [ unknown] Yarn Packaging <yarn@dan.cx>
    sub   rsa4096 2016-10-05 [E]
    sub   rsa4096 2019-01-02 [S] [expires: 2020-02-02]
|
||||
|
||||
The key here is the last 8 characters of the GPG key’s fingerprint, on the line starting with `pub`.
|
||||
|
||||
So, in my case, the key is 86E50310 and I’ll remove it using this command:
|
||||
|
||||
```
|
||||
sudo apt-key del 86E50310
|
||||
```
|
||||
|
||||
You’ll see an OK in the output, and the GPG key of the Yarn package will be removed from the list of GPG keys your system trusts.
|
||||
|
||||
I hope this tutorial helped you to install Yarn on Ubuntu, Debian, Linux Mint, elementary OS etc. I provided some basic Yarn commands to get you started along with complete steps to remove Yarn from your system.
|
||||
|
||||
I hope you liked this tutorial and if you have any questions or suggestions, please feel free to leave a comment below.
|
||||
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://itsfoss.com/install-yarn-ubuntu
|
||||
|
||||
作者:[Abhishek Prakash][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://itsfoss.com/author/abhishek/
|
||||
[b]: https://github.com/lujun9972
|
||||
[1]: https://yarnpkg.com/lang/en/
|
||||
[2]: https://code.fb.com/
|
||||
[3]: https://www.npmjs.com/
|
||||
[4]: https://itsfoss.com/checksum-tools-guide-linux/
|
||||
[5]: https://itsfoss.com/install-nodejs-ubuntu/
|
||||
[6]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2019/01/yarn-js-ubuntu-debian.jpeg?resize=800%2C450&ssl=1
|
||||
[7]: https://itsfoss.com/update-ubuntu/
|
||||
[8]: https://lodash.com/
|
@ -1,147 +0,0 @@
|
||||
[#]: collector: (lujun9972)
|
||||
[#]: translator: (HankChow)
|
||||
[#]: reviewer: ( )
|
||||
[#]: publisher: ( )
|
||||
[#]: url: ( )
|
||||
[#]: subject: (Using Bash traps in your scripts)
|
||||
[#]: via: (https://opensource.com/article/20/6/bash-trap)
|
||||
[#]: author: (Seth Kenlon https://opensource.com/users/seth)
|
||||
|
||||
Using Bash traps in your scripts
|
||||
======
|
||||
Traps help your scripts end cleanly, whether they run successfully or not.
|
||||
![Hands programming][1]
|
||||
|
||||
It's easy to detect when a shell script starts, but it's not always easy to know when it stops. A script might end normally, just as its author intends it to end, but it could also fail due to an unexpected fatal error. Sometimes it's beneficial to preserve the remnants of whatever was in progress when a script failed, and other times it's inconvenient. Either way, detecting the end of a script and reacting to it in some pre-calculated manner is why the [Bash][2] `trap` directive exists.
|
||||
|
||||
### Responding to failure
|
||||
|
||||
Here's an example of how one failure in a script can lead to future failures. Say you have written a program that creates a temporary directory in `/tmp` so that it can unarchive and process files before bundling them back together in a different format:
|
||||
|
||||
|
||||
```
|
||||
#!/usr/bin/env bash
|
||||
CWD=`pwd`
|
||||
TMP=${TMP:-/tmp/tmpdir}
|
||||
|
||||
## create tmp dir
|
||||
mkdir $TMP
|
||||
|
||||
## extract files to tmp
|
||||
tar xf "${1}" --directory $TMP
|
||||
|
||||
## move to tmpdir and run commands
|
||||
pushd $TMP
|
||||
for IMG in *.jpg; do
|
||||
mogrify -verbose -flip -flop $IMG
|
||||
done
|
||||
tar --create --file "${1%.*}".tar *.jpg
|
||||
|
||||
## move back to origin
|
||||
popd
|
||||
|
||||
## bundle with bzip2
|
||||
bzip2 --compress $TMP/"${1%.*}".tar \
|
||||
--stdout > "${1%.*}".tbz
|
||||
|
||||
## clean up
|
||||
/usr/bin/rm -r /tmp/tmpdir
|
||||
```
|
||||
|
||||
Most of the time, the script works as expected. However, if you accidentally run it on an archive filled with PNG files instead of the expected JPEG files, it fails halfway through. One failure leads to another, and eventually, the script exits without reaching its final directive to remove the temporary directory. As long as you manually remove the directory, you can recover quickly, but if you aren't around to do that, then the next time the script runs, it has to deal with an existing temporary directory full of unpredictable leftover files.
|
||||
|
||||
One way to combat this is to reverse and double-up on the logic by adding a precautionary removal to the start of the script. While valid, that relies on brute force instead of structure. A more elegant solution is `trap`.
|
||||
|
||||
### Catching signals with trap
|
||||
|
||||
The `trap` keyword catches _signals_ that may happen during execution. You've used one of these signals if you've ever used the `kill` or `killall` commands, which call `SIGTERM` by default. There are many other signals that shells respond to, and you can see most of them with `trap -l` (as in "list"):
|
||||
|
||||
|
||||
```
|
||||
$ trap -l
|
||||
1) SIGHUP 2) SIGINT 3) SIGQUIT 4) SIGILL 5) SIGTRAP
|
||||
6) SIGABRT 7) SIGBUS 8) SIGFPE 9) SIGKILL 10) SIGUSR1
|
||||
11) SIGSEGV 12) SIGUSR2 13) SIGPIPE 14) SIGALRM 15) SIGTERM
|
||||
16) SIGSTKFLT 17) SIGCHLD 18) SIGCONT 19) SIGSTOP 20) SIGTSTP
|
||||
21) SIGTTIN 22) SIGTTOU 23) SIGURG 24) SIGXCPU 25) SIGXFSZ
|
||||
26) SIGVTALRM 27) SIGPROF 28) SIGWINCH 29) SIGIO 30) SIGPWR
|
||||
31) SIGSYS 34) SIGRTMIN 35) SIGRTMIN+1 36) SIGRTMIN+2 37) SIGRTMIN+3
|
||||
38) SIGRTMIN+4 39) SIGRTMIN+5 40) SIGRTMIN+6 41) SIGRTMIN+7 42) SIGRTMIN+8
|
||||
43) SIGRTMIN+9 44) SIGRTMIN+10 45) SIGRTMIN+11 46) SIGRTMIN+12 47) SIGRTMIN+13
|
||||
48) SIGRTMIN+14 49) SIGRTMIN+15 50) SIGRTMAX-14 51) SIGRTMAX-13 52) SIGRTMAX-12
|
||||
53) SIGRTMAX-11 54) SIGRTMAX-10 55) SIGRTMAX-9 56) SIGRTMAX-8 57) SIGRTMAX-7
|
||||
58) SIGRTMAX-6 59) SIGRTMAX-5 60) SIGRTMAX-4 61) SIGRTMAX-3 62) SIGRTMAX-2
|
||||
63) SIGRTMAX-1 64) SIGRTMAX
|
||||
```
|
||||
|
||||
Any of these signals may be anticipated with `trap`. In addition to these, `trap` recognizes:
|
||||
|
||||
* `EXIT`: Occurs when a process exits
|
||||
* `ERR`: Occurs when a process exits with a non-zero status
|
||||
  * `DEBUG`: Occurs before every command, which makes it handy for implementing a debug mode
|
||||
|
||||
|
||||
|
||||
To set a trap in Bash, use `trap` followed by a list of commands you want to be executed, followed by a list of signals to trigger it.
|
||||
|
||||
For instance, this trap detects a `SIGINT`, the signal sent when a user presses **Ctrl+C** while a process is running:
|
||||
|
||||
|
||||
```
|
||||
trap "{ echo 'Terminated with Ctrl+C'; }" SIGINT
|
||||
```
|
||||
|
||||
The example script with temporary directory problems can be fixed with a trap detecting `SIGINT`, errors, and successful exits:
|
||||
|
||||
|
||||
```
|
||||
#!/usr/bin/env bash
|
||||
CWD=`pwd`
|
||||
TMP=${TMP:-/tmp/tmpdir}
|
||||
|
||||
trap \
|
||||
"{ /usr/bin/rm -r $TMP ; exit 255; }" \
|
||||
SIGINT SIGTERM ERR EXIT
|
||||
|
||||
## create tmp dir
|
||||
mkdir $TMP
|
||||
tar xf "${1}" --directory $TMP
|
||||
|
||||
## move to tmp and run commands
|
||||
pushd $TMP
|
||||
for IMG in *.jpg; do
|
||||
mogrify -verbose -flip -flop $IMG
|
||||
done
|
||||
tar --create --file "${1%.*}".tar *.jpg
|
||||
|
||||
## move back to origin
|
||||
popd
|
||||
|
||||
## zip tar
|
||||
bzip2 --compress $TMP/"${1%.*}".tar \
|
||||
--stdout > "${1%.*}".tbz
|
||||
```
|
||||
|
||||
For complex actions, you can simplify `trap` statements with [Bash functions][3].
|
||||
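As a minimal sketch, the trap from the example above could be refactored into a named `cleanup` function:

```
#!/usr/bin/env bash
TMP=${TMP:-/tmp/tmpdir}

## remove the temporary directory, then exit with an error code
cleanup() {
    /usr/bin/rm -r "$TMP"
    exit 255
}

trap cleanup SIGINT SIGTERM ERR EXIT
```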
|
||||
### Traps in Bash
|
||||
|
||||
Traps are useful to ensure that your scripts end cleanly, whether they run successfully or not. It's never safe to rely completely on automated garbage collection, so this is a good habit to get into in general. Try using them in your scripts, and see what they can do!
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://opensource.com/article/20/6/bash-trap
|
||||
|
||||
作者:[Seth Kenlon][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://opensource.com/users/seth
|
||||
[b]: https://github.com/lujun9972
|
||||
[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/programming-code-keyboard-laptop.png?itok=pGfEfu2S (Hands programming)
|
||||
[2]: https://opensource.com/resources/what-bash
|
||||
[3]: https://opensource.com/article/20/6/how-write-functions-bash
|
@ -1,150 +0,0 @@
|
||||
[#]: collector: (lujun9972)
|
||||
[#]: translator: ( )
|
||||
[#]: reviewer: ( )
|
||||
[#]: publisher: ( )
|
||||
[#]: url: ( )
|
||||
[#]: subject: (Automate testing for website errors with this Python tool)
|
||||
[#]: via: (https://opensource.com/article/20/7/seodeploy)
|
||||
[#]: author: (JR Oakes https://opensource.com/users/jroakes)
|
||||
|
||||
Automate testing for website errors with this Python tool
|
||||
======
|
||||
SEODeploy helps identify SEO problems in a website before they're deployed.
|
||||
![Computer screen with files or windows open][1]
|
||||
|
||||
As a technical search-engine optimizer, I'm often called in to coordinate website migrations, new site launches, analytics implementations, and other areas that affect sites' online visibility and measurement to limit risk. Many companies generate a substantial portion of monthly recurring revenue from users finding their products and services through search engines. Although search engines have gotten good at handling poorly formatted code, things can still go wrong in development that adversely affects how search engines index and display pages for users.
|
||||
|
||||
I've been part of manual processes attempting to mitigate this risk by reviewing staged changes for search engine optimization (SEO)-breaking problems. My team's findings determine whether the project gets the green light (or not) to launch. But this process is often inefficient, can be applied to only a limited number of pages, and has a high likelihood of human error.
|
||||
|
||||
The industry has long sought a usable and trustworthy way to automate this process while still giving developers and search-engine optimizers a meaningful say in what must be tested. This is important because these groups often have competing priorities in development sprints, with search-engine optimizers pushing for changes and developers needing to control regressions and unexpected experiences.
|
||||
|
||||
### Common SEO-breaking problems
|
||||
|
||||
Many websites I work with have tens of thousands of pages. Some have millions. It's daunting to understand how a development change might affect so many pages. In the world of SEO, you can see large, sitewide changes in how Google and other search engines show your pages from very minor and seemingly innocuous changes. It's imperative to have processes in place that catch these types of errors before they make it to production.
|
||||
|
||||
Below are a few examples of problems that I have seen in the last year.
|
||||
|
||||
#### Accidental noindex
|
||||
|
||||
A proprietary third-party SEO monitoring tool we use, [ContentKing][2], found this problem immediately after launch to production. This is a sneaky error because it's not visible in the HTML; rather, it is hidden from view in the server response header, yet it can very quickly cause the loss of your search visibility.
|
||||
|
||||
|
||||
```
|
||||
HTTP/1.1 200 OK
|
||||
Date: Tue May 25 2010 21:12:42 GMT
|
||||
[...]
|
||||
X-Robots-Tag: noindex
|
||||
[...]
|
||||
```
|
||||
|
||||
#### Canonical lower-casing
|
||||
|
||||
A change to production mistakenly lower-cased an entire website's [canonical link elements][3]. The change affected nearly 30,000 URLs. Before the update, the URLs were in title case (for instance, `/URL-Path/`). This is a problem because the canonical link element is a hint for Google about a webpage's true canonical URL version. This change caused many URLs to be removed from Google's index and re-indexed at the new uncased location (`/url-path/`). The impact was a loss of 10–15% of traffic and corruption of page metric data over the next few weeks.
|
||||
|
||||
#### Origin server regression
|
||||
|
||||
One website with a complex and novel implementation of React had a mysterious issue with regression of `origin.domain.com` URLs displaying for its origin content-delivery network server. It would intermittently output the origin host instead of the edge host in the site metadata (such as the canonical link element, URLs, and Open Graph links). The problem was found in the raw HTML and the rendered HTML. This impacted search visibility and the quality of shares on social media.
|
||||
|
||||
### Introducing SEODeploy
|
||||
|
||||
SEOs often use diff-testing tools to look at changes between sets of rendered and raw HTML. Diff testing is ideal because it allows a level of certainty that the eye does not. You want to look for differences in how Google renders your page, not how users do. You want to look at what the raw HTML looks like, not the rendered HTML, as these are two separate processing steps for Google.
|
||||
|
||||
This led my colleagues and me to create [SEODeploy][4], a "Python library for automating SEO testing in deployment pipelines." Our mission was:
|
||||
|
||||
> To develop a tool that allowed developers to provide a few to many URL paths, and which allowed those paths to be diff tested on production and staging hosts, looking specifically for unanticipated regressions in SEO-related data.
|
||||
|
||||
SEODeploy's mechanics are simple: Provide a text file containing a newline-delimited set of paths, and the tool runs a series of modules on those paths, comparing production and staging URLs and reporting on any errors or messages (changes) it finds.
|
||||
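For example, the paths file is plain text with one path per line (these sample paths are hypothetical):

```
/
/about/
/blog/some-post/
/products/some-product/
```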
|
||||
![SEODeploy overview][5]
|
||||
|
||||
(SEODeploy, [CC BY-SA 4.0][6])
|
||||
|
||||
The configuration for the tool and modules is just one YAML file, which can be customized based on anticipated changes.
|
||||
|
||||
![SEODeploy output][7]
|
||||
|
||||
(SEODeploy, [CC BY-SA 4.0][6])
|
||||
|
||||
The initial release includes the following core features and concepts:
|
||||
|
||||
1. **Open source**: We believe deeply in sharing code that can be criticized, improved, extended, shared, and reused.
|
||||
  2. **Modular**: There are many different stacks and edge cases in development for the web. The SEODeploy tool is conceptually simple, so modularity is used to control the complexity. We provide two built-in modules and an example module that outlines the basic structure.
|
||||
3. **URL sampling:** Since it is not always feasible or efficient to test every URL, we included a method to randomly sample XML sitemap URLs or URLs monitored by ContentKing.
|
||||
  4. **Flexible diff checking**: Web data is messy. The diff checking functionality tries to do a good job of converting this data to messages (changes) no matter the data type it's checking, including text, arrays (lists), JSON objects (dictionaries), integers, floats, etc.
|
||||
5. **Automated**: A simple command-line interface is used to call the sampling and execution methods to make it easy to incorporate SEODeploy into existing pipelines.
|
||||
|
||||
|
||||
|
||||
### Modules
|
||||
|
||||
While the core functionality is simple, by design, modules are where SEODeploy gains features and complexity. The modules handle the harder task of getting, cleaning, and organizing the data collected from staging and production servers for comparison.
|
||||
|
||||
#### Headless module
|
||||
|
||||
The tool's [Headless module][8] is a nod to anyone who doesn't want to have to pay for a third-party service to get value from the library. It runs any version of Chrome and extracts rendered data from each comparison set of URLs.
|
||||
|
||||
The headless module extracts the following core data for comparison:
|
||||
|
||||
1. SEO content, e.g., titles, headings, links, etc.
|
||||
2. Performance data from the Chrome Timings and Chrome DevTools Protocol (CDP) Performance APIs
|
||||
3. Calculated performance metrics including the Cumulative Layout Shift (CLS), a recently popular [Web Vital][9] released by Google
|
||||
4. Coverage data for CSS and JavaScript from the CDP Coverage API
|
||||
|
||||
|
||||
|
||||
The module includes functionality to handle authentication for staging, network speed presets (for better normalization of comparisons), as well as a method for handling staging-host replacement in staging comparative data. It should be fairly easy for developers to extend this module to collect any other data they want to compare per page.
|
||||
|
||||
#### Other modules
|
||||
|
||||
We created an [example module][10] for any developer who wants to use the framework to create a custom extraction module. Another module integrates with ContentKing. Note that the ContentKing module requires a subscription to ContentKing, while Headless can be run on any machine capable of running Chrome.
|
||||
|
||||
### Problems to solve
|
||||
|
||||
We have [plans][11] to extend and enhance the library but are looking for [feedback][12] from developers on what works and what doesn't meet their needs. A few of the issues and items on our list are:
|
||||
|
||||
1. Dynamic timestamps create false positives for some comparison elements, especially schema.
|
||||
2. Saving test data to a database to enable reviewing historical deployment processes and testing changes against the last staging push.
|
||||
3. Enhancing the scale and speed of the extraction with a cloud infrastructure for rendering.
|
||||
4. Increasing testing coverage from the current 46% to 99%-plus.
|
||||
  5. Currently, we rely on [Poetry][13] for dependency management, but we want to publish a PyPI library so it can be installed easily with `pip install`.
|
||||
6. We are looking for more issues and field data on usage.
|
||||
|
||||
|
||||
|
||||
### Get started
|
||||
|
||||
The project is [on GitHub][4], and we have [documentation][14] for most features.
|
||||
|
||||
We hope that you will clone SEODeploy and give it a go. Our goal is to support the open source community with a tool developed by technical search-engine optimizers and validated by developers and engineers. We've seen the time it takes to validate complex staging issues and the business impact minor changes can have across many URLs. We think this library can save time and de-risk the deployment process for development teams.
|
||||
|
||||
If you have questions, issues, or want to contribute, please see the project's [About page][15].
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://opensource.com/article/20/7/seodeploy
|
||||
|
||||
作者:[JR Oakes][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://opensource.com/users/jroakes
|
||||
[b]: https://github.com/lujun9972
|
||||
[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/browser_screen_windows_files.png?itok=kLTeQUbY (Computer screen with files or windows open)
|
||||
[2]: https://www.contentkingapp.com/
|
||||
[3]: https://en.wikipedia.org/wiki/Canonical_link_element
|
||||
[4]: https://github.com/locomotive-agency/SEODeploy
|
||||
[5]: https://opensource.com/sites/default/files/uploads/seodeploy.png (SEODeploy overview)
|
||||
[6]: https://creativecommons.org/licenses/by-sa/4.0/
|
||||
[7]: https://opensource.com/sites/default/files/uploads/seodeploy_output.png (SEODeploy output)
|
||||
[8]: https://locomotive-agency.github.io/SEODeploy/modules/headless/
|
||||
[9]: https://web.dev/vitals/
|
||||
[10]: https://locomotive-agency.github.io/SEODeploy/modules/creating/
|
||||
[11]: https://locomotive-agency.github.io/SEODeploy/todo/
|
||||
[12]: https://locomotive-agency.github.io/SEODeploy/about/#contact
|
||||
[13]: https://python-poetry.org/
|
||||
[14]: https://locomotive-agency.github.io/SEODeploy/
|
||||
[15]: https://locomotive-agency.github.io/SEODeploy/about/
|
@ -1,284 +0,0 @@
|
||||
[#]: collector: (lujun9972)
|
||||
[#]: translator: (robsean)
|
||||
[#]: reviewer: ( )
|
||||
[#]: publisher: ( )
|
||||
[#]: url: ( )
|
||||
[#]: subject: (Learn the basics of programming with C)
|
||||
[#]: via: (https://opensource.com/article/20/8/c-programming-cheat-sheet)
|
||||
[#]: author: (Seth Kenlon https://opensource.com/users/seth)
|
||||
|
||||
Learn the basics of programming with C
|
||||
======
|
||||
Our new cheat sheet puts all the essentials of C syntax on an easy-to-read handout.
|
||||
![Cheat Sheet cover image][1]
|
||||
|
||||
In 1972, Dennis Ritchie was at Bell Labs, where a few years earlier, he and his fellow team members invented Unix. After creating an enduring OS (still in use today), he needed a good way to program those Unix computers so that they could perform new tasks. It seems strange now, but at the time, there were relatively few programming languages; Fortran, Lisp, [Algol][2], and B were popular but insufficient for what the Bell Labs researchers wanted to do. Demonstrating a trait that would become known as a primary characteristic of programmers, Dennis Ritchie created his own solution. He called it C, and nearly 50 years later, it's still in widespread use.
|
||||
|
||||
### Why you should learn C
|
||||
|
||||
Today, there are many languages that provide programmers more features than C. The most obvious one is C++, a rather blatantly named language that built upon C to create a nice object-oriented language. There are many others, though, and there's a good reason they exist. Computers are good at consistent repetition, so anything predictable enough to be built into a language means less work for programmers. Why spend two lines recasting an `int` to a `long` in C when one line of C++ (`long x = long(n);`) can do the same?
|
||||
|
||||
And yet C is still useful today.
|
||||
|
||||
First of all, C is a fairly minimal and straightforward language. There aren't many advanced concepts beyond the basics of programming, largely because C is literally one of the foundations of modern programming languages. For instance, C features arrays, but it doesn't offer a dictionary (unless you write it yourself). When you learn C, you learn the building blocks of programming that can help you recognize the improved and elaborate designs of recent languages.
|
||||
|
||||
Because C is a minimal language, your applications are likely to get a boost in performance that they wouldn't see with many other languages. It's easy to get caught up in the race to the bottom when you're thinking about how fast your code executes, so it's important to ask whether you _need_ more speed for a specific task. And with C, you have less to obsess over in each line of code, compared to, say, Python or Java. C is fast. There's a good reason the Linux kernel is written in C.
|
||||
|
||||
Finally, C is easy to get started with, especially if you're running Linux. You can already run C code because Linux systems include the GNU C library (`glibc`). To write and build it, all you need to do is install a compiler, open a text editor, and start coding.
|
||||
|
||||
### Getting started with C
|
||||
|
||||
If you're running Linux, you can install a C compiler using your package manager. On Fedora or RHEL:
|
||||
|
||||
|
||||
```
|
||||
$ sudo dnf install gcc
|
||||
```
|
||||
|
||||
On Debian and similar:
|
||||
|
||||
|
||||
```
|
||||
$ sudo apt install build-essential
|
||||
```
|
||||
|
||||
On macOS, you can [install Homebrew][3] and use it to install [GCC][4]:
|
||||
|
||||
|
||||
```
|
||||
$ brew install gcc
|
||||
```
|
||||
|
||||
On Windows, you can install a minimal set of GNU utilities, GCC included, with [MinGW][5].
|
||||
|
||||
Verify you've installed GCC on Linux or macOS:
|
||||
|
||||
|
||||
```
|
||||
$ gcc --version
|
||||
gcc (GCC) x.y.z
|
||||
Copyright (C) 20XX Free Software Foundation, Inc.
|
||||
```
|
||||
|
||||
On Windows, provide the full path to the EXE file:
|
||||
|
||||
|
||||
```
|
||||
PS> C:\MinGW\bin\gcc.exe --version
|
||||
gcc.exe (MinGW.org GCC Build-2) x.y.z
|
||||
Copyright (C) 20XX Free Software Foundation, Inc.
|
||||
```
|
||||
|
||||
### C syntax
|
||||
|
||||
C isn't a scripting language. It's compiled, meaning that it gets processed by a C compiler to produce a binary executable file. This is different from a scripting language like [Bash][6] or a hybrid language like [Python][7].
|
||||
|
||||
In C, you create _functions_ to carry out your desired task. A function named `main` is executed by default.
|
||||
|
||||
Here's a simple "hello world" program written in C:
|
||||
|
||||
|
||||
```
|
||||
#include <stdio.h>
|
||||
|
||||
int main() {
    printf("Hello world");
    return 0;
}
|
||||
```
|
||||
|
||||
The first line includes a _header file_, essentially free and very low-level C code that you can reuse in your own programs, called `stdio.h` (standard input and output). A function called `main` is created and populated with a rudimentary print statement. Save this text to a file called `hello.c`, then compile it with GCC:
|
||||
|
||||
|
||||
```
|
||||
$ gcc hello.c --output hello
|
||||
```
|
||||
|
||||
Try running your C program:
|
||||
|
||||
|
||||
```
|
||||
$ ./hello
|
||||
Hello world$
|
||||
```
|
||||
|
||||
#### Return values
|
||||
|
||||
It's part of the Unix philosophy that a function "returns" something to you after it executes: nothing upon success and something else (an error message, for example) upon failure. These return codes are often represented with numbers (integers, to be precise): 0 represents nothing, and any number higher than 0 represents some non-successful state.
|
||||
|
||||
There's a good reason Unix and Linux are designed to expect silence upon success. It's so that you can always plan for success by assuming no errors nor warnings will get in your way when executing a series of commands. Similarly, functions in C expect no errors by design.
|
||||
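In the shell, you can inspect the return code of the last command through the special variable `$?`. For example (recall that `hello` prints no trailing newline, so the prompt lands on the same line):

```
$ ./hello
Hello world$ echo $?
0
```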
|
||||
You can see this for yourself with one small modification to make your program appear to fail:
|
||||
|
||||
|
||||
```
|
||||
#include <stdio.h>

int main() {
    printf("Hello world");
    return 1;
}
|
||||
```
|
||||
|
||||
Compile it:
|
||||
|
||||
|
||||
```
|
||||
$ gcc hello.c --output failer
|
||||
```
|
||||
|
||||
Now run it using a built-in Linux test for success. The `&&` operator executes the second half of a command only upon success. For example:
|
||||
|
||||
|
||||
```
|
||||
$ echo "success" && echo "it worked"
|
||||
success
|
||||
it worked
|
||||
```
|
||||
|
||||
The `||` test executes the second half of a command upon _failure_.
|
||||
|
||||
|
||||
```
|
||||
$ ls blah || echo "it did not work"
|
||||
ls: cannot access 'blah': No such file or directory
|
||||
it did not work
|
||||
```
|
||||
|
||||
Now try your program, which does _not_ return 0 upon success; it returns 1 instead:
|
||||
|
||||
|
||||
```
|
||||
$ ./failer && echo "it worked"
|
||||
Hello world$
|
||||
```
|
||||
|
||||
The program executed successfully, yet did not trigger the second command.
|
||||
|
||||
#### Variables and types
|
||||
|
||||
In some languages, you can create variables without specifying what _type_ of data they contain. Those languages have been designed such that the interpreter runs some tests against a variable in an attempt to discover what kind of data it contains. For instance, Python knows that `var=1` defines an integer when you create an expression that adds `var` to something that is obviously an integer. It similarly knows that the word `world` is a string when you concatenate `hello` and `world`.
|
||||
|
||||
C doesn't do any of these investigations for you; you must define your variable type. There are several types of variables, including integers (int), characters (char), float, and Boolean.
|
||||
|
||||
You may also notice there's no string type. Unlike Python and Java and Lua and many others, C doesn't have a string type and instead sees strings as an array of characters.
|
||||
|
||||
Here's some simple code that establishes a `char` array variable, and then prints it to your screen using [printf][9] along with a short message:
|
||||
|
||||
|
||||
```
|
||||
#include <stdio.h>

int main() {
    char var[6] = "hello";
    printf("Your string is: %s\r\n", var);
    return 0;
}
|
||||
```

You may notice that this code sample allows six characters for a five-letter word. This is because there's a hidden null terminator (`\0`) at the end of the string, which takes up one byte in the array. You can run the code by compiling and executing it:

```
$ gcc hello.c --output hello
$ ./hello
Your string is: hello
```

### Functions

As with other languages, C functions take optional parameters. You can pass parameters from one function to another by defining the type of data you want a function to accept:

```
#include <stdio.h>

void printmsg(char a[]) {
    printf("String is: %s\r\n", a);
}

int main() {
    char a[6] = "hello";
    printmsg(a);
    return 0;
}
```

The way this code sample breaks one function into two isn't very useful, but it demonstrates that `main` runs by default and how to pass data between functions.

### Conditionals

In real-world programming, you usually want your code to make decisions based on data. This is done with _conditional_ statements, and the `if` statement is one of the most basic of them.

To make this example program more dynamic, you can include the `string.h` header file, which contains code to examine (as the name implies) strings. Try testing whether the length of the string passed to the `printmsg` function is greater than 0 by using the `strlen` function from the `string.h` file:

```
#include <stdio.h>
#include <string.h>

void printmsg(char a[]) {
    size_t len = strlen(a);
    if (len > 0) {
        printf("String is: %s\r\n", a);
    }
}

int main() {
    char a[6] = "hello";
    printmsg(a);
    return 0;
}
```

As implemented in this example, the sample condition will always be true because the string provided is always "hello," the length of which is always greater than 0. The final touch to this humble re-implementation of the `echo` command is to accept input from the user.

### Command arguments

Every C program's `main` function can accept two arguments that are supplied by the environment each time the program is launched: a count of how many items are contained in the command (`argc`) and an array containing each item (`argv`). For example, suppose you issue this imaginary command:

```
$ foo -i bar
```

The `argc` is three, and the contents of `argv` are:

  * `argv[0] = foo`
  * `argv[1] = -i`
  * `argv[2] = bar`

Can you modify the example C program to accept `argv[2]` as the string instead of defaulting to `hello`?
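
If you get stuck, here is one possible sketch of a solution (the bounds check and the fallback default are my own additions, not requirements of the exercise):

```
#include <stdio.h>
#include <string.h>

void printmsg(char a[]) {
    if (strlen(a) > 0) {
        printf("String is: %s\r\n", a);
    }
}

int main(int argc, char *argv[]) {
    /* Use the third command item (argv[2]) when it exists;
       otherwise, fall back to the original default string. */
    if (argc > 2) {
        printmsg(argv[2]);
    } else {
        printmsg("hello");
    }
    return 0;
}
```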

### Imperative programming

C is an imperative programming language. It isn't object-oriented, and it has no class structure. Using C can teach you a lot about how data is processed and how to better manage the data you generate as your code runs. Use C enough, and you'll eventually be able to write libraries that other languages, such as Python and Lua, can use.

To learn more about C, you need to use it. Look in `/usr/include/` for useful C header files, and see what small tasks you can do to make C useful to you. As you learn, use our [C cheat sheet][11] by [Jim Hall][12] of FreeDOS. It's got all the basics on one double-sided sheet, so you can immediately access all the essentials of C syntax while you practice.

--------------------------------------------------------------------------------

via: https://opensource.com/article/20/8/c-programming-cheat-sheet

作者:[Seth Kenlon][a]
选题:[lujun9972][b]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)

本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出

[a]: https://opensource.com/users/seth
[b]: https://github.com/lujun9972
[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/coverimage_cheat_sheet.png?itok=lYkNKieP (Cheat Sheet cover image)
[2]: https://opensource.com/article/20/6/algol68
[3]: https://opensource.com/article/20/6/homebrew-mac
[4]: https://gcc.gnu.org/
[5]: https://opensource.com/article/20/8/gnu-windows-mingw
[6]: https://opensource.com/resources/what-bash
[7]: https://opensource.com/resources/python
[8]: http://www.opengroup.org/onlinepubs/009695399/functions/printf.html
[9]: https://opensource.com/article/20/8/printf
[10]: http://www.opengroup.org/onlinepubs/009695399/functions/strlen.html
[11]: https://opensource.com/downloads/c-programming-cheat-sheet
[12]: https://opensource.com/users/jim-hall

@ -1,229 +0,0 @@

[#]: collector: (lujun9972)
[#]: translator: (MjSeven)
[#]: reviewer: ( )
[#]: publisher: ( )
[#]: url: ( )
[#]: subject: (How to install software with Ansible)
[#]: via: (https://opensource.com/article/20/9/install-packages-ansible)
[#]: author: (Seth Kenlon https://opensource.com/users/seth)

How to install software with Ansible
======
Automate software installations and updates across your devices with Ansible playbooks.
![Puzzle pieces coming together to form a computer screen][1]

Ansible is a popular automation tool used by sysadmins and developers to keep their computer systems in prime condition. As is often the case with extensible frameworks, [Ansible][2] has limited use on its own, with its real power dwelling in its many modules. Ansible modules are, in a way, what commands are to a [Linux][3] computer. They provide solutions to specific problems, and one common task when maintaining computers is keeping all the ones you use updated and consistent.

I used to use a text list of packages to keep my systems more or less synchronized: I'd list the packages installed on my laptop and then cross-reference that with my desktop, or between one server and another, making up for any differences manually. Of course, installing and maintaining applications on a Linux machine is a basic task for Ansible, and it means you can list what you want across all computers under your care.

### Finding the right Ansible module

The number of Ansible modules can be overwhelming. How do you find the one you need for a given task? In Linux, you might look in your Applications menu or in `/usr/bin` to discover new applications to run. When you're using Ansible, you refer to the [Ansible module index][4].

The index is listed primarily by category. With a little searching, you're very likely to find a module for whatever you need. For package management, the [Packaging modules][5] section contains a module for nearly any system with a package manager.

### Writing an Ansible playbook

To begin, choose the package manager on your local computer. For instance, if you're going to write your Ansible instructions (a "playbook," as it's called in Ansible) on a laptop running Fedora, start with the `dnf` module. If you're writing on Elementary OS, use the `apt` module, and so on. This gets you started with something you can test and verify as you go, and you can expand your work to your other computers later.

The first step is to create a directory representing your playbook. This isn't strictly necessary, but it's a good idea to establish the habit. Ansible can run with just a configuration file written in YAML, but if you want to expand your playbook later, you can control Ansible by how you lay out your directories and files. For now, just create a directory called `install_packages` or similar:

```
$ mkdir ~/install_packages
```

The file that serves as the Ansible playbook can be named anything you like, but it's traditional to name it `site.yml`:

```
$ touch ~/install_packages/site.yml
```

Open `site.yml` in your favorite text editor, and add this:

```
---
- hosts: localhost
  tasks:
    - name: install packages
      become: true
      become_user: root
      dnf:
        state: present
        name:
          - tcsh
          - htop
```

You must adjust the module name you use to match the distribution you're using. In this example, I used `dnf` because I wrote the playbook on Fedora Linux.

Like with a command in a Linux terminal, knowing _how_ to invoke an Ansible module is half the battle. This playbook example follows the standard playbook format:

  * `hosts` targets a computer or computers. In this case, the computer being targeted is `localhost`, which is the computer you're using right now (as opposed to a remote system you want Ansible to connect with).
  * `tasks` opens a list of tasks you want to be performed on the hosts.
  * `name` is a human-friendly title for a task. In this case, I'm using `install packages` because that's what this task is doing.
  * `become` permits Ansible to change which user is running this task.
  * `become_user` permits Ansible to become the `root` user to run this task. This is necessary because only the root user can install new applications using `dnf`.
  * `dnf` is the name of the module, which you discovered from the module index on the Ansible website.

The items under the `dnf` item are specific to the `dnf` module. This is where the module documentation is essential. Like a man page for a Linux command, the module documentation tells you what options are available and what kinds of arguments are required.

![Ansible documentation][6]

Ansible module documentation (Seth Kenlon, [CC BY-SA 4.0][7])

Package installation is a relatively simple task and only requires two elements. The `state` option instructs Ansible to check whether or not _some package_ is present on the system, and the `name` option lists which packages to look for. Ansible deals in machine _state_, so module instructions always imply change. Should Ansible scan a system and find a conflict between how a playbook describes a system (in this case, that the commands `tcsh` and `htop` are present) and what the system state actually is (in this example, `tcsh` and `htop` are not present), then Ansible's task is to make whatever changes are necessary for the system to match the playbook. Ansible can make those changes because of the `dnf` (or `apt` or whatever your package manager is) module.

Each module is likely to have a different set of options, so when you're writing playbooks, anticipate referring to the module documentation often. Until you're very familiar with a module, it's the only reasonable way to expect a module to do what you need it to do.

### Verifying YAML

Playbooks are written in YAML. Because YAML adheres to a strict syntax, it's helpful to install the `yamllint` command to check (or "lint," in computer terminology) your work. Better still, there's a linter specific to Ansible called `ansible-lint` created specifically for playbooks. Install these before continuing.

On Fedora or CentOS:

```
$ sudo dnf install yamllint python3-ansible-lint
```

On Debian, Elementary, Ubuntu, or similar:

```
$ sudo apt install yamllint ansible-lint
```

Verify your playbook with `ansible-lint`. If you don't have access to `ansible-lint`, you can use `yamllint`.

```
$ ansible-lint ~/install_packages/site.yml
```

Success returns nothing, but if there are errors in your file, you must fix them before continuing. Common errors from copying and pasting include omitting a newline character at the end of the final line and using tabs instead of spaces for indentation. Fix them in a text editor, rerun the linter, and repeat this process until you get no feedback from `ansible-lint` or `yamllint`.

### Installing an application with Ansible

Now that you have a verifiably valid playbook, you can finally run it on your local machine. Because you happen to know that the task defined by the playbook requires root permissions, you must use the `--ask-become-pass` option when invoking Ansible, so you will be prompted for your administrative password.

Start the installation:

```
$ ansible-playbook --ask-become-pass ~/install_packages/site.yml
BECOME password:
PLAY [localhost] ******************************

TASK [Gathering Facts] ******************************
ok: [localhost]

TASK [install packages] ******************************
ok: [localhost]

PLAY RECAP ******************************
localhost: ok=0 changed=2 unreachable=0 failed=0 [...]
```

The commands are installed, leaving the target system in an identical state to the one described by the playbook.

### Installing an application on remote systems

Going through all of that to replace one simple command would be counterproductive, but Ansible's advantage is that it can be automated across all of your systems. You can use conditional statements to cause Ansible to use a specific module on different systems, but for now, assume all your computers use the same package manager.

To connect to a remote system, you must define the remote system in the `/etc/ansible/hosts` file. This file was installed along with Ansible, so it already exists, but it's probably empty, aside from explanatory comments. Use `sudo` to open the file in your favorite text editor.

You can define a host by its IP address or hostname, as long as the hostname can be resolved. For instance, if you've already defined `liavara` in `/etc/hosts` and can successfully ping it, then you can set `liavara` as a host in `/etc/ansible/hosts`. Alternately, if you're running a domain name server or Avahi server and can ping `liavara`, then you can set it as a host in `/etc/ansible/hosts`. Otherwise, you must use its internet protocol address.
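
For instance, a minimal `/etc/ansible/hosts` might look like this sketch (the hostname `liavara` comes from the example above; the IP address is a placeholder):

```
liavara
192.168.122.15
```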

You also must have set up a successful secure shell (SSH) connection to your target hosts. The easiest way to do that is with the `ssh-copy-id` command, but if you've never set up an SSH connection with a host before, [read my article on how to create an automated SSH connection][8].
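
In the simplest case, that single command is all it takes (replace `user` with your account on the remote host):

```
$ ssh-copy-id user@liavara
```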

Once you've entered the hostname or IP address in the `/etc/ansible/hosts` file, change the `hosts` definition in your playbook:

```
---
- hosts: all
  tasks:
    - name: install packages
      become: true
      become_user: root
      dnf:
        state: present
        name:
          - tcsh
          - htop
```

Run `ansible-playbook` again:

```
$ ansible-playbook --ask-become-pass ~/install_packages/site.yml
```

This time, the playbook runs on your remote system.

Should you add more hosts, there are many ways to filter which host performs which task. For instance, you can create groups of hosts (`webservers` for servers, `workstations` for desktop machines, and so on).
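
In the default INI-style inventory, such groups are just bracketed headings above the hosts they contain. Here is an illustrative sketch (all of these names are placeholders), which a playbook could then target with `hosts: webservers`:

```
[webservers]
example-web1
example-web2

[workstations]
liavara
```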

### Ansible for mixed environments

The logic used in the solution so far assumes that all hosts being configured by Ansible run the same OS (specifically, one that uses the **dnf** command for package management). So what do you do if you're managing hosts running a different distribution, such as Ubuntu (which uses **apt**) or Arch (using **pacman**), or even different operating systems?

As long as the targeted OS has a package manager (and these days even [MacOS has Homebrew][9] and [Windows has Chocolatey][10]), Ansible can help.

This is where Ansible's advantage becomes most apparent. In a shell script, you'd have to check for what package manager is available on the target host, and even with pure Python you'd have to check for the OS. Ansible not only has those checks built in, but it also has mechanisms to use the results in your playbook. Instead of using the **dnf** module, you can use the **action** keyword to perform tasks defined by variables provided by Ansible's fact-gathering subsystem.

```
---
- hosts: all
  tasks:
    - name: install packages
      become: true
      become_user: root
      action: >
        {{ ansible_pkg_mgr }} name=htop,transmission state=present update_cache=yes
```

The **action** keyword loads action plugins. In this example, it's using the **ansible_pkg_mgr** variable, which is populated by Ansible during the initial **Gathering Facts** task. You don't have to tell Ansible to gather facts about the OS it's running on, so it's easy to overlook it, but when you run a playbook, you see it listed in the default output:

```
TASK [Gathering Facts] *****************************************
ok: [localhost]
```

The **action** plugin uses information from this probe to populate **ansible_pkg_mgr** with the relevant package manager command to install the packages listed after the **name** argument. With 8 lines of code, you can overcome a complex cross-platform quandary that few other scripting options allow.

### Use Ansible

It's the 21st century, and we all expect our computing devices to be connected and relatively consistent. Whether you maintain two or 200 computers, you shouldn't have to perform the same maintenance tasks over and over again. Use Ansible to synchronize the computers in your life, then see what else Ansible can do for you.

--------------------------------------------------------------------------------

via: https://opensource.com/article/20/9/install-packages-ansible

作者:[Seth Kenlon][a]
选题:[lujun9972][b]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)

本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出

[a]: https://opensource.com/users/seth
[b]: https://github.com/lujun9972
[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/puzzle_computer_solve_fix_tool.png?itok=U0pH1uwj (Puzzle pieces coming together to form a computer screen)
[2]: https://opensource.com/resources/what-ansible
[3]: https://opensource.com/resources/linux
[4]: https://docs.ansible.com/ansible/latest/modules/modules_by_category.html
[5]: https://docs.ansible.com/ansible/latest/modules/list_of_packaging_modules.html
[6]: https://opensource.com/sites/default/files/uploads/ansible-module.png (Ansible documentation)
[7]: https://creativecommons.org/licenses/by-sa/4.0/
[8]: https://opensource.com/article/20/8/how-ssh
[9]: https://opensource.com/article/20/6/homebrew-mac
[10]: https://opensource.com/article/20/3/chocolatey

@ -1,5 +1,5 @@
[#]: collector: (lujun9972)
[#]: translator: ( )
[#]: translator: (HankChow)
[#]: reviewer: ( )
[#]: publisher: ( )
[#]: url: ( )

@ -1,119 +0,0 @@
[#]: collector: (lujun9972)
[#]: translator: (tanslating)
[#]: reviewer: ( )
[#]: publisher: ( )
[#]: url: ( )
[#]: subject: (GNOME 3.38 is Here With Customizable App Grid, Performance Improvements and Tons of Other Changes)
[#]: via: (https://itsfoss.com/gnome-3-38-release/)
[#]: author: (Ankush Das https://itsfoss.com/author/ankush/)

GNOME 3.38 is Here With Customizable App Grid, Performance Improvements and Tons of Other Changes
======

[GNOME 3.36][1] brought some much-needed improvements along with a major performance boost. Now, after six months, GNOME 3.38 is finally here with a big set of changes.

### GNOME 3.38 Key Features

Here are the main highlights of GNOME 3.38, codenamed Orbis:

[Subscribe to our YouTube channel for more Linux videos][2]

#### Customizable App Menu

The app grid or the app menu will now be customizable as part of a big change in GNOME 3.38.

Now, you can create folders by dragging application icons over each other, move icons to and from folders, and put them right back in the app grid. You can also reposition the icons however you want in the app grid.

![][3]

Also, these changes are some basic building blocks for design changes planned for future updates, so it'll be exciting to see what we can expect.

#### Calendar Menu Updates

![][4]

The notification area is a lot cleaner with the recent GNOME updates, but now, with GNOME 3.38, you can finally access calendar events right below the calendar area to make things convenient and easy to access.

It's not a major visual overhaul, but there are a few improvements to it.

#### Parental Controls Improvement

You will find a parental control service as part of GNOME 3.38. It supports integration with various components of the desktop, the shell, the settings, and others to help you limit what a user can access.

#### The Restart Button

Some subtle improvements lead to massive changes, and this is exactly one of those changes. It's always annoying to click on the "Power Off" / "Shut down" button first and then hit the "Restart" button to reboot the system.

So, with GNOME 3.38, you will finally notice a "Restart" entry as a separate button, which will save you a click and give you peace of mind.

#### Screen Recording Improvements

[GNOME shell's built-in screen recorder][5] is now a separate system service, which should make recording the screen a smoother experience.

Also, window screencasting received several improvements along with some bug fixes.

#### GNOME apps Updates

The GNOME calculator has received a lot of bug fixes. In addition to that, you will also find some major changes to the [Epiphany GNOME browser][6].

GNOME Boxes now lets you pick the OS from a list of operating systems, and GNOME Maps was updated with some UI changes as well.

Not just limited to these, you will also find subtle updates and fixes to GNOME Control Center, Contacts, Photos, Nautilus, and some other packages.

#### Performance & multi-monitor support improvements

There's a bunch of under-the-hood improvements to GNOME 3.38 across the board. For instance, there were some serious fixes to [Mutter][7], which now lets two monitors run at different refresh rates.

![][8]

Previously, if you had one monitor with a 60 Hz refresh rate and another with 144 Hz, the one with the slower rate would limit the other monitor. But, with the improvements in GNOME 3.38, it will handle multi-monitor setups without limiting any of them.

Also, some changes reported by [Phoronix][9] pointed out around 10% lower render time in some cases. So, that's definitely a great performance optimization.

#### Miscellaneous other changes

  * Battery percentage indicator
  * Restart option in the power menu
  * New welcome tour
  * Fingerprint login
  * QR code scanning for sharing Wi-Fi hotspots
  * Privacy and other improvements to GNOME Browser
  * GNOME Maps is now responsive and changes its size based on the screen
  * Revised icons

You can find a detailed list of changes in the official [changelog][10].

### Wrapping Up

GNOME 3.38 is indeed an impressive update to improve the GNOME experience. Even though performance was greatly improved with GNOME 3.36, more optimization is a very good thing for GNOME 3.38.

GNOME 3.38 will be available in Ubuntu 20.10 and [Fedora 33][11]. Arch and Manjaro users should be getting it soon.

I think there are plenty of changes in the right direction. What do you think?

--------------------------------------------------------------------------------

via: https://itsfoss.com/gnome-3-38-release/

作者:[Ankush Das][a]
选题:[lujun9972][b]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)

本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出

[a]: https://itsfoss.com/author/ankush/
[b]: https://github.com/lujun9972
[1]: https://itsfoss.com/gnome-3-36-release/
[2]: https://www.youtube.com/c/itsfoss?sub_confirmation=1
[3]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2020/09/gnome-app-arranger.jpg?resize=799%2C450&ssl=1
[4]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2020/09/gnome-3-38-calendar-menu.png?resize=800%2C721&ssl=1
[5]: https://itsfoss.com/gnome-screen-recorder/
[6]: https://en.wikipedia.org/wiki/GNOME_Web
[7]: https://en.wikipedia.org/wiki/Mutter_(software)
[8]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2020/09/gnome-multi-monitor-refresh-rate.jpg?resize=800%2C369&ssl=1
[9]: https://www.phoronix.com/scan.php?page=news_item&px=GNOME-3.38-Last-Min-Mutter
[10]: https://help.gnome.org/misc/release-notes/3.38
[11]: https://itsfoss.com/fedora-33/

@ -1,5 +1,5 @@
[#]: collector: (lujun9972)
[#]: translator: ( )
[#]: translator: (robsean)
[#]: reviewer: ( )
[#]: publisher: ( )
[#]: url: ( )

@ -1,89 +0,0 @@
[#]: collector: (lujun9972)
[#]: translator: (geekpi)
[#]: reviewer: ( )
[#]: publisher: ( )
[#]: url: ( )
[#]: subject: (How to Use the Firefox Task Manager (to Find and Kill RAM and CPU Eating Tabs and Extensions))
[#]: via: (https://itsfoss.com/firefox-task-manager/)
[#]: author: (Ankush Das https://itsfoss.com/author/ankush/)

How to Use the Firefox Task Manager (to Find and Kill RAM and CPU Eating Tabs and Extensions)
======

Firefox is popular among Linux users. It is the default web browser on several Linux distributions.

Among many other features, Firefox provides a task manager of its own.

Now, why would you use it when you have a [task manager in Linux][1] in the form of [system monitoring tools][2]? There is a good reason for that.

Suppose your system is taking up too much RAM or CPU. If you use top or some other system [resource monitoring tool like Glances][3], you'll notice that these tools cannot distinguish between the opened tabs or extensions.

Usually, each Firefox tab is displayed as **Web Content**. You can see that some Firefox process is causing the issue, but there's no way to accurately determine which tab or extension it is.

This is where you can use the Firefox task manager. Let me show you how!

### Firefox Task Manager

With Firefox Task Manager, you will be able to list all the tabs, trackers, and add-ons consuming system resources.

![][4]

As you can see in the screenshot above, you get the name of the tab, the type (tab or add-on), the energy impact, and the memory consumed.

While everything is self-explanatory, the **energy impact refers to the CPU usage**, and if you are using a laptop, it is a good indicator of what will drain the battery quicker.

#### Access Task Manager in Firefox

Surprisingly, there is no [Firefox keyboard shortcut][5] for the task manager.

To quickly launch Firefox Task Manager, you can type "**about:performance**" in the address bar, as shown in the screenshot below.

![Quickly access task manager in Firefox][6]

Alternatively, you can click on the **menu** icon and then head on to "**More**" options, as shown in the screenshot below.

![Accessing task manager in Firefox][7]

Next, you will find the "**Task Manager**" option; just click on it.

![][8]

#### Using Firefox task manager

Once there, you can check the resource usage, expand the tabs to see the trackers and their usage, and also choose to close the tabs right there, as highlighted in the screenshot below.

![][9]

Here's what you should know:

  * Energy impact means CPU consumption.
  * The subframes or subtasks are usually the trackers/scripts associated with a tab that need to run in the background.

With this task manager, you can also spot whether a rogue script on a site is causing your browser to slow down.

This isn't rocket science, but not many people are aware of the Firefox task manager. Now that you know about it, it should come in pretty handy, don't you think?

--------------------------------------------------------------------------------

via: https://itsfoss.com/firefox-task-manager/

作者:[Ankush Das][a]
选题:[lujun9972][b]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)

本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出

[a]: https://itsfoss.com/author/ankush/
[b]: https://github.com/lujun9972
[1]: https://itsfoss.com/task-manager-linux/
[2]: https://itsfoss.com/linux-system-monitoring-tools/
[3]: https://itsfoss.com/glances/
[4]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2020/09/firefox-task-manager-shot.png?resize=800%2C519&ssl=1
[5]: https://itsfoss.com/firefox-keyboard-shortcuts/
[6]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2020/09/firefox-url-performance.jpg?resize=800%2C357&ssl=1
[7]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2020/09/firefox-task-manager-steps.jpg?resize=800%2C779&ssl=1
[8]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2020/09/firefox-task-manager-menu.jpg?resize=800%2C465&ssl=1
[9]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2020/09/firefox-task-manager-close-tab.png?resize=800%2C496&ssl=1

@ -1,119 +0,0 @@
[#]: collector: (lujun9972)
[#]: translator: (geekpi)
[#]: reviewer: ( )
[#]: publisher: ( )
[#]: url: ( )
[#]: subject: (Drawing is an Open Source MS-Paint Type of App for Linux Desktop)
[#]: via: (https://itsfoss.com/drawing-app/)
[#]: author: (Abhishek Prakash https://itsfoss.com/author/abhishek/)

Drawing is an Open Source MS-Paint Type of App for Linux Desktop
======

_**Brief: Drawing is a basic image editor like Microsoft Paint. With this open source application, you can draw arrows, lines, geometrical shapes, add colors and other stuff you expect to do in a regular drawing application.**_

### Drawing: A simple drawing application for Linux

![][1]

For people introduced to computers with Windows XP (or an earlier version), MS Paint was an amusing application for sketching random stuff. In a world dominated by Photoshop and GIMP, paint applications still hold some relevance.

There are several [painting applications available for Linux][2], and I am going to add one more to this list.

The app is unsurprisingly called [Drawing][3], and you can use it on both Linux desktops and Linux smartphones.

### Features of Drawing app

![][4]

Drawing has all the features you expect from a drawing application. You can:

  * Create new drawings from scratch
  * Edit an existing image in PNG, JPEG or BMP format
  * Add geometrical shapes, lines, arrows, etc.
  * Draw dashed lines and shapes
  * Use the pencil tool for free-hand drawing
  * Use the curve and shape tools
  * Crop images
  * Scale images to different pixel sizes
  * Add text
  * Select part of an image (rectangle, freehand, and color selection)
  * Rotate images
  * Add images copied to the clipboard
  * Use the Eraser, Highlighter, Paint, Color Selection, and Color Picker tools available in preferences
  * Undo without limits
  * Apply filters to add blur, pixelisation, transparency, etc.

### My experience with Drawing

![][5]

The application is new and has a decent user interface. It comes with all the basic features you expect to find in a standard paint app.

It has some additional tools like color selection and a color picker, but it might be confusing to use them. There is no documentation available to describe the use of these tools, so you are on your own here.

The experience is smooth, and I feel this tool has good potential to replace Shutter as an image editing tool (yes, I [use Shutter for editing screenshots][6]).

The thing I find most bothersome is that it is not possible to edit/modify an element after adding it. You have the undo and redo options, but if you want to modify a text you added 12 steps back, you'll have to redo all the steps. This is something the developer may look into in future releases.

### Installing Drawing on Linux

This is a Linux-exclusive app. It is also available for Linux-based smartphones like [PinePhone][7].

There are various ways you can install the Drawing app. It is available in the repositories of many major Linux distributions.

#### Ubuntu-based distributions

Drawing is included in the universe repository in Ubuntu, which means you can install it from the Ubuntu Software Center.

However, if you want the latest version, there is a [PPA available][8] for easily installing Drawing on Ubuntu, Linux Mint, and other Ubuntu-based distributions.

Use the following commands:

```
sudo add-apt-repository ppa:cartes/drawing
sudo apt update
sudo apt install drawing
```

If you want to remove it, you can use the following commands:

```
sudo apt remove drawing
sudo add-apt-repository -r ppa:cartes/drawing
```

#### Other Linux distributions

Check your distribution's package manager for Drawing and install it from there. If you want the latest version, you may use the Flatpak version of the app.

[Drawing Flatpak][9]

**Conclusion**

Do you still use a paint application? Which one do you use? If you have tried the Drawing app already, how is your experience with it?

--------------------------------------------------------------------------------

via: https://itsfoss.com/drawing-app/

作者:[Abhishek Prakash][a]
选题:[lujun9972][b]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)

本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出

[a]: https://itsfoss.com/author/abhishek/
[b]: https://github.com/lujun9972
[1]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2020/09/drawing-app-interface.jpg?resize=789%2C449&ssl=1
[2]: https://itsfoss.com/open-source-paint-apps/
[3]: https://maoschanz.github.io/drawing/
[4]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2020/09/drawing-screenshot.jpg?resize=800%2C489&ssl=1
[5]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2020/09/using-drawing-app-linux.png?resize=787%2C473&ssl=1
[6]: https://itsfoss.com/install-shutter-ubuntu/
[7]: https://itsfoss.com/pinephone/
[8]: https://launchpad.net/~cartes/+archive/ubuntu/drawing
[9]: https://flathub.org/apps/details/com.github.maoschanz.drawing

@ -1,104 +0,0 @@
[#]: collector: (lujun9972)
[#]: translator: ( )
[#]: reviewer: ( )
[#]: publisher: ( )
[#]: url: ( )
[#]: subject: (Present Slides in Linux Terminal With This Nifty Python Tool)
[#]: via: (https://itsfoss.com/presentation-linux-terminal/)
[#]: author: (Abhishek Prakash https://itsfoss.com/author/abhishek/)

Present Slides in Linux Terminal With This Nifty Python Tool
======

Presentations are often boring. This is why some people add animations or comics/memes to inject some humor and style and break the monotony.

If you have to add some unique style to your college or company presentation, how about using the Linux terminal? Imagine how cool it would be!

### Present: Do Your Presentation in Linux Terminal

There's plenty of amusing and [fun stuff you can do in the terminal][1]. Making and presenting slides is just one of them.

A Python-based application named [Present][2] lets you create markdown and YML based slides that you can present in your college or company and amuse people in true geek style.

I have made a video showing what it would look like to present something in the Linux terminal with Present.

[Subscribe to our YouTube channel for more Linux videos][3]

#### Features of Present

You can do the following things with Present:

  * Use markdown syntax for adding text to the slides
  * Control the slides with the arrow or PgUp/Down keys
  * Change the foreground and background colors
  * Add images to the slides
  * Add code blocks
  * Play a simulation of code and output with codio YML files

#### Installing Present on Linux

Present is a Python-based tool, and you can use PIP to install it. You should make sure to [install Pip on Ubuntu][4] with this command:

```
sudo apt install python3-pip
```

If you are using some other distribution, please check your package manager to install PIP3.

Once you have PIP installed, you can install Present system-wide in this manner:

```
sudo pip3 install present
```

You may also install it for only the current user, but then you'll also have to add `~/.local/bin` to your `PATH`.

#### Using Present to create and present slides in Linux terminal

![][5]

Since Present utilizes markdown syntax, you should be familiar with it to create your own slides. Using a [markdown editor][6] will be helpful here.

Present needs a markdown file to read and play the slides. You may [download this sample slide deck][7], but you need to download the embedded image separately and put it inside an `images` folder. Keep these rules in mind (a minimal sketch follows the list):

  * Separate slides using `---` in your markdown file.
  * Use markdown syntax for adding text to the slides.
  * Add images with this syntax: `![RC](images/name.png)`.
  * Change slide colors by adding syntax like `<!-- fg=white bg=red -->`.
  * Add a slide with effects using syntax like `<!-- effect=fireworks -->`.
  * Use [codio syntax][8] to add a code running simulation.
  * Quit the presentation using `q` and control the slides with the left/right arrow or PgUp/Down keys.
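
Putting those rules together, a minimal deck might look like this sketch (the file name `slides.md` and the slide text are my own illustration, not part of the sample deck):

```
# Slide one

Hello from the Linux terminal!

---

<!-- fg=white bg=red -->
# Slide two

Press q to quit.
```

You would then play it with `present slides.md` and step through the slides with the arrow keys.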

Keep in mind that resizing the terminal window while running the presentation will mess things up, and so does pressing the Enter key.

**Conclusion**

If you are familiar with Markdown and the terminal, using Present won't be difficult for you.

You cannot compare it to regular presentation slides made with Impress, MS Office, etc., but it is a cool tool to use occasionally. If you are a computer science/networking student or work as a developer or sysadmin, your colleagues will surely find this amusing.

--------------------------------------------------------------------------------

via: https://itsfoss.com/presentation-linux-terminal/

作者:[Abhishek Prakash][a]
选题:[lujun9972][b]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)

本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出

[a]: https://itsfoss.com/author/abhishek/
[b]: https://github.com/lujun9972
[1]: https://itsfoss.com/funny-linux-commands/
[2]: https://github.com/vinayak-mehta/present
[3]: https://www.youtube.com/c/itsfoss?sub_confirmation=1
[4]: https://itsfoss.com/install-pip-ubuntu/
[5]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2020/09/presentation-in-linux-terminal.png?resize=800%2C494&ssl=1
[6]: https://itsfoss.com/best-markdown-editors-linux/
[7]: https://github.com/vinayak-mehta/present/blob/master/examples/sample.md
[8]: https://present.readthedocs.io/en/latest/codio.html

@ -1,5 +1,5 @@
[#]: collector: (lujun9972)
[#]: translator: ( )
[#]: translator: (robsean)
[#]: reviewer: ( )
[#]: publisher: ( )
[#]: url: ( )
113
sources/tech/20201005 How the Linux kernel handles interrupts.md
Normal file
113
sources/tech/20201005 How the Linux kernel handles interrupts.md
Normal file
@ -0,0 +1,113 @@

[#]: collector: (lujun9972)
[#]: translator: ( )
[#]: reviewer: ( )
[#]: publisher: ( )
[#]: url: ( )
[#]: subject: (How the Linux kernel handles interrupts)
[#]: via: (https://opensource.com/article/20/10/linux-kernel-interrupts)
[#]: author: (Stephan Avenwedde https://opensource.com/users/hansic99)

How the Linux kernel handles interrupts
======
Interrupts are a crucial part of how computers process data.
![Penguin driving a car with a yellow background][1]

Interrupts are an essential part of how modern CPUs work. For example, every time you press a key on the keyboard, the CPU is interrupted so that the PC can read user input from the keyboard. This happens so quickly that you don't notice any change or impairment in user experience.

Moreover, the keyboard is not the only component that can cause interrupts. In general, there are three types of events that can cause the CPU to interrupt: _hardware interrupts_, _software interrupts_, and _exceptions_. Before getting into the different types of interrupts, I'll define some terms.

### Definitions

An interrupt request (**IRQ**) is raised by the programmable interrupt controller (**PIC**) with the aim of interrupting the CPU and executing the interrupt service routine (**ISR**). The ISR is a small program that processes certain data depending on the cause of the IRQ. Normal processing is interrupted until the ISR finishes.

In the past, IRQs were handled by a separate microchip, the PIC, and I/O devices were wired directly to it. The PIC managed the various hardware IRQs and could talk directly to the CPU. When an IRQ occurred, the PIC wrote the data to the CPU and raised the interrupt request (**INTR**) pin.

Nowadays, IRQs are handled by an advanced programmable interrupt controller (**APIC**), which is part of the CPU. Each core has its own APIC.

### Types of interrupts

As I mentioned, interrupts can be separated into three types depending on their source:

#### Hardware interrupts

When a hardware device wants to tell the CPU that certain data is ready to process (e.g., a keyboard entry or when a packet arrives at the network interface), it sends an IRQ to signal the CPU that the data is available. This invokes a specific ISR that was registered by the device driver during the kernel's start.

#### Software interrupts

When you're playing a video, it is essential to synchronize the music and video playback so that the music's speed doesn't vary. This is accomplished through a software interrupt that is repetitively fired by a precise timer system (known as [jiffies][2]). This timer enables your music player to synchronize. A software interrupt can also be invoked by a special instruction to read or write data to a hardware device.

Software interrupts are also crucial when real-time capability is required (such as in industrial applications). You can find more information about this in the Linux Foundation's article _[Intro to real-time Linux for embedded developers][3]_.

#### Exceptions

Exceptions are the type of interrupt that you probably know about. When the CPU executes a command that would result in division by zero or a page fault, any additional execution is interrupted. In such a case, you will be informed about it by a pop-up window or by seeing **segmentation fault (core dumped)** in the console output (the sketch below shows one way to provoke this). But not every exception is caused by a faulty instruction.
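
For instance, this deliberately broken C sketch (my own illustration, not from the article) dereferences an invalid pointer; when you run it, the resulting page fault cannot be corrected, and the kernel terminates the program with a segmentation fault:

```
#include <stdio.h>

int main(void) {
    int *p = NULL;   /* a pointer to no valid memory */
    *p = 42;         /* writing through it raises an exception */
    printf("never reached\n");
    return 0;
}
```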

Exceptions can be further divided into _faults_, _traps_, and _aborts_.

  * **Faults:** Faults are an exception that the system can correct, e.g., when a process tries to access data from a memory page that was swapped to the hard drive. The requested address is within the process address space, and the access rights are correct. If the page is not present in RAM, an IRQ is raised, and the **page fault exception handler** loads the desired memory page into RAM. If the operation is successful, execution continues.
  * **Traps:** Traps are mainly used for debugging. If you set a breakpoint in a program, you insert a special instruction that causes it to trigger a trap. A trap can trigger a context switch that allows your debugger to read and display values of local variables. Execution can continue afterward. Traps are also the default way to execute system calls (like killing a process).
  * **Aborts:** Aborts are caused by hardware failure or inconsistent values in system tables. An abort does not report the location of the instruction that caused the exception. These are the most critical interrupts. An abort invokes the system's **abort exception handler**, which terminates the process that caused it.

### Get hands-on

IRQs are ordered by priority in a vector on the APIC (0 = highest priority). The first 32 interrupts (0–31) have a fixed sequence that is specified by the CPU. You can find an overview of them on [OsDev's Exceptions][4] page. Subsequent IRQs can be assigned differently. The interrupt descriptor table (**IDT**) contains the assignment between IRQ and ISR. Linux defines an IRQ vector from 0 to 256 for the assignment.

To print a list of registered interrupts on your system, open a console and type:

```
cat /proc/interrupts
```

You should see something like this:

![Registered interrupts list][5]

Registered interrupts in kernel version 5.6.6 (Stephan Avenwedde, [CC BY-SA 4.0][6])

From left to right, the columns are: IRQ vector, interrupt count per CPU (`0 .. n`), the hardware source, the hardware source's channel information, and the name of the device that caused the IRQ.

At the bottom of the table, there are some non-numeric interrupts. They are the architecture-specific interrupts, like the local timer interrupt (**LOC**) on IRQ 236. Some of them are specified in the [Linux IRQ vector layout][7] in the Linux kernel source tree.

![Architecture-specific interrupts][8]

Architecture-specific interrupts (Stephan Avenwedde, [CC BY-SA 4.0][6])

To get a live view of this table, run:

```
watch -n1 "cat /proc/interrupts"
```

### Conclusion

Proper IRQ handling is essential for the proper interaction of hardware, drivers, and software. Luckily, the Linux kernel does a really good job, and a normal PC user will hardly notice anything about the kernel's entire interrupt handling.

This can get very complicated, and this article gives only a brief overview of the topic. Good sources of information for a deeper dive into the subject are the _[Linux Inside][9]_ eBook (CC BY-NC-SA 4.0) and the [Linux Kernel Teaching][10] repository.

--------------------------------------------------------------------------------

via: https://opensource.com/article/20/10/linux-kernel-interrupts

作者:[Stephan Avenwedde][a]
选题:[lujun9972][b]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)

本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出

[a]: https://opensource.com/users/hansic99
[b]: https://github.com/lujun9972
[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/car-penguin-drive-linux-yellow.png?itok=twWGlYAc (Penguin driving a car with a yellow background)
[2]: https://elinux.org/Kernel_Timer_Systems
[3]: https://www.linuxfoundation.org/blog/2013/03/intro-to-real-time-linux-for-embedded-developers/
[4]: https://wiki.osdev.org/Exceptions
[5]: https://opensource.com/sites/default/files/uploads/proc_interrupts_1.png (Registered interrupts list)
[6]: https://creativecommons.org/licenses/by-sa/4.0/
[7]: https://github.com/torvalds/linux/blob/master/arch/x86/include/asm/irq_vectors.h
[8]: https://opensource.com/sites/default/files/uploads/proc_interrupts_2.png (Architecture-specific interrupts)
[9]: https://0xax.gitbooks.io/linux-insides/content/Interrupts/
[10]: https://linux-kernel-labs.github.io/refs/heads/master/lectures/interrupts.html#

@ -0,0 +1,165 @@
[#]: collector: (lujun9972)
[#]: translator: ( )
[#]: reviewer: ( )
[#]: publisher: ( )
[#]: url: ( )
[#]: subject: (5 Scratch code blocks to teach kids how to program a video game)
[#]: via: (https://opensource.com/article/20/10/advanced-scratch)
[#]: author: (Jess Weichler https://opensource.com/users/cyanide-cupcake)

5 Scratch code blocks to teach kids how to program a video game
======
Advance your Scratch skills with loops, conditional statements, collision detection, and more in this article in a series about teaching kids to code.
![Binary code on a computer screen][1]

In the second article in this series, you [created your first few video game scripts in Scratch][2]. This article explores ways to expand programming's possibilities to create more advanced code.

There are multiple ways to introduce these skills to kids, such as:

  1. Introduce a task or challenge that requires children to use the skill. Use inquiry to help them find the solution, then reinforce their discoveries with a formal explanation.
  2. Encourage free experimentation by having children come up with their own projects. As they work through their code, go over skills as needed.
  3. Introduce the skill, then have children experiment with it.

No matter which one you choose, always remember that the most important part of learning coding is making mistakes. Even skilled programmers don't get it right every time or know every possible line of code. It works best when educators, pupils, and peers are all learning to code together as a team.

There are [10 categories][3] of code blocks in Scratch; here is how to use some of the most common.

### Loops

_This is the code that doesn't end; yes, it goes on and on, my friend!_ **Forever loops** and **repeat blocks** in [Scratch][4] are what you need to repeat lines of code automatically. Any code blocks placed inside a loop block continue to run until the game is stopped or, if you're using a repeat block, until the number is reached.

![Loops in Scratch][5]

(Jess Weichler, [CC BY-SA 4.0][6])

### Conditional statements

**Conditional statements** run only if certain conditions are met. "If you're cold, then put on a sweater" is a real-world example of a conditional statement: you put a sweater on only if you determine that it's cold.

There are four conditional statement code blocks in Scratch:

  * if ... then
  * if ... then ... else
  * wait until...
  * repeat until...

Any code blocks placed inside a conditional statement run only if the condition is met.

![Conditional statement blocks in Scratch][7]

(Jess Weichler, [CC BY-SA 4.0][6])

Notice the diamond shapes in each conditional statement code block; can you find any code blocks that might fit inside?

Diamond-shaped code blocks can be used to complete any of the four conditional-statement blocks. You can find diamond-shaped blocks in the [Sensing][8] and [Operators][9] block categories.

![Diamond-shaped blocks in Scratch][10]

(Jess Weichler, [CC BY-SA 4.0][6])

### Collision-detection loop

Sometimes you may want to check to see if your sprite is touching another sprite or a specific color. To do so, use a [**collision-detection loop**][11].

A collision-detection loop combines loops and conditional statements to constantly check whether the sprite is touching another sprite (for example, a coin sprite).

![Collision-detection script in Scratch][12]

(Jess Weichler, [CC BY-SA 4.0][6])

Inside the inner `if ... then` block, place the action you want to happen when the condition is met.

This type of algorithm is a **collision-detection script**. Collision-detection scripts sense when two sprites or objects are touching. A basic collision-detection script uses four main code blocks:

  * Event hat
  * Forever loop
  * If … then
  * Touching

You can place more code blocks inside the `if ... then` block. These blocks will run only if the active sprite is touching the sprite listed in the `touching` block.

Can you figure out how to make an object "hide" when it collides with another sprite? This is a common technique to indicate that, for instance, a sprite has eaten some food or has picked up an item.

### Variables and math

A **variable** is a placeholder for a value, usually a number, that you don't know yet. In math, using a variable might look something like this: `x+12=15`.

![Variables in Scratch][13]

(Jess Weichler, [CC BY-SA 4.0][6])

If that doesn't make sense to you, that's okay. I didn't understand variables until I started coding as an adult.

Here is one example of how you might use a variable in code:

![Variables in Scratch][14]

(Jess Weichler, [CC BY-SA 4.0][6])

### Coordinates

Scratch uses a coordinate graph to measure the screen. The exact middle of the screen has a value of 0,0. The length of the screen (X-axis) runs from -240 to 240, and the height (Y-axis) runs from -180 to 180.

The X and Y **coordinates** control where each sprite is on the screen, and you can code a sprite's X and Y coordinates to set a specific place using **[motion blocks][15]**.

![Coordinates in Scratch][16]

(Jess Weichler, [CC BY-SA 4.0][6])

### Put it all together

Think about the basics of any game; what are some elements you usually need?

Here are some examples:

  * A goal
  * A way to win
  * A way to lose
  * An obstacle
  * A scoring system

With the techniques above, you have everything you need to create a playable game with these elements and more.

There are still heaps of code blocks in Scratch that I haven't mentioned. Keep exploring the possibilities. If you don't know what a code block does, put it in a script to see what happens!

Coming up with an idea for a game can be difficult. The great thing about the open source community, Scratchers included, is that we love to build upon one another's work. With that in mind, in the next article, I'll look at some of my favorite user-made projects for inspiration.
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://opensource.com/article/20/10/advanced-scratch
|
||||
|
||||
作者:[Jess Weichler][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://opensource.com/users/cyanide-cupcake
|
||||
[b]: https://github.com/lujun9972
|
||||
[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/binary_code_computer_screen.png?itok=7IzHK1nn (Binary code on a computer screen)
|
||||
[2]: https://opensource.com/article/20/9/scratch
|
||||
[3]: https://en.scratch-wiki.info/wiki/Categories
|
||||
[4]: https://scratch.mit.edu/
|
||||
[5]: https://opensource.com/sites/default/files/uploads/codekids3_1.png (Loops in Scratch)
|
||||
[6]: https://creativecommons.org/licenses/by-sa/4.0/
|
||||
[7]: https://opensource.com/sites/default/files/uploads/codekids3_2.png (Conditional statement blocks in Scratch)
|
||||
[8]: https://en.scratch-wiki.info/wiki/Blocks#Sensing_blocks
|
||||
[9]: https://en.scratch-wiki.info/wiki/Blocks#Operators_blocks
|
||||
[10]: https://opensource.com/sites/default/files/uploads/codekids3_3.png (Diamond-shaped blocks in Scratch)
|
||||
[11]: https://en.scratch-wiki.info/wiki/Making_Sprites_Detect_and_Sense_Other_Sprites
|
||||
[12]: https://opensource.com/sites/default/files/uploads/codekids3_4.png (Collision-detection script in Scratch)
|
||||
[13]: https://opensource.com/sites/default/files/uploads/codekids3_5.png (Variables in Scratch)
|
||||
[14]: https://opensource.com/sites/default/files/uploads/codekids3_6.png (Variables in Scratch)
|
||||
[15]: https://en.scratch-wiki.info/wiki/Motion_Blocks
|
||||
[16]: https://opensource.com/sites/default/files/uploads/codekids3_7.png (Coordinates in Scratch)
|
@ -0,0 +1,63 @@
|
||||
[#]: collector: (lujun9972)
|
||||
[#]: translator: ( )
|
||||
[#]: reviewer: ( )
|
||||
[#]: publisher: ( )
|
||||
[#]: url: ( )
|
||||
[#]: subject: (Start using virtual tables in Apache Cassandra 4.0)
|
||||
[#]: via: (https://opensource.com/article/20/10/virtual-tables-apache-cassandra)
|
||||
[#]: author: (Ben Bromhead https://opensource.com/users/ben-bromhead)
|
||||
|
||||
Start using virtual tables in Apache Cassandra 4.0
|
||||
======
|
||||
What they are and how to use them.
|
||||
![Computer laptop in space][1]
|
||||
|
||||
Among the [many additions][2] in the recent [Apache Cassandra 4.0 beta release][3], virtual tables is one that deserves some attention.
|
||||
|
||||
In previous versions of Cassandra, users needed access to Java Management Extensions ([JMX][4]) to examine Cassandra details such as running compactions, clients, metrics, and a variety of configuration settings. Virtual tables remove these challenges. Cassandra 4.0 beta enables users to query those details and data as Cassandra Query Language (CQL) rows from a read-only system table.
|
||||
|
||||
Here is how the JMX-based mechanism in previous Cassandra versions worked. Imagine a user wants to check on the compaction status of a particular node in a cluster. The user first has to establish a JMX connection to run `nodetool compactionstats` on the node. This requirement immediately presents the user with a few complications. Is the user's client configured for JMX access? Are the Cassandra nodes and firewall configured to allow JMX access? Are the proper measures for security and auditing prepared and in place? These are only some of the concerns users had to contend with when dealing with JMX in previous versions of Cassandra.
|
||||
|
||||
With Cassandra 4.0, virtual tables make it possible for users to query the information they need by utilizing their previously configured driver. This change removes all overhead associated with implementing and maintaining JMX access.
|
||||
|
||||
Cassandra 4.0 creates two new keyspaces to help users leverage virtual tables: `system_views` and `system_virtual_schema`. The `system_views` keyspace contains all the valuable information that users seek, usefully stored in a number of tables. The `system_virtual_schema` keyspace, as the name implies, stores all necessary schema information for those virtual tables.
|
||||
|
||||
![system_views and system_virtual_schema keyspaces and tables][5]
|
||||
|
||||
(Ben Bromhead, [CC BY-SA 4.0][6])
|
||||
|
||||
It's important to understand that the scope of each virtual table is restricted to its node. Any query of virtual tables will return data that is valid only for the node that acts as its coordinator, regardless of consistency. To make this requirement easier to work with, support has been added to several drivers to specify the coordinator node for these queries (the Python, DataStax Java, and other drivers now offer this support).
|
||||
|
||||
To illustrate, examine this `sstable_tasks` virtual table. This virtual table displays all operations on [SSTables][7], including compactions, cleanups, upgrades, and more.
|
||||
|
||||
![Querying the sstable_tasks virtual table][8]
|
||||
|
||||
(Ben Bromhead, [CC BY-SA 4.0][6])
|
||||
|
||||
If a user were to run `nodetool compactionstats` in a previous Cassandra version, this is the same type of information that would be displayed. Here, the query finds that the node currently has one active compaction. It also displays its progress and its keyspace and table. Thanks to the virtual table, a user can gather this information quickly, and just as efficiently gain the insight needed to correctly diagnose the cluster's health.
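
As a sketch of what this can look like from application code (not taken from the article), the DataStax Python driver can pin queries to a single node, which matters because a virtual table only describes its coordinator. The contact address is a placeholder, and the column names are assumptions based on the 4.0 beta:

```
from cassandra.cluster import Cluster, ExecutionProfile, EXEC_PROFILE_DEFAULT
from cassandra.policies import WhiteListRoundRobinPolicy

# Pin all queries to one node (the address is a placeholder for your own node)
profile = ExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy(["10.0.0.1"]))
cluster = Cluster(["10.0.0.1"], execution_profiles={EXEC_PROFILE_DEFAULT: profile})
session = cluster.connect()

# Ask that node about its SSTable operations, e.g., running compactions
for row in session.execute("SELECT * FROM system_views.sstable_tasks"):
    print(row.keyspace_name, row.table_name, row.kind, row.progress, row.total)
```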
|
||||
|
||||
To be clear, Cassandra 4.0 doesn't eliminate the need for JMX access: JMX is still the only option for querying some metrics. That said, users will welcome the ability to pull key cluster metrics simply by using CQL. Thanks to the convenience afforded by virtual tables, users may be able to reinvest time and resources previously devoted to JMX tools into Cassandra itself. Client-side tooling should also begin to leverage the advantages offered by virtual tables.
|
||||
|
||||
If you are interested in the Cassandra 4.0 beta release and its virtual tables feature, [try it out][3].
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://opensource.com/article/20/10/virtual-tables-apache-cassandra
|
||||
|
||||
作者:[Ben Bromhead][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://opensource.com/users/ben-bromhead
|
||||
[b]: https://github.com/lujun9972
|
||||
[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/computer_space_graphic_cosmic.png?itok=wu493YbB (Computer laptop in space)
|
||||
[2]: https://www.instaclustr.com/apache-cassandra-4-0-beta-released/
|
||||
[3]: https://cassandra.apache.org/download/
|
||||
[4]: https://en.wikipedia.org/wiki/Java_Management_Extensions
|
||||
[5]: https://opensource.com/sites/default/files/uploads/cassandra_virtual-tables.png (system_views and system_virtual_schema keyspaces and tables)
|
||||
[6]: https://creativecommons.org/licenses/by-sa/4.0/
|
||||
[7]: https://cassandra.apache.org/doc/latest/architecture/storage_engine.html#sstables
|
||||
[8]: https://opensource.com/sites/default/files/uploads/cassandra_virtual-tables_sstable_tasks.png (Querying the sstable_tasks virtual table)
|
@ -0,0 +1,199 @@
|
||||
[#]: collector: (lujun9972)
|
||||
[#]: translator: ( )
|
||||
[#]: reviewer: ( )
|
||||
[#]: publisher: ( )
|
||||
[#]: url: ( )
|
||||
[#]: subject: (Design and document APIs using an open source cross-platform tool)
|
||||
[#]: via: (https://opensource.com/article/20/10/spec-first-development-apis)
|
||||
[#]: author: (Greg Schier https://opensource.com/users/gregschier)
|
||||
|
||||
Design and document APIs using an open source cross-platform tool
|
||||
======
|
||||
Insomnia Designer makes spec-first development more accessible by
|
||||
providing collaborative tools to organize, maintain, and validate API
|
||||
specs.
|
||||
![Computer laptop in space][1]
|
||||
|
||||
In the world of software-as-a-service (SaaS) and service-based architectures, it's not uncommon for companies to maintain dozens or even hundreds of APIs, often spanning multiple teams, programming languages, and environments. This variability makes it extremely difficult to see what's happening at a high level to prevent changes from having negative impacts.
|
||||
|
||||
It's estimated that 40% of large enterprises struggle with challenges to secure, scale, or ensure performance for APIs. Because of this, more and more companies are choosing to adopt a "spec-first development" approach by defining and documenting APIs in an external format like [OpenAPI][2]. Storing these documents together in a central location makes it much easier to design, discuss, and approve changes _before_ implementation.
|
||||
|
||||
In this tutorial, you'll use the recently released [Insomnia Designer][3] to document an API, explore it, and propose a change using a spec-first approach. Designer is a cross-platform, open source REST client that builds on top of Insomnia Core—a popular app for interacting with HTTP and GraphQL APIs—aiming to make spec-first development more accessible by providing collaborative tools to organize, maintain, and validate API specs. In essence, Core is best for exploring and debugging APIs while Designer is best for designing and documenting them.
|
||||
|
||||
In this how-to, you'll use the [Open Library API][4] as a base to have working examples to play with. You'll create a minimal OpenAPI spec to document the APIs, use Insomnia Designer to test and verify that what you've done is correct, and then make some design changes to the API using a spec-first approach.
|
||||
|
||||
### The spec-first workflow
|
||||
|
||||
Before you begin, you should understand the steps necessary to adopt a spec-first workflow. In spec-first development, a specification can be in one of two states:
|
||||
|
||||
* **Published spec:** A specification that describes a currently published API exactly
|
||||
* **Proposal spec:** A draft specification that contains changes that need to be implemented
|
||||
|
||||
|
||||
|
||||
From this information, you can define a workflow for making changes to an API:
|
||||
|
||||
1. Start with the published specification for the API
|
||||
2. Make changes to the specification to add or modify behavior
|
||||
3. Review the proposal spec to ensure the design is sufficient
|
||||
4. Implement changes in code to match the proposal
|
||||
5. Publish the proposal spec along with the API
|
||||
|
||||
|
||||
|
||||
Now that you understand the workflow for what you are trying to accomplish, open Insomnia Designer and start trying it out.
|
||||
|
||||
### Define the initial specification
|
||||
|
||||
Since you don't yet have a published specification for the Open Library API, you need to define one.
|
||||
|
||||
Start by creating a new blank document from the **Create** menu, give it a name, then click the document to enter **Design View**. From here, you can start editing your spec.
|
||||
|
||||
![Create a new document][5]
|
||||
|
||||
(Greg Schier, [CC BY-SA 4.0][6])
|
||||
|
||||
The OpenAPI spec is most commonly written in [YAML][7] format and requires four top-level blocks to get started: `openapi`, `info`, `servers`, and `paths`. The following example defines each of these blocks with helpful comments to describe the purpose of each. Also, the `paths` block defines a route for `GET /recentchanges.json`:
|
||||
|
||||
|
||||
```
|
||||
# Specify that your document is the OpenAPI 3 format
|
||||
openapi: 3.0.0
|
||||
|
||||
# Define high-level metadata for the API
|
||||
info:
|
||||
version: 1.0.0
|
||||
title: Open Library API
|
||||
description: Open Library has a RESTful API
|
||||
|
||||
# Specify the base URL the API can be accessed from
|
||||
servers:
|
||||
- url: http://openlibrary.org
|
||||
|
||||
# Define operations for the API. This will be where most
|
||||
# of the work is done. The first route you'll be defining
|
||||
# is `GET /recentchanges.json`
|
||||
paths:
|
||||
/recentchanges.json:
|
||||
get:
|
||||
summary: Recent Changes
|
||||
```
|
||||
|
||||
OpenAPI provides much more than what's visible here, such as the ability to define authentication, response formats, reusable components, and more.
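
For instance, here is a hedged sketch of what adding a `responses` block to this route could look like; it is illustrative only (the response fields are assumptions), and the tutorial's minimal spec above continues unchanged below:

```
paths:
  /recentchanges.json:
    get:
      summary: Recent Changes
      # Illustrative only: describe a successful JSON response
      responses:
        '200':
          description: A list of recent changes
          content:
            application/json:
              schema:
                type: array
                items:
                  type: object
```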
|
||||
|
||||
After copying the specification above into Insomnia Designer, you'll see three columns:
|
||||
|
||||
1. **Navigation sidebar (left):** Nested menu to make navigating larger documents easier
|
||||
2. **Spec editor (middle):** Text editor for editing the YAML document
|
||||
3. **Documentation preview (right):** Generated documentation to preview the specification
|
||||
|
||||
|
||||
|
||||
![Insomnia Designer UI with three columns][8]
|
||||
|
||||
(Greg Schier, [CC BY-SA 4.0][6])
|
||||
|
||||
Feel free to modify different parts of the specification to see what happens. As a safeguard, Insomnia Designer alerts you when you've done something wrong. For example, if you accidentally delete the colon on line 18, an error panel will display below the editor.
|
||||
|
||||
![Insomnia Designer UI error message][9]
|
||||
|
||||
(Greg Schier, [CC BY-SA 4.0][6])
|
||||
|
||||
Now that you have defined a specification, you can verify that your definition is correct by switching to **Debug** mode and sending a real request to the API. In Debug mode, you can see a single route was generated for the `GET /recentchanges.json` endpoint. Click the **Send** button beside the URL to execute the request and render the response in the right panel.
|
||||
|
||||
![Checking response in Insomnia Designer][10]
|
||||
|
||||
(Greg Schier, [CC BY-SA 4.0][6])
|
||||
|
||||
There you have it! You've successfully verified that the API specification you created matches the production API. Now you can move to the next step in the spec-first development workflow and propose a change.
|
||||
|
||||
### Create a proposal specification
|
||||
|
||||
According to the workflow outlined above, changes made to your API should first be defined in the specification. This has a number of benefits:
|
||||
|
||||
* Specifications can be checked into a central source-code repository
|
||||
* Changes are easy to review and approve
|
||||
* APIs are defined in a single, consistent format
|
||||
* Unnecessary code changes are avoided
|
||||
|
||||
|
||||
|
||||
Go ahead and propose a change to your API. While in Debug mode, I noticed the API returned hundreds of results. To improve performance and usability, it would be useful to limit the number of results returned to a specific amount. A common way of doing this is to accept a `limit` parameter in the query section of the URL, so go ahead and modify your specification to add a `limit` parameter.
|
||||
|
||||
In OpenAPI, you can define this by adding a `parameters` block to the route:
|
||||
|
||||
|
||||
```
|
||||
# ...
|
||||
paths:
|
||||
/recentchanges.json:
|
||||
get:
|
||||
summary: Recent Changes
|
||||
|
||||
# Add parameter to limit the number of results
|
||||
parameters:
|
||||
- name: limit
|
||||
in: query
|
||||
description: Limit number of results
|
||||
required: true
|
||||
schema:
|
||||
type: integer
|
||||
example: 1
|
||||
```
|
||||
|
||||
You can verify you defined it correctly by expanding the route within the preview and inspecting the parameters.
|
||||
|
||||
![Verifying spec definition in Insomnia][11]
|
||||
|
||||
(Greg Schier, [CC BY-SA 4.0][6])
|
||||
|
||||
### Review and implement the proposal
|
||||
|
||||
Now that you have created a proposal spec, you can have your team review and approve the changes. Insomnia Designer provides the ability to [sync API specifications to source control][12], allowing teams to review and approve changes to API specs the same way they do with source code.
|
||||
|
||||
For example, you might commit and push your proposed spec to a new branch in GitHub and create a pull request to await approval.
|
||||
|
||||
Because this is a tutorial on spec-first development, you won't implement the proposal yourself. The parameter you added is already supported by the API, however, so for the purpose of this article, use your imagination and pretend that your team has implemented the change.
|
||||
|
||||
### Verify the updated specification
|
||||
|
||||
Once the proposal has been implemented and deployed, you can switch to Debug mode, which will regenerate the requests based on your changes, and again verify that the spec matches the production API. To ensure the new query param is being sent, click the **Query** tab within Debug mode and observe that the `limit` parameter is set to your example value of `1`.
|
||||
|
||||
Once you send the request, you can verify that it returns only a single result. Change the `limit` to a different value or disable the query parameter (using the checkbox) to further verify things work as expected.
|
||||
|
||||
![Verifying things work as expected in Insomnia Designer][13]
|
||||
|
||||
(Greg Schier, [CC BY-SA 4.0][6])
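
If you prefer to double-check outside Designer, the same request can be spot-checked from a terminal; the endpoint and parameter come straight from the spec above:

```
curl "http://openlibrary.org/recentchanges.json?limit=1"
```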
|
||||
|
||||
### The power of spec-first development
|
||||
|
||||
This tutorial walked through a simplified example of spec-first development. In it, you created an OpenAPI specification, verified the specification matched the implementation, and simulated what it's like to propose a behavior change.
|
||||
|
||||
For a single API as simple as this demo, it may be difficult to see the full benefit of spec-first development. However, imagine being a product owner in a large organization managing hundreds of production APIs. Having well-documented specifications, accessible from a central location like Insomnia Designer, allows anyone within the organization to quickly get up to speed on any API.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://opensource.com/article/20/10/spec-first-development-apis
|
||||
|
||||
作者:[Greg Schier][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://opensource.com/users/gregschier
|
||||
[b]: https://github.com/lujun9972
|
||||
[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/computer_space_graphic_cosmic.png?itok=wu493YbB (Computer laptop in space)
|
||||
[2]: https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.2.md
|
||||
[3]: https://insomnia.rest/products/designer
|
||||
[4]: https://openlibrary.org/developers/api
|
||||
[5]: https://opensource.com/sites/default/files/uploads/insomnia_newdocument.png (Create a new document)
|
||||
[6]: https://creativecommons.org/licenses/by-sa/4.0/
|
||||
[7]: https://yaml.org/
|
||||
[8]: https://opensource.com/sites/default/files/uploads/insomnia_columns.png (Insomnia Designer UI with three columns)
|
||||
[9]: https://opensource.com/sites/default/files/uploads/insomnia_error.png (Insomnia Designer UI error message)
|
||||
[10]: https://opensource.com/sites/default/files/uploads/insomnia_response.png (Checking response in Insomnia Designer)
|
||||
[11]: https://opensource.com/sites/default/files/uploads/insomnia_verifydefinition.png (Verifying spec definition in Insomnia)
|
||||
[12]: https://support.insomnia.rest/article/96-git-sync
|
||||
[13]: https://opensource.com/sites/default/files/uploads/insomnia_limit.png (Verifying things work as expected in Insomnia Designer)
|
@ -0,0 +1,123 @@
|
||||
[#]: collector: (lujun9972)
|
||||
[#]: translator: ( )
|
||||
[#]: reviewer: ( )
|
||||
[#]: publisher: ( )
|
||||
[#]: url: ( )
|
||||
[#]: subject: (How to Clear Apt Cache and Reclaim Precious Disk Space)
|
||||
[#]: via: (https://itsfoss.com/clear-apt-cache/)
|
||||
[#]: author: (Abhishek Prakash https://itsfoss.com/author/abhishek/)
|
||||
|
||||
How to Clear Apt Cache and Reclaim Precious Disk Space
|
||||
======
|
||||
|
||||
How do you clear the apt cache? You simply use this [apt-get command][1] option:
|
||||
|
||||
```
|
||||
sudo apt-get clean
|
||||
```
|
||||
|
||||
But there is more to cleaning apt cache than just running the above command.
|
||||
|
||||
In this tutorial, I’ll explain what the apt cache is, why it is used, why you would want to clean it, and what else you should know about purging the apt cache.
|
||||
|
||||
I am going to use Ubuntu here for reference but since this is about apt, it is applicable to [Debian][2] and other Debian and Ubuntu-based distributions like Linux Mint, Deepin and more.
|
||||
|
||||
### What is apt cache? Why is it used?
|
||||
|
||||
When you install a package using apt-get or the [apt command][3] (or DEB packages in the software center), the apt [package manager][4] downloads the package and its dependencies in .deb format and keeps them in the /var/cache/apt/archives folder.
|
||||
|
||||
![][5]
|
||||
|
||||
While downloading, apt keeps the deb package in the /var/cache/apt/archives/partial directory. When the deb package is downloaded completely, it is moved to the /var/cache/apt/archives directory.
|
||||
|
||||
Once the deb files for the package and its dependencies are downloaded, your system [installs the package from these deb files][6].
|
||||
|
||||
Now you see the use of the cache? The system needs a place to keep the package files before installing them. If you are aware of the [Linux directory structure][7], you will understand that /var/cache is the appropriate location here.
|
||||
|
||||
#### Why keep the cache after installing the package?
|
||||
|
||||
The downloaded deb files are not removed from the directory immediately after the installation is completed. If you remove a package and reinstall it, your system will look for the package in the cache and get it from here instead of downloading it again (as long as the package version in the cache is the same as the version in remote repository).
|
||||
|
||||
This is much quicker. You can try this on your own and see how long a program takes to install the first time, remove it, and install it again. You can [use the time command to find out how long a command takes to complete][8]: _**time sudo apt install package_name**_.
|
||||
|
||||
I couldn’t find anything concrete on the cache retention policy, so I cannot say how long Ubuntu keeps the downloaded packages in the cache.
|
||||
|
||||
#### Should you clean apt cache?
|
||||
|
||||
It depends on you. If you are running out of disk space on root, you could clean apt cache and reclaim the disk space. It is one of the [several ways to free up disk space on Ubuntu][9].
|
||||
|
||||
Check how much space the cache takes with the [du command][10]:
|
||||
|
||||
![][11]
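
The screenshot above was likely produced with an invocation along these lines (the exact command is an assumption, since the article shows only its output):

```
sudo du -sh /var/cache/apt
```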
|
||||
|
||||
Sometimes this can run into hundreds of megabytes, and this space can be crucial if you are running a server.
|
||||
|
||||
#### How to clean apt cache?
|
||||
|
||||
If you want to clear the apt cache, there is a dedicated command to do that. So don’t go about manually deleting the cache directory. Simply use this command:
|
||||
|
||||
```
|
||||
sudo apt-get clean
|
||||
```
|
||||
|
||||
This will remove the content of the /var/cache/apt/archives directory (except the lock file). Here’s a dry run (simulation) of what the apt-get clean command deletes:
|
||||
|
||||
![][12]
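
If you want to reproduce such a dry run yourself, apt-get's simulation flag can be combined with clean:

```
sudo apt-get clean --dry-run
```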
|
||||
|
||||
There is another command that deals with cleaning the apt cache:
|
||||
|
||||
```
|
||||
sudo apt-get autoclean
|
||||
```
|
||||
|
||||
Unlike clean, autoclean only removes the packages that can no longer be downloaded from the repositories.
|
||||
|
||||
Suppose you installed package xyz. Its deb files remain in the cache. If there is now a new version of xyz package available in the repository, this existing xyz package in the cache is now outdated and useless. The autoclean option will delete such useless packages that cannot be downloaded anymore.
|
||||
|
||||
#### Is it safe to delete apt cache?
|
||||
|
||||
![][13]
|
||||
|
||||
Yes. It is completely safe to clear the cache created by apt. It won’t negatively impact the performance of the system. If you reinstall a package, it may take a bit longer to download, but that’s about it.
|
||||
|
||||
Again, use the apt-get clean command. It is quicker and easier than manually deleting the cache directory.
|
||||
|
||||
You may also use graphical tools like [Stacer][14] or [Bleachbit][15] for this purpose.
|
||||
|
||||
#### Conclusion
|
||||
|
||||
At the time of writing this article, the newer apt command has no dedicated built-in option for this. However, for backward compatibility, _**apt clean**_ can still be run (it essentially runs apt-get clean underneath). Please refer to this article to [learn the difference between apt and apt-get][16].
|
||||
|
||||
I hope you find this explanation of the apt cache interesting. It is not essential knowledge, but knowing these little things makes you more knowledgeable about your Linux system.
|
||||
|
||||
I welcome your feedback and suggestions in the comment section.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://itsfoss.com/clear-apt-cache/
|
||||
|
||||
作者:[Abhishek Prakash][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://itsfoss.com/author/abhishek/
|
||||
[b]: https://github.com/lujun9972
|
||||
[1]: https://itsfoss.com/apt-get-linux-guide/
|
||||
[2]: https://www.debian.org/
|
||||
[3]: https://itsfoss.com/apt-command-guide/
|
||||
[4]: https://itsfoss.com/package-manager/
|
||||
[5]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2020/10/apt-get-clean-cache.png?resize=800%2C470&ssl=1
|
||||
[6]: https://itsfoss.com/install-deb-files-ubuntu/
|
||||
[7]: https://linuxhandbook.com/linux-directory-structure/
|
||||
[8]: https://linuxhandbook.com/time-command/
|
||||
[9]: https://itsfoss.com/free-up-space-ubuntu-linux/
|
||||
[10]: https://linuxhandbook.com/find-directory-size-du-command/
|
||||
[11]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2020/10/apt-cache-archive-size.png?resize=800%2C233&ssl=1
|
||||
[12]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2020/10/apt-get-clean-ubuntu.png?resize=800%2C339&ssl=1
|
||||
[13]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2020/10/Clear-Apt-Cache.png?resize=800%2C450&ssl=1
|
||||
[14]: https://itsfoss.com/optimize-ubuntu-stacer/
|
||||
[15]: https://itsfoss.com/use-bleachbit-ubuntu/
|
||||
[16]: https://itsfoss.com/apt-vs-apt-get-difference/
|
@ -0,0 +1,106 @@
|
||||
[#]: collector: (lujun9972)
|
||||
[#]: translator: ( )
|
||||
[#]: reviewer: ( )
|
||||
[#]: publisher: ( )
|
||||
[#]: url: ( )
|
||||
[#]: subject: (Protect your network with open source tools)
|
||||
[#]: via: (https://opensource.com/article/20/10/apache-security-tools)
|
||||
[#]: author: (Chantale Benoit https://opensource.com/users/chantalebenoit)
|
||||
|
||||
Protect your network with open source tools
|
||||
======
|
||||
Apache Syncope and Metron can help you secure your network against
|
||||
unauthorized access and data loss.
|
||||
![A lock on the side of a building][1]
|
||||
|
||||
System integrity is essential, especially when you're charged with safeguarding other people's personal details on your network. It's critical that system administrators are familiar with security tools, whether their purview is a home, a small business, or an organization with hundreds or thousands of employees.
|
||||
|
||||
### How cybersecurity works
|
||||
|
||||
Cybersecurity involves securing networks against unauthorized access. However, there are many attack vectors out there that most people don't consider. The cliché of a lone hacker manually dueling with firewall rules until they gain access to a network is popular—but wildly inaccurate. Security breaches happen through automation, malware, phishing, ransomware, and more. You can't directly fight every attack as it happens, and you can't count on every computer user to exercise common sense. Therefore, you have to design a system that resists intrusion and protects users against outside attacks as much as it protects them from their own mistakes.
|
||||
|
||||
The advantage of open source security tools is that they keep vulnerabilities transparent. They give full visibility into their codebase and are supported by a global community of experts working together to create strong, tried-and-tested code.
|
||||
|
||||
With so many domains needing protection, there's no single cybersecurity solution that fits every situation, but here are two that you should consider.
|
||||
|
||||
### Apache Syncope
|
||||
|
||||
[Apache Syncope][2] is an open source system for managing digital identities in an enterprise environment. From focusing on identity lifecycle management and identity storage to provisioning engines and accessing management capabilities, Apache Syncope is a comprehensive identity management solution. It also provides monitoring and security features for third-party applications.
|
||||
|
||||
Apache Syncope synchronizes users, groups, and other objects. _Users_ represent the buildup of virtual identities and account information fragmented across external resources. _Groups_ are entities on external resources that support the concept of LDAP or Active Directory. _Objects_ are entities such as printers, services, and sensors. It also does full reconciliation and live synchronization from external resources with workflow-based approval.
|
||||
|
||||
#### Third-party applications
|
||||
|
||||
Apache Syncope also exposes a fully compliant [JAX-RS][3] 2.0 [RESTful][4] interface to enable third-party applications written in any programming language. These applications consume identity management services, such as:
|
||||
|
||||
* **Logic:** Syncope implements business logic that can be triggered through REST services and controls additional features such as notifications, reports, and auditing.
|
||||
* **Provisioning:** It manages the internal and external representation of users, groups, and objects through workflow and specific connectors.
|
||||
* **Workflow:** Syncope supports Activiti or Flowable [business process management (BPM)][5] workflow engines and allows defining new and custom workflows when needed.
|
||||
* **Persistence:** It manages all data, such as users, groups, attributes, and resources, at a high level using a standard [JPA 2.0][6] approach. The data is further persisted to an underlying database, such as internal storage.
|
||||
* **Security:** Syncope defines a fine-grained set of entitlements, which are granted to administrators and enable the implementation of delegated administration scenarios.
|
||||
|
||||
|
||||
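
Because the interface is plain REST, any HTTP client can consume these services. The following is a minimal, hedged Python sketch; the base URL, credentials, and response shape are hypothetical and depend entirely on your deployment:

```
import requests

BASE = "http://localhost:9080/syncope/rest"  # hypothetical deployment URL

# List users through the RESTful interface (credentials are placeholders)
resp = requests.get(f"{BASE}/users",
                    auth=("admin", "password"),
                    headers={"Accept": "application/json"})
resp.raise_for_status()
for user in resp.json().get("result", []):
    print(user.get("username"))
```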
|
||||
#### Syncope extensions
|
||||
|
||||
Apache Syncope's features can be enhanced with [extensions][7], which add a REST endpoint and manage the persistence of additional entities, tweak the provisioning layer, and add features to the user interface.
|
||||
|
||||
Some popular extensions include:
|
||||
|
||||
* **Swagger UI** works as a user interface for Syncope RESTful services.
|
||||
* **SSO support** provides OpenID Connect and SAML 2.0 access to administrative or end-user web interfaces.
|
||||
* **Apache Camel provisioning manager** delegates the execution of the provisioning process to a group of Apache Camel routes. It can be dynamically changed at the runtime through the REST interfaces or the administrative console, and modifications are also instantly available for processing.
|
||||
* **Elasticsearch** provides an alternate internal search engine for users, groups, and objects through an external [Elasticsearch][8] cluster.
|
||||
|
||||
|
||||
|
||||
### Apache Metron
|
||||
|
||||
Security information and event management ([SIEM][9]) gives admins insights into the activities happening within their IT environment. It combines the concepts of security event management (SEM) with security information management (SIM) into one functionality. SIEM collects security data from network devices, servers, and domain controllers, then aggregates and analyzes the data to detect malicious threats and payloads.
|
||||
|
||||
[Apache Metron][10] is an advanced security analytics framework that detects cyber anomalies, such as phishing activity and malware infections. Further, it enables organizations to take corrective measures to counter the identified anomalies.
|
||||
|
||||
It also interprets and normalizes security events into a standard JSON format, which makes it easier to analyze security events, such as:
|
||||
|
||||
* An employee flagging a suspicious email
|
||||
* An authorized or unauthorized software download by an employee to a company device
|
||||
* A security lapse due to a server outage
|
||||
|
||||
|
||||
|
||||
Apache Metron provides security alerts, labeling, and data enrichment. It can also store and index security events. Its four key capabilities are:
|
||||
|
||||
* **Security data lake:** Metron is a cost-effective way to store and combine a wide range of business and security data. The security data lake provides the amount of data required to power discovery analytics. It also provides a mechanism to search and query for operational analytics.
|
||||
* **Pluggable framework:** It provides a rich set of parsers for common security data sources such as pcap, NetFlow, Zeek (formerly Bro), Snort, FireEye, and Sourcefire. You can also add custom parsers for new data sources, including enrichment services for more contextual information, to the raw streaming data. The pluggable framework provides extensions for threat-intel feeds and lets you customize security dashboards. Machine learning and other models can also be plugged into real-time streams and provide extensibility.
|
||||
* **Threat detection platform:** It uses machine learning algorithms to detect anomalies in a system. It also helps analysts extract and reconstruct full packets to understand the attacker's identity, what data was leaked, and where the data was sent.
|
||||
* **Incident response application:** This refers to evolved SIEM capabilities, including alerting, threat intel frameworks, and agents to ingest data sources. Incident response applications include packet replay utilities, evidence storage, and hunting services commonly used by security operations center analysts.
|
||||
|
||||
|
||||
|
||||
### Security matters
|
||||
|
||||
Incorporating open source security tools into your IT infrastructure is imperative to keep your organization safe and secure. Open source tools, like Syncope and Metron from Apache, can help you identify and counter security threats. Learn to use them well, file bugs as you find them, and help the open source community protect the world's data.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://opensource.com/article/20/10/apache-security-tools
|
||||
|
||||
作者:[Chantale Benoit][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://opensource.com/users/chantalebenoit
|
||||
[b]: https://github.com/lujun9972
|
||||
[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/BUSINESS_3reasons.png?itok=k6F3-BqA (A lock on the side of a building)
|
||||
[2]: https://syncope.apache.org/
|
||||
[3]: https://jax-rs.github.io/apidocs/2.0/
|
||||
[4]: https://www.redhat.com/en/topics/api/what-is-a-rest-api
|
||||
[5]: https://www.redhat.com/en/topics/automation/what-is-business-process-management
|
||||
[6]: http://openjpa.apache.org/openjpa-2.0.0.html
|
||||
[7]: http://syncope.apache.org/docs/2.1/reference-guide.html#extensions
|
||||
[8]: https://opensource.com/life/16/6/overview-elastic-stack
|
||||
[9]: https://en.wikipedia.org/wiki/Security_information_and_event_management
|
||||
[10]: http://metron.apache.org/
|
@ -0,0 +1,104 @@
|
||||
[#]: collector: (lujun9972)
|
||||
[#]: translator: ( )
|
||||
[#]: reviewer: ( )
|
||||
[#]: publisher: ( )
|
||||
[#]: url: ( )
|
||||
[#]: subject: (Top 5 open source alternatives to Google Analytics)
|
||||
[#]: via: (https://opensource.com/article/18/1/top-5-open-source-analytics-tools)
|
||||
[#]: author: (Scott Nesbitt https://opensource.com/users/scottnesbitt)
|
||||
|
||||
Top 5 open source alternatives to Google Analytics
|
||||
======
|
||||
These five versatile web analytics tools provide valuable insights on
|
||||
your customers and site visitors while keeping you in control.
|
||||
![Analytics: Charts and Graphs][1]
|
||||
|
||||
If you have a website or run an online business, collecting data on where your visitors or customers come from, where they land on your site, and where they leave _is vital._ Why? That information can help you better target your products and services, and beef up the pages that are turning people away.
|
||||
|
||||
To gather that kind of information, you need a web analytics tool.
|
||||
|
||||
Many businesses of all sizes use Google Analytics. But if you want to keep control of your data, you need a tool that _you_ can control. You won’t get that from Google Analytics. Luckily, Google Analytics isn’t the only game on the web.
|
||||
|
||||
Here are five open source alternatives to Google Analytics.
|
||||
|
||||
### Matomo
|
||||
|
||||
Let’s start with the open source application that rivals Google Analytics for functions: [Matomo][2] (formerly known as Piwik). Matomo does most of what Google Analytics does, and chances are it offers the features that you need.
|
||||
|
||||
Those features include metrics on the number of visitors hitting your site, data on where they come from (both on the web and geographically), the pages from which they leave, and the ability to track search engine referrals. Matomo also offers many reports, and you can customize the dashboard to view the metrics that you want to see.
|
||||
|
||||
To make your life easier, Matomo integrates with more than 65 content management, e-commerce, and online forum systems, including WordPress, Magento, Joomla, and vBulletin, using plugins. For any others, you can simply add a tracking code to a page on your site.
|
||||
|
||||
You can [test-drive][3] Matomo or use a [hosted version][4].
|
||||
|
||||
### Open Web Analytics
|
||||
|
||||
If there’s a close second to Matomo in the open source web analytics stakes, it’s [Open Web Analytics][5]. In fact, it includes key features that either rival Google Analytics or leave it in the dust.
|
||||
|
||||
In addition to the usual raft of analytics and reporting functions, Open Web Analytics tracks where on a page, and on what elements, visitors click; provides [heat maps][6] that show where on a page visitors interact the most; and even does e-commerce tracking.
|
||||
|
||||
Open Web Analytics has a [WordPress plugin][7] and can [integrate with MediaWiki][8] using a plugin. Or you can add a snippet of [JavaScript][9] or [PHP][10] code to your web pages to enable tracking.
|
||||
|
||||
Before you [download][11] the Open Web Analytics package, you can [give the demo a try][12] to see if it's right for you.
|
||||
|
||||
### AWStats
|
||||
|
||||
Web server log files provide a rich vein of information about visitors to your site, but tapping into that vein isn't always easy. That's where [AWStats][13] comes to the rescue. While it lacks the most modern look and feel, AWStats more than makes up for that with the breadth of data it can present.
|
||||
|
||||
That information includes the number of unique visitors, how long those visitors stay on the site, the operating system and web browsers they use, the size of a visitor's screen, and the search engines and search terms people use to find your site. AWStats can also tell you the number of times your site is bookmarked, track the pages where visitors enter and exit your sites, and keep a tally of the most popular pages on your site.
|
||||
|
||||
These features only scratch the surface of AWStats's capabilities. It also works with FTP and email logs, as well as [syslog][14] files. AWStats can give you deep insight into what's happening on your website using data that stays under your control.
|
||||
|
||||
### Countly
|
||||
|
||||
[Countly][15] bills itself as a "secure web analytics" platform. While I can't vouch for its security, Countly does a solid job of collecting and presenting data about your site and its visitors.
|
||||
|
||||
Heavily targeting marketing organizations, Countly tracks data that is important to marketers. That information includes site visitors' transactions, as well as which campaigns and sources led visitors to your site. You can also create metrics that are specific to your business. Countly doesn't forgo basic web analytics; it also keeps track of the number of visitors on your site, where they're from, which pages they visited, and more.
|
||||
|
||||
You can use the hosted version of Countly or [grab the source code][16] from GitHub and self-host the application. And yes, there are [differences between the hosted and self-hosted versions][17] of Countly.
|
||||
|
||||
### Plausible
|
||||
|
||||
[Plausible][18] is a newer kid on the open source analytics tools block. It’s lean, it’s fast, and it collects only a small amount of information: the number of unique visitors, the top pages they visited, the number of page views, the bounce rate, and referrers. Plausible is simple and very focused.
|
||||
|
||||
What sets Plausible apart from its competitors is its heavy focus on privacy. The project creators state that the tool doesn’t collect or store any information about visitors to your website, which is particularly attractive if privacy is important to you. You can read more about that [here][19].
|
||||
|
||||
There’s a [demo instance][20] that you can check out. After that, you can either [self-host][21] Plausible or sign up for a [paid, hosted account][22].
|
||||
|
||||
**Share your favorite open source web analytics tool with us in the comments.**
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://opensource.com/article/18/1/top-5-open-source-analytics-tools
|
||||
|
||||
作者:[Scott Nesbitt][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://opensource.com/users/scottnesbitt
|
||||
[b]: https://github.com/lujun9972
|
||||
[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/analytics-graphs-charts.png?itok=sersoqbV (Analytics: Charts and Graphs)
|
||||
[2]: https://matomo.org/
|
||||
[3]: https://demo.matomo.org/index.php?module=CoreHome&action=index&idSite=3&period=day&date=yesterday
|
||||
[4]: https://www.innocraft.cloud/
|
||||
[5]: http://www.openwebanalytics.com/
|
||||
[6]: http://en.wikipedia.org/wiki/Heat_map
|
||||
[7]: https://github.com/padams/Open-Web-Analytics/wiki/WordPress-Integration
|
||||
[8]: https://github.com/padams/Open-Web-Analytics/wiki/MediaWiki-Integration
|
||||
[9]: https://github.com/padams/Open-Web-Analytics/wiki/Tracker
|
||||
[10]: https://github.com/padams/Open-Web-Analytics/wiki/PHP-Invocation
|
||||
[11]: https://github.com/padams/Open-Web-Analytics
|
||||
[12]: http://demo.openwebanalytics.com/
|
||||
[13]: http://www.awstats.org
|
||||
[14]: https://en.wikipedia.org/wiki/Syslog
|
||||
[15]: https://count.ly/web-analytics
|
||||
[16]: https://github.com/Countly
|
||||
[17]: https://count.ly/pricing#compare-editions
|
||||
[18]: https://plausible.io
|
||||
[19]: https://plausible.io/data-policy
|
||||
[20]: https://plausible.io/plausible.io
|
||||
[21]: https://plausible.io/self-hosted-web-analytics
|
||||
[22]: https://plausible.io/register
|
@ -0,0 +1,106 @@
|
||||
[#]: collector: (lujun9972)
|
||||
[#]: translator: ( )
|
||||
[#]: reviewer: ( )
|
||||
[#]: publisher: ( )
|
||||
[#]: url: ( )
|
||||
[#]: subject: (5 ways organizations can lean into failure and transform it into success)
|
||||
[#]: via: (https://opensource.com/article/20/10/organizations-can-lean-failure)
|
||||
[#]: author: (Dawn Parzych https://opensource.com/users/dawnparzych)
|
||||
|
||||
5 ways organizations can lean into failure and transform it into success
|
||||
======
|
||||
Removing the fear of failure fosters innovation and open-mindedness,
|
||||
turning mistakes into opportunities for growth and improvement.
|
||||
![failure sign at a party, celebrating failure][1]
|
||||
|
||||
> "If failure is not an option, then neither is success."—Seth Godin
|
||||
|
||||
Success is something we all strive toward, but the road to success has twists and turns, which are also known as our failures. But instead of giving up when things don't go my way, I have learned to look at failures as learning opportunities. In fact, I now proudly admit that I'm a failure:
|
||||
|
||||
* I failed my driving test the first time I took it.
|
||||
* I have not gotten every job I've ever applied or interviewed for.
|
||||
* Most of the articles I've written have not gone viral.
|
||||
|
||||
|
||||
|
||||
As software engineers, our industry may be competitive, but we need to remove the stigma associated with failing. One way of doing that is to talk about our failures. The more we hear and talk about failures, the more acceptable it becomes. We need to think about failures as learning opportunities, not a time to shame and ridicule others (or ourselves).
|
||||
|
||||
When we have failed, or think we have made a mistake, most of us get that pit in the bottom of our stomach, and the negative self-talk can kick in almost instantly. This reaction occurs because there are often consequences for mistakes, which can impact us personally as well as the organizations we work for.
|
||||
|
||||
Let's consider a hypothetical example: A software engineer submits a pull request for a new feature. It gets merged and released to production. Something goes wrong, and the application begins throwing errors. Customers can't log in. Sadly this isn't the first time this has happened. Some customers are fed up with the application breaking and have canceled their contracts. The CTO is furious. They want to know who is responsible for this feature being released. The team is understaffed because several team members have left the company, and those that remain are [burned out][2]. The incident takes way longer than usual to be resolved, and everyone is grumpy at the end.
|
||||
|
||||
This worst-case scenario doesn't mean we should always play it safe and not take risks. We need to balance the risks with the rewards when making technical decisions at work and with the open source projects we work on. I like to visualize success and failure as two opposite banks of a river, connected by a bridge. That bridge is built out of knowledge, learning, and understanding. Failure is when we grow, but only if we're learning from those failures and have a good mixture of success sprinkled in.
|
||||
|
||||
### What is needed to accept failure?
|
||||
|
||||
Embracing failure from a personal and organizational point of view doesn't happen overnight. It takes both time and the right culture. Start with one of the five areas outlined below.
|
||||
|
||||
#### Have a growth mindset
|
||||
|
||||
In the book _Mindset_, Carol Dweck describes two types of mindsets: growth and fixed. A person with a fixed mindset believes in innate capabilities—you're either born with it, or you're not. Failure represents a limit on one's abilities.
|
||||
|
||||
A person with a growth mindset believes that individual effort and attitude determine one's abilities and that they can learn and improve as long as they put in the effort. Failure represents an opportunity to grow.
|
||||
|
||||
To encourage growth means to encourage failure. But saying "it's OK to fail" is not the same as creating a culture where it honestly is OK to fail. That requires psychological safety and a blameless culture.
|
||||
|
||||
#### Create a blameless culture
|
||||
|
||||
A blameless culture focuses on _where_ a system or a process failed, not on _who_ is responsible. If you spend time after an incident looking for and attributing root cause to human error, you are assigning blame. Instead, look for how things can be improved. What broke? How did it break? How can you prevent it in the future?
|
||||
|
||||
![Humorous quote about where outages come from][3]
|
||||
|
||||
Published with permission from Joshua Zimmerman (@TheJewberwocky)
|
||||
|
||||
#### Foster psychological safety
|
||||
|
||||
A blameless culture cannot exist without psychological safety. Psychological safety is the ability to show up to work without fear; you feel included in the whole, that it is safe to learn, safe to contribute, and safe to challenge the status quo without fear of consequences. Employees need to feel empowered to speak up if they see processes that need to be improved if they are concerned with security or lack of security procedures in place. They need to feel they won't be blamed for taking a risk where the end result wasn't quite what was expected.
|
||||
|
||||
One way to create a blameless culture and provide psychological safety is to eliminate the word "why" from your vocabulary. "Why" is used quite frequently; there are even problem-solving techniques called the ["5 Whys"][4]. The problem with "why" is that it is subtly decoded as "explain yourself." Having to answer "why" questions puts people on the defensive, and they spend time focusing on what the "right" answer is instead of answering honestly. Instead, switch to using "what" or "how" questions. "How" and "what" questions promote concrete action.
|
||||
|
||||
Consider the difference in these questions:
|
||||
|
||||
* Why did last night's release cause a series of exceptions?
|
||||
vs.
|
||||
* How were the exceptions triggered?
|
||||
|
||||
|
||||
|
||||
> "Recently, I was asked if I was going to fire an employee who made a mistake that cost the company $600,000. 'No,' I replied, 'I just spent $600,000 training him. Why would I want somebody else to hire his experience?'"— Thomas J. Watson, Chairman & CEO of IBM, 1914-1956
|
||||
|
||||
#### Remember the importance of play
|
||||
|
||||
The final aspect of embracing failure is to make time to play. Play shapes our brains and fosters empathy. From a young age, we learn to play games, play on a sports team, play on our own, etc. Chances are you didn't always play on an undefeated sports team or win every single game you've ever played, but practice helped you get better over time no matter what. These experiences teach us to be comfortable with failure and to take it in stride with growth and improvement.
|
||||
|
||||
#### Experimentation leads to learning
|
||||
|
||||
Now that you're ready to embrace failure in your DevOps or engineering practices, where do you start? One of the first processes to look at is the build process. As you most likely know, builds can sometimes result in failures. Changing your build process to include [progressive delivery][5] techniques helps you release features in a controlled manner as opposed to a big bang. Using ring deployments or a canary launch, you can see how a feature performs for a small group of users. If there is a failure, the blast radius has been minimized.
|
||||
|
||||
Another process that can help you learn about your applications and systems is experimentation. Experiments provide a number of learning opportunities, and they don't always go as expected. Some failures can turn into wild successes; [Post-it Notes][6] are a famous example. Experiments allow us to test a hypothesis and learn new things. Experimenting with software can look like running an A/B test to see which look and feel leads to greater engagement, or collecting page load time metrics to see if adding pagination to search results negatively impacts performance.
|
||||
|
||||
A popular type of experimentation is to run a chaos day or game day. With a game day, you purposely cause a system or application to fail. This combines the notion of play, failure, and learning. During this controlled experiment, you learn how your system behaves during failure scenarios. The knowledge gained during game days can be used to put safety measures in place or define processes for what to do when a specific failure occurs.
|
||||
|
||||
### Lean into failure
|
||||
|
||||
Failure can be scary; it is seen as a negative and something we want to avoid. I call on you to change your perspective on this. If you're not failing, you're not learning.
|
||||
|
||||
I challenge you to normalize failure and help reduce the stigma by sharing a failure of yours. Share your failures in the comments below or tag me on social media.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://opensource.com/article/20/10/organizations-can-lean-failure
|
||||
|
||||
作者:[Dawn Parzych][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://opensource.com/users/dawnparzych
|
||||
[b]: https://github.com/lujun9972
|
||||
[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/fail_failure_celebrate.png?itok=LbvDAEZF (failure sign at a party, celebrating failure)
|
||||
[2]: https://opensource.com/article/19/11/burnout-open-source-communities
|
||||
[3]: https://opensource.com/sites/default/files/uploads/quote-failure-opensource_0.png
|
||||
[4]: https://en.wikipedia.org/wiki/Five_whys
|
||||
[5]: https://launchdarkly.com/blog/all-the-canaries-lived-its-time-to-adopt-progressive-delivery/
|
||||
[6]: https://www.ideatovalue.com/insp/nickskillicorn/2017/04/true-story-post-notes-almost-failed/
|
235
sources/tech/20201009 My open source video game for Open Jam.md
Normal file
235
sources/tech/20201009 My open source video game for Open Jam.md
Normal file
@ -0,0 +1,235 @@
|
||||
[#]: collector: (lujun9972)
|
||||
[#]: translator: ( )
|
||||
[#]: reviewer: ( )
|
||||
[#]: publisher: ( )
|
||||
[#]: url: ( )
|
||||
[#]: subject: (My open source video game for Open Jam)
|
||||
[#]: via: (https://opensource.com/article/20/10/open-source-game)
|
||||
[#]: author: (Klaatu https://opensource.com/users/klaatu)
|
||||
|
||||
My open source video game for Open Jam
|
||||
======
|
||||
Step through a game programmed for Open Jam 2020 to get tips for your
|
||||
own design.
|
||||
![Gaming with penguin pawns][1]
|
||||
|
||||
This year, I joined in on the [Open Jam][2], a "game jam" in which programmers around the world dedicate a weekend to create open source games. The jam is essentially an excuse to spend a weekend coding, and the majority of the games that come out of the challenge are small distractions rather than something you're likely to play for hours on end. But they're fun, diverse, and open source, and that's a pretty good feature list for a game.
|
||||
|
||||
The game I submitted is [Unveil][3], a calming puzzle game in which the player must first discover the goal, and then work to achieve it with the greatest number of points. Because part of the game is the discovery process, I won't reveal any more about the gameplay than that.
|
||||
|
||||
![Unveil game][4]
|
||||
|
||||
(Klaatu, [CC BY-SA 4.0][5])
|
||||
|
||||
The whole game is only 338 lines, written in [Python][6] using the [Pygame][6] module. It's, of course, open source, and part of it may serve as a good introduction to a few programming concepts that used to confound me (two-dimensional arrays being the most significant). For simple game design, a two-dimensional array is very useful because so many enduring games are built on them. You can probably think of several, although if you don't know what a two-dimensional array is, you may not realize it.
|
||||
|
||||
### Arrays in gaming
|
||||
|
||||
An array is a collection of data. It can be listed across a page, or along an X-axis in mathematical terms. For instance:
|
||||
|
||||
|
||||
```
|
||||
artichoke, lettuce, carrot, aubergine, potato
|
||||
```
|
||||
|
||||
An array may also be represented as a list or a Y-axis:
|
||||
|
||||
|
||||
```
|
||||
artichoke
|
||||
lettuce
|
||||
carrot
|
||||
aubergine
|
||||
potato
|
||||
```
|
||||
|
||||
This is a one-dimensional array. A two-dimensional array extends on both the X-axis and Y-axis.
|
||||
|
||||
Here's a common two-dimensional array seen in the world of board games:
|
||||
|
||||
![Chess][7]
|
||||
|
||||
(Klaatu, [CC BY-SA 4.0][5])
|
||||
|
||||
Yes, two-dimensional arrays are used as the board for chess, draughts, noughts and crosses (also called tic-tac-toe), [RPG battle maps][8], [minesweeper][9], Carcassonne, Forbidden Island, and in slightly modified forms, games like Monopoly and even [Ur][10] (literally the oldest game we know of).
|
||||
|
||||
If you can comfortably create a two-dimensional array, you have a great start at programming any number of games.
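If the idea still feels abstract, here is a minimal sketch in Python (my own illustration, not taken from the game's code) of a two-dimensional array built as a list of lists:

```
# A board is a list of rows; each row is itself a list of cells.
board = []
for row in range(8):
    board.append(["empty"] * 8)

# Address any square with a row index and a column index.
board[0][4] = "king"
print(board[0])
```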
|
||||
|
||||
### Creating tiles in Pygame
|
||||
|
||||
If you're not familiar with Python, you should take some time to review this [Python (and Pygame) introductory series][11]. If you feel confident enough to translate code to other libraries, though, there's nothing specific to Pygame in the "important" parts of this code (the array constructor), so you can use any library or language.
|
||||
|
||||
For simplicity, I'll call the individual squares in the game board array _tiles_. To construct a two-dimensional array, or game board as the case may be, you must have tiles. In object-oriented programming, you consider each component as a unique object based upon a template (or _class_, in programming terminology). So, before creating the game board, you must first create the infrastructure for the board's building blocks: tiles.
|
||||
|
||||
First, set up a simple Pygame project, creating a display (your window into the game world), a group to represent the game board, and a few standard variables:
|
||||
|
||||
|
||||
```
|
||||
import pygame
|
||||
|
||||
pygame.init()
|
||||
game_world = pygame.display.set_mode((960, 720))
|
||||
game_board = pygame.sprite.Group()
|
||||
|
||||
running = True
|
||||
black = (0, 0, 0)
|
||||
white = (255, 255, 255)
|
||||
red = (245, 22, 22)
|
||||
world_x = 960
|
||||
world_y = 720
|
||||
```
|
||||
|
||||
Next, create a `Tile` class to establish the template from which each tile gets cast. The first function initializes a new tile when one is created and gives it the necessary basic fields: width, height, an image (actually, I just filled it with the color white), and whether or not it's active. In this case, I use `is_pressed`, as if the tile is a button, because that's what it'll look like when the code is finished: when the user clicks a tile, it changes color as if it were a button being lit up. For other purposes, this state needn't be visible. In chess, for example, you might instead have a field to represent whether a tile is occupied and, if so, by what kind of chess piece.
|
||||
|
||||
|
||||
```
|
||||
class Tile(pygame.sprite.Sprite):
|
||||
def __init__(self, x, y, w, h, c):
|
||||
pygame.sprite.Sprite.__init__(self)
|
||||
self.image = pygame.Surface((w, h))
|
||||
self.image.fill(c)
|
||||
self.rect = self.image.get_rect()
|
||||
self.rect.x = x
|
||||
self.rect.y = y
|
||||
self.is_pressed = False
|
||||
```
|
||||
|
||||
The second function is an update function. Specifically, it checks whether a tile has been clicked by the user. This requires mouse coordinates, which you'll get later in the code during the event loop.
|
||||
|
||||
For this demonstration, I'll make this function toggle the tile: it fills with red when clicked (entering the `is_pressed` state) and back to white when clicked again:
|
||||
|
||||
|
||||
```
|
||||
def was_clicked(self, mouse):
|
||||
if self.rect.collidepoint(mouse) and not self.is_pressed:
|
||||
self.image.fill(red)
|
||||
self.is_pressed = True
|
||||
elif self.rect.collidepoint(mouse) and self.is_pressed:
|
||||
self.image.fill(white)
|
||||
self.is_pressed = False
|
||||
else:
|
||||
return False
|
||||
```
|
||||
|
||||
### Main loop
|
||||
|
||||
This demo's main loop is simple. It checks for two kinds of input: a quit signal and a mouse down (click) event. When it detects a mouse click, it calls the `was_clicked` function to react (filling it with red or white, depending on its current state).
|
||||
|
||||
Finally, the screen fills with black, the game board state is updated, and the screen is redrawn:
|
||||
|
||||
|
||||
```
|
||||
"""
|
||||
holding place for game board construction
|
||||
"""
|
||||
|
||||
while running:
|
||||
for event in pygame.event.get():
|
||||
if event.type == pygame.QUIT:
|
||||
running = False
|
||||
|
||||
elif event.type == pygame.MOUSEBUTTONDOWN:
|
||||
for hitbox in game_board:
|
||||
hitbox.was_clicked(event.pos)
|
||||
|
||||
game_world.fill(black)
|
||||
game_board.update()
|
||||
game_board.draw(game_world)
|
||||
pygame.display.update()
|
||||
|
||||
pygame.quit()
|
||||
```
|
||||
|
||||
### Board construction
|
||||
|
||||
To build a two-dimensional array, you must first decide how many tiles you want in both directions. I'll use eight for this example because that's how a chessboard is constructed, but you could use fewer or more. You could even accept arguments at launch to define the array's size with options such as `--row` and `--column` (a sketch follows the code block below):
|
||||
|
||||
|
||||
```
|
||||
rows = 8
|
||||
columns = 8
|
||||
```
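As a sketch of that optional command-line idea (my own addition; the game itself hard-codes the values above, so the `--row` and `--column` flags are hypothetical), Python's standard `argparse` module could supply the two values:

```
import argparse

# Hypothetical flags; the actual game does not define these.
parser = argparse.ArgumentParser(description="Set the game board size")
parser.add_argument("--row", type=int, default=8, help="number of rows")
parser.add_argument("--column", type=int, default=8, help="number of columns")
args = parser.parse_args()

rows = args.row
columns = args.column
```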
|
||||
|
||||
Because you don't know the size of the board in advance, you must calculate the width of the columns and the height of the rows based on the size of your display. I also include one pixel of padding between each tile because, without a gap, it looks like one big block of color:
|
||||
|
||||
|
||||
```
|
||||
column_width = world_x / columns
|
||||
row_height = world_y / rows
|
||||
pad = 1
|
||||
```
|
||||
|
||||
Laying out tiles across the display is simple. Of course, this isn't the goal, as it only draws along the X-axis, but it's a good start:
|
||||
|
||||
|
||||
```
|
||||
j = 0
|
||||
|
||||
while j < rows:
|
||||
tile = Tile(j * column_width, row_height, column_width - pad, row_height - pad, white)
|
||||
game_board.add(tile)
|
||||
j += 1
|
||||
```
|
||||
|
||||
The idea is that the variable `j` starts at 0, so the first tile is placed from 0 to `column_width`, less the value of the padding. Then the variable `j` is incremented to 1, so the next tile is placed at 1 times the value of `column_width`, and so on.
|
||||
|
||||
You can run that code to see the partial success it renders. What this solution obviously lacks is any awareness of further rows.
|
||||
|
||||
Use a second counter variable to track rows:
|
||||
|
||||
|
||||
```
|
||||
j = 0
|
||||
k = 0
|
||||
|
||||
while j < rows:
|
||||
while k < columns:
|
||||
tile = Tile(k * column_width, j * row_height, column_width - pad, row_height - pad, white)
|
||||
game_board.add(tile)
|
||||
k += 1
|
||||
j += 1
|
||||
k = 0
|
||||
```
|
||||
|
||||
In this code block, which achieves the desired result, each tile is positioned in a space determined by the current value of either `j` or `k`.
|
||||
|
||||
The `k` variable is incremented within its loop so that each tile is progressively placed along the X-axis.
|
||||
|
||||
The `j` variable is incremented outside the nested loop so that everything gets moved down one row.
|
||||
|
||||
The `k` variable is then set to 0 so that when the inner loop starts again, everything is shunted back to the far left of the screen.
|
||||
|
||||
![2D array][12]
|
||||
|
||||
(Klaatu, [CC BY-SA 4.0][5])
|
||||
|
||||
### Easy arrays
|
||||
|
||||
Creating a grid can seem mathematically and syntactically intensive, but with this example, plus a little thought about the result you want, you can generate grids at will. The only thing left for you to do now is to create a game around it. That's what I did, and you're welcome to play it by downloading it from its home on [Itch.io][3] or from its source repository on [git.nixnet.xyz][13]. Enjoy!
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://opensource.com/article/20/10/open-source-game
|
||||
|
||||
作者:[Klaatu][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://opensource.com/users/klaatu
|
||||
[b]: https://github.com/lujun9972
|
||||
[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/gaming_grid_penguin.png?itok=7Fv83mHR (Gaming with penguin pawns)
|
||||
[2]: http://openjam.io
|
||||
[3]: https://notklaatu.itch.io/unveil
|
||||
[4]: https://opensource.com/sites/default/files/uploads/unveil-2.jpg (Unveil game)
|
||||
[5]: https://creativecommons.org/licenses/by-sa/4.0/
|
||||
[6]: https://www.python.org/
|
||||
[7]: https://opensource.com/sites/default/files/uploads/chess.jpg (Chess)
|
||||
[8]: https://opensource.com/article/18/5/maptool
|
||||
[9]: https://opensource.com/article/19/9/advanced-bash-building-minesweeper
|
||||
[10]: https://otagomuseum.nz/athome/the-royal-game-of-ur
|
||||
[11]: https://opensource.com/article/17/10/python-101
|
||||
[12]: https://opensource.com/sites/default/files/uploads/2darray.jpg (2D array)
|
||||
[13]: https://git.nixnet.xyz/klaatu/unveil
|
@ -0,0 +1,114 @@
|
||||
[#]: collector: (lujun9972)
|
||||
[#]: translator: ( )
|
||||
[#]: reviewer: ( )
|
||||
[#]: publisher: ( )
|
||||
[#]: url: ( )
|
||||
[#]: subject: (Robust and Race-free Server Logging using Named Pipes)
|
||||
[#]: via: (https://theartofmachinery.com/2020/10/10/logging_with_named_pipes.html)
|
||||
[#]: author: (Simon Arneaud https://theartofmachinery.com)
|
||||
|
||||
Robust and Race-free Server Logging using Named Pipes
|
||||
======
|
||||
|
||||
If you do any server administration work, you’ll have worked with log files. And if your servers need to be reliable, you’ll know that log files are a common source of problems, especially when you need to rotate or ship them (which is practically always). In particular, moving files around causes race conditions.
|
||||
|
||||
Thankfully, there are better ways. With named pipes, you can have a simple and robust logging framework, with no race conditions, and without patching your servers to support some network logging protocol.
|
||||
|
||||
### The problems with rotating log files
|
||||
|
||||
First, let’s talk about the problems. Race conditions are generally a problem with popular file-based logging setups, whether you’re rotating logs into archival storage, or shipping them to a remote log processing stack, or whatever. To keep things concrete, though, let me talk about [logrotate][1], just because it’s a popular tool.
|
||||
|
||||
Say you have a log file at `/var/log/foo`. It gets pretty big, and you want to process the logs periodically and start with a new, empty file. So you (or your distro maintainers) set up logrotate with various rules about when to rotate the file.
|
||||
|
||||
By default, logrotate will rename the file (to something like `/var/log/foo.1`) and create a new `/var/log/foo` to write to. That (mostly) works for software that runs intermittently (such as a package manager that does software updates). But it won’t do any good if the log file is generated by a long-running server. The server only uses the filename when it opens the file; after that it just keeps writing to its open file descriptor. That means it will keep writing to the old file (now named `/var/log/foo.1`), and the new `/var/log/foo` file will stay empty.
|
||||
|
||||
To handle this use-case, logrotate supports another mode: `copytruncate`. In this mode, instead of renaming, logrotate will copy the contents of `/var/log/foo` to an archival file, and then truncate the original file to zero length. As long as the server has the log file open in append mode, it will automatically write new logs to the start of the file, without needing to detect the truncation and do a file seek (the kernel handles that).
|
||||
|
||||
That `copytruncate` mode creates a race condition, though. Any log lines that are written after the copy but before the truncation will get destroyed. Actually, you tend to get the same race condition even with the default move-and-create mode. That’s because there’s not much point just splitting up the logs into multiple files. Most systems are configured to do something like compress the old log file, but ultimately you need to delete the old, uncompressed data, which creates the same race as truncating. (In practice, this race isn’t so likely for occasional log writers, like package managers, and the `delay` flag to logrotate makes it rarer, albeit by making the log handling a bit more complicated.)
|
||||
|
||||
Some servers, like [Nginx][2], support a modification of the default logrotate mode:
|
||||
|
||||
1. Rename the old file
|
||||
2. Create the new file
|
||||
3. (New step) notify the server that it needs to reopen its log file.
|
||||
|
||||
|
||||
|
||||
This works (as long as the logs processor doesn’t delete the old file before the server has finished reopening), but it requires special support from the server, and you’re out of luck with most software. There’s a lot of software out there, and log file handling just isn’t interesting enough to get high on the to-do list. This approach also only works for long-running servers.
|
||||
|
||||
I think this is a good point to stop and take a step back. Having multiple processes juggle log files around on disk without any synchronisation is just an inherently painful way to do things. It causes bugs and makes logging stacks complicated ([here’s just one of many examples][3]). One alternative is to use some network protocol like MQTT or networked syslog, but, realistically, most servers won’t support the one you want. And they shouldn’t have to — log files are a great interface for log writers.
|
||||
|
||||
That’s okay because *nix “everything is a file” lets us easily get a file interface on the writer side, with a streaming interface on the reader side.
|
||||
|
||||
### Named pipes 101
|
||||
|
||||
Maybe you’ve seen pipes in pipelines like this:
|
||||
|
||||
```
|
||||
$ sort user_log.txt | uniq
|
||||
```
|
||||
|
||||
The pipe connecting `sort` and `uniq` is a temporary, anonymous communication channel that `sort` writes to and `uniq` reads from. Named pipes are less common, but they’re also communication channels. The only difference is that they persist on the filesystem as if they were files.
|
||||
|
||||
Open up a terminal and `cd` into some temporary working directory. The following creates a named pipe and uses `cat` to open a writer:
|
||||
|
||||
```
|
||||
$ mkfifo p
|
||||
$ # This cat command will sit waiting for input
|
||||
$ cat > p
|
||||
```
|
||||
|
||||
Leave that `cat` command waiting, and open up another terminal in the same directory. In this terminal, start your reader:
|
||||
|
||||
```
|
||||
$ # This will sit waiting for data to come over the pipe
|
||||
$ cat p
|
||||
```
|
||||
|
||||
Now as you type things into the writer end, you’ll see them appear in the reader end. `cat` will use line buffering in interactive mode, so data will get transferred every time you start a new line.
|
||||
|
||||
`cat` doesn’t have to know anything about pipes for this to work — the pipe acts like a file as long as you just naïvely read or write to it. But if you check, you’ll see the data isn’t stored anywhere. You can pump gigabytes through a pipe without filling up any disk space. Once the data has been read once, it’s lost. (You can have multiple readers, but only one will receive any buffer-load of data.)
|
||||
|
||||
Another thing that makes pipes useful for communication is their buffering and blocking. You can start writing before any readers open the pipe, and data gets temporarily buffered inside the kernel until a reader comes along. If the reader starts first, its read will block, waiting for data from the writer. (The writer will also block if the pipe buffer gets full.) If you try the two-terminal experiment again with a regular file, you’ll see that the reader `cat` will eagerly read all the data it can and then exit.
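Here is the same experiment sketched in Python instead of `cat` (my own illustration; the pipe path is arbitrary), which makes the blocking behaviour easy to see:

```
import os

path = "/tmp/demo.fifo"  # arbitrary path for the experiment
if not os.path.exists(path):
    os.mkfifo(path)

# open() blocks here until some process opens the pipe for reading;
# the written line then sits in the kernel's pipe buffer, not on disk.
with open(path, "w") as pipe:
    pipe.write("one log line\n")
```

Run `cat /tmp/demo.fifo` from another terminal to unblock the writer and receive the line.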
|
||||
|
||||
### An annoying problem and a simple solution
|
||||
|
||||
Maybe you’re seeing how named pipes can help with logging: Servers can write to log “files” that are actually named pipes, and a logging stack can read log data directly from the named pipe without letting a single line fall onto the floor. You do whatever you want with the logs, without any racey juggling of files on disk.
|
||||
|
||||
There’s one annoying problem: the writer doesn’t need a reader to start writing, but if a reader opens the pipe and then closes it, the writer gets a `SIGPIPE` (“broken pipe”), which will kill it by default. (Try killing the reader `cat` while typing things into the writer to see what I mean.) Similarly, a reader can read without a writer, but if a writer opens the pipe and then closes it, that will be treated like an end of file. Although the named pipe persists on disk, it isn’t a stable communication channel if log writers and log readers can restart (as they will on a real server).
|
||||
|
||||
There’s a solution that’s a bit weird but very simple. Multiple processes can open the pipe for reading and writing, and the pipe will only close when _all_ readers or _all_ writers close it. All we need for a stable logging pipe is a daemon that holds the named pipe open for both reading and writing, without doing any actual reading or writing. I set this up on my personal server last year, and I wrote [a tiny, zero-config program to act as my pipe-holding daemon][4]. It just opens every file in its current working directory for both reading and writing. I run it from a directory that has symbolic links to every named pipe in my logging stack. The program runs in a loop that ends in a `wait()` for a `SIGHUP`. If I ever update the symlinks in the directory, I give the daemon a `kill -HUP` and it reopens them all. Sure, it could do its own directory watching, but the `SIGHUP` approach is simple and predictable, and the whole thing works reliably. Thanks to the pipe buffer, log writers and log readers can be shut down and restarted independently, any time, without breakage.
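The author's daemon is the linked program, but the core trick is small enough to sketch in Python (a rough illustration under two assumptions: it runs from the directory of pipes, and it relies on Linux's behaviour that opening a FIFO with `O_RDWR` never blocks):

```
import os
import signal

def open_all():
    # Hold every file in the working directory open for both reading
    # and writing, so no pipe ever sees "all writers closed" (EOF for
    # readers) or "all readers closed" (SIGPIPE for writers).
    return [os.open(name, os.O_RDWR) for name in os.listdir(".")]

fds = open_all()

def reopen(signum, frame):
    global fds
    for fd in fds:
        os.close(fd)
    fds = open_all()  # pick up added or removed symlinks

signal.signal(signal.SIGHUP, reopen)  # `kill -HUP` triggers a re-scan
while True:
    signal.pause()  # sleep until a signal arrives
```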
|
||||
|
||||
My server uses the [s6 supervision suite][5] to manage daemons, so I have s6-log reading from each logging pipe. The bottom part of the [s6-log documentation page][6] has some good insights into the problems with popular logging systems, and good ideas about better ways to do things.
|
||||
|
||||
### Imagine: a world without log rotation
|
||||
|
||||
Strictly speaking, named pipes aren’t necessary for race-free logs processing. The s6 suite encourages writing logs to some file descriptor (like standard error), and letting the supervision suite make sure those file descriptors point to something useful. However, the named pipe approach adds a few benefits:
|
||||
|
||||
* It doesn’t require any co-ordination between writer and reader
|
||||
* It integrates nicely with the software we have today
|
||||
* It gives things meaningful names (rather than `/dev/fd/4`)
|
||||
|
||||
|
||||
|
||||
I’ve worked with companies that spend about as much on their logging stacks as on their serving infrastructure, and, no, “we do logs processing” isn’t in their business models. Of course, log rotation and log shipping aren’t the only problems to blame, but it feels so wrong that we’ve made logs so complicated. If you work on any logging system, consider if you really need to juggle log files around. You could be helping to make the world a better place.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://theartofmachinery.com/2020/10/10/logging_with_named_pipes.html
|
||||
|
||||
作者:[Simon Arneaud][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://theartofmachinery.com
|
||||
[b]: https://github.com/lujun9972
|
||||
[1]: https://github.com/logrotate/logrotate
|
||||
[2]: https://www.nginx.com/resources/wiki/start/topics/examples/logrotation/
|
||||
[3]: https://community.splunk.com/t5/Getting-Data-In/Why-copytruncate-logrotate-does-not-play-well-with-splunk/td-p/196112
|
||||
[4]: https://gitlab.com/sarneaud/fileopenerd
|
||||
[5]: http://www.skarnet.org/software/s6/index.html
|
||||
[6]: http://www.skarnet.org/software/s6/s6-log.html
|
@ -0,0 +1,172 @@
|
||||
[#]: collector: (lujun9972)
|
||||
[#]: translator: ( )
|
||||
[#]: reviewer: ( )
|
||||
[#]: publisher: ( )
|
||||
[#]: url: ( )
|
||||
[#]: subject: (Build a Kubernetes Minecraft server with Ansible's Helm modules)
|
||||
[#]: via: (https://opensource.com/article/20/10/kubernetes-minecraft-ansible)
|
||||
[#]: author: (Jeff Geerling https://opensource.com/users/geerlingguy)
|
||||
|
||||
Build a Kubernetes Minecraft server with Ansible's Helm modules
|
||||
======
|
||||
Deploy a Minecraft server into a Kubernetes cluster with Ansible's new
|
||||
collections.
|
||||
![Ship captain sailing the Kubernetes seas][1]
|
||||
|
||||
One of the best outcomes of Ansible's [move towards content collections][2] is that it spreads the thousands of modules in [Ansible][3]'s "core" repository into many more independent repositories. This means movement on issues and modules that had long been delayed (often due to the [sheer volume of issues and pull requests][4] in the repo) can progress more rapidly.
|
||||
|
||||
Obviously, not all modules will get the same love and appreciation as others—that's the way open source works: more popular things flourish, while others may languish a bit—but one bright example of the positive impact has been the [Kubernetes][5] collection's ability to incorporate some long-awaited [Helm][6] modules.
|
||||
|
||||
Thanks especially to the work of [LucasBoisserie][7], three new Helm modules were merged into the Kubernetes collection:
|
||||
|
||||
* helm
|
||||
* helm_info
|
||||
* helm_repository
|
||||
|
||||
|
||||
|
||||
Ansible has long had a [helm module][8], but it was fairly broken for a long time, only worked with older versions of Helm, and was slated for deprecation in Ansible 2.14. That version of the module will still work the same in the regular community distribution of Ansible, as it's now been moved to the [community.general][9] collection.
|
||||
|
||||
But if you want to use these new modules to automate your Helm deployments using the Kubernetes container orchestration system, you can do it with the [community.kubernetes][10] collection.
|
||||
|
||||
### What is Helm?
|
||||
|
||||
Helm says it is "the best way to find, share, and use software built for Kubernetes."
|
||||
|
||||
There are currently dozens of ways to deploy software into Kubernetes and [OpenShift][11] clusters (you can even do it using Ansible natively with the [k8s module][12]), but Helm is often the easiest onramp to Kubernetes deployments, especially when you're starting out on your Kubernetes journey.
|
||||
|
||||
The way Helm works is that people maintain "charts," which are templates describing "how to deploy application XYZ" into Kubernetes. Charts can have "values" that override the chart's default settings for a particular deployment.
|
||||
|
||||
There are thousands of [charts on Helm Hub][13] you can use to install popular software. If your software is not included, you can build and host your own Helm chart repositories.
|
||||
|
||||
### What is Minecraft?
|
||||
|
||||
For a certain generation (or their parents), this question doesn't need an answer: [Minecraft][14] is the [best-selling video game of all time][15], and it appeals to an extremely wide audience because there are so many different ways you can play it.
|
||||
|
||||
I remember spending an hour here or there during my post-college years tending to a farm that I built in my little virtual Minecraft world. Minecraft can now run on almost any computing device with a screen, and networked play has become very popular. To support this, the Minecraft team maintains a [Minecraft server][16] application you can run to play networked games with your friends.
|
||||
|
||||
### Where does Ansible fit in?
|
||||
|
||||
I like to think of Ansible as the "glue" that holds automation together. I previously wrote about [how Ansible is useful in a cloud-native environment][17], so I won't rehash why I use Ansible to manage my Kubernetes infrastructure.
|
||||
|
||||
In this article, I'll show you how to write a short Ansible playbook to manage the setup of Helm's Minecraft chart in a cluster. In a real-world infrastructure, this playbook would be one small part of a set of plays that:
|
||||
|
||||
* Build or configure a Kubernetes cluster
|
||||
* Deploy monitoring tools into the cluster
|
||||
* Deploy applications into the cluster
|
||||
|
||||
|
||||
|
||||
Before you can write the playbook, you have to install Ansible's official [Kubernetes collection][10]. You can do this either by requiring it in a **requirements.yml** file (which could be used by Ansible Tower to install the collection automatically) or by manually installing it:
|
||||
|
||||
|
||||
```
|
||||
ansible-galaxy collection install community.kubernetes
|
||||
```
|
||||
|
||||
Once you have the collection, it's time to write the playbook. To make it easy for you to view the code or download the file, I've posted my **[minecraft.yml][18]** playbook as a Gist on GitHub.
|
||||
|
||||
The playbook uses many of the Kubernetes collection's modules:
|
||||
|
||||
1. The `k8s` module creates a namespace, `minecraft`.
|
||||
2. The `helm_repository` module adds the `itzg` Helm repository, which contains the Minecraft Helm chart.
|
||||
3. The `helm` module deploys the chart and creates the Minecraft server instance.
|
||||
4. The `k8s_info` module retrieves the NodePort where Minecraft is running so that you can connect to it from the Minecraft client.
|
||||
|
||||
|
||||
|
||||
The playbook assumes you have a running Kubernetes or OpenShift cluster and a kubeconfig file that points to that cluster already. If not, create a Minikube cluster on your workstation:
|
||||
|
||||
1. Make sure you have [Minikube][19] installed.
|
||||
2. Run `minikube start`, and wait for the cluster to be created.
|
||||
|
||||
|
||||
|
||||
Make sure you have [Ansible][20] and [Helm][21] installed, then run the playbook:
|
||||
|
||||
|
||||
```
|
||||
ansible-playbook minecraft.yml
|
||||
```
|
||||
|
||||
After a few minutes, the Minecraft server will generate a spawn area and be ready for connections! The playbook should provide the Minecraft NodePort at the end of its output (e.g., Minecraft NodePort: 32393).
|
||||
|
||||
Get the IP address of your Minikube cluster with `minikube ip`, add the NodePort to it (in my case, 192.168.64.19:32393), then open up Minecraft and connect to it:
|
||||
|
||||
1. Click **Multiplayer**.
|
||||
2. Click **Direct Connection**.
|
||||
3. Enter the server address (the Minikube IP and Minecraft NodePort).
|
||||
4. Click **Join Server**.
|
||||
|
||||
|
||||
|
||||
And voila! You should be able to play around in the little virtual Minecraft world that's running on your very own Kubernetes cluster.
|
||||
|
||||
![Minecraft gameplay][22]
|
||||
|
||||
(Jeff Geerling, [CC BY-SA 4.0][23])
|
||||
|
||||
View the server logs with:
|
||||
|
||||
|
||||
```
|
||||
kubectl logs -f -n minecraft -l app=minecraft-minecraft
|
||||
```
|
||||
|
||||
In the logs, you can see that I was successful in finding many ways to die inside my little Minecraft world!
|
||||
|
||||
![Minecraft server logs][24]
|
||||
|
||||
(Jeff Geerling, [CC BY-SA 4.0][23])
|
||||
|
||||
### Take a step beyond
|
||||
|
||||
There are dozens of ways to deploy applications like a Minecraft server into a Kubernetes cluster. Luckily for us, Ansible already supports most of those options through its Kubernetes collection! And if you want to take a step beyond simple deployments and chart updates, you can use Ansible to build a [Kubernetes operator][25] with the Operator SDK—in fact, someone already made a [community operator][26] built with Ansible that runs a Minecraft server!
|
||||
|
||||
I was inspired to write this after using Ansible to manage a seven-node Kubernetes cluster built with Raspberry Pis. You can learn more about that in the [Turing Pi Cluster][27] GitHub repository.
|
||||
|
||||
* * *
|
||||
|
||||
If you want to learn more about Ansible, make sure to register for [AnsibleFest][28], a virtual experience on October 13-14.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://opensource.com/article/20/10/kubernetes-minecraft-ansible
|
||||
|
||||
作者:[Jeff Geerling][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://opensource.com/users/geerlingguy
|
||||
[b]: https://github.com/lujun9972
|
||||
[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/ship_captain_devops_kubernetes_steer.png?itok=LAHfIpek (Ship captain sailing the Kubernetes seas)
|
||||
[2]: https://github.com/ansible-collections/overview
|
||||
[3]: https://www.ansible.com/
|
||||
[4]: https://emeraldreverie.org/2020/03/02/collections-the-backlog-view/
|
||||
[5]: https://kubernetes.io/
|
||||
[6]: https://helm.sh/
|
||||
[7]: https://github.com/LucasBoisserie
|
||||
[8]: https://docs.ansible.com/ansible/2.9/modules/helm_module.html
|
||||
[9]: https://github.com/ansible-collections/community.general/blob/master/plugins/modules/cloud/misc/helm.py
|
||||
[10]: https://github.com/ansible-collections/community.kubernetes
|
||||
[11]: https://www.openshift.com/
|
||||
[12]: https://docs.ansible.com/ansible/latest/collections/community/kubernetes/k8s_module.html#ansible-collections-community-kubernetes-k8s-module
|
||||
[13]: https://hub.helm.sh/
|
||||
[14]: https://www.minecraft.net/
|
||||
[15]: https://en.wikipedia.org/wiki/List_of_best-selling_video_games#List
|
||||
[16]: https://www.minecraft.net/en-us/download/server/
|
||||
[17]: https://www.ansible.com/blog/how-useful-is-ansible-in-a-cloud-native-kubernetes-environment
|
||||
[18]: https://gist.github.com/geerlingguy/2f4b0c06b4b696c8983b82dda655adf3
|
||||
[19]: https://kubernetes.io/docs/tasks/tools/install-minikube/
|
||||
[20]: https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html
|
||||
[21]: https://helm.sh/docs/intro/install/
|
||||
[22]: https://opensource.com/sites/default/files/uploads/minecraft.png (Minecraft gameplay)
|
||||
[23]: https://creativecommons.org/licenses/by-sa/4.0/
|
||||
[24]: https://opensource.com/sites/default/files/uploads/serverlogs.png (Minecraft server logs)
|
||||
[25]: https://www.redhat.com/en/topics/containers/what-is-a-kubernetes-operator
|
||||
[26]: https://github.com/fabianvf/game-server-operator
|
||||
[27]: https://github.com/geerlingguy/turing-pi-cluster
|
||||
[28]: https://www.ansible.com/ansiblefest
|
@ -0,0 +1,186 @@
|
||||
[#]: collector: (lujun9972)
|
||||
[#]: translator: ( )
|
||||
[#]: reviewer: ( )
|
||||
[#]: publisher: ( )
|
||||
[#]: url: ( )
|
||||
[#]: subject: (Create an Ansible module for integrating your Google Calendar)
|
||||
[#]: via: (https://opensource.com/article/20/10/ansible-module-go)
|
||||
[#]: author: (Nicolas Leiva https://opensource.com/users/nicolas-leiva)
|
||||
|
||||
Create an Ansible module for integrating your Google Calendar
|
||||
======
|
||||
Learn how to write an Ansible module in Go to integrate Google Calendar
|
||||
into your automation workflow.
|
||||
![Business woman on laptop sitting in front of window][1]
|
||||
|
||||
In a [previous article][2], I explored how [Ansible][3] can integrate with Google Calendar for change management, but I didn't get into the details of the [Ansible module][4] that was built for this purpose. In this article, I will cover the nuts and bolts of it.
|
||||
|
||||
While most [Ansible modules][5] are written in [Python][6] (see [this example][7]), that's not the only option you have. You can use other programming languages if you prefer. And if you like [Go][8], this post is for you!
|
||||
|
||||
![Gopher illustration][9]
|
||||
|
||||
([Maria Letta's Free Gophers Pack][10], [Free Gophers License v1.0][11], modified with permission)
|
||||
|
||||
If you are new to Go, check out these [pointers to get started][12].
|
||||
|
||||
## Ansible and Go
|
||||
|
||||
There are at least four different ways that you can run a Go program from Ansible:
|
||||
|
||||
1. Install Go and run your Go code with the `go run` command from Ansible.
|
||||
2. Cross-compile your Go code for different platforms before execution. Then call the proper binary from Ansible, based on the facts you collect from the host.
|
||||
3. Run your Go code or compiled binary from a container with the `containers.podman` [collection][13]. Something along the lines of:

    ```
    - name: Run Go container
      podman_container:
        name: go_test_container
        image: golang
        command: go version
        rm: true
        log_options: "path={{ log_file }}"
    ```
|
||||
4. Create an [RPM][14] package of your Go code with something like [NFPM][15], and install it in the system of the target host. You can then call it with the Ansible modules [shell][16] or [command][17].
|
||||
|
||||
|
||||
|
||||
Running an RPM package or container is not Go-specific (cases 3 and 4), so I will focus on the first two options.
|
||||
|
||||
## Google Calendar API
|
||||
|
||||
You will need to interact with the [Google Calendar API][18], which provides code examples for different programming languages. The one for Go will be the base for your Ansible module.
|
||||
|
||||
The tricky part is [enabling the Calendar API][19] and downloading the credentials you generate in the [Google API Console][20] (`Credentials` > `+ CREATE CREDENTIALS` > `OAuth client ID` > `Desktop App`).
|
||||
|
||||
![Downloading credentials from Google API Console][21]
|
||||
|
||||
(Nicolas Leiva, [CC BY-SA 4.0][22])
|
||||
|
||||
The arrow shows where to download your OAuth 2.0 client credentials (JSON file) once you create them in [API Credentials][23].
|
||||
|
||||
## Calling the module from Ansible
|
||||
|
||||
The `calendar` module takes the `time` to validate as an argument. The example below provides the current time. You can typically get this from [Ansible facts][24] (`ansible_date_time`). The JSON output of the module is registered in a variable named `output` to be used in a subsequent task:
|
||||
```
- name: Check if timeslot is taken
  calendar:
    time: "{{ ansible_date_time.iso8601 }}"
  register: output
```
|
||||
You might wonder where the `calendar` module code lives and how Ansible executes it. Please bear with me for a moment; I'll get to this after I cover other pieces of the puzzle.
|
||||
|
||||
## Employ the time logic
|
||||
|
||||
With the Calendar API nuances out of the way, you can proceed to interact with the API and build a Go function to capture the module logic. The `time` is taken from the input arguments—in the playbook above—as the initial time (`min`) of the time window to validate (I arbitrarily chose a one-hour duration):
|
||||
```
func isItBusy(min string) (bool, error) {
    // ...
    // max -> min.Add(1 * time.Hour)
    max, err := maxTime(min)
    // ...
    srv, err := calendar.New(client)
    // ...
    freebusyRequest := calendar.FreeBusyRequest{
        TimeMin: min,
        TimeMax: max,
        Items:   []*calendar.FreeBusyRequestItem{&cal},
    }
    // ...
    freebusyRequestResponse, err := freebusyRequestCall.Do()
    // ...
    if len(freebusyRequestResponse.Calendars[name].Busy) == 0 {
        return false, nil
    }
    return true, nil
}
```
|
||||
It [sends a `FreeBusyRequest`][25] to Google Calendar with the time window's initial and finish time (`min` and `max`). It also creates a calendar [client][26] (`srv`) to authenticate the account correctly using the JSON file with the OAuth 2.0 client credentials. In return, you get a list of events during this time window.
|
||||
|
||||
If you get zero events, the function returns `busy=false`. However, if there is at least one event during this time window, it means `busy=true`. You can check out the [full code][27] in my GitHub repository.
|
||||
|
||||
## Process the input and create a response
|
||||
|
||||
Now, how does the Go code read the input arguments from Ansible and, in turn, generate a response that Ansible can process? The answer depends on whether you are running the [Go CLI][28] (command-line interface) or just executing a pre-compiled Go program binary (i.e., options 1 and 2 above).
|
||||
|
||||
Both options have their pros and cons. If you use the Go CLI, you can pass the arguments the way you prefer. However, to make it consistent with how it works for binaries you run from Ansible, both alternatives will read a JSON file in the examples presented here.
|
||||
|
||||
### Reading the arguments
|
||||
|
||||
As shown in the Go code snippet below, an input file is processed, and Ansible provides a path to it when it calls a binary.
|
||||
|
||||
The content of the file is unmarshaled into an instance (`moduleArgs`) of a previously defined struct (`ModuleArgs`). This is how you tell the Go code which data you expect to receive. This method enables you to gain access to the `time` specified in the playbook via `moduleArgs.Time`, which is then passed to the time logic function (`isItBusy`) for processing:
|
||||
```
// ModuleArgs are the module inputs
type ModuleArgs struct {
    Time string
}

func main() {
    // ...
    argsFile := os.Args[1]
    text, err := ioutil.ReadFile(argsFile)
    // ...
    var moduleArgs ModuleArgs
    err = json.Unmarshal(text, &moduleArgs)
    // ...
    busy, err := isItBusy(moduleArgs.Time)
    // ...
}
```
|
||||
### Generating a response
|
||||
|
||||
The values to return are assigned to an instance of a `Response` object. Ansible expects this response to include a couple of boolean flags (`Changed` and `Failed`). You can add any other field you need; in this case, a `Busy` boolean is carried to signal the result of the time logic function.
|
||||
|
||||
The response is marshaled into a message that you print out and Ansible can read:
|
||||
```
// Response are the values returned from the module
type Response struct {
    Msg     string `json:"msg"`
    Busy    bool   `json:"busy"`
    Changed bool   `json:"changed"`
    Failed  bool   `json:"failed"`
}

func returnResponse(r Response) {
    // ...
    response, err = json.Marshal(r)
    // ...
    fmt.Println(string(response))
    // ...
}
```
|
||||
You can check out the [full code][29] on GitHub.
|
||||
|
||||
## Execute a binary or Go code on the fly?
|
||||
|
||||
One of the cool things about Go is that you can cross-compile a Go program to run on different target operating systems and architectures. The binary files you compile can be executed in the target host without installing Go or any dependency.
|
||||
|
||||
This flexibility plays nicely with Ansible, which provides the target host details (`ansible_system` and `ansible_architecture`) via Ansible facts. In this example, the target architecture is fixed when compiling (`x86_64`), but binaries for macOS, Linux, and Windows are generated (via `make compile`). This produces the three files included in the [`library` folder][30] of the `go_role` role, named in the form `calendar_$system`:
|
||||
```
|
||||
|
||||
|
||||
⇨ tree roles/go_role/
|
||||
roles/go_role/
|
||||
├── library
|
||||
│ ├── calendar_darwin
|
||||
│ ├── calendar_linux
|
||||
│ ├── calendar_windows
|
||||
│ └── go_run
|
||||
└── tasks
|
||||
├── Darwin.yml
|
||||
├── Go.yml
|
||||
├── Linux.yml
|
||||
├── main.yml
|
||||
└── Win32NT.yml
|
||||
|
||||
```
|
||||
The [`go_role` role][31] that packages the `calendar`
|
@ -0,0 +1,103 @@
|
||||
[#]: collector: (lujun9972)
|
||||
[#]: translator: ( )
|
||||
[#]: reviewer: ( )
|
||||
[#]: publisher: ( )
|
||||
[#]: url: ( )
|
||||
[#]: subject: (Linux Jargon Buster: What is Display Manager in Linux?)
|
||||
[#]: via: (https://itsfoss.com/display-manager/)
|
||||
[#]: author: (Abhishek Prakash https://itsfoss.com/author/abhishek/)
|
||||
|
||||
Linux Jargon Buster: What is Display Manager in Linux?
|
||||
======
|
||||
|
||||
_**In this chapter of the Linux Jargon Buster, you’ll learn about the display manager in Linux. Is it part of the desktop environment? What does it do?**_
|
||||
|
||||
### What is display manager in Linux?
|
||||
|
||||
In simple words, a display manager is a program that provides graphical login capabilities for your Linux distribution. It controls user sessions and manages user authentication. The display manager starts the [display server][1] and loads the [desktop environment][2] right after you enter your username and password.
|
||||
|
||||
The display manager is often treated as synonymous with the login screen; it is the visible part of it, after all. However, the visible login screen, also called the greeter, is only one part of the display manager.
|
||||
|
||||
![Login screen is the visible part of a display manager][3]
|
||||
|
||||
Like [various desktop environments][4] and display servers, there are various display managers available as well. Let’s have a look at them.
|
||||
|
||||
### Different display managers
|
||||
|
||||
Some people think of the display manager as part of the desktop environment, but that’s not true. It is a separate program.
|
||||
|
||||
A desktop environment may recommend a certain display manager, but that doesn’t mean it won’t work with some other display manager. If you have ever installed more than one desktop environment on the same system, you’ll remember that the login screen (i.e., the display manager) lets you switch between desktop environments.
|
||||
|
||||
![A display manager can be used with various desktop environments][5]
|
||||
|
||||
Though the display manager is not part of the desktop environment itself, it is often developed by the same team as the desktop environment. It also becomes part of the desktop environment’s identity.
|
||||
|
||||
For example, the GNOME desktop environment develops GDM (GNOME Display Manager), and just by looking at the login screen, you would think of the GNOME desktop environment.
|
||||
|
||||
![GNOME Login Screen with GDM][6]
|
||||
|
||||
Some popular display managers are:
|
||||
|
||||
* GDM ([GNOME Display Manager][7]): preferred by GNOME
|
||||
* [SDDM][8] (Simple Desktop Display Manager): preferred by KDE
|
||||
* LightDM: developed by Ubuntu for the Unity desktop
|
||||
|
||||
|
||||
|
||||
### Display managers can be customized
|
||||
|
||||
There are so many desktop environments available. Do they all have their own display managers? No. That’s not the case.
|
||||
|
||||
As I mentioned previously, the visible login screen is called the greeter. The greeter can be customized to change the look of the login screen.
|
||||
|
||||
In fact, many distributions and/or desktop environments have written their own greeter to give users a login screen that resembles their brand.
|
||||
|
||||
For example, Mint’s Cinnamon desktop uses LightDM but has its own greeter to give it a more Minty (or should I say Cinnamon) look.
|
||||
|
||||
![Linux Mint login screen based on LightDM][9]
|
||||
|
||||
Take a look at Kali Linux’s login screen:
|
||||
|
||||
![Kali Linux Login Screen][10]
|
||||
|
||||
If you are into coding and tweaking, you may modify or code your own greeter to your liking.
|
||||
|
||||
### Changing display manager
|
||||
|
||||
You may [change the display manager][11] if you want. You need to install the new display manager first; you’ll see the option to switch display managers during installation.
|
||||
|
||||
![][12]
|
||||
|
||||
If you didn’t do it at that time, you can change the display manager by manually configuring it later. The method for reconfiguring the display manager differs slightly between distributions and is beyond the scope of this article.
|
||||
|
||||
### Conclusion
|
||||
|
||||
I hope you have a slightly better understanding of the term display manager in Linux. The aim of this Jargon Buster series is to explain common Linux colloquial and technical terms in non-technical language without going into too much detail.
|
||||
|
||||
I welcome your comments and suggestions.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://itsfoss.com/display-manager/
|
||||
|
||||
作者:[Abhishek Prakash][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://itsfoss.com/author/abhishek/
|
||||
[b]: https://github.com/lujun9972
|
||||
[1]: https://itsfoss.com/display-server/
|
||||
[2]: https://itsfoss.com/what-is-desktop-environment/
|
||||
[3]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2018/05/login-screen-opensuse.jpg?resize=800%2C474&ssl=1
|
||||
[4]: https://itsfoss.com/best-linux-desktop-environments/
|
||||
[5]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2020/10/deepin-session-ubuntu.jpg?resize=800%2C414&ssl=1
|
||||
[6]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2020/06/Login-screen-1.png?resize=800%2C450&ssl=1
|
||||
[7]: https://wiki.gnome.org/Projects/GDM
|
||||
[8]: https://github.com/sddm
|
||||
[9]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2020/10/linux-mint-login-screen.jpg?resize=800%2C418&ssl=1
|
||||
[10]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2020/10/kali-linux-login-screen.jpg?resize=799%2C450&ssl=1
|
||||
[11]: https://itsfoss.com/switch-gdm-and-lightdm-in-ubuntu-14-04/
|
||||
[12]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2014/06/Switch_between_gdm_and_lightgdm_Ubuntu.jpeg?resize=700%2C448&ssl=1
|
55
sources/tech/20201012 My top 7 keywords in Rust.md
Normal file
55
sources/tech/20201012 My top 7 keywords in Rust.md
Normal file
@ -0,0 +1,55 @@
|
||||
[#]: collector: (lujun9972)
|
||||
[#]: translator: ( )
|
||||
[#]: reviewer: ( )
|
||||
[#]: publisher: ( )
|
||||
[#]: url: ( )
|
||||
[#]: subject: (My top 7 keywords in Rust)
|
||||
[#]: via: (https://opensource.com/article/20/10/keywords-rust)
|
||||
[#]: author: (Mike Bursell https://opensource.com/users/mikecamel)
|
||||
|
||||
My top 7 keywords in Rust
|
||||
======
|
||||
Learn a handful of useful keywords from the Rust standard library.
|
||||
![Rustacean t-shirt][1]
|
||||
|
||||
I've been using [Rust][2] for a few months now, writing rather more of it than I expected—though quite a lot of that has been thrown away as I've learned, improved what I'm writing, and taken some more complex tasks beyond what I originally intended.
|
||||
|
||||
I still love it and thought it might be good to talk about some of the important keywords that come up again and again in Rust. I'll provide my personal summary of what they do, why you need to think about how you use them, and anything else that's useful, particularly for people who are new to Rust or coming from another language (such as Java; see my article [_Why I'm enjoying learning Rust as a Java programmer_][3]).
|
||||
|
||||
Without further ado, let's get going. A good place for further information is always the official Rust documentation—you'll probably want to start with the [std library][4].
|
||||
|
||||
1. **const** – You get to declare constants with const, and you should. This isn't rocket science, but do declare with const, and if you're going to use constants across different modules, then do the right thing and create a `lib.rs` file (the Rust default) into which you can put all of them with a nicely named module. I've had clashes of const variable names (and values!) across different files in different modules simply because I was too lazy to do anything other than cut and paste across files when I could have saved myself lots of work simply by creating a shared module.
|
||||
2. **let** – You don't _always_ need to declare a variable with a let statement, but your code will be clearer when you do. What's more, always add the type if you can. Rust will do its very best to infer what it should be, but it may not always be able to do so (in which case the compiler, which you'll typically invoke via [Cargo][5], will tell you), or it may even not do what you expect. In the latter case, it's always simpler for the compiler to complain that the function you're assigning from (for instance) doesn't match the declaration than for Rust to try to help you do the wrong thing, only for you to have to spend ages debugging elsewhere.
|
||||
3. **match** – match was new to me, and I love it. It's not dissimilar to "switch" in other languages but is used extensively in Rust. It makes for legible code, and Cargo will have a good go at warning you if you do something foolish (such as miss possible cases). My general rule of thumb, where I'm managing different options or doing branching, is to ask whether I can use match. If I can, I will.
|
||||
4. **mut** – When declaring a variable, if it's going to change after its initialisation, then you need to declare it mutable. A common mistake is to declare something mutable when it _isn't_ changed—but the compiler will warn you about that. If you get a warning from Cargo that a mutable variable isn't changed when you think it _is_, then you may wish to check the scope of the variable and make sure that you're using the right version.
|
||||
5. **return** – I actually very rarely use return, which is for returning a value from a function, because it's usually simpler and clearer to read if you just provide the value (or the function providing the return value) at the end of the function as the last line. Warning: you _will_ forget to omit the semicolon at the end of this line on many occasions; if you do, the compiler won't be happy.
|
||||
6. **unsafe** – This does what it says on the tin: If you want to do things where Rust can't guarantee memory safety, then you're going to need to use this keyword. I have absolutely no intention of declaring any of my Rust code unsafe now or at any point in the future; one of the reasons Rust is so friendly is because it stops this sort of hackery. If you really need to do this, think again, think yet again, and then redesign. Unless you're a seriously low-level systems programmer, _avoid_ unsafe.
|
||||
7. **use** – When you want to use an item, e.g., struct, variable, function, etc. from another crate, then you need to declare it at the beginning of the block where you'll be using it. Another common mistake is to do this but fail to add the crate (preferably with a minimum version number) to the `Cargo.toml` file.
|
||||
|
||||
|
||||
|
||||
This isn't the most complicated article I've ever written, I know, but it's the sort of article I would have appreciated when I was starting to learn Rust. I plan to create similar articles on key functions and other Rust must-knows: let me know if you have any requests!
|
||||
|
||||
* * *
|
||||
|
||||
_This article was originally published on [Alice, Eve, and Bob][6] and is reprinted with the author's permission._
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://opensource.com/article/20/10/keywords-rust
|
||||
|
||||
作者:[Mike Bursell][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://opensource.com/users/mikecamel
|
||||
[b]: https://github.com/lujun9972
|
||||
[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/rustacean-tshirt.jpg?itok=u7LBmyaj (Rustacean t-shirt)
|
||||
[2]: https://www.rust-lang.org/
|
||||
[3]: https://opensource.com/article/20/5/rust-java
|
||||
[4]: https://doc.rust-lang.org/std/
|
||||
[5]: https://doc.rust-lang.org/cargo/
|
||||
[6]: https://aliceevebob.com/2020/09/01/rust-my-top-7-keywords/
|
118
sources/tech/20201013 Install MariaDB or MySQL on Linux.md
Normal file
118
sources/tech/20201013 Install MariaDB or MySQL on Linux.md
Normal file
@ -0,0 +1,118 @@
|
||||
[#]: collector: (lujun9972)
|
||||
[#]: translator: (geekpi)
|
||||
[#]: reviewer: ( )
|
||||
[#]: publisher: ( )
|
||||
[#]: url: ( )
|
||||
[#]: subject: (Install MariaDB or MySQL on Linux)
|
||||
[#]: via: (https://opensource.com/article/20/10/mariadb-mysql-linux)
|
||||
[#]: author: (Seth Kenlon https://opensource.com/users/seth)
|
||||
|
||||
Install MariaDB or MySQL on Linux
|
||||
======
|
||||
Get started using an open source SQL database on your Linux system.
|
||||
![Person standing in front of a giant computer screen with numbers, data][1]
|
||||
|
||||
Both [MariaDB][2] and [MySQL][3] are open source databases that use SQL and share the same original codebase. MariaDB is a drop-in replacement for MySQL, so much so that you use the same command (`mysql`) to interact with MySQL and MariaDB databases. This article, therefore, applies equally to MariaDB and MySQL.
|
||||
|
||||
### Install MariaDB
|
||||
|
||||
You can install MariaDB using your Linux distribution's package manager. On most distributions, MariaDB is split into a server package and a client package. The server package provides the database "engine," the part of MariaDB that runs (usually on a physical server) in the background, listening for data input or requests for data output. The client package provides the command `mysql`, which you can use to communicate with the server.
|
||||
|
||||
On RHEL, Fedora, CentOS, or similar:
|
||||
|
||||
|
||||
```
|
||||
$ sudo dnf install mariadb mariadb-server
|
||||
```
|
||||
|
||||
On Debian, Ubuntu, Elementary, or similar:
|
||||
|
||||
|
||||
```
|
||||
$ sudo apt install mariadb-client mariadb-server
|
||||
```
|
||||
|
||||
Other systems may package MariaDB differently, so you may need to search your software repository to learn how your distribution's maintainers provide it.
|
||||
|
||||
### Start MariaDB
|
||||
|
||||
Because MariaDB is designed to function, in part, as a database server, it can run on one computer and be administered from another. As long as you have access to the computer running it, you can use the `mysql` command to administer the database. I ran MariaDB on my local computer when writing this article, but it's just as likely that you'll interact with a MariaDB database hosted on a remote system.
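As an aside, the `mysql` CLI is not the only possible client. Here is a hedged sketch of querying a remote MariaDB server from Python (the host, the credentials, and the third-party PyMySQL driver are assumptions, not part of this article's setup):

```
import pymysql  # third-party driver: pip install pymysql

# Hypothetical connection details for a remote MariaDB server.
conn = pymysql.connect(host="db.example.com", user="root",
                       password="myreallysecurepassphrase")
try:
    with conn.cursor() as cursor:
        cursor.execute("SHOW DATABASES")
        for (name,) in cursor.fetchall():
            print(name)
finally:
    conn.close()
```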
|
||||
|
||||
Before starting MariaDB, you must create an initial database. You should define the user you want MariaDB to use when initializing its file structure. By default, MariaDB uses the current user, but you probably want it to use a dedicated user account. Your package manager probably configured a system user and group for you. Use `grep` to find out whether there's a `mysql` group:
|
||||
|
||||
|
||||
```
|
||||
$ grep mysql /etc/group
|
||||
mysql:x:27:
|
||||
```
|
||||
|
||||
You can also look in `/etc/passwd` for a dedicated user, but usually, where there's a group, there's also a user. If there isn't a dedicated `mysql` user and group, look through `/etc/group` for an obvious alternative (such as `mariadb`). Failing that, read your distribution's documentation to learn how MariaDB runs.

Assuming your install uses `mysql`, initialize the database environment:

```
$ sudo mysql_install_db --user=mysql
Installing MariaDB/MySQL system tables in '/var/lib/mysql'...
OK
[...]
```

The result of this step reveals the next tasks you must perform to configure MariaDB:

```
PLEASE REMEMBER TO SET A PASSWORD FOR THE MariaDB root USER !
To do so, start the server, then issue the following commands:

'/usr/bin/mysqladmin' -u root password 'new-password'
'/usr/bin/mysqladmin' -u root -h $(hostname) password 'new-password'

Alternatively you can run:
'/usr/bin/mysql_secure_installation'

which will also give you the option of removing the test
databases and anonymous user created by default. This is
strongly recommended for production servers.
```

Start MariaDB using your distribution's init system:

```
$ sudo systemctl start mariadb
```

To enable the MariaDB server to start upon boot:

```
$ sudo systemctl enable --now mariadb
```

Now that you have a MariaDB server to communicate with, set a password for it:

```
mysqladmin -u root password 'myreallysecurepassphrase'
mysqladmin -u root -h $(hostname) password 'myreallysecurepassphrase'
```

Finally, if you intend to use this installation on a production server, run the `mysql_secure_installation` command before going live.
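
A minimal sketch of those final steps:

```
$ sudo mysql_secure_installation
$ mysql -u root -p
```

The first command interactively removes the test database and anonymous users; the second verifies that you can log in with the new root password.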

--------------------------------------------------------------------------------

via: https://opensource.com/article/20/10/mariadb-mysql-linux

作者:[Seth Kenlon][a]
选题:[lujun9972][b]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)

本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出

[a]: https://opensource.com/users/seth
[b]: https://github.com/lujun9972
[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/data_metrics_analytics_desktop_laptop.png?itok=9QXd7AUr (Person standing in front of a giant computer screen with numbers, data)
[2]: https://mariadb.org/
[3]: https://www.mysql.com/

304
sources/tech/20201013 My first day using Ansible.md
Normal file
@ -0,0 +1,304 @@
[#]: collector: (lujun9972)
[#]: translator: ( )
[#]: reviewer: ( )
[#]: publisher: ( )
[#]: url: ( )
[#]: subject: (My first day using Ansible)
[#]: via: (https://opensource.com/article/20/10/first-day-ansible)
[#]: author: (David Both https://opensource.com/users/dboth)

My first day using Ansible
======

A sysadmin shares information and advice about putting Ansible into real-world use configuring computers on his network.

![People work on a computer server with devices][1]

Getting a new computer, whether physical or virtual, up and running is time-consuming and requires a good deal of work—whether it's your first time or the 50th. For many years, I have used a series of scripts and RPMs that I created to install the packages I need and to perform many bits of configuration for my favorite tools. This approach has worked well and simplified my work, as well as reduced the amount of time I spend typing commands.

I am always looking for better ways of doing things, and, for several years now, I have been hearing and reading about [Ansible][2], which is a powerful tool for automating system configuration and management. Ansible allows a sysadmin to define a specific state for each host in one or more playbooks and then performs whatever tasks are necessary to bring the host to that state. This includes installing or deleting various resources such as RPM or Apt packages, configuration and other files, users, groups, and much more.

I have delayed learning how to use it for a long time because—stuff. Until recently, when I ran into a problem that I thought Ansible could easily solve.

This article is not a complete how-to for getting started with Ansible; rather, it is intended to provide insight into some of the issues that I encountered and some information that I found only in some very obscure places. Much of the information I found in various online discussions and Q&A groups about Ansible was incorrect. Errors ranged from information that was really old with no indication of its date or provenance to information that was just wrong.

The information in this article is known to work—although there might be other ways of accomplishing the same things—and it works with Ansible 2.9.13 and [Python][3] 3.8.5.

### My problem

All of my best learning experiences start with a problem I need to solve, and this was no exception.

I have been working on a little project to modify the configuration files for the [Midnight Commander][4] (mc) file manager and pushing them out to various systems on my network for testing. Although I have a script to automate this, it still requires a bit of fussing with a command-line loop to provide the names of the systems to which I want to push the new code. The large number of changes I was making to the configuration files made it necessary for me to push the new ones frequently. But, just when I thought I had my new configuration just right, I would find a problem and need to do another push after making the fix.

This environment made it difficult to keep track of which systems had the new files and which did not. I also have a couple of hosts that need to be treated differently. And my little bit of knowledge about Ansible suggested that it would probably be able to do all or most of what I need.

### Getting started

I had read a number of good articles and books about Ansible, but never in an "I have to get this working NOW!" kind of situation. And now was—well, NOW!

In rereading these documents, I discovered that they mostly talk about how to install Ansible from GitHub using—wait for it—Ansible. That is cool, but I really just wanted to get started, so I installed it on my Fedora workstation using DNF and the version in the Fedora repository. Easy.

But then I started looking for the file locations and trying to determine which configuration files I needed to modify, where to keep my playbooks, what a playbook even looks like, and what it does. I had lots of (so far) unanswered questions running around in my head.

So, without further descriptions of my tribulations, here are the things I discovered and that got me going.

### Configuration

Ansible's configuration files are kept in `/etc/ansible`. Makes sense, right, since `/etc` is where system programs are supposed to keep their configuration files. The two files I needed to work with are `ansible.cfg` and `hosts`.

#### ansible.cfg

After getting started with some of the exercises I found in the documents and online, I began receiving warning messages about the deprecation of certain older Python files. So, I set `deprecation_warnings` to `false` in `ansible.cfg`, and those angry red warning messages stopped:

```
deprecation_warnings = False
```

Those warnings are important, so I will revisit them later and figure out what I need to do. But for now, they no longer clutter the screen and obfuscate the errors I actually need to be concerned about.

#### The hosts file

Not the same as the `/etc/hosts` file, the `hosts` file is also known as the inventory file, and it lists the hosts on your network. This file allows grouping hosts together in related sets, such as servers, workstations, and pretty much any designation you need. This file contains its own help and plenty of examples, so I won't go into boring detail here. However, there are some things to know.

Hosts can be listed outside of any groups, but groups can be helpful in identifying hosts with one or more common characteristics. Groups use the INI format, so a server group looks like this:

```
[servers]
server1
server2
...etc.
```

A hostname must be present in this file for Ansible to work on it. Even though some subcommands allow you to specify a hostname, the command will fail unless the hostname is in the `hosts` file. A host can also be listed in multiple groups. So `server1` might also be a member of the `[webservers]` group in addition to the `[servers]` group, and a member of the `[ubuntu]` group to differentiate it from Fedora servers.

Ansible is smart. If the `all` argument is used for the hostname, Ansible scans the file and performs the defined tasks on all hosts listed in the file. Ansible will only attempt to work on each host once, no matter how many groups it appears in. This also means that there does not need to be a defined "all" group because Ansible can determine all hostnames in the file and create its own list of unique hostnames.
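
As a quick sanity check of the inventory, an ad-hoc command with the built-in `ping` module works well. This example is not from the original article; it targets the `[servers]` group and then every host in the file:

```
# ansible servers -m ping
# ansible all -m ping
```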

Another little thing to look out for is multiple entries for a single host. I use `CNAME` records in my DNS zone file to create aliased names that point to the [A records][5] for some of my hosts. That way, I can refer to a host as `host1` or `h1` or `myhost`. If you use multiple hostnames for the same host in the `hosts` file, Ansible will try to perform its tasks on all of those hostnames; it has no way of knowing that they refer to the same host. The good news is that this does not affect the overall result; it just takes a bit more time as Ansible works on the secondary hostnames and determines that all of the operations have already been performed.

### Ansible facts

Most of the materials I have read on Ansible talk about [Ansible facts][6], which "are data related to your remote systems, including operating systems, IP addresses, attached filesystems, and more." This information is available in other ways, such as `lshw`, `dmidecode`, the `/proc` filesystem, and more, but Ansible generates a JSON file containing this information. Each time Ansible runs, it generates this facts data. There is an amazing amount of information in this data stream, all of which are in `<"variable-name": "value">` pairs. All of these variables are available for use within an Ansible playbook. The best way to understand the huge amount of information available is to display it yourself:

```
# ansible -m setup <hostname> | less
```

See what I mean? Everything you ever wanted to know about your host hardware and Linux distribution is there and usable in a playbook. I have not yet gotten to the point where I need to use those variables, but I am certain I will in the next couple of days.
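
The `setup` module also accepts a `filter` argument that narrows the output to matching fact names, which is handy when the full dump is overwhelming. A small sketch, reusing the `testvm2` host that appears later in this article:

```
# ansible testvm2 -m setup -a 'filter=ansible_distribution*'
```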

### Modules

The `ansible` command above uses the `-m` option to specify the "setup" module. Ansible comes with many modules already built in, so you do not need to download anything extra to use them. There are also many downloadable modules that can be installed, but the built-ins do everything I have needed for my current projects so far.
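
The `ansible-doc` command is a convenient way to explore what is available; for example, to list the modules and then read the documentation for the `dnf` module used below:

```
# ansible-doc -l | less
# ansible-doc dnf
```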

### Playbooks

Playbooks can be located almost anywhere. Since I need to run my playbooks as root, I placed mine in `/root/ansible`. As long as this directory is the present working directory (PWD) when I run Ansible, it can find my playbook. Ansible also has a runtime option to specify a different playbook and location.

Playbooks can contain comments, although I have seen very few articles or books that mention this. As a sysadmin who believes in documenting everything, I find using comments can be very helpful. This is not so much about saying the same things in the comments as I do in the task name; rather, it is about identifying the purpose of groups of tasks and ensuring that I record my reasons for doing certain things in a certain way or order. This can help with debugging problems later when I may have forgotten my original thinking.

Playbooks are simply collections of tasks that define the desired state of a host. A hostname or inventory group is specified at the beginning of the playbook and defines the hosts on which Ansible will run the playbook.

Here is a sample of my playbook:

```
################################################################################
# This Ansible playbook updates Midnight commander configuration files.       #
################################################################################
- name: Update midnight commander configuration files
  hosts: all

  tasks:
  - name: ensure midnight commander is the latest version
    dnf:
      name: mc
      state: present

  - name: create ~/.config/mc directory for root
    file:
      path: /root/.config/mc
      state: directory
      mode: 0755
      owner: root
      group: root

  - name: create ~/.config/mc directory for dboth
    file:
      path: /home/dboth/.config/mc
      state: directory
      mode: 0755
      owner: dboth
      group: dboth

  - name: copy latest personal skin
    copy:
      src: /root/ansible/UpdateMC/files/MidnightCommander/DavidsGoTar.ini
      dest: /usr/share/mc/skins/DavidsGoTar.ini
      mode: 0644
      owner: root
      group: root

  - name: copy latest mc ini file
    copy:
      src: /root/ansible/UpdateMC/files/MidnightCommander/ini
      dest: /root/.config/mc/ini
      mode: 0644
      owner: root
      group: root

  - name: copy latest mc panels.ini file
    copy:
      src: /root/ansible/UpdateMC/files/MidnightCommander/panels.ini
      dest: /root/.config/mc/panels.ini
      mode: 0644
      owner: root
      group: root
<SNIP>
```

The playbook starts with its own name and the hosts it will act on—in this case, all of the hosts listed in my `hosts` file. The `tasks` section lists the specific tasks required to bring the host into compliance with the desired state. This playbook starts with a task in which Ansible's built-in DNF module updates Midnight Commander if it is not the most recent release. The next tasks ensure that the required directories are created if they do not exist, and the remainder of the tasks copy the files to the proper locations. These `file` and `copy` tasks can also set the ownership and file modes for the directories and files.

The details of my playbook are beyond the scope of this article, but I used a bit of a brute-force attack on the problem. There are other methods for determining which users need to have the files updated rather than using a task for each file for each user. My next objective is to simplify this playbook to use some of the more advanced techniques.

Running a playbook is easy; just use the `ansible-playbook` command. Playbook files use the `.yml` extension, which stands for YAML. I have seen several meanings for that, but my bet is on "Yet Another Markup Language," despite the fact that some claim that YAML is not one.

This command runs the playbook I created for updating my Midnight Commander files:

```
# ansible-playbook -f 10 UpdateMC.yml
```

The `-f` option specifies that Ansible should fork up to 10 parallel processes in order to perform operations in parallel. This can greatly speed overall task completion, especially when working on multiple hosts.
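
Before running a playbook against every host, it can help to validate it and do a dry run first; both of these are standard `ansible-playbook` options:

```
# ansible-playbook --syntax-check UpdateMC.yml
# ansible-playbook --check UpdateMC.yml
```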

### Output

The output from a running playbook lists each task and the results. An `ok` means the machine state managed by the task already matches the state defined in the task stanza. Because the state defined in the task is already true, Ansible did not need to perform the actions defined in the task stanza.

The response `changed` indicates that Ansible performed the task specified in the stanza in order to bring it to the desired state. In this case, the machine state defined in the stanza was not true, so the actions defined were performed to make it true. On a color-capable terminal, the `TASK` lines are shown in color. On my host with my amber-on-black terminal color configuration, the `TASK` lines are shown in amber, `changed` lines are in brown, and `ok` lines are shown in green. Error lines are displayed in red.

The following output is from the playbook I will eventually use to perform post-install configuration on new hosts:

```
PLAY [Post-installation updates, package installation, and configuration]

TASK [Gathering Facts]
ok: [testvm2]

TASK [Ensure we have connectivity]
ok: [testvm2]

TASK [Install all current updates]
changed: [testvm2]

TASK [Install a few command line tools]
changed: [testvm2]

TASK [copy latest personal Midnight Commander skin to /usr/share]
changed: [testvm2]

TASK [create ~/.config/mc directory for root]
changed: [testvm2]

TASK [Copy the most current Midnight Commander configuration files to /root/.config/mc]
changed: [testvm2] => (item=/root/ansible/PostInstallMain/files/MidnightCommander/DavidsGoTar.ini)
changed: [testvm2] => (item=/root/ansible/PostInstallMain/files/MidnightCommander/ini)
changed: [testvm2] => (item=/root/ansible/PostInstallMain/files/MidnightCommander/panels.ini)

TASK [create ~/.config/mc directory in /etc/skel]
changed: [testvm2]

<SNIP>
```

### The cow

If you have the [cowsay][7] program installed on your computer, you will notice that the `TASK` names appear in the cow's speech bubble:

```
 ____________________________________
< TASK [Ensure we have connectivity] >
 ------------------------------------
        \   ^__^
         \  (oo)\_______
            (__)\       )\/\
                ||----w |
                ||     ||
```

If you do not have this fun feature and want it, install the cowsay package using your distribution's package manager. If you have this and don't want it, disable it by setting `nocows = 1` in the `/etc/ansible/ansible.cfg` file.

I like the cow and think it is fun, but it reduces the amount of screen space that can be used to display messages. So I disabled it after it started getting in the way.

### Files

As with my Midnight Commander task, it is frequently necessary to install and maintain files of various types. There are as many "best practices" for creating a directory tree for storing files used in playbooks as there are sysadmins—or at least as many as the number of authors writing books and articles about Ansible.

I chose a simple structure that makes sense to me:

```
/root/ansible
└── UpdateMC
    ├── files
    │   └── MidnightCommander
    │       ├── DavidsGoTar.ini
    │       ├── ini
    │       └── panels.ini
    └── UpdateMC.yml
```

You should use whatever structure works for you. Just be aware that some other sysadmin will likely need to work with whatever you set up, so there should be some level of logic to it. When I was using RPM and Bash scripts to perform my post-install tasks, my file repository was a bit scattered and definitely not structured with any logic. As I work through creating playbooks for many of my administrative tasks, I will introduce a much more logical structure for managing my files.

### Multiple playbook runs

It is safe to run a playbook as many times as needed or desired. Each task will only be executed if the state does not match the one specified in the task stanza. This makes it easy to recover from errors encountered during previous playbook runs. The playbook stops running when it encounters an error.

While testing my first playbook, I made many mistakes and corrected them. Each additional run of the playbook—assuming my fix is a good one—skips the tasks whose state already matches the specified one and executes those that did not. When my fix works, the previously failed task completes successfully, and any tasks after that one in my playbook also execute—until it encounters another error.

This also makes testing easy. I can add new tasks and, when I run the playbook, only those new tasks are performed because they are the only ones that do not match the test host's desired state.
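
When a run does stop on an error, `ansible-playbook` can also resume from a specific task rather than replaying the whole playbook from the top; the task name below is one from my sample playbook and must match exactly:

```
# ansible-playbook --start-at-task="copy latest mc ini file" UpdateMC.yml
```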

### Some thoughts

Some tasks are not appropriate for Ansible because there are better methods for achieving a specific machine state. The use case that comes to mind is that of returning a VM to an initial state so that it can be used as many times as necessary to perform testing beginning with that known state. It is much easier to get the VM into the desired state and then to take a snapshot of the then-current machine state. Reverting to that snapshot is usually going to be easier and much faster than using Ansible to return the host to that desired state. This is something I do several times a day when researching articles or testing new code.

After completing my playbook for updating Midnight Commander, I started a new playbook that I will use to perform post-installation tasks on newly installed Fedora hosts. I have already made good progress, and the playbook is a bit more sophisticated and less brute-force than my first one.

On my very first day using Ansible, I created a playbook that solves a problem. I also started a second playbook that will solve the very big problem of post-install configuration. And I have learned a lot.

Although I really like using [Bash][8] scripts for many of my administrative tasks, I am already finding that Ansible can do everything I want—and in a way that can maintain the system in the state I want. After only a single day of use, I am an Ansible fan.

### Resources

The most complete and useful document I have found is the [User Guide][9] on the Ansible website. This document is intended as a reference and not a how-to or getting-started document.

Opensource.com has published many [articles about Ansible][10] over the years, and I have found most of them very helpful for my needs. The Enable Sysadmin website also has a lot of [Ansible articles][11] that I have found to be helpful. You can learn even more by checking out [AnsibleFest][12] happening this week (October 13-14, 2020). The event is completely virtual and free to register.

--------------------------------------------------------------------------------

via: https://opensource.com/article/20/10/first-day-ansible

作者:[David Both][a]
选题:[lujun9972][b]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)

本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出

[a]: https://opensource.com/users/dboth
[b]: https://github.com/lujun9972
[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/rh_003499_01_linux11x_cc.png?itok=XMDOouJR (People work on a computer server with devices)
[2]: https://www.ansible.com/
[3]: https://www.python.org/
[4]: https://midnight-commander.org/
[5]: https://en.wikipedia.org/wiki/List_of_DNS_record_types
[6]: https://docs.ansible.com/ansible/latest/user_guide/playbooks_vars_facts.html#ansible-facts
[7]: https://en.wikipedia.org/wiki/Cowsay
[8]: https://opensource.com/downloads/bash-cheat-sheet
[9]: https://docs.ansible.com/ansible/latest/user_guide/index.html
[10]: https://opensource.com/tags/ansible
[11]: https://www.redhat.com/sysadmin/topics/ansible
[12]: https://www.ansible.com/ansiblefest

@ -0,0 +1,70 @@
[#]: collector: (lujun9972)
[#]: translator: ( )
[#]: reviewer: ( )
[#]: publisher: ( )
[#]: url: ( )
[#]: subject: (What measured boot and trusted boot means for Linux)
[#]: via: (https://opensource.com/article/20/10/measured-trusted-boot)
[#]: author: (Mike Bursell https://opensource.com/users/mikecamel)

What measured boot and trusted boot means for Linux
======

When a trusted boot process is performed, the process not only measures each value but also performs a check against a known (and expected!) good value at the same time.

![Brain on a computer screen][1]

Sometimes I'm looking around for a subject to write about, and realise that there's one that I assume that I've covered, but, on searching, discover that I haven't. One of those topics is measured boot and trusted boot—sometimes misleadingly referred to as "secure boot." There are specific procedures that use these terms with capital letters (e.g., Secure Boot), which I'm going to try to avoid discussing in this article. I'm more interested in the generic processes—and a major potential downfall—than in trying to go into the ins and outs of specifics. What follows is a (heavily edited) excerpt from my forthcoming book on trust in computing and the cloud for [Wiley][2].

In order to understand what measured boot and trusted boot aim to achieve, look at the Linux virtualisation stack: the components you run if you want to use virtual machines (VMs) on a Linux machine. This description is arguably over-simplified, but (as I noted above) I'm not interested in the specifics but in what I'm trying to achieve. I'll concentrate on the bottom four layers (at a rather simple level of abstraction): CPU/management engine; BIOS/EFI; firmware; and hypervisor, but I'll also consider a layer _just_ above the CPU/management engine, which interposes a Trusted Platform Module (TPM) and some instructions for how to perform one of the two processes (_measured boot_ and _trusted boot_). Once the system starts to boot, the TPM is triggered and starts its work. Alternative roots of trust, such as hardware security modules (HSMs), might also be used, but I will use TPMs, the most common example in this context, in my example.

In both cases (trusted boot and measured boot), the basic flow starts with the TPM performing a measurement of the BIOS/EFI layer. This measurement involves checking the binary instructions to be carried out by this layer and creating a cryptographic hash of the binary image. The hash that's produced is then stored in one of several Platform Configuration Register (PCR) "slots" in the TPM. These can be thought of as pieces of memory that can be read later on—either by the TPM for its purposes or by entities external to the TPM—but that cannot be changed once they have been written. These pieces of memory are integrity protected from the time they are initially written. This provides assurances that once a value is written to a PCR by the TPM, it can be considered constant for the lifetime of the system until power off or reboot.

After measuring the BIOS/EFI layer, the next layer (firmware) is measured. In this case, the resulting hash is combined with the previous hash (which was stored in the PCR slot) and then also stored in a PCR slot. The process continues until all the layers involved in the process have been measured and the hashes' results have been stored. There are (sometimes quite complex) processes to set up the original TPM values (I've skipped some of the more low-level steps in the process for simplicity) and to allow (hopefully authorised) changes to the layers for upgrading or security patching, for example. This "measured boot" process allows for entities to query the TPM after the process has completed and to check whether the values in the PCR slots correspond to the expected values, pre-calculated with "known good" versions of the various layers—that is, pre-checked versions whose provenance and integrity have already been established.
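
On a Linux system with a TPM 2.0 device, you can inspect these registers yourself; a minimal sketch, assuming the `tpm2-tools` package is installed:

```
$ sudo tpm2_pcrread sha256:0,1,2,3
```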

Various protocols exist to allow parties _external_ to the system to check the values (e.g., via a network connection) that the TPM attests to be correct: the process of receiving and checking such values from an external system is known as "remote attestation."

This process—measured boot—allows you to find out whether the underpinnings of your system—the lowest layers—are what you think they are. But what if they're not? Measured boot (unsurprisingly, given the name) measures but doesn't perform any other actions.

The alternative, "trusted boot," goes a step further. When a trusted boot process is performed, the process not only measures each value but also performs a check against a known (and expected!) good value at the same time. If the check fails, then the process will halt, and the booting of the system will fail. This may sound like a rather extreme approach to take on a system, but sometimes it is absolutely the right one. Where the system under consideration may have been compromised—which is one likely inference you can make from the failure of a trusted boot process—it is better for it to not be available at all than to be running based on flawed expectations.

This is all very well if I am the owner of the system being measured, have checked all of the various components being measured (and the measurements), and am happy that what's being booted is what I want.[1][3] But what if I'm using a system on the cloud, for instance, or any system owned and managed by someone else? In that case, I'm trusting the cloud provider (or owner/manager) with two things:

  1. Doing all the measuring correctly and reporting correct results to me
  2. Building something I should trust in the first place

This is the problem with the nomenclature "trusted boot" and, even worse, "secure boot." Both imply that an absolute, objective property of a system has been established—it is "trusted" or "secure"—when this is clearly not the case. Obviously, it would be unfair to expect the designers of such processes to name them after the failure states—"untrusted boot" or "insecure boot"—but, unless I can be very certain that I trust the owner of the system to do step two entirely correctly (and in my best interests as the user of the system, rather than theirs as the owner), then I can make no stronger assertions.

There is an enormous temptation to take a system that has gone through a trusted boot process and label it a "trusted system" when _the very best_ assertion you can make is that the particular layers measured in the measured and/or trusted boot process have been asserted to be those the process expects to be present. Such a process says nothing at all about the fitness of the layers to provide assurances of behaviour nor about the correctness (or fitness to provide assurances of behaviour) of any subsequent layers on top of those.

It's important to note that designers of TPMs are quite clear what is being asserted and that assertions about trust should be made carefully and sparingly. Unluckily, however, the complexities of systems, the general low level of understanding of trust, and the complexities of context and transitive trust make it very easy for systems designers and implementors to do the wrong thing and assume that any system that has successfully performed a trusted boot process can be considered "trusted." It is also extremely important to remember that TPMs, as hardware roots of trust, offer one of the best mechanisms available for establishing a chain of trust in systems that you may be designing or implementing, and I plan to write an article about them soon.

* * *

  1. Although this turns out to be _much_ harder to do than you might expect!

* * *

_This article was originally published on [Alice, Eve, and Bob][4] and is reprinted with the author's permission._

--------------------------------------------------------------------------------

via: https://opensource.com/article/20/10/measured-trusted-boot

作者:[Mike Bursell][a]
选题:[lujun9972][b]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)

本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出

[a]: https://opensource.com/users/mikecamel
[b]: https://github.com/lujun9972
[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/brain_computer_solve_fix_tool.png?itok=okq8joti (Brain on a computer screen)
[2]: https://wiley.com/
[3]: tmp.HkXCfJwlpF#1
[4]: https://aliceevebob.com/2020/09/08/measured-and-trusted-boot/

@ -0,0 +1,192 @@
[#]: collector: (lujun9972)
[#]: translator: ( )
[#]: reviewer: ( )
[#]: publisher: ( )
[#]: url: ( )
[#]: subject: (2 Ways to Download Files From Linux Terminal)
[#]: via: (https://itsfoss.com/download-files-from-linux-terminal/)
[#]: author: (Abhishek Prakash https://itsfoss.com/author/abhishek/)

2 Ways to Download Files From Linux Terminal
======

If you are stuck at the Linux terminal, say on a server, how do you download a file from the terminal?

There is no download command in Linux, but there are a couple of Linux commands for downloading files.

In this terminal trick, you'll learn two ways to download files using the command line in Linux.

I am using Ubuntu here, but apart from the installation, the rest of the commands are equally valid for all other Linux distributions.

### Download files from Linux terminal using wget command

![][1]

[wget][2] is perhaps the most used command line download manager for Linux and UNIX-like systems. You can download a single file, multiple files, an entire directory, or even an entire website using wget.

wget is non-interactive and can easily work in the background. This means you can easily use it in scripts or even build tools like [uGet download manager][3].

Let's see how to use wget to download files from the terminal.

#### Installing wget

Most Linux distributions come with wget preinstalled. It is also available in the repository of most distributions, and you can easily install it using your distribution's package manager.

On Ubuntu and Debian-based distributions, you can use the [apt package manager][4] command:

```
sudo apt install wget
```

#### Download a file or webpage using wget

You just need to provide the URL of the file or webpage. It will download the file with its original name in the directory you are in.

```
wget URL
```

![][5]

To download multiple files, you'll have to save their URLs in a text file and provide that text file as input to wget like this:

```
wget -i download_files.txt
```
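
The URL list is just one URL per line. A quick sketch with placeholder URLs:

```
cat > download_files.txt <<'EOF'
https://example.com/file1.iso
https://example.com/file2.iso
EOF
wget -i download_files.txt
```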

#### Download files with a different name using wget

You'll notice that a webpage is almost always saved as index.html with wget. It is a good idea to provide a custom name for the downloaded file.

You can use the -O (uppercase O) option to provide the output filename while downloading:

```
wget -O filename URL
```

![][6]

#### Download a folder using wget

Suppose you are browsing an FTP server and you need to download an entire directory. You can use the recursive option:

```
wget -r ftp://server-address.com/directory
```

#### Download an entire website using wget

Yes, you can totally do that. You can mirror an entire website with wget. By downloading an entire website, I mean the entire public-facing website structure.

While you can use the mirror option -m directly, it is a good idea to add:

  * --convert-links: converts links in the downloaded pages so that internal links point to the downloaded resources instead of the web
  * --page-requisites: downloads additional things like style sheets so that the pages look better offline

```
wget -m --convert-links --page-requisites website_address
```

![][7]

#### Bonus Tip: Resume incomplete downloads

If you aborted the download by pressing Ctrl+C for some reason, you can resume the previous download with option -c:

```
wget -c URL
```

### Download files from Linux command line using curl

Like wget, [curl][8] is also one of the most popular commands to download files in the Linux terminal. There are so many ways to [use curl extensively][9], but I'll focus on only the simple downloading here.

#### Installing curl

Though curl doesn't come preinstalled, it is available in the official repositories of most distributions. You can use your distribution's package manager to install it.

To [install curl on Ubuntu][10] and other Debian-based distributions, use the following command:

```
sudo apt install curl
```

#### Download files or webpage using curl

If you use curl without any option with a URL, it will read the file and print it on the terminal screen.

To download a file using the curl command in the Linux terminal, you'll have to use the -O (uppercase O) option:

```
curl -O URL
```

![][11]

It is simple to download multiple files in Linux with curl. You just have to specify multiple URLs, each with its own -O:

```
curl -O URL1 -O URL2 -O URL3
```

Keep in mind that curl is not as simple as wget. While wget saves webpages as index.html, curl will complain that the remote file does not have a name for webpages. You'll have to save it with a custom name, as described in the next section.

#### Download files with a different name

It could be confusing, but to provide a custom name for the downloaded file (instead of the original source name), you'll have to use the -o (lowercase O) option:

```
curl -o filename URL
```

![][12]

Sometimes, curl won't download the file as you expect it to. You'll have to use the option -L (for location) to download it correctly. This is because some links redirect to another link, and with option -L, curl follows the final link.
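
Combining the two options, here is a hedged one-liner with a placeholder URL that follows redirects and saves the result under a chosen name:

```
curl -L -o latest.tar.gz https://example.com/download/latest
```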

#### Pause and resume download with curl

Like wget, you can also resume a paused download using curl. The flag here is -C - (uppercase C followed by a dash), which tells curl to figure out on its own where to continue from:

```
curl -C - -O URL
```

**Conclusion**

As always, there are multiple ways to do the same thing in Linux. Downloading files from the terminal is no different.

wget and curl are just two of the most popular commands for downloading files in Linux. There are more such command line tools. Terminal-based web browsers like [elinks][13], [w3m][14], etc. can also be used for downloading files in the command line.

Personally, for a simple download, I prefer using wget over curl. It is simpler and less confusing because you may have a difficult time figuring out why curl could not download a file in the expected format.

Your feedback and suggestions are welcome.

--------------------------------------------------------------------------------

via: https://itsfoss.com/download-files-from-linux-terminal/

作者:[Abhishek Prakash][a]
选题:[lujun9972][b]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)

本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出

[a]: https://itsfoss.com/author/abhishek/
[b]: https://github.com/lujun9972
[1]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2020/10/Download-Files-from-Linux-terminal.png?resize=800%2C450&ssl=1
[2]: https://www.gnu.org/software/wget/
[3]: https://itsfoss.com/install-latest-uget-ubuntu-linux-mint/
[4]: https://itsfoss.com/apt-command-guide/
[5]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2020/10/download-file-in-linux-terminal-using-wget.png?resize=795%2C418&ssl=1
[6]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2020/10/download-file-in-linux-terminal-using-wget-2.png?resize=795%2C418&ssl=1
[7]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2020/10/download-entire-website-using-wget.png?resize=795%2C418&ssl=1
[8]: https://curl.haxx.se/
[9]: https://linuxhandbook.com/curl-command-examples/
[10]: https://itsfoss.com/install-curl-ubuntu/
[11]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2020/10/download-files-in-linux-using-curl.png?resize=795%2C418&ssl=1
[12]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2020/10/download-files-in-linux-using-curl-1.png?resize=795%2C418&ssl=1
[13]: http://elinks.or.cz/
[14]: http://w3m.sourceforge.net/

@ -0,0 +1,81 @@
[#]: collector: (lujun9972)
[#]: translator: ( )
[#]: reviewer: ( )
[#]: publisher: ( )
[#]: url: ( )
[#]: subject: (LibreOffice Wants Apache to Drop the Ailing OpenOffice and Support LibreOffice Instead)
[#]: via: (https://itsfoss.com/libreoffice-letter-openoffice/)
[#]: author: (Ankush Das https://itsfoss.com/author/ankush/)

LibreOffice Wants Apache to Drop the Ailing OpenOffice and Support LibreOffice Instead
======

It is a no-brainer that Apache OpenOffice is still a relevant recommendation when we think about [open source alternatives to Microsoft Office][1] for Linux users. However, for the past several years, the development of OpenOffice has been pretty much stale.

Of course, it is not a shocker, considering Abhishek wrote about the [possibility of Apache OpenOffice shutting down][2] back in 2016.

Now, in an [open letter from The Document Foundation][3], they appeal to Apache OpenOffice to recommend that users start using better alternatives like LibreOffice. In this article, I shall mention some highlights from the blog post by The Document Foundation and what it means for Apache OpenOffice.

![][4]

### Apache OpenOffice is History, LibreOffice is the Future?

Even though I didn't use OpenOffice back in the day, it is safe to say that it is definitely not a modern open-source alternative to Microsoft Office. Not anymore, at least.

Yes, Apache OpenOffice is still something important for legacy users and was a great alternative a few years back.

Here's the timeline of major releases for OpenOffice and LibreOffice:

![][5]

Now that there's no significant development taking place for OpenOffice, what's the future of Apache OpenOffice? A fairly active project with no major releases by the largest open source foundation?

It does not sound promising, and that is exactly what The Document Foundation highlights in its open letter:

> OpenOffice(.org) – the "father project" of LibreOffice – was a great office suite, and changed the world. It has a fascinating history, but **since 2014, Apache OpenOffice (its current home) hasn't had a single major release**. That's right – no significant new features or major updates have arrived in over six years. Very few minor releases have been made, and there have been issues with timely security updates too.

For an average user who doesn't know about [LibreOffice][6], I would definitely want them to know about it. But, should the Apache Foundation suggest OpenOffice users try LibreOffice to experience a better or more advanced office suite?

I don't know, maybe yes, or no?

> …many users don't know that LibreOffice exists. The OpenOffice brand is still so strong, even though the software hasn't had a significant release for over six years, and is barely being developed or supported

As mentioned in the open letter, The Document Foundation highlights the advantages/improvements of LibreOffice over OpenOffice and appeals to Apache OpenOffice to start recommending that its users try something better (i.e., LibreOffice):

> We appeal to Apache OpenOffice to do the right thing. Our goal should be to **get powerful, up-to-date and well-maintained productivity tools into the hands of as many people as possible**. Let's work together on that!

### What Should Apache OpenOffice Do?

If OpenOffice does the work, users may not need to put in the effort to look for alternatives. So, is it a good idea to call out another project for its slow development and suggest that it embrace the newer tools and recommend them instead?

In an argument, one might say it is only fair to promote your competition if you're done and have no interest in improving OpenOffice. And there's nothing wrong with that: the open-source community should always work together to ensure that new users get the best options available.

On another side, one might say that The Document Foundation is frustrated about OpenOffice still being something relevant in 2020, even without any significant improvements.

I won't judge, but I think these conflicting thoughts come to my mind when I take a look at the open letter.

### Do you think it is time to put OpenOffice to rest and rely on LibreOffice?

Even though LibreOffice seems to be a superior choice and definitely deserves the limelight, what do you think should be done? Should Apache discontinue OpenOffice and redirect users to LibreOffice?

Your opinion is welcome.

--------------------------------------------------------------------------------

via: https://itsfoss.com/libreoffice-letter-openoffice/

作者:[Ankush Das][a]
选题:[lujun9972][b]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)

本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出

[a]: https://itsfoss.com/author/ankush/
[b]: https://github.com/lujun9972
[1]: https://itsfoss.com/best-free-open-source-alternatives-microsoft-office/
[2]: https://itsfoss.com/openoffice-shutdown/
[3]: https://blog.documentfoundation.org/blog/2020/10/12/open-letter-to-apache-openoffice/
[4]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2020/10/libre-office-open-office.png?resize=800%2C450&ssl=1
[5]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2020/10/libre-office-open-office-derivatives.jpg?resize=800%2C166&ssl=1
[6]: https://itsfoss.com/libreoffice-tips/

@ -0,0 +1,94 @@
[#]: collector: (lujun9972)
[#]: translator: (geekpi)
[#]: reviewer: ( )
[#]: publisher: ( )
[#]: url: ( )
[#]: subject: (MellowPlayer is a Desktop App for Various Streaming Music Services)
[#]: via: (https://itsfoss.com/mellow-player/)
[#]: author: (Ankush Das https://itsfoss.com/author/ankush/)

MellowPlayer is a Desktop App for Various Streaming Music Services
======

_**Brief: MellowPlayer is a free and open-source desktop app that lets you integrate web-based music streaming services on Linux and Windows.**_

Undoubtedly, a lot of users prefer tuning in to streaming services to listen to their favorite music instead of purchasing individual music from stores or downloading it for a collection.

Of course, streaming services let you explore new music and help artists reach out to a wider audience easily. But, with so many music streaming services available ([Soundcloud][1], [Spotify][2], [YouTube Music][3], [Amazon Music][4], etc.), it often becomes annoying to use them effectively from your computer.

You may [install Spotify on Linux][5], but there is no desktop app for Amazon Music. So, you potentially cannot manage all your streaming services from a single portal.

What if a desktop app lets you integrate streaming services on both Windows and Linux for free? In this article, I will talk about such an app — '[MellowPlayer][6]'.

### MellowPlayer: Open Source App to Integrate Various Streaming Music Services

![][7]

MellowPlayer is a free and open-source cross-platform desktop app that lets you integrate multiple streaming services and manage them all from one interface.

There are several supported streaming services that you can integrate. You also get a certain level of control to tweak your experience with each individual service. For instance, you can set it to automatically skip ads or mute them on YouTube.

The cross-platform support for both Windows and Linux is definitely a plus point.

Apart from the ability to manage the streaming services, it also integrates the player with your system tray to easily control the music. This means that you can use the media keys on your keyboard to control the music player.

It is also worth noting that you can add a new service that is not officially supported by just creating a plugin for it yourself within the app. To let you know more about it, let me highlight all the key features below.

### Features of MellowPlayer

![][8]

  * Cross-platform (Windows & Linux)
  * Free & open-source
  * Plugin-based application that lets you add new services by creating plugins
  * Integrates the services as a native desktop app with the system tray
  * Supports hotkeys
  * Notifications support
  * Listening history

### Installing MellowPlayer on Linux

![][9]

MellowPlayer is available as a [Flatpak package][10]. I know it's disappointing for some, but it's just Flatpak for Linux and an executable file for Windows. In case you didn't know, follow our guide on [using Flatpak on Linux][11] to get started.

[Download MellowPlayer][12]
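
If you prefer the terminal, the Flathub listing linked above corresponds to the following commands, assuming Flathub is already configured as a Flatpak remote:

```
flatpak install flathub com.gitlab.ColinDuquesnoy.MellowPlayer
flatpak run com.gitlab.ColinDuquesnoy.MellowPlayer
```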

### Wrapping Up

MellowPlayer is a handy desktop app for users who often dabble with multiple streaming services for music. Even though it worked fine in my test with SoundCloud, YouTube, and Spotify, I did notice that the app crashed when trying to re-size the window; just a heads up on that. You can explore more about it on its [GitLab page][13].

There are two similar applications that allow you to play multiple streaming music services: [Nuvola][14] and [Nuclear Music Player][15]. You may want to check them out.

Have you tried MellowPlayer? Feel free to share your thoughts in the comments below.

--------------------------------------------------------------------------------

via: https://itsfoss.com/mellow-player/

作者:[Ankush Das][a]
选题:[lujun9972][b]
译者:[译者ID](https://github.com/译者ID)
校对:[校对者ID](https://github.com/校对者ID)

本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出

[a]: https://itsfoss.com/author/ankush/
[b]: https://github.com/lujun9972
[1]: https://soundcloud.com
[2]: https://www.spotify.com
[3]: https://music.youtube.com
[4]: https://music.amazon.com/home
[5]: https://itsfoss.com/install-spotify-ubuntu-linux/
[6]: https://colinduquesnoy.gitlab.io/MellowPlayer/
[7]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2020/10/mellowplayer-screenshot.jpg?resize=800%2C439&ssl=1
[8]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2020/10/mellowplayer.png?resize=800%2C442&ssl=1
[9]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2020/10/mellowplayer-system-integration.jpg?resize=800%2C438&ssl=1
[10]: https://flathub.org/apps/details/com.gitlab.ColinDuquesnoy.MellowPlayer
[11]: https://itsfoss.com/flatpak-guide/
[12]: https://colinduquesnoy.gitlab.io/MellowPlayer/#features
[13]: https://gitlab.com/ColinDuquesnoy/MellowPlayer
[14]: https://itsfoss.com/nuvola-music-player/
[15]: https://itsfoss.com/nuclear-music-player-linux/

86
sources/tech/20201014 Web of Trust, Part 1- Concept.md
Normal file
@ -0,0 +1,86 @@
[#]: collector: (lujun9972)
[#]: translator: ( )
[#]: reviewer: ( )
[#]: publisher: ( )
[#]: url: ( )
[#]: subject: (Web of Trust, Part 1: Concept)
[#]: via: (https://fedoramagazine.org/web-of-trust-part-1-concept/)
[#]: author: (Kevin "Eonfge" Degeling https://fedoramagazine.org/author/eonfge/)

Web of Trust, Part 1: Concept
======

![][1]

Every day we rely on technologies that nobody can fully understand. Since well before the industrial revolution, complex and challenging tasks have required an approach that breaks the different parts out into smaller-scale tasks, each resulting in specialized knowledge used in some parts of our lives, leaving other parts to trust in skills that others have learned. This shared-knowledge approach also applies to software. Even the most avid readers of this magazine will likely not compile and validate every piece of code they run. This is simply because the world of computers is itself also too big for one person to grasp.

Still, even though it is nearly impossible to understand everything that happens within your PC when you are using it, that does not leave you blind and unprotected. FLOSS software shares trust, giving protection to all users, even if individual users can't grasp all parts in the system. This multi-part article will discuss how this 'Web of Trust' works and how you can get involved.

But first we'll have to take a step back and discuss the basic concepts before we can delve into the details and the web. Also, a note before we start: security is not just about viruses and malware. Security also includes your privacy, your economic stability, and your technological independence.

### One-Way System

By their design, computers can only work and function in the most rudimentary ways of logic: _true_ or _false_, _and_ or _or_. This (Boolean logic) is not readily accessible to humans, therefore we must do something special. We write applications in a code that we can (reasonably) comprehend (human-readable code). Once completed, we turn this human-readable code into a code that the computer can comprehend (machine code).

The step of conversion is called compilation and/or building, and it's a one-way process. Compiled code (machine code) is not really understandable by humans, and it takes special tools to study in detail. You can understand small chunks, but on the whole, an entire application becomes a black box.
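
A tiny shell session illustrates the one-way step, assuming GCC is installed; the file names are placeholders:

```
$ gcc hello.c -o hello   # human-readable C goes in, machine code comes out
$ file hello             # reports a binary executable, not source
$ strings hello | head   # only scattered fragments remain human-readable
```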

This subtle difference shifts power. Power, in this case, being the influence of one person over another person. The person who has written the human-readable version of the application and then releases it as compiled code to be used by others knows all about what the code does, while the end user knows a very limited scope. When using software in compiled form, it is impossible to know for certain what an application is intended to do, unless the original human-readable code can be viewed.

### The Nature of Power

Spearheaded by Richard Stallman, this shift of power became a point of concern. This discussion started in the 1980s, for this was the time that computers left the world of academia and research and entered the world of commerce and consumers. Suddenly, that power became a source of control and exploitation.

One way to combat this imbalance of power was with the concept of FLOSS software. FLOSS software is built on the [4-Freedoms][2], which give you a wide array of other 'affiliated' rights and guarantees. In essence, FLOSS software uses copyright licensing as a form of moral contract that forces software developers not to leverage the one-way power against their users. The principal way of doing this is with the GNU General Public Licenses, which Richard Stallman created and has since been promoting.

One of those guarantees is that you can see the code that should be running on your device. When you get a device using FLOSS software, the manufacturer should provide you the code that the device is using, as well as all the instructions that you need to compile that code yourself. Then you can replace the code on the device with the version you compile yourself. Even better, if you compare the version you have with the version on the device, you can see if the device manufacturer tried to cheat you or other customers.

This is where the Web of Trust comes back into the picture. The Web of Trust implies that even if the vast majority of people can't validate the workings of a device, others can do so on their behalf. Journalists, security analysts, and hobbyists can do the work that others might be unable to do. And if they find something, they have the power to share their findings.

### Security by Blind Trust

This is, of course, only the case if the application and all components underneath it are FLOSS. Proprietary software, or even software which is merely Open Source, has compiled versions that nobody can recreate and validate. Thus, you can never truly know if that software is secure. It might have a backdoor, it might sell your personal data, or it might be pushing a closed ecosystem to create a vendor lock-in. With closed-source software, your security is only as good as the trustworthiness of the company making the software.

For companies and developers, this actually creates another snare. While you might still care about your users and their security, you're a liability: if a criminal can get to your official builds or supply chain, then there is no way for anybody to discover that afterwards. An increasing number of attacks do not target users directly, but instead try to get in by exploiting the trust the companies/developers have carefully grown.

You should also not underestimate pressure from outside: governments can ask you to ignore a vulnerability, or they might even demand cooperation. Investment firms or shareholders may also insist that you create a vendor lock-in for future use. The blind trust that you demand of your users can be used against you.

### Security by a Web of Trust

If you are a user, FLOSS software is good because others can warn you when they find suspicious elements. You can use any FLOSS device with minimal economic risk, and there are many FLOSS developers who care for your privacy. Even if the details are beyond you, there are rules in place to facilitate trust.

If you are a tinkerer, FLOSS is good because, with a little extra work, you can check the promises of others. You can warn people when something goes wrong, and you can validate the warnings of others. You're also able to check individual parts in a larger picture. The libraries used by FLOSS applications are also open for review: it's "trust all the way down."
|
||||
|
||||
For companies and developers, FLOSS is also a great reassurance that your trust can’t be easily subverted. If malicious actors wish to attack your users, then any irregularity can quickly be spotted. Last but not least, since you also stand to defend your customers economic well-being and privacy, you can use that as an important selling point to customers who care about their own security.
|
||||
|
||||
### Fedora’s case
|
||||
|
||||
Fedora embraces the concept of FLOSS and it stands strong to defend it. There are comprehensive [legal guidelines][3], and Fedora’s principles are directly referencing the 4-Freedoms: [Freedom, Friends, Features, and First][4]
|
||||
|
||||
![][5]
|
||||
|
||||
To this end, entire systems have been set up to facilitate this kind of security. Fedora works completely in the open, and any user can check the official servers. [Koji][6] is the name of the Fedora Buildsystem, and you can see every application and it’s build logs there. For added security, there is also [Bohdi][7], which orchestrates the deployment of an application. Multiple people must approve it, before the application can become available.
|
||||
|
||||
This creates the Web of Trust on which you can rely. Every package in the repository goes through the same process, and at every point somebody can intervene. There are also escalation systems in place to report issues, so that issues can quickly be tackled when they occur. Individual contributors also know that they can be reviewed at every time, which itself is already enough of a precaution to dissuade mischievous thoughts.
|
||||
|
||||
You don’t have to trust Fedora (implicitly), you can get something better; trust in users like you.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://fedoramagazine.org/web-of-trust-part-1-concept/
|
||||
|
||||
作者:[Kevin "Eonfge" Degeling][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://fedoramagazine.org/author/eonfge/
|
||||
[b]: https://github.com/lujun9972
|
||||
[1]: https://fedoramagazine.org/wp-content/uploads/2020/09/weboftrust1-816x345.jpg
|
||||
[2]: https://fsfe.org/freesoftware/freesoftware.en.html
|
||||
[3]: https://fedoraproject.org/wiki/Licensing:Main?rd=Licensing
|
||||
[4]: https://docs.fedoraproject.org/en-US/project/
|
||||
[5]: https://fedoramagazine.org/wp-content/uploads/2020/09/foundations_expand_1_freedom.png
|
||||
[6]: https://koji.fedoraproject.org/koji/index
|
||||
[7]: https://bodhi.fedoraproject.org/
|
947
translated/tech/20180414 Go on very small hardware Part 2.md
Normal file
@ -0,0 +1,947 @@
|
||||
[#]: collector: (oska874)
|
||||
[#]: translator: (gxlct008)
|
||||
[#]: reviewer: ( )
|
||||
[#]: publisher: ( )
|
||||
[#]: url: ( )
|
||||
[#]: subject: (Go on very small hardware Part 2)
|
||||
[#]: via: (https://ziutek.github.io/2018/04/14/go_on_very_small_hardware2.html)
|
||||
[#]: author: (Michał Derkacz https://ziutek.github.io/)
|
||||
|
||||
Go 语言在极小硬件上的运用(二)
|
||||
============================================================
|
||||
|
||||
|
||||
[][1]
|
||||
|
||||
|
||||
在本文的 [第一部分][2] 的结尾,我承诺要写关于 _interfaces_ 的内容。我不想在这里写有关接口的完整甚至简短的讲义。相反,我将展示一个简单的示例,来说明如何定义和使用接口,以及如何利用无处不在的 _io.Writer_ 接口。还有一些关于 _reflection_ 和 _semihosting_ 的内容。
|
||||
|
||||
接口是 Go 语言的重要组成部分。如果您想了解更多有关它们的信息,我建议您阅读 [Effective Go][3] 和 [Russ Cox 的文章][4]。
|
||||
|
||||
### 并发 Blinky – 回顾
|
||||
|
||||
当您阅读前面示例的代码时,您可能会注意到一个违反直觉的方式来打开或关闭 LED。 _Set_ 方法用于关闭 LED,_Clear_ 方法用于打开 LED。这是由于在 <ruby>漏极开路配置<rt>open-drain configuration</rt></ruby> 下驱动了 LED。我们可以做些什么来减少代码的混乱? 让我们用 _On_ 和 _Off_ 方法来定义 _LED_ 类型:
|
||||
|
||||
```
|
||||
type LED struct {
|
||||
pin gpio.Pin
|
||||
}
|
||||
|
||||
func (led LED) On() {
|
||||
led.pin.Clear()
|
||||
}
|
||||
|
||||
func (led LED) Off() {
|
||||
led.pin.Set()
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
现在我们可以简单地调用 `led.On()` 和 `led.Off()`,这不会再引起任何疑惑了。
|
||||
|
||||
|
||||
在前面的所有示例中,我都尝试使用相同的<ruby>漏极开路配置<rt>open-drain configuration</rt></ruby>来避免代码复杂化。但是在最后一个示例中,对于我来说,将第三个 LED 连接到 GND 和 PA3 引脚之间,并将 PA3 配置为<ruby>推挽模式<rt>push-pull mode</rt></ruby>会更容易。下一个示例将使用以此方式连接的 LED。
|
||||
|
||||
但是我们的新 _LED_ 类型不支持推挽配置。实际上,我们应该将其称为 _OpenDrainLED_,并定义另一个类型 _PushPullLED_:
|
||||
|
||||
```
|
||||
type PushPullLED struct {
|
||||
pin gpio.Pin
|
||||
}
|
||||
|
||||
func (led PushPullLED) On() {
|
||||
led.pin.Set()
|
||||
}
|
||||
|
||||
func (led PushPullLED) Off() {
|
||||
led.pin.Clear()
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
请注意,这两种类型都具有相同的方法,它们的工作方式也相同。如果在 LED 上运行的代码可以同时使用这两种类型,而不必注意当前使用的是哪种类型,那就太好了。 _interface type_ 可以提供帮助:
|
||||
|
||||
```
|
||||
package main
|
||||
|
||||
import (
|
||||
"delay"
|
||||
|
||||
"stm32/hal/gpio"
|
||||
"stm32/hal/system"
|
||||
"stm32/hal/system/timer/systick"
|
||||
)
|
||||
|
||||
type LED interface {
|
||||
On()
|
||||
Off()
|
||||
}
|
||||
|
||||
type PushPullLED struct{ pin gpio.Pin }
|
||||
|
||||
func (led PushPullLED) On() {
|
||||
led.pin.Set()
|
||||
}
|
||||
|
||||
func (led PushPullLED) Off() {
|
||||
led.pin.Clear()
|
||||
}
|
||||
|
||||
func MakePushPullLED(pin gpio.Pin) PushPullLED {
|
||||
pin.Setup(&gpio.Config{Mode: gpio.Out, Driver: gpio.PushPull})
|
||||
return PushPullLED{pin}
|
||||
}
|
||||
|
||||
type OpenDrainLED struct{ pin gpio.Pin }
|
||||
|
||||
func (led OpenDrainLED) On() {
|
||||
led.pin.Clear()
|
||||
}
|
||||
|
||||
func (led OpenDrainLED) Off() {
|
||||
led.pin.Set()
|
||||
}
|
||||
|
||||
func MakeOpenDrainLED(pin gpio.Pin) OpenDrainLED {
|
||||
pin.Setup(&gpio.Config{Mode: gpio.Out, Driver: gpio.OpenDrain})
|
||||
return OpenDrainLED{pin}
|
||||
}
|
||||
|
||||
var led1, led2 LED
|
||||
|
||||
func init() {
|
||||
system.SetupPLL(8, 1, 48/8)
|
||||
systick.Setup(2e6)
|
||||
|
||||
gpio.A.EnableClock(false)
|
||||
led1 = MakeOpenDrainLED(gpio.A.Pin(4))
|
||||
led2 = MakePushPullLED(gpio.A.Pin(3))
|
||||
}
|
||||
|
||||
func blinky(led LED, period int) {
|
||||
for {
|
||||
led.On()
|
||||
delay.Millisec(100)
|
||||
led.Off()
|
||||
delay.Millisec(period - 100)
|
||||
}
|
||||
}
|
||||
|
||||
func main() {
|
||||
go blinky(led1, 500)
|
||||
blinky(led2, 1000)
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
我们定义了 _LED_ 接口,它有两个方法: _On_ 和 _Off_。 _PushPullLED_ 和 _OpenDrainLED_ 类型代表两种驱动 LED 的方式。我们还定义了两个用作构造函数的 `Make*LED` 函数。这两种类型都实现了 _LED_ 接口,因此可以将这些类型的值赋给 _LED_ 类型的变量:
|
||||
|
||||
```
|
||||
led1 = MakeOpenDrainLED(gpio.A.Pin(4))
|
||||
led2 = MakePushPullLED(gpio.A.Pin(3))
|
||||
```
|
||||
|
||||
在这种情况下,可赋值性在编译时检查。赋值后,_led1_ 变量包含一个 `OpenDrainLED{gpio.A.Pin(4)}`,以及一个指向 _OpenDrainLED_ 类型方法集的指针。 `led1.On()` 调用大致对应于以下 C 代码:
|
||||
|
||||
```
|
||||
led1.methods->On(led1.value)
|
||||
```
|
||||
|
||||
如您所见,如果仅考虑函数调用的开销,这是相当便宜的抽象。
|
||||
|
||||
|
||||
但是,对接口的任何赋值都会导致包含有关已赋值类型的大量信息。对于由许多其他类型组成的复杂类型,可能会有很多信息:
|
||||
|
||||
```
|
||||
$ egc
|
||||
$ arm-none-eabi-size cortexm0.elf
|
||||
text data bss dec hex filename
|
||||
10356 196 212 10764 2a0c cortexm0.elf
|
||||
```
|
||||
|
||||
如果我们不使用 [反射][5],可以通过避免包含类型和结构字段的名称来节省一些字节:
|
||||
|
||||
```
|
||||
$ egc -nf -nt
|
||||
$ arm-none-eabi-size cortexm0.elf
|
||||
text data bss dec hex filename
|
||||
10312 196 212 10720 29e0 cortexm0.elf
|
||||
```
|
||||
|
||||
生成的二进制文件仍然包含一些有关类型的必要信息和关于所有导出方法(带有名称)的完整信息。在运行时,主要是当您将存储在接口变量中的一个值赋值给任何其他变量时,需要此信息来检查可赋值性。
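
作为示意,下面这段标准 Go 代码(与 Emgo 无关,类型名纯属假设)演示了这种运行时检查:把接口变量中的值断言为另一个接口时,必须在运行时查询其方法集,因此二进制文件中要保留相应的信息。

```
package main

import "fmt"

type LED interface{ On() }
type Dimmable interface{ SetBrightness(int) }

// 假设的示例类型,同时实现了上面两个接口。
type pwmLED struct{}

func (pwmLED) On()               {}
func (pwmLED) SetBrightness(int) {}

func main() {
	var led LED = pwmLED{}
	// 这个断言无法在编译期完成:运行时必须查询 pwmLED 的方法集。
	if d, ok := led.(Dimmable); ok {
		d.SetBrightness(50)
		fmt.Println("also dimmable")
	}
}
```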
|
||||
|
||||
我们还可以通过重新编译所导入的包来删除它们的类型和字段名称:
|
||||
|
||||
```
|
||||
$ cd $HOME/emgo
|
||||
$ ./clean.sh
|
||||
$ cd $HOME/firstemgo
|
||||
$ egc -nf -nt
|
||||
$ arm-none-eabi-size cortexm0.elf
|
||||
text data bss dec hex filename
|
||||
10272 196 212 10680 29b8 cortexm0.elf
|
||||
```
|
||||
|
||||
让我们加载这个程序,看看它是否按预期工作。这一次我们将使用 [st-flash][6] 命令:
|
||||
|
||||
```
|
||||
$ arm-none-eabi-objcopy -O binary cortexm0.elf cortexm0.bin
|
||||
$ st-flash write cortexm0.bin 0x8000000
|
||||
st-flash 1.4.0-33-gd76e3c7
|
||||
2018-04-10T22:04:34 INFO usb.c: -- exit_dfu_mode
|
||||
2018-04-10T22:04:34 INFO common.c: Loading device parameters....
|
||||
2018-04-10T22:04:34 INFO common.c: Device connected is: F0 small device, id 0x10006444
|
||||
2018-04-10T22:04:34 INFO common.c: SRAM size: 0x1000 bytes (4 KiB), Flash: 0x4000 bytes (16 KiB) in pages of 1024 bytes
|
||||
2018-04-10T22:04:34 INFO common.c: Attempting to write 10468 (0x28e4) bytes to stm32 address: 134217728 (0x8000000)
|
||||
Flash page at addr: 0x08002800 erased
|
||||
2018-04-10T22:04:34 INFO common.c: Finished erasing 11 pages of 1024 (0x400) bytes
|
||||
2018-04-10T22:04:34 INFO common.c: Starting Flash write for VL/F0/F3/F1_XL core id
|
||||
2018-04-10T22:04:34 INFO flash_loader.c: Successfully loaded flash loader in sram
|
||||
11/11 pages written
|
||||
2018-04-10T22:04:35 INFO common.c: Starting verification of write complete
|
||||
2018-04-10T22:04:35 INFO common.c: Flash written and verified! jolly good!
|
||||
|
||||
```
|
||||
|
||||
我没有将 NRST 信号连接到编程器,因此无法使用 _-reset_ 选项,必须按下 reset 按钮才能运行程序。
|
||||
|
||||

|
||||
|
||||
看来,_st-flash_ 与此板配合使用有点不可靠(通常需要重置 ST-LINK 加密狗)。此外,当前版本不会通过 SWD 发出复位命令(仅使用 NRST 信号)。软件复位是不现实的,但是它通常是有效的,缺少它会带来不便。对于这种<ruby>电路板-编程器<rt>board-programmer</rt></ruby>组合,_OpenOCD_ 工作得更好。
|
||||
|
||||
### UART
|
||||
|
||||
UART(<ruby>通用异步收发传输器<rt>Universal Asynchronous Receiver-Transmitter</rt></ruby>)仍然是当今微控制器最重要的外设之一。它的优点是以下属性的独特组合:
|
||||
|
||||
* 相对较高的速度,
|
||||
|
||||
* 仅两条信号线(在 <ruby>半双工<rt>half-duplex</rt></ruby> 通信的情况下甚至一条),
|
||||
|
||||
* 角色对称,
|
||||
|
||||
* 关于新数据的 <ruby>同步带内信令<rt>synchronous in-band signaling</rt></ruby>(起始位),
|
||||
|
||||
* 在传输 <ruby>字<rt>words</rt></ruby> 内的精确计时。
|
||||
|
||||
|
||||
这使得最初用于传输由 7-9 位 words 组成的异步消息的 UART,也被用于有效地实现各种其他物理协议,例如被 [WS28xx LEDs][7] 或 [1-wire][8] 设备使用的协议。
|
||||
|
||||
但是,我们将以其通常的角色使用 UART:从程序中打印文本消息。
|
||||
|
||||
```
|
||||
package main
|
||||
|
||||
import (
|
||||
"io"
|
||||
"rtos"
|
||||
|
||||
"stm32/hal/dma"
|
||||
"stm32/hal/gpio"
|
||||
"stm32/hal/irq"
|
||||
"stm32/hal/system"
|
||||
"stm32/hal/system/timer/systick"
|
||||
"stm32/hal/usart"
|
||||
)
|
||||
|
||||
var tts *usart.Driver
|
||||
|
||||
func init() {
|
||||
system.SetupPLL(8, 1, 48/8)
|
||||
systick.Setup(2e6)
|
||||
|
||||
gpio.A.EnableClock(true)
|
||||
tx := gpio.A.Pin(9)
|
||||
|
||||
tx.Setup(&gpio.Config{Mode: gpio.Alt})
|
||||
tx.SetAltFunc(gpio.USART1_AF1)
|
||||
d := dma.DMA1
|
||||
d.EnableClock(true)
|
||||
tts = usart.NewDriver(usart.USART1, d.Channel(2, 0), nil, nil)
|
||||
tts.Periph().EnableClock(true)
|
||||
tts.Periph().SetBaudRate(115200)
|
||||
tts.Periph().Enable()
|
||||
tts.EnableTx()
|
||||
|
||||
rtos.IRQ(irq.USART1).Enable()
|
||||
rtos.IRQ(irq.DMA1_Channel2_3).Enable()
|
||||
}
|
||||
|
||||
func main() {
|
||||
io.WriteString(tts, "Hello, World!\r\n")
|
||||
}
|
||||
|
||||
func ttsISR() {
|
||||
tts.ISR()
|
||||
}
|
||||
|
||||
func ttsDMAISR() {
|
||||
tts.TxDMAISR()
|
||||
}
|
||||
|
||||
//c:__attribute__((section(".ISRs")))
|
||||
var ISRs = [...]func(){
|
||||
irq.USART1: ttsISR,
|
||||
irq.DMA1_Channel2_3: ttsDMAISR,
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
您会发现此代码可能有些复杂,但目前 STM32 HAL 中没有更简单的 UART 驱动程序(在某些情况下,简单的轮询驱动程序可能会很有用)。 _usart.Driver_ 是使用 DMA 和中断来减轻 CPU 负担的高效驱动程序。
|
||||
|
||||
STM32 USART 外设提供传统的 UART 及其同步版本。要将其用作输出,我们必须将其 Tx 信号连接到正确的 GPIO 引脚:
|
||||
|
||||
```
|
||||
tx.Setup(&gpio.Config{Mode: gpio.Alt})
|
||||
tx.SetAltFunc(gpio.USART1_AF1)
|
||||
```
|
||||
|
||||
在 Tx-only 模式下配置 _usart.Driver_ (rxdma 和 rxbuf 设置为 nil):
|
||||
|
||||
```
|
||||
tts = usart.NewDriver(usart.USART1, d.Channel(2, 0), nil, nil)
|
||||
```
|
||||
|
||||
我们使用它的 _WriteString_ 方法来打印这句名句。让我们清理所有内容并编译该程序:
|
||||
|
||||
```
|
||||
$ cd $HOME/emgo
|
||||
$ ./clean.sh
|
||||
$ cd $HOME/firstemgo
|
||||
$ egc
|
||||
$ arm-none-eabi-size cortexm0.elf
|
||||
text data bss dec hex filename
|
||||
12728 236 176 13140 3354 cortexm0.elf
|
||||
```
|
||||
|
||||
要查看某些内容,您需要在 PC 中使用 UART 外设。
|
||||
|
||||
**请勿使用 RS232 端口或 USB 转 RS232 转换器!**
|
||||
|
||||
STM32 系列使用 3.3V 逻辑,但是 RS232 可以产生 -15 V ~ +15 V 的电压,这可能会损坏您的 MCU。您需要使用 3.3 V 逻辑的 USB 转 UART 转换器。流行的转换器基于 FT232 或 CP2102 芯片。
|
||||
|
||||

|
||||
|
||||
您还需要一个终端仿真程序(我更喜欢 [picocom][9])。刷入新的镜像,运行终端仿真器,然后按几次 reset 按钮:
|
||||
|
||||
```
|
||||
$ openocd -d0 -f interface/stlink.cfg -f target/stm32f0x.cfg -c 'init; program cortexm0.elf; reset run; exit'
|
||||
Open On-Chip Debugger 0.10.0+dev-00319-g8f1f912a (2018-03-07-19:20)
|
||||
Licensed under GNU GPL v2
|
||||
For bug reports, read
|
||||
http://openocd.org/doc/doxygen/bugs.html
|
||||
debug_level: 0
|
||||
adapter speed: 1000 kHz
|
||||
adapter_nsrst_delay: 100
|
||||
none separate
|
||||
adapter speed: 950 kHz
|
||||
target halted due to debug-request, current mode: Thread
|
||||
xPSR: 0xc1000000 pc: 0x080016f4 msp: 0x20000a20
|
||||
adapter speed: 4000 kHz
|
||||
** Programming Started **
|
||||
auto erase enabled
|
||||
target halted due to breakpoint, current mode: Thread
|
||||
xPSR: 0x61000000 pc: 0x2000003a msp: 0x20000a20
|
||||
wrote 13312 bytes from file cortexm0.elf in 1.020185s (12.743 KiB/s)
|
||||
** Programming Finished **
|
||||
adapter speed: 950 kHz
|
||||
$
|
||||
$ picocom -b 115200 /dev/ttyUSB0
|
||||
picocom v3.1
|
||||
|
||||
port is : /dev/ttyUSB0
|
||||
flowcontrol : none
|
||||
baudrate is : 115200
|
||||
parity is : none
|
||||
databits are : 8
|
||||
stopbits are : 1
|
||||
escape is : C-a
|
||||
local echo is : no
|
||||
noinit is : no
|
||||
noreset is : no
|
||||
hangup is : no
|
||||
nolock is : no
|
||||
send_cmd is : sz -vv
|
||||
receive_cmd is : rz -vv -E
|
||||
imap is :
|
||||
omap is :
|
||||
emap is : crcrlf,delbs,
|
||||
logfile is : none
|
||||
initstring : none
|
||||
exit_after is : not set
|
||||
exit is : no
|
||||
|
||||
Type [C-a] [C-h] to see available commands
|
||||
Terminal ready
|
||||
Hello, World!
|
||||
Hello, World!
|
||||
Hello, World!
|
||||
```
|
||||
|
||||
每次按下 reset 按钮都会产生新的 “Hello, World!” 行。一切都在按预期进行。
|
||||
|
||||
要查看此 MCU 的 <ruby>双向<rt>bi-directional</rt></ruby> UART 代码,请查看 [此示例][10]。
|
||||
|
||||
### io.Writer 接口
|
||||
|
||||
_io.Writer_ 接口可能是 Go 中第二种最常用的接口类型,紧接在 _error_ 接口之后。其定义如下所示:
|
||||
|
||||
```
|
||||
type Writer interface {
|
||||
Write(p []byte) (n int, err error)
|
||||
}
|
||||
```
|
||||
|
||||
_usart.Driver_ 实现了 _io.Writer_ ,因此我们可以替换:
|
||||
|
||||
```
|
||||
tts.WriteString("Hello, World!\r\n")
|
||||
```
|
||||
|
||||
为
|
||||
|
||||
```
|
||||
io.WriteString(tts, "Hello, World!\r\n")
|
||||
```
|
||||
|
||||
此外,您需要将 _io_ 包添加到 _import_ 部分。
|
||||
|
||||
_io.WriteString_ 函数的声明如下所示:
|
||||
|
||||
```
|
||||
func WriteString(w Writer, s string) (n int, err error)
|
||||
```
|
||||
|
||||
如您所见,_io.WriteString_ 允许使用实现了 _io.Writer_ 接口的任何类型来编写字符串。在内部,它检查基础类型是否具有 _WriteString_ 方法,并使用该方法代替 _Write_ (如果可用)。
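
它的内部机制大致可以用下面的标准 Go 代码来示意(并非标准库的逐字实现;导出的 `io.StringWriter` 接口是 Go 1.12 才加入的,这里仅用于说明思路):

```
package main

import (
	"fmt"
	"io"
	"strings"
)

// 示意 io.WriteString 的“有则用之”检查。
func writeString(w io.Writer, s string) (int, error) {
	if sw, ok := w.(io.StringWriter); ok {
		// 底层类型自带 WriteString,直接调用,省去 []byte(s) 的一次复制。
		return sw.WriteString(s)
	}
	return w.Write([]byte(s))
}

func main() {
	var b strings.Builder // strings.Builder 提供了 WriteString 方法
	writeString(&b, "Hello, World!")
	fmt.Println(b.String())
}
```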
|
||||
|
||||
让我们编译修改后的程序:
|
||||
|
||||
```
|
||||
$ egc
|
||||
$ arm-none-eabi-size cortexm0.elf
|
||||
text data bss dec hex filename
|
||||
15456 320 248 16024 3e98 cortexm0.elf
|
||||
```
|
||||
|
||||
如您所见,_io.WriteString_ 导致二进制文件的大小显著增加:15776 - 12964 = 2812 字节。Flash 上没有太多空间了。是什么引起了这么大规模的增长?
|
||||
|
||||
使用这个命令:
|
||||
|
||||
```
|
||||
arm-none-eabi-nm --print-size --size-sort --radix=d cortexm0.elf
|
||||
```
|
||||
|
||||
我们可以打印两种情况下按其大小排序的所有符号。通过过滤和分析获得的数据(awk,diff),我们可以找到大约 80 个新符号。最大的十个如下所示:
|
||||
|
||||
```
|
||||
> 00000062 T stm32$hal$usart$Driver$DisableRx
|
||||
> 00000072 T stm32$hal$usart$Driver$RxDMAISR
|
||||
> 00000076 T internal$Type$Implements
|
||||
> 00000080 T stm32$hal$usart$Driver$EnableRx
|
||||
> 00000084 t errors$New
|
||||
> 00000096 R $8$stm32$hal$usart$Driver$$
|
||||
> 00000100 T stm32$hal$usart$Error$Error
|
||||
> 00000360 T io$WriteString
|
||||
> 00000660 T stm32$hal$usart$Driver$Read
|
||||
```
|
||||
|
||||
因此,即使我们从未用到 _usart.Driver.Read_ 方法,它也被编译进了二进制文件,_DisableRx_、_RxDMAISR_、_EnableRx_ 以及上面未提及的其他方法同样如此。不幸的是,如果您将某个值赋给接口,那么就需要它的完整方法集(连同所有依赖项)。对于使用大多数方法的大型程序来说,这不是问题。但是对于我们这种极简的情况而言,这是一个巨大的负担。
|
||||
|
||||
我们已经接近 MCU 的极限,但让我们尝试打印一些数字(您需要在 _import_ 部分中用 _strconv_ 替换 _io_ 包):
|
||||
|
||||
```
|
||||
func main() {
|
||||
a := 12
|
||||
b := -123
|
||||
|
||||
tts.WriteString("a = ")
|
||||
strconv.WriteInt(tts, a, 10, 0, 0)
|
||||
tts.WriteString("\r\n")
|
||||
tts.WriteString("b = ")
|
||||
strconv.WriteInt(tts, b, 10, 0, 0)
|
||||
tts.WriteString("\r\n")
|
||||
|
||||
tts.WriteString("hex(a) = ")
|
||||
strconv.WriteInt(tts, a, 16, 0, 0)
|
||||
tts.WriteString("\r\n")
|
||||
tts.WriteString("hex(b) = ")
|
||||
strconv.WriteInt(tts, b, 16, 0, 0)
|
||||
tts.WriteString("\r\n")
|
||||
}
|
||||
```
|
||||
|
||||
与使用 _io.WriteString_ 函数的情况一样,_strconv.WriteInt_ 的第一个参数的类型为 _io.Writer_。
|
||||
|
||||
|
||||
```
|
||||
$ egc
|
||||
/usr/local/arm/bin/arm-none-eabi-ld: /home/michal/firstemgo/cortexm0.elf section `.rodata' will not fit in region `Flash'
|
||||
/usr/local/arm/bin/arm-none-eabi-ld: region `Flash' overflowed by 692 bytes
|
||||
exit status 1
|
||||
```
|
||||
|
||||
这一次我们的空间用完了。让我们试着精简一下有关类型的信息:
|
||||
|
||||
```
|
||||
$ cd $HOME/emgo
|
||||
$ ./clean.sh
|
||||
$ cd $HOME/firstemgo
|
||||
$ egc -nf -nt
|
||||
$ arm-none-eabi-size cortexm0.elf
|
||||
text data bss dec hex filename
|
||||
15876 316 320 16512 4080 cortexm0.elf
|
||||
```
|
||||
|
||||
非常接近极限,但恰好装得下。让我们加载并运行此代码:
|
||||
|
||||
```
|
||||
a = 12
|
||||
b = -123
|
||||
hex(a) = c
|
||||
hex(b) = -7b
|
||||
```
|
||||
|
||||
Emgo 中的 _strconv_ 包与 Go 中的原型有很大的不同。 它旨在直接用于写入格式化的数字,并且在许多情况下可以替换繁重的 _fmt_ 包。 这就是为什么函数名称以 _Write_ 而不是 _Format_ 开头,并具有额外的两个参数的原因。 以下是其用法示例:
|
||||
|
||||
```
|
||||
func main() {
|
||||
b := -123
|
||||
strconv.WriteInt(tts, b, 10, 0, 0)
|
||||
tts.WriteString("\r\n")
|
||||
strconv.WriteInt(tts, b, 10, 6, ' ')
|
||||
tts.WriteString("\r\n")
|
||||
strconv.WriteInt(tts, b, 10, 6, '0')
|
||||
tts.WriteString("\r\n")
|
||||
strconv.WriteInt(tts, b, 10, 6, '.')
|
||||
tts.WriteString("\r\n")
|
||||
strconv.WriteInt(tts, b, 10, -6, ' ')
|
||||
tts.WriteString("\r\n")
|
||||
strconv.WriteInt(tts, b, 10, -6, '0')
|
||||
tts.WriteString("\r\n")
|
||||
strconv.WriteInt(tts, b, 10, -6, '.')
|
||||
tts.WriteString("\r\n")
|
||||
}
|
||||
```
|
||||
|
||||
下面是它的输出:
|
||||
|
||||
```
|
||||
-123
|
||||
-123
|
||||
-00123
|
||||
..-123
|
||||
-123
|
||||
-123
|
||||
-123..
|
||||
```
|
||||
|
||||
### Unix 流和<ruby>莫尔斯电码<rt>Morse code</rt></ruby>
|
||||
|
||||
得益于大多数执行写操作的函数都使用 _io.Writer_ 而不是具体类型(例如 C 中的 _FILE_)这一事实,我们获得了类似于 _Unix stream_ 的功能。在 Unix 中,我们可以轻松地组合简单的命令来执行更大的任务。例如,我们可以通过以下方式将文本写入文件:
|
||||
|
||||
```
|
||||
echo "Hello, World!" > file.txt
|
||||
```
|
||||
|
||||
`>` 操作符将前面命令的输出流写入文件。还有 `|` 操作符,用于连接相邻命令的输出流和输入流。
|
||||
|
||||
|
||||
多亏了流,我们可以轻松地转换/过滤任何命令的输出。例如,要将所有字母转换为大写,我们可以通过 _tr_ 命令过滤 echo 的输出:
|
||||
```
|
||||
echo "Hello, World!" | tr a-z A-Z > file.txt
|
||||
```
|
||||
|
||||
为了显示 _io.Writer_ 和 Unix 流之间的类比,让我们编写以下代码:
|
||||
|
||||
```
|
||||
io.WriteString(tts, "Hello, World!\r\n")
|
||||
```
|
||||
|
||||
它可以用以下伪 Unix 形式来表示:
|
||||
|
||||
```
|
||||
io.WriteString "Hello, World!" | usart.Driver usart.USART1
|
||||
```
|
||||
|
||||
下一个示例将显示如何执行此操作:
|
||||
|
||||
```
|
||||
io.WriteString "Hello, World!" | MorseWriter | usart.Driver usart.USART1
|
||||
```
|
||||
|
||||
让我们来创建一个简单的编码器,它使用莫尔斯电码对写入的文本进行编码:
|
||||
|
||||
```
|
||||
type MorseWriter struct {
|
||||
W io.Writer
|
||||
}
|
||||
|
||||
func (w *MorseWriter) Write(s []byte) (int, error) {
|
||||
var buf [8]byte
|
||||
for n, c := range s {
|
||||
switch {
|
||||
case c == '\n':
|
||||
c = ' ' // Replace new lines with spaces.
|
||||
case 'a' <= c && c <= 'z':
|
||||
c -= 'a' - 'A' // Convert to upper case.
|
||||
}
|
||||
if c < ' ' || 'Z' < c {
|
||||
continue // c is outside ASCII [' ', 'Z']
|
||||
}
|
||||
var symbol morseSymbol
|
||||
if c == ' ' {
|
||||
symbol.length = 1
|
||||
buf[0] = ' '
|
||||
} else {
|
||||
symbol = morseSymbols[c-'!']
|
||||
for i := uint(0); i < uint(symbol.length); i++ {
|
||||
if (symbol.code>>i)&1 != 0 {
|
||||
buf[i] = '-'
|
||||
} else {
|
||||
buf[i] = '.'
|
||||
}
|
||||
}
|
||||
}
|
||||
buf[symbol.length] = ' '
|
||||
if _, err := w.W.Write(buf[:symbol.length+1]); err != nil {
|
||||
return n, err
|
||||
}
|
||||
}
|
||||
return len(s), nil
|
||||
}
|
||||
|
||||
type morseSymbol struct {
|
||||
code, length byte
|
||||
}
|
||||
|
||||
//emgo:const
|
||||
var morseSymbols = [...]morseSymbol{
|
||||
{1<<0 | 1<<1 | 1<<2, 4}, // ! ---.
|
||||
{1<<1 | 1<<4, 6}, // " .-..-.
|
||||
{}, // #
|
||||
{1<<3 | 1<<6, 7}, // $ ...-..-
|
||||
|
||||
// Some code omitted...
|
||||
|
||||
{1<<0 | 1<<3, 4}, // X -..-
|
||||
{1<<0 | 1<<2 | 1<<3, 4}, // Y -.--
|
||||
{1<<0 | 1<<1, 4}, // Z --..
|
||||
}
|
||||
```
|
||||
|
||||
您可以在 [这里][11] 找到完整的 _morseSymbols_ 数组。 `//emgo:const` 指令确保 _morseSymbols_ 数组不会被复制到 RAM 中。
|
||||
|
||||
现在我们可以通过两种方式打印句子:
|
||||
|
||||
```
|
||||
func main() {
|
||||
s := "Hello, World!\r\n"
|
||||
mw := &MorseWriter{tts}
|
||||
|
||||
io.WriteString(tts, s)
|
||||
io.WriteString(mw, s)
|
||||
}
|
||||
```
|
||||
|
||||
我们使用指向 _MorseWriter_ 的指针 `&MorseWriter{tts}`,而不是简单的 `MorseWriter{tts}` 值,因为 _MorseWriter_ 类型太大,放不进接口变量。
|
||||
|
||||
|
||||
与 Go 不同,Emgo 不会为存储在接口变量中的值动态分配内存。接口类型的大小受限制,等于三个指针(适合 _slice_ )或两个 _float64_(适合 _complex128_ )的大小,以较大者为准。它可以直接存储所有基本类型和小型 “结构体/数组” 的值,但是对于较大的值,您必须使用指针。
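
可以用标准 Go 的 `unsafe.Sizeof` 来直观感受这个上限(仅为示意,这两个类型纯属举例;标准 Go 的接口内部表示与 Emgo 并不相同):

```
package main

import (
	"fmt"
	"unsafe"
)

type small struct{ a, b uintptr }     // 2 个机器字:可以直接存入接口变量
type big struct{ a, b, c, d uintptr } // 4 个机器字:超过上限,须改存指针

func main() {
	limit := 3 * unsafe.Sizeof(uintptr(0)) // “三个指针”的内联上限
	fmt.Println(unsafe.Sizeof(small{}) <= limit) // true
	fmt.Println(unsafe.Sizeof(big{}) <= limit)   // false
}
```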
|
||||
|
||||
让我们编译此代码并查看其输出:
|
||||
|
||||
```
|
||||
$ egc
|
||||
$ arm-none-eabi-size cortexm0.elf
|
||||
text data bss dec hex filename
|
||||
15152 324 248 15724 3d6c cortexm0.elf
|
||||
```
|
||||
|
||||
```
|
||||
Hello, World!
|
||||
.... . .-.. .-.. --- --..-- .-- --- .-. .-.. -.. ---.
|
||||
```
|
||||
|
||||
### 终极 Blinky
|
||||
|
||||
_Blinky_ 是与 _Hello, World!_ 程序等效的硬件版本。一旦有了莫尔斯编码器,我们就可以轻松地将两者结合起来,得到 _终极 Blinky_ 程序:
|
||||
|
||||
```
|
||||
package main
|
||||
|
||||
import (
|
||||
"delay"
|
||||
"io"
|
||||
|
||||
"stm32/hal/gpio"
|
||||
"stm32/hal/system"
|
||||
"stm32/hal/system/timer/systick"
|
||||
)
|
||||
|
||||
var led gpio.Pin
|
||||
|
||||
func init() {
|
||||
system.SetupPLL(8, 1, 48/8)
|
||||
systick.Setup(2e6)
|
||||
|
||||
gpio.A.EnableClock(false)
|
||||
led = gpio.A.Pin(4)
|
||||
|
||||
cfg := gpio.Config{Mode: gpio.Out, Driver: gpio.OpenDrain, Speed: gpio.Low}
|
||||
led.Setup(&cfg)
|
||||
}
|
||||
|
||||
type Telegraph struct {
|
||||
Pin gpio.Pin
|
||||
Dotms int // Dot length [ms]
|
||||
}
|
||||
|
||||
func (t Telegraph) Write(s []byte) (int, error) {
|
||||
for _, c := range s {
|
||||
switch c {
|
||||
case '.':
|
||||
t.Pin.Clear()
|
||||
delay.Millisec(t.Dotms)
|
||||
t.Pin.Set()
|
||||
delay.Millisec(t.Dotms)
|
||||
case '-':
|
||||
t.Pin.Clear()
|
||||
delay.Millisec(3 * t.Dotms)
|
||||
t.Pin.Set()
|
||||
delay.Millisec(t.Dotms)
|
||||
case ' ':
|
||||
delay.Millisec(3 * t.Dotms)
|
||||
}
|
||||
}
|
||||
return len(s), nil
|
||||
}
|
||||
|
||||
func main() {
|
||||
telegraph := &MorseWriter{Telegraph{led, 100}}
|
||||
for {
|
||||
io.WriteString(telegraph, "Hello, World! ")
|
||||
}
|
||||
}
|
||||
|
||||
// Some code omitted...
|
||||
|
||||
```
|
||||
|
||||
在上面的示例中,我省略了 _MorseWriter_ 类型的定义,因为它已在前面展示过。完整版可通过 [这里][12] 获取。让我们编译它并运行:
|
||||
|
||||
```
|
||||
$ egc
|
||||
$ arm-none-eabi-size cortexm0.elf
|
||||
text data bss dec hex filename
|
||||
11772 244 244 12260 2fe4 cortexm0.elf
|
||||
```
|
||||
|
||||

|
||||
|
||||
### 反射
|
||||
|
||||
是的,Emgo 支持 [反射][13]。 _reflect_ 包尚未完成,但是已完成的部分足以实现 _fmt.Print_ 函数族了。来看看我们可以在小型 MCU 上做什么。
|
||||
|
||||
为了减少内存使用,我们将使用 [semihosting][14] 作为标准输出。为了方便起见,我们还编写了简单的 _println_ 函数,它在某种程度上类似于 _fmt.Println_。
|
||||
|
||||
```
|
||||
package main
|
||||
|
||||
import (
|
||||
"debug/semihosting"
|
||||
"reflect"
|
||||
"strconv"
|
||||
|
||||
"stm32/hal/system"
|
||||
"stm32/hal/system/timer/systick"
|
||||
)
|
||||
|
||||
var stdout semihosting.File
|
||||
|
||||
func init() {
|
||||
system.SetupPLL(8, 1, 48/8)
|
||||
systick.Setup(2e6)
|
||||
|
||||
var err error
|
||||
stdout, err = semihosting.OpenFile(":tt", semihosting.W)
|
||||
for err != nil {
|
||||
}
|
||||
}
|
||||
|
||||
type stringer interface {
|
||||
String() string
|
||||
}
|
||||
|
||||
func println(args ...interface{}) {
|
||||
for i, a := range args {
|
||||
if i > 0 {
|
||||
stdout.WriteString(" ")
|
||||
}
|
||||
switch v := a.(type) {
|
||||
case string:
|
||||
stdout.WriteString(v)
|
||||
case int:
|
||||
strconv.WriteInt(stdout, v, 10, 0, 0)
|
||||
case bool:
|
||||
strconv.WriteBool(stdout, v, 't', 0, 0)
|
||||
case stringer:
|
||||
stdout.WriteString(v.String())
|
||||
default:
|
||||
stdout.WriteString("%unknown")
|
||||
}
|
||||
}
|
||||
stdout.WriteString("\r\n")
|
||||
}
|
||||
|
||||
type S struct {
|
||||
A int
|
||||
B bool
|
||||
}
|
||||
|
||||
func main() {
|
||||
p := &S{-123, true}
|
||||
|
||||
v := reflect.ValueOf(p)
|
||||
|
||||
println("kind(p) =", v.Kind())
|
||||
println("kind(*p) =", v.Elem().Kind())
|
||||
println("type(*p) =", v.Elem().Type())
|
||||
|
||||
v = v.Elem()
|
||||
|
||||
println("*p = {")
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
ft := v.Type().Field(i)
|
||||
fv := v.Field(i)
|
||||
println(" ", ft.Name(), ":", fv.Interface())
|
||||
}
|
||||
println("}")
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
_semihosting.OpenFile_ 函数允许在主机端打开/创建文件。特殊路径 _:tt_ 对应于主机的标准输出。
|
||||
|
||||
_println_ 函数接受任意数量的参数,每个参数的类型都是任意的:
|
||||
|
||||
```
|
||||
func println(args ...interface{})
|
||||
```
|
||||
|
||||
这之所以可行,是因为任何类型都实现了空接口 _interface{}_。 _println_ 使用 [类型开关][15] 打印字符串、整数和布尔值:
|
||||
|
||||
```
|
||||
switch v := a.(type) {
|
||||
case string:
|
||||
stdout.WriteString(v)
|
||||
case int:
|
||||
strconv.WriteInt(stdout, v, 10, 0, 0)
|
||||
case bool:
|
||||
strconv.WriteBool(stdout, v, 't', 0, 0)
|
||||
case stringer:
|
||||
stdout.WriteString(v.String())
|
||||
default:
|
||||
stdout.WriteString("%unknown")
|
||||
}
|
||||
```
|
||||
|
||||
此外,它还支持任何实现了 _stringer_ 接口的类型,即任何具有 _String()_ 方法的类型。在任何 _case_ 子句中,_v_ 变量具有正确的类型,与 _case_ 关键字后列出的类型相同。
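
作为示意(类型名纯属假设),任何带有 `String() string` 方法的类型都会落入 _stringer_ 分支:

```
package main

import "fmt"

// 与正文中相同的 stringer 接口定义。
type stringer interface {
	String() string
}

// 假设的示例类型:定义了 String() 方法,因此满足 stringer。
type pin struct {
	port byte
	n    int
}

func (p pin) String() string {
	return fmt.Sprintf("P%c%d", p.port, p.n)
}

func main() {
	var s stringer = pin{'A', 4}
	fmt.Println(s.String()) // 打印 PA4
}
```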
|
||||
|
||||
|
||||
`reflect.ValueOf(p)` 函数以一种允许通过编程方式分析其类型和内容的形式返回 _p_。如您所见,我们甚至可以使用 `v.Elem()` 解引用指针,并打印所有结构体字段及其名称。
|
||||
|
||||
让我们尝试编译这段代码。现在,让我们看看如果不使用类型和字段名进行编译会产生什么结果:
|
||||
|
||||
```
|
||||
$ egc -nt -nf
|
||||
$ arm-none-eabi-size cortexm0.elf
|
||||
text data bss dec hex filename
|
||||
16028 216 312 16556 40ac cortexm0.elf
|
||||
```
|
||||
|
||||
闪存上只剩下 140 个可用字节。让我们使用启用了 semihosting 的 OpenOCD 加载它:
|
||||
|
||||
```
|
||||
$ openocd -d0 -f interface/stlink.cfg -f target/stm32f0x.cfg -c 'init; program cortexm0.elf; arm semihosting enable; reset run'
|
||||
Open On-Chip Debugger 0.10.0+dev-00319-g8f1f912a (2018-03-07-19:20)
|
||||
Licensed under GNU GPL v2
|
||||
For bug reports, read
|
||||
http://openocd.org/doc/doxygen/bugs.html
|
||||
debug_level: 0
|
||||
adapter speed: 1000 kHz
|
||||
adapter_nsrst_delay: 100
|
||||
none separate
|
||||
adapter speed: 950 kHz
|
||||
target halted due to debug-request, current mode: Thread
|
||||
xPSR: 0xc1000000 pc: 0x08002338 msp: 0x20000a20
|
||||
adapter speed: 4000 kHz
|
||||
** Programming Started **
|
||||
auto erase enabled
|
||||
target halted due to breakpoint, current mode: Thread
|
||||
xPSR: 0x61000000 pc: 0x2000003a msp: 0x20000a20
|
||||
wrote 16384 bytes from file cortexm0.elf in 0.700133s (22.853 KiB/s)
|
||||
** Programming Finished **
|
||||
semihosting is enabled
|
||||
adapter speed: 950 kHz
|
||||
kind(p) = ptr
|
||||
kind(*p) = struct
|
||||
type(*p) =
|
||||
*p = {
|
||||
X. : -123
|
||||
X. : true
|
||||
}
|
||||
```
|
||||
|
||||
如果您实际运行过此代码,则会注意到 semihosting 运行缓慢,尤其是在逐字节写入时(缓冲很有用)。
|
||||
|
||||
如您所见,`*p` 没有类型名称,并且所有结构字段都具有相同的 _X._ 名称。让我们再次编译该程序,这次不带 _-nt -nf_ 选项:
|
||||
|
||||
```
|
||||
$ egc
|
||||
$ arm-none-eabi-size cortexm0.elf
|
||||
text data bss dec hex filename
|
||||
16052 216 312 16580 40c4 cortexm0.elf
|
||||
```
|
||||
|
||||
现在已经包括了类型和字段名称,但仅在 ~~_main.go_ 文件中~~ _main_ 包中定义了它们。该程序的输出如下所示:
|
||||
|
||||
```
|
||||
kind(p) = ptr
|
||||
kind(*p) = struct
|
||||
type(*p) = S
|
||||
*p = {
|
||||
A : -123
|
||||
B : true
|
||||
}
|
||||
```
|
||||
|
||||
反射是任何易于使用的序列化库的关键部分,而像 [JSON][16] 这样的序列化 ~~算法~~ 在<ruby>物联网<rt>IoT</rt></ruby>时代也越来越重要。
|
||||
|
||||
本文的第二部分到此结束。我认为还会有第三部分,一个更具娱乐性的部分,在那里我们会将各种有趣的设备连接到这块开发板上。如果这块板装不下,我们就换一块大一点的。
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://ziutek.github.io/2018/04/14/go_on_very_small_hardware2.html
|
||||
|
||||
作者:[Michał Derkacz ][a]
|
||||
译者:[gxlct008](https://github.com/gxlct008)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]:https://ziutek.github.io/
|
||||
[1]:https://ziutek.github.io/2018/04/14/go_on_very_small_hardware2.html
|
||||
[2]:https://ziutek.github.io/2018/03/30/go_on_very_small_hardware.html
|
||||
[3]:https://golang.org/doc/effective_go.html#interfaces
|
||||
[4]:https://research.swtch.com/interfaces
|
||||
[5]:https://blog.golang.org/laws-of-reflection
|
||||
[6]:https://github.com/texane/stlink
|
||||
[7]:http://www.world-semi.com/solution/list-4-1.html
|
||||
[8]:https://en.wikipedia.org/wiki/1-Wire
|
||||
[9]:https://github.com/npat-efault/picocom
|
||||
[10]:https://github.com/ziutek/emgo/blob/master/egpath/src/stm32/examples/f030-demo-board/usart/main.go
|
||||
[11]:https://github.com/ziutek/emgo/blob/master/egpath/src/stm32/examples/f030-demo-board/morseuart/main.go
|
||||
[12]:https://github.com/ziutek/emgo/blob/master/egpath/src/stm32/examples/f030-demo-board/morseled/main.go
|
||||
[13]:https://blog.golang.org/laws-of-reflection
|
||||
[14]:http://infocenter.arm.com/help/topic/com.arm.doc.dui0471g/Bgbjjgij.html
|
||||
[15]:https://golang.org/doc/effective_go.html#type_switch
|
||||
[16]:https://en.wikipedia.org/wiki/JSON
|
@ -1,5 +1,5 @@
|
||||
[#]: collector: (lujun9972)
|
||||
[#]: translator: ( )
|
||||
[#]: translator: (gxlct008)
|
||||
[#]: reviewer: ( )
|
||||
[#]: publisher: ( )
|
||||
[#]: url: ( )
|
||||
@ -7,44 +7,43 @@
|
||||
[#]: via: (https://nicolasparada.netlify.com/posts/go-messenger-home-page/)
|
||||
[#]: author: (Nicolás Parada https://nicolasparada.netlify.com/)
|
||||
|
||||
Building a Messenger App: Home Page
|
||||
构建一个即时消息应用(八):Home 页面
|
||||
======
|
||||
|
||||
This post is the 8th on a series:
|
||||
本文是该系列的第八篇。
|
||||
|
||||
* [Part 1: Schema][1]
|
||||
* [Part 2: OAuth][2]
|
||||
* [Part 3: Conversations][3]
|
||||
* [Part 4: Messages][4]
|
||||
* [Part 5: Realtime Messages][5]
|
||||
* [Part 6: Development Login][6]
|
||||
* [Part 7: Access Page][7]
|
||||
* [第一篇: 模式][1]
|
||||
* [第二篇: OAuth][2]
|
||||
* [第三篇: 对话][3]
|
||||
* [第四篇: 消息][4]
|
||||
* [第五篇: 实时消息][5]
|
||||
* [第六篇: 仅用于开发的登录][6]
|
||||
* [第七篇: Access 页面][7]
|
||||
|
||||
|
||||
继续前端部分,让我们在本文中完成 Home 页面的开发。 我们将添加一个开始对话的表单和一个包含最新对话的列表。
|
||||
|
||||
Continuing the frontend, let’s finish the home page in this post. We’ll add a form to start conversations and a list with the latest ones.
|
||||
|
||||
### Conversation Form
|
||||
### 对话表单
|
||||
|
||||
![conversation form screenshot][8]
|
||||
|
||||
In the `static/pages/home-page.js` file add some markup in the HTML view.
|
||||
转到 `static/pages/home-page.js` 文件,在 HTML 视图中添加一些标记。
|
||||
|
||||
```
|
||||
```html
|
||||
<form id="conversation-form">
|
||||
<input type="search" placeholder="Start conversation with..." required>
|
||||
</form>
|
||||
```
|
||||
|
||||
Add that form just below the section in which we displayed the auth user and logout button.
|
||||
将该表单添加到我们显示 auth user 和 logout 按钮部分的下方。
|
||||
|
||||
```
|
||||
```js
|
||||
page.getElementById('conversation-form').onsubmit = onConversationSubmit
|
||||
```
|
||||
|
||||
Now we can listen to the “submit” event to create the conversation.
|
||||
现在我们可以监听 “submit” 事件来创建对话了。
|
||||
|
||||
```
|
||||
```js
|
||||
import http from '../http.js'
|
||||
import { navigate } from '../router.js'
|
||||
|
||||
@ -79,15 +78,15 @@ function createConversation(username) {
|
||||
}
|
||||
```
|
||||
|
||||
On submit we do a POST request to `/api/conversations` with the username and redirect to the conversation page (for the next post).
|
||||
在提交时,我们使用用户名对 `/api/conversations` 进行 POST 请求,并重定向到 conversation 页面 (用于下一篇文章)。
|
||||
|
||||
### Conversation List
|
||||
### 对话列表
|
||||
|
||||
![conversation list screenshot][9]
|
||||
|
||||
In the same file, we are going to make the `homePage()` function async to load the conversations first.
|
||||
还是在这个文件中,我们将把 `homePage()` 函数变成异步函数,以便先加载对话。
|
||||
|
||||
```
|
||||
```js
|
||||
export default async function homePage() {
|
||||
const conversations = await getConversations().catch(err => {
|
||||
console.error(err)
|
||||
@ -101,24 +100,24 @@ function getConversations() {
|
||||
}
|
||||
```
|
||||
|
||||
Then, add a list in the markup to render conversations there.
|
||||
然后,在标记中添加一个列表来渲染对话。
|
||||
|
||||
```
|
||||
```html
|
||||
<ol id="conversations"></ol>
|
||||
```
|
||||
|
||||
Add it just below the current markup.
|
||||
将其添加到当前标记的正下方。
|
||||
|
||||
```
|
||||
```js
|
||||
const conversationsOList = page.getElementById('conversations')
|
||||
for (const conversation of conversations) {
|
||||
conversationsOList.appendChild(renderConversation(conversation))
|
||||
}
|
||||
```
|
||||
|
||||
So we can append each conversation to the list.
|
||||
因此,我们可以将每个对话添加到这个列表中。
|
||||
|
||||
```
|
||||
```js
|
||||
import { avatar, escapeHTML } from '../shared.js'
|
||||
|
||||
function renderConversation(conversation) {
|
||||
@ -146,11 +145,11 @@ function renderConversation(conversation) {
|
||||
}
|
||||
```
|
||||
|
||||
Each conversation item contains a link to the conversation page and displays the other participant info and a preview of the last message. Also, you can use `.hasUnreadMessages` to add a class to the item and do some styling with CSS. Maybe a bolder font or accent the color.
|
||||
每个对话条目都包含一个指向对话页面的链接,并显示其他参与者信息和最后一条消息的预览。另外,您可以使用 `.hasUnreadMessages` 向该条目添加一个类,并使用 CSS 进行一些样式设置。也许是粗体字体或强调颜色。
|
||||
|
||||
Note that we’re escaping the message content. That function comes from `static/shared.js`:
|
||||
请注意,我们需要转义信息的内容。该函数来自于 `static/shared.js` 文件:
|
||||
|
||||
```
|
||||
```js
|
||||
export function escapeHTML(str) {
|
||||
return str
|
||||
.replace(/&/g, '&amp;')
|
||||
@ -161,35 +160,35 @@ export function escapeHTML(str) {
|
||||
}
|
||||
```
|
||||
|
||||
That prevents displaying as HTML the message the user wrote. If the user happens to write something like:
|
||||
这会阻止将用户编写的消息显示为 HTML。如果用户碰巧编写了类似以下内容的代码:
|
||||
|
||||
```
|
||||
```js
|
||||
<script>alert('lololo')</script>
|
||||
```
|
||||
|
||||
It would be very annoying because that script will be executed 😅
|
||||
So yeah, always remember to escape content from untrusted sources.
|
||||
这将非常烦人,因为该脚本将被执行😅。
|
||||
所以,永远记住要转义来自不可信来源的内容。
|
||||
|
||||
### Messages Subscription
|
||||
### 消息订阅
|
||||
|
||||
Last but not least, I want to subscribe to the message stream here.
|
||||
最后但并非最不重要的一点,我想在这里订阅消息流。
|
||||
|
||||
```
|
||||
```js
|
||||
const unsubscribe = subscribeToMessages(onMessageArrive)
|
||||
page.addEventListener('disconnect', unsubscribe)
|
||||
```
|
||||
|
||||
Add that line in the `homePage()` function.
|
||||
在 `homePage()` 函数中添加这一行。
|
||||
|
||||
```
|
||||
```js
|
||||
function subscribeToMessages(cb) {
|
||||
return http.subscribe('/api/messages', cb)
|
||||
}
|
||||
```
|
||||
|
||||
The `subscribe()` function returns a function that once called it closes the underlying connection. That’s why I passed it to the “disconnect” event; so when the user leaves the page, the event stream will be closed.
|
||||
函数 `subscribe()` 返回一个函数,该函数一旦调用就会关闭底层连接。这就是为什么我把它传递给 <ruby>“断开连接”<rt>disconnect</rt></ruby>事件的原因;因此,当用户离开页面时,事件流将被关闭。
|
||||
|
||||
```
|
||||
```js
|
||||
async function onMessageArrive(message) {
|
||||
const conversationLI = document.querySelector(`li[data-id="${message.conversationID}"]`)
|
||||
if (conversationLI !== null) {
|
||||
@ -221,12 +220,12 @@ function getConversation(id) {
|
||||
}
|
||||
```
|
||||
|
||||
Every time a new message arrives, we go and query for the conversation item in the DOM. If found, we add the `has-unread-messages` class to the item, and update the view. If not found, it means the message is from a new conversation created just now. We go and do a GET request to `/api/conversations/{conversationID}` to get the conversation in which the message was created and prepend it to the conversation list.
|
||||
每次有新消息到达时,我们都会在 DOM 中查询对应的对话条目。如果找到,我们会将 `has-unread-messages` 类添加到该条目中,并更新视图。如果未找到,则表示该消息来自刚刚创建的新对话。这时我们会对 `/api/conversations/{conversationID}` 发起 GET 请求,获取该消息所在的对话,并将其放到对话列表的最前面。
|
||||
|
||||
* * *
|
||||
|
||||
That covers the home page 😊
|
||||
On the next post we’ll code the conversation page.
|
||||
以上这些涵盖了主页的所有内容 😊。
|
||||
在下一篇文章中,我们将对 conversation 页面进行编码。
|
||||
|
||||
[Source Code][10]
|
||||
|
||||
@ -236,7 +235,7 @@ via: https://nicolasparada.netlify.com/posts/go-messenger-home-page/
|
||||
|
||||
作者:[Nicolás Parada][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
译者:[译者ID](https://github.com/gxlct008)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
@ -1,5 +1,5 @@
|
||||
[#]: collector: (lujun9972)
|
||||
[#]: translator: ( )
|
||||
[#]: translator: (gxlct008)
|
||||
[#]: reviewer: ( )
|
||||
[#]: publisher: ( )
|
||||
[#]: url: ( )
|
||||
@ -7,31 +7,31 @@
|
||||
[#]: via: (https://nicolasparada.netlify.com/posts/go-messenger-conversation-page/)
|
||||
[#]: author: (Nicolás Parada https://nicolasparada.netlify.com/)
|
||||
|
||||
Building a Messenger App: Conversation Page
|
||||
构建一个即时消息应用(九):Conversation 页面
|
||||
======
|
||||
|
||||
This post is the 9th and last in a series:
|
||||
本文是该系列的第九篇,也是最后一篇。
|
||||
|
||||
* [Part 1: Schema][1]
|
||||
* [Part 2: OAuth][2]
|
||||
* [Part 3: Conversations][3]
|
||||
* [Part 4: Messages][4]
|
||||
* [Part 5: Realtime Messages][5]
|
||||
* [Part 6: Development Login][6]
|
||||
* [Part 7: Access Page][7]
|
||||
* [Part 8: Home Page][8]
|
||||
* [第一篇: 模式][1]
|
||||
* [第二篇: OAuth][2]
|
||||
* [第三篇: 对话][3]
|
||||
* [第四篇: 消息][4]
|
||||
* [第五篇: 实时消息][5]
|
||||
* [第六篇: 仅用于开发的登录][6]
|
||||
* [第七篇: Access 页面][7]
|
||||
* [第八篇: Home 页面][8]
|
||||
|
||||
|
||||
|
||||
In this post we’ll code the conversation page. This page is the chat between the two users. At the top we’ll show info about the other participant, below, a list of the latest messages and a message form at the bottom.
|
||||
在这篇文章中,我们将对<ruby>对话<rt>conversation</rt></ruby>页面进行编码。此页面是两个用户之间的聊天室。在顶部我们将显示其他参与者的信息,下面接着的是最新消息列表,以及底部的消息表单。
|
||||
|
||||
### Chat heading
|
||||
### 聊天标题
|
||||
|
||||
![chat heading screenshot][9]
|
||||
|
||||
Let’s start by creating the file `static/pages/conversation-page.js` with the following content:
|
||||
让我们从创建 `static/pages/conversation-page.js` 文件开始,它包含以下内容:
|
||||
|
||||
```
|
||||
```js
|
||||
import http from '../http.js'
|
||||
import { navigate } from '../router.js'
|
||||
import { avatar, escapeHTML } from '../shared.js'
|
||||
@ -65,17 +65,17 @@ function getConversation(id) {
|
||||
}
|
||||
```
|
||||
|
||||
This page receives the conversation ID the router extracted from the URL.
|
||||
此页面接收路由从 URL 中提取的会话 ID。
|
||||
|
||||
First it does a GET request to `/api/conversations/{conversationID}` to get info about the conversation. In case of error, we show it and redirect back to `/`. Then we render info about the other participant.
|
||||
首先,它向 `/api/conversations/{conversationID}` 发起一个 GET 请求,以获取有关对话的信息。如果出现错误,我们会将其显示出来,并重定向回 `/`。然后我们渲染有关其他参与者的信息。
|
||||
|
||||
### Conversation List
|
||||
### 对话列表
|
||||
|
||||
![chat heading screenshot][10]
|
||||
|
||||
We’ll fetch the latest messages too to display them.
|
||||
我们也会获取最新的消息并显示它们。
|
||||
|
||||
```
|
||||
```js
|
||||
let conversation, messages
|
||||
try {
|
||||
[conversation, messages] = await Promise.all([
|
||||
@ -85,32 +85,32 @@ try {
|
||||
}
|
||||
```
|
||||
|
||||
Update the `conversationPage()` function to fetch the messages too. We use `Promise.all()` to do both request at the same time.
|
||||
更新 `conversationPage()` 函数以获取消息。我们使用 `Promise.all()` 同时执行这两个请求。
|
||||
|
||||
```
|
||||
```js
|
||||
function getMessages(conversationID) {
|
||||
return http.get(`/api/conversations/${conversationID}/messages`)
|
||||
}
|
||||
```
|
||||
|
||||
A GET request to `/api/conversations/{conversationID}/messages` gets the latest messages of the conversation.
|
||||
发起对 `/api/conversations/{conversationID}/messages` 的 GET 请求可以获取对话中的最新消息。
|
||||
|
||||
```
|
||||
```html
|
||||
<ol id="messages"></ol>
|
||||
```
|
||||
|
||||
Now, add that list to the markup.
|
||||
现在,将该列表添加到标记中。
|
||||
|
||||
```
|
||||
```js
|
||||
const messagesOList = page.getElementById('messages')
|
||||
for (const message of messages.reverse()) {
|
||||
messagesOList.appendChild(renderMessage(message))
|
||||
}
|
||||
```
|
||||
|
||||
So we can append messages to the list. We show them in reverse order.
|
||||
这样我们就可以将消息附加到列表中了。我们以时间倒序来显示它们。
|
||||
|
||||
```
|
||||
```js
|
||||
function renderMessage(message) {
|
||||
const messageContent = escapeHTML(message.content)
|
||||
const messageDate = new Date(message.createdAt).toLocaleString()
|
||||
@ -127,28 +127,28 @@ function renderMessage(message) {
|
||||
}
|
||||
```
|
||||
|
||||
Each message item displays the message content itself with its timestamp. Using `.mine` we can append a different class to the item so maybe you can show the message to the right.
|
||||
每个消息条目显示消息内容本身及其时间戳。使用 `.mine`,我们可以将不同的 css 类附加到条目,这样您就可以将消息显示在右侧。
|
||||
|
||||
### Message Form
|
||||
### 消息表单
|
||||
|
||||
![chat heading screenshot][11]
|
||||
|
||||
```
|
||||
```html
|
||||
<form id="message-form">
|
||||
<input type="text" placeholder="Type something" maxlength="480" required>
|
||||
<button>Send</button>
|
||||
</form>
|
||||
```
|
||||
|
||||
Add that form to the current markup.
|
||||
将该表单添加到当前标记中。
|
||||
|
||||
```
|
||||
```js
|
||||
page.getElementById('message-form').onsubmit = messageSubmitter(conversationID)
|
||||
```
|
||||
|
||||
Attach an event listener to the “submit” event.
|
||||
将事件监听器附加到 “submit” 事件。
|
||||
|
||||
```
|
||||
```js
|
||||
function messageSubmitter(conversationID) {
|
||||
return async ev => {
|
||||
ev.preventDefault()
|
||||
@ -191,19 +191,20 @@ function createMessage(content, conversationID) {
|
||||
}
|
||||
```
|
||||
|
||||
We make use of [partial application][12] to have the conversation ID in the “submit” event handler. It takes the message content from the input and does a POST request to `/api/conversations/{conversationID}/messages` with it. Then prepends the newly created message to the list.
|
||||
|
||||
### Messages Subscription
|
||||
我们利用[偏函数应用][12](partial application)在 “submit” 事件处理程序中获取对话 ID。它从输入中获取消息内容,并用它向 `/api/conversations/{conversationID}/messages` 发出 POST 请求,然后将新创建的消息添加到列表的最前面。
|
||||
|
||||
To make it realtime we’ll subscribe to the message stream in this page also.
|
||||
### 消息订阅
|
||||
|
||||
```
|
||||
为了实现实时,我们还将订阅此页面中的消息流。
|
||||
|
||||
```js
|
||||
page.addEventListener('disconnect', subscribeToMessages(messageArriver(conversationID)))
|
||||
```
|
||||
|
||||
Add that line in the `conversationPage()` function.
|
||||
将该行添加到 `conversationPage()` 函数中。
|
||||
|
||||
```
|
||||
```js
|
||||
function subscribeToMessages(cb) {
|
||||
return http.subscribe('/api/messages', cb)
|
||||
}
|
||||
@ -229,14 +230,14 @@ function readMessages(conversationID) {
|
||||
}
|
||||
```
|
||||
|
||||
We also make use of partial application to have the conversation ID here.
|
||||
When a new message arrives, first we check if it’s from this conversation. If it is, we go a prepend a message item to the list and do a POST request to `/api/conversations/{conversationID}/read_messages` to updated the last time the participant read messages.
|
||||
在这里我们仍然使用<ruby>偏函数应用<rt>partial application</rt></ruby>来获取会话 ID。
当新消息到达时,我们首先检查它是否来自此对话。如果是,我们会将消息条目添加到列表的最前面,并向 `/api/conversations/{conversationID}/read_messages` 发起一个 POST 请求,以更新参与者上次阅读消息的时间。
|
||||
|
||||
* * *
|
||||
|
||||
That concludes this series. The messenger app is now functional.
|
||||
本系列到此结束。 Messenger app 现在可以运行了。
|
||||
|
||||
~~I’ll add pagination on the conversation and message list, also user searching before sharing the source code. I’ll updated once it’s ready along with a hosted demo 👨💻~~
|
||||
~~我将在对话和消息列表中添加分页功能,并在共享源代码之前添加用户搜索。我会在准备好的时候和<ruby>托管的演示<rt>a hosted demo</rt></ruby>👨💻一起更新它~~
|
||||
|
||||
[Source Code][13] • [Demo][14]
|
||||
|
||||
@ -246,7 +247,7 @@ via: https://nicolasparada.netlify.com/posts/go-messenger-conversation-page/
|
||||
|
||||
作者:[Nicolás Parada][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[译者ID](https://github.com/译者ID)
|
||||
译者:[gxlct008](https://github.com/gxlct008)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
@ -0,0 +1,273 @@
|
||||
[#]: collector: (lujun9972)
|
||||
[#]: translator: (gxlct008)
|
||||
[#]: reviewer: ( )
|
||||
[#]: publisher: ( )
|
||||
[#]: url: ( )
|
||||
[#]: subject: (Using Yarn on Ubuntu and Other Linux Distributions)
|
||||
[#]: via: (https://itsfoss.com/install-yarn-ubuntu)
|
||||
[#]: author: (Abhishek Prakash https://itsfoss.com/author/abhishek/)
|
||||
|
||||
在 Ubuntu 和其他 Linux 发行版上使用 Yarn
|
||||
======
|
||||
|
||||
***本速成教程向您展示了在 Ubuntu 和 Debian Linux 上安装 Yarn 包管理器的官方方法。您还将学习到一些基本的 Yarn 命令以及彻底删除 Yarn 的步骤。***
|
||||
|
||||
[Yarn][1] 是 Facebook 开发的开源 JavaScript 包管理器。它是流行的 npm 包管理器的一个替代品,或者应该说是改进。 [Facebook 开发团队][2] 创建 Yarn 是为了克服 [npm][3] 的缺点。 Facebook 声称 Yarn 比 npm 更快、更可靠、更安全。
|
||||
|
||||
与 npm 一样,Yarn 为您提供一种自动安装、更新、配置和删除从全局注册表中检索到的程序包的方法。
|
||||
|
||||
Yarn 的优点是它更快,因为它缓存了已下载的每个包,所以无需再次下载。它还将操作并行化,以最大化资源利用率。在执行每个已安装的包的代码之前,Yarn 还会使用[校验和来验证其完整性][4]。Yarn 还保证在一个系统上正常工作的安装,在任何其他系统上也会以完全相同的方式工作。
|
||||
|
||||
如果您正 [在 Ubuntu 上使用 nodejs][5],那么您的系统上可能已经安装了 npm。在这种情况下,您可以通过以下方式使用 npm 全局安装 Yarn:
|
||||
|
||||
```
|
||||
sudo npm install yarn -g
|
||||
```
|
||||
|
||||
不过,我推荐使用官方方式在 Ubuntu/Debian 上安装 Yarn。
|
||||
|
||||
### 在 Ubuntu 和 Debian 上安装 Yarn [官方方式]
|
||||
|
||||
![Yarn JS][6]
|
||||
|
||||
这里提到的指令应该适用于所有版本的 Ubuntu,例如 Ubuntu 18.04、16.04 等。同样的指令集也适用于 Debian 和其他基于 Debian 的发行版。
|
||||
|
||||
由于本教程使用 curl 来添加 Yarn 项目的 GPG 密钥,所以最好验证一下您是否已经安装了 curl。
|
||||
|
||||
```
|
||||
sudo apt install curl
|
||||
```
|
||||
|
||||
如果 curl 尚未安装,则上面的命令将安装它。既然有了 curl,您就可以使用它以如下方式添加 Yarn 项目的 GPG 密钥:
|
||||
|
||||
```
|
||||
curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | sudo apt-key add -
|
||||
```
|
||||
|
||||
在此之后,将存储库添加到源列表中,以便将来可以轻松地升级 Yarn 包,并进行其余系统更新:
|
||||
|
||||
```
|
||||
sudo sh -c 'echo "deb https://dl.yarnpkg.com/debian/ stable main" >> /etc/apt/sources.list.d/yarn.list'
|
||||
```
|
||||
|
||||
您现在可以继续了。[更新 Ubuntu][7] 或 Debian 系统,以刷新可用软件包列表,然后安装 Yarn:
|
||||
|
||||
```
|
||||
sudo apt update
|
||||
sudo apt install yarn
|
||||
```
|
||||
|
||||
这将一起安装 Yarn 和 nodejs。该过程完成后,请验证是否已成功安装 Yarn。 您可以通过检查 Yarn 版本来做到这一点。
|
||||
|
||||
```
|
||||
yarn --version
|
||||
```
|
||||
|
||||
对我来说,它显示了这样的输出:
|
||||
|
||||
```
|
||||
yarn --version
|
||||
1.12.3
|
||||
```
|
||||
|
||||
这意味着我的系统上安装了 Yarn 版本 1.12.3。
|
||||
|
||||
### 使用 Yarn
|
||||
|
||||
我假设您对 JavaScript 编程以及依赖项的工作原理有一些基本的了解。我在这里不做详细介绍。我将向您展示一些基本的 Yarn 命令,这些命令将帮助您入门。
|
||||
|
||||
#### 使用 Yarn 创建一个新项目
|
||||
|
||||
与 npm 一样,Yarn 也可以使用 package.json 文件。在这里添加依赖项。所有依赖包都缓存在项目根目录下的 node_modules 目录中。
|
||||
|
||||
在项目的根目录中,运行以下命令以生成新的 package.json 文件:
|
||||
|
||||
它会问您一些问题。您可以按 Enter 跳过或使用默认值。
|
||||
|
||||
```
|
||||
yarn init
|
||||
yarn init v1.12.3
|
||||
question name (test_yarn): test_yarn_proect
|
||||
question version (1.0.0): 0.1
|
||||
question description: Test Yarn
|
||||
question entry point (index.js):
|
||||
question repository url:
|
||||
question author: abhishek
|
||||
question license (MIT):
|
||||
question private:
|
||||
success Saved package.json
|
||||
Done in 82.42s.
|
||||
```
|
||||
|
||||
这样,您就得到了一个如下的 package.json 文件:
|
||||
|
||||
```
|
||||
{
|
||||
"name": "test_yarn_proect",
|
||||
"version": "0.1",
|
||||
"description": "Test Yarn",
|
||||
"main": "index.js",
|
||||
"author": "abhishek",
|
||||
"license": "MIT"
|
||||
}
|
||||
```
|
||||
|
||||
现在您有了 package.json,您可以手动编辑它以添加或删除包依赖项,也可以使用 Yarn 命令(首选)。
|
||||
|
||||
#### 使用 Yarn 添加依赖项
|
||||
|
||||
您可以通过以下方式添加对特定包的依赖关系:
|
||||
|
||||
```
|
||||
yarn add <package_name>
|
||||
```
|
||||
|
||||
例如,如果您想在项目中使用 [Lodash][8],则可以使用 Yarn 添加它,如下所示:
|
||||
|
||||
```
|
||||
yarn add lodash
|
||||
yarn add v1.12.3
|
||||
info No lockfile found.
|
||||
[1/4] Resolving packages…
|
||||
[2/4] Fetching packages…
|
||||
[3/4] Linking dependencies…
|
||||
[4/4] Building fresh packages…
|
||||
success Saved lockfile.
|
||||
success Saved 1 new dependency.
|
||||
info Direct dependencies
|
||||
└─ lodash@4.17.11
|
||||
info All dependencies
|
||||
└─ lodash@4.17.11
|
||||
Done in 2.67s.
|
||||
```
|
||||
|
||||
您可以看到,此依赖项已自动添加到 package.json 文件中:
|
||||
|
||||
```
|
||||
{
|
||||
"name": "test_yarn_proect",
|
||||
"version": "0.1",
|
||||
"description": "Test Yarn",
|
||||
"main": "index.js",
|
||||
"author": "abhishek",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"lodash": "^4.17.11"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
默认情况下,Yarn 将在依赖项中添加最新版本的包。如果要使用特定版本,可以在添加时指定。
|
||||
|
||||
```
|
||||
yarn add package@version-or-tag
|
||||
```
|
||||
|
||||
像往常一样,您也可以手动更新 package.json 文件。
|
||||
|
||||
#### 使用 Yarn 升级依赖项
|
||||
|
||||
您可以使用以下命令将特定依赖项升级到其最新版本:
|
||||
|
||||
```
|
||||
yarn upgrade <package_name>
|
||||
```
|
||||
|
||||
它将查看所涉及的包是否具有较新的版本,并且会相应地对其进行更新。
|
||||
|
||||
您还可以通过以下方式更改已添加的依赖项的版本:
|
||||
|
||||
```
|
||||
yarn upgrade package_name@version_or_tag
|
||||
```
|
||||
|
||||
您还可以使用一个命令将项目的所有依赖项升级到它们的最新版本:
|
||||
|
||||
```
|
||||
yarn upgrade
|
||||
```
|
||||
|
||||
它将检查所有依赖项的版本,如果有任何较新的版本,则会更新它们。
|
||||
|
||||
#### 使用 Yarn 删除依赖项
|
||||
|
||||
您可以通过以下方式从项目的依赖项中删除包:
|
||||
|
||||
```
|
||||
yarn remove <package_name>
|
||||
```
|
||||
|
||||
#### 安装所有项目依赖项
|
||||
|
||||
如果您对 package.json 文件进行了任何更改,则应该运行
|
||||
|
||||
```
|
||||
yarn
|
||||
```
|
||||
|
||||
或者
|
||||
|
||||
```
|
||||
yarn install
|
||||
```
|
||||
|
||||
一次安装所有依赖项。
|
||||
|
||||
### 如何从 Ubuntu 或 Debian 中删除 Yarn
|
||||
|
||||
作为本教程的结尾,我将介绍从系统中删除 Yarn 的步骤(前提是您按照上述步骤安装了它)。如果您意识到不再需要 Yarn 了,则可以将它删除。
|
||||
|
||||
使用以下命令删除 Yarn 及其依赖项。
|
||||
|
||||
```
|
||||
sudo apt purge yarn
|
||||
```
|
||||
|
||||
您也应该从源列表中把存储库信息一并删除掉:
|
||||
|
||||
```
|
||||
sudo rm /etc/apt/sources.list.d/yarn.list
|
||||
```
|
||||
|
||||
下一步,删除已添加到受信任密钥列表中的 GPG 密钥,这一步是可选的。但要做到这一点,您需要知道密钥。您可以使用 `apt-key` 命令获得它:
|
||||
|
||||
```
|
||||
sudo apt-key list
Warning: apt-key output should not be parsed (stdout is not a terminal)
pub   rsa4096 2016-10-05 [SC]
      72EC F46A 56B4 AD39 C907  BBB7 1646 B01B 86E5 0310
uid           [ unknown] Yarn Packaging <yarn@dan.cx>
sub   rsa4096 2016-10-05 [E]
sub   rsa4096 2019-01-02 [S] [expires: 2020-02-02]
|
||||
```
|
||||
|
||||
这里的密钥是以 pub 开始的行中 GPG 密钥指纹的最后 8 个字符。
|
||||
|
||||
因此,对于我来说,密钥是 `86E50310`,我将使用以下命令将其删除:
|
||||
|
||||
```
|
||||
sudo apt-key del 86E50310
|
||||
```
|
||||
|
||||
您会在输出中看到 OK,并且 Yarn 包的 GPG 密钥将从系统信任的 GPG 密钥列表中删除。
|
||||
|
||||
我希望本教程可以帮助您在 Ubuntu、Debian、Linux Mint、 elementary OS 等操作系统上安装 Yarn。 我提供了一些基本的 Yarn 命令,以帮助您入门,并完成了从系统中删除 Yarn 的完整步骤。
|
||||
|
||||
希望您喜欢本教程,如果有任何疑问或建议,请随时在下面留言。
|
||||
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://itsfoss.com/install-yarn-ubuntu
|
||||
|
||||
作者:[Abhishek Prakash][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[gxlct008](https://github.com/gxlct008)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://itsfoss.com/author/abhishek/
|
||||
[b]: https://github.com/lujun9972
|
||||
[1]: https://yarnpkg.com/lang/en/
|
||||
[2]: https://code.fb.com/
|
||||
[3]: https://www.npmjs.com/
|
||||
[4]: https://itsfoss.com/checksum-tools-guide-linux/
|
||||
[5]: https://itsfoss.com/install-nodejs-ubuntu/
|
||||
[6]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2019/01/yarn-js-ubuntu-debian.jpeg?resize=800%2C450&ssl=1
|
||||
[7]: https://itsfoss.com/update-ubuntu/
|
||||
[8]: https://lodash.com/
|
@ -1,303 +0,0 @@
|
||||
[#]: collector: (lujun9972)
|
||||
[#]: translator: (gxlct008)
|
||||
[#]: reviewer: ( )
|
||||
[#]: publisher: ( )
|
||||
[#]: url: ( )
|
||||
[#]: subject: (TCP window scaling, timestamps and SACK)
|
||||
[#]: via: (https://fedoramagazine.org/tcp-window-scaling-timestamps-and-sack/)
|
||||
[#]: author: (Florian Westphal https://fedoramagazine.org/author/strlen/)
|
||||
|
||||
TCP 窗口缩放,时间戳和 SACK
|
||||
======
|
||||
|
||||
![][1]
|
||||
|
||||
Linux TCP 协议栈具有无数个 _sysctl_ 旋钮,允许更改其行为。 这包括可用于接收或发送操作的内存量,套接字的最大数量、可选特性和协议扩展。
|
||||
|
||||
有很多文章出于各种“性能调优”或“安全性”原因,建议禁用 TCP 扩展,比如时间戳或<ruby>选择性确认<rt>selective acknowledgments</rt></ruby> (SACK)。
|
||||
|
||||
本文提供了这些扩展的功能背景,默认情况下处于启用状态的原因,它们之间是如何关联的,以及为什么通常情况下将它们关闭是个坏主意。
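在 Linux 上,动手调整之前可以先用 `sysctl` 查看这三个扩展当前的开关状态。下面的键名是标准的内核参数;示例输出中的 1 表示启用,数值仅作示意,以实际系统为准:

```
$ sysctl net.ipv4.tcp_window_scaling net.ipv4.tcp_timestamps net.ipv4.tcp_sack
net.ipv4.tcp_window_scaling = 1
net.ipv4.tcp_timestamps = 1
net.ipv4.tcp_sack = 1
```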
|
||||
|
||||
### TCP 窗口缩放
|
||||
|
||||
TCP 可以维持的数据传输速率受到几个因素的限制。其中包括:
|
||||
|
||||
* 往返时间(RTT)。这是数据包到达目的地并返回回复所花费的时间。越低越好。
|
||||
* 所涉及的网络路径的最低链路速度
|
||||
* 丢包频率
|
||||
* 新数据可用于传输的速度。 例如,CPU 需要能够以足够快的速度将数据传递到网络适配器。如果 CPU 需要首先加密数据,则适配器可能必须等待新数据。同样地,如果磁盘存储不能足够快地读取数据,则磁盘存储可能会成为瓶颈。
|
||||
* TCP 接收窗口的最大可能大小。接收窗口决定 TCP 在必须等待接收方报告接收到该数据之前可以传输多少数据 (以字节为单位)。这是由接收方宣布的。接收方将在读取并确认接收到传入数据时不断更新此值。接收窗口当前值包含在 [TCP 报头][2] 中,它是 TCP 发送的每个数据段的一部分。因此,只要发送方接收到来自对等方的确认,它就知道当前的接收窗口。这意味着往返时间(RTT)越长,发送方获得接收窗口更新所需的时间就越长。
|
||||
|
||||
|
||||
TCP 被限制为最多 64KB 的未确认(正在传输)数据。在大多数网络场景中,这甚至还不足以维持一个像样的数据速率。让我们看看一些例子。
|
||||
|
||||
##### 理论数据速率
|
||||
|
||||
当往返时间(RTT)为 100 毫秒时,TCP 每秒最多只能传输 640KB。在延迟 1 秒的情况下,最大理论数据速率更是降至 64KB/s。
|
||||
|
||||
这是因为接收窗口的原因。一旦发送了 64KB 的数据,接收窗口就已经满了。发送方必须等待,直到对等方通知它应用程序已经读取了至少一部分数据。
|
||||
|
||||
发送的第一个段会把 TCP 窗口缩减一个自身的大小。在接收窗口值的更新可用之前,需要往返一次。当更新以 1 秒的延迟到达时,即使链路有足够的可用带宽,也会导致 64KB 的限制。
|
||||
|
||||
为了充分利用一个具有几毫秒延迟的快速网络,必须有一个比传统 TCP 支持的窗口大的窗口。“64KB 限制”是协议规范的产物:TCP 头只为接收窗口大小保留 16 位。这允许接收窗口高达 64KB。在 TCP 协议最初设计时,这个大小并没有被视为一个限制。
|
||||
|
||||
不幸的是,想通过仅仅更改 TCP 头来支持更大的最大窗口值是不可能的。如果这样做就意味着 TCP 的所有实现都必须同时更新,否则它们将无法相互理解。为了解决这个问题,需要改变接收窗口值的解释。
|
||||
|
||||
“窗口缩放选项”允许这样做,同时保持与现有实现的兼容性。
|
||||
|
||||
#### TCP 选项:向后兼容的协议扩展
|
||||
|
||||
TCP 支持可选扩展。 这允许使用新特性增强协议,而无需立即更新所有实现。 当 TCP 启动器连接到对等方时,它还会发送一个支持的扩展列表。 所有扩展名都遵循相同的格式:一个唯一的选项号,后跟选项的长度以及选项数据本身。
|
||||
|
||||
TCP 响应程序检查连接请求中包含的所有选项号。 如果它遇到一个不能理解的选项号,则会跳过
|
||||
该选项号附带的“长度”字节的数据,并继续检查下一个选项号。响应方会在答复中略去自己无法理解的选项。这样,发送方和接收方就都能了解双方共同支持的选项集。
|
||||
|
||||
使用窗口缩放时,选项数据总是由单个数字组成。
|
||||
|
||||
### 窗口缩放选项
|
||||
|
||||
```
|
||||
窗口缩放选项 (WSopt): Kind: 3, Length: 3
|
||||
+---------+---------+---------+
|
||||
| Kind=3 |Length=3 |shift.cnt|
|
||||
+---------+---------+---------+
|
||||
1 1 1
|
||||
```
|
||||
|
||||
[窗口缩放][3] 选项告诉对等点,应该使用给定的数字缩放 TCP 标头中的接收窗口值,以获取实际大小。
|
||||
|
||||
例如,一个宣告窗口缩放因子为 7 的 TCP 发起方是在告诉响应方:今后任何携带接收窗口值 512 的数据包,实际宣告的都是 65536 字节的窗口,即扩大了 128(2^7)倍。这将允许最大为 8MB 的 TCP 窗口。
|
||||
|
||||
不能理解此选项的 TCP 响应程序将会忽略它。 为响应连接请求而发送的 TCP 数据包(SYN-ACK)不包含窗口缩放选项。在这种情况下,双方只能使用 64k 的窗口大小。幸运的是,默认情况下,几乎每个 TCP 堆栈都支持并启用此选项,包括 Linux。
|
||||
|
||||
响应方会在应答中带上它自己需要的缩放因子,两个对等方可以使用不同的数值。宣告缩放因子为 0 也是合法的:这意味着对方应该按字面意义处理收到的接收窗口值,但应答方向上仍然允许缩放,这样本方就可以使用更大的接收窗口。
|
||||
|
||||
与 SACK 或 TCP 时间戳不同,窗口缩放选项仅出现在 TCP 连接的前两个数据包中,之后无法更改。也不可能通过查看不包含初始连接三次握手的连接的数据包捕获来确定比例因子。
|
||||
|
||||
支持的最大比例因子为 14。这将允许 TCP 窗口的大小高达 1GB。
|
||||
|
||||
##### 窗口缩放的缺点
|
||||
|
||||
在非常特殊的情况下,窗口缩放可能导致数据损坏。在你禁用该选项之前(通常情况下甚至无法禁用),要知道还有一种能防止这种情况的解决方案。不幸的是,有些人在没有意识到它与窗口缩放的关系的情况下禁用了该方案。首先,让我们看一下需要解决的实际问题。想象以下事件序列:
|
||||
|
||||
1. 发送方发送段:s_1,s_2,s_3,... s_n
|
||||
2. 接收方看到:s_1,s_3,.. s_n,并发送对 s_1 的确认。
|
||||
3. 发送方认为 s_2 丢失,然后再次发送。 它还发送段 s_n+1 中包含的新数据。
|
||||
4. 接收方然后看到:s_2,s_n+1,s_2:数据包 s_2 被接收两次。
|
||||
|
||||
例如,当发送方过早触发重新传输时,可能会发生这种情况。 在正常情况下,即使使用窗口缩放,这种错误的重传也绝不会成为问题。 接收方将只丢弃重复项。
|
||||
|
||||
#### 从旧数据到新数据
|
||||
|
||||
TCP 序列号最多可以为 4GB。如果它变得大于此值,则序列会回绕到 0,然后再次增加。这本身不是问题,但是如果这种问题发生得足够快,则上述情况可能会造成歧义。
|
||||
|
||||
如果在正确的时刻发生回绕,则序列号 s_2(重新发送的数据包)可能已经大于 s_n+1。 因此,在最后的步骤(4)中,接收器可以将其解释为:s_2,s_n+1,s_n+m,即它可以将 **“旧”** 数据包 s_2 视为包含新数据。
|
||||
|
||||
通常,这不会发生,因为即使在高带宽链接上,“回绕”也只会每隔几秒钟或几分钟发生一次。原始和不需要的重传之间的间隔将小得多。
|
||||
|
||||
例如,对于 50MB/s 的传输速度,重复的数据包要延迟一分钟以上才会成为问题。序列号回绕的速度还没有快到让更小的延迟也造成这种问题。
|
||||
|
||||
一旦 TCP 达到 “GB/s” 的吞吐率,序列号回绕的速度就会非常快,以至于即使只有几毫秒的延迟也可能会造成 TCP 无法再检测到的重复项。通过解决接收窗口太小的问题,TCP 现在可以用于以前无法实现的网络速度,这会产生一个新的、尽管很少见的问题。为了在 RTT 非常低的环境中安全使用 GB/s 的速度,接收方必须能够检测到这些旧副本,而不必仅依赖序列号。
|
||||
|
||||
### TCP 时间戳
|
||||
|
||||
#### 最佳使用日期
|
||||
|
||||
用最简单的术语来说,[TCP 时间戳][3]只是在数据包上添加时间戳,以解决由非常快速的序列号回绕引起的歧义。如果一个段看起来包含新数据,但其时间戳早于最后一个在窗口内的数据包,则说明序列号已经回绕,“新”数据包实际上是一个较旧的副本。这样,即使在极端情况下也解决了重传的歧义。
|
||||
|
||||
但是,该扩展不仅仅是检测旧数据包。 TCP 时间戳的另一个主要功能是更精确的往返时间测量(RTTm)。
|
||||
|
||||
#### 需要准确的 RTT 估算
|
||||
|
||||
当两个对等方都支持时间戳时,每个 TCP 段都携带两个附加数字:时间戳值和时间戳回显。
|
||||
|
||||
```
|
||||
TCP 时间戳选项 (TSopt): Kind: 8, Length: 10
|
||||
+-------+----+----------------+-----------------+
|
||||
|Kind=8 | 10 |TS Value (TSval)|EchoReply (TSecr)|
|
||||
+-------+----+----------------+-----------------+
|
||||
1 1 4 4
|
||||
```
|
||||
|
||||
准确的 RTT 估算对于 TCP 性能至关重要。 TCP 自动重新发送未确认的数据。 重传由计时器触发:如果超时,则 TCP 会将尚未收到确认的一个或多个数据包视为丢失。 然后再发送一次。
|
||||
|
||||
但是,“尚未得到确认” 并不意味着该段已丢失。 也有可能是接收方到目前为止没有发送确认,或者确认仍在传输中。 这就造成了一个两难的困境:TCP 必须等待足够长的时间,才能让这种轻微的延迟变得无关紧要,但它也不能等待太久。
|
||||
|
||||
##### 低网络延迟 VS 高网络延迟
|
||||
|
||||
在延迟较高的网络中,如果计时器触发过快,TCP 经常会将时间和带宽浪费在不必要的重发上。
|
||||
|
||||
然而,在延迟较低的网络中,等待太长时间会导致真正发生数据包丢失时吞吐量降低。因此,在低延迟网络中,计时器应该比高延迟网络中更早到期。 所以,TCP 重传超时不能使用固定常量值作为超时。它需要根据其在网络中所经历的延迟来调整该值。
|
||||
|
||||
##### RTT(往返时间)的测量
|
||||
|
||||
TCP 选择基于预期往返时间(RTT)的重传超时。 RTT 事先是未知的。它是通过测量发送段与 TCP 接收到该段所承载数据的确认之间的增量来估算的。
|
||||
|
||||
有多种因素使这一测量变得复杂:
|
||||
|
||||
* 出于性能原因,TCP 不会为收到的每个数据包生成新的确认。它等待的时间非常短:如果有更多的数据段到达,则可以通过单个 ACK 数据包确认其接收。这称为<ruby>“累积确认”<rt>cumulative ACK</rt></ruby>。
|
||||
* 往返时间并不恒定。 这是有多种因素造成的。例如,客户端可能是一部移动电话,随其移动而切换到不同的基站。也可能是当链路或 CPU 利用率提高时,数据包交换花费了更长的时间。
|
||||
* 必须重新发送的数据包在计算过程中必须被忽略。
|
||||
这是因为发送方无法判断重传数据段的 ACK 是在确认原始传输 (毕竟已到达) 还是在确认重传。
|
||||
|
||||
最后一点很重要:当 TCP 忙于从丢失中恢复时,它可能仅接收到重传段的 ACK。这样,它就无法在此恢复阶段测量(更新)RTT。所以,它无法调整重传超时,然后超时将以指数级增长。那是一种非常具体的情况(它假设其他机制,如快速重传或 SACK 不起作用)。但是,使用 TCP 时间戳,即使在这种情况下也会进行 RTT 评估。
|
||||
|
||||
如果使用了扩展,则对等方将从 TCP 段扩展空间中读取时间戳值并将其存储在本地。然后,它将该值放入作为 “时间戳回显” 发回的所有数据段中。
|
||||
|
||||
因此,该选项带有两个时间戳:它的发送方自己的时间戳和它从对等方收到的最新时间戳。原始发送方使用“回显时间戳”来计算 RTT。它是当前时间戳时钟与“时间戳回显”中所反映的值之间的增量。
|
||||
|
||||
##### 时间戳的其他用途
|
||||
|
||||
TCP 时间戳甚至还有除 PAWS 和 RTT 测量以外的其他用途。例如,可以借此检测不必要的重传:如果确认携带的是较旧的时间戳回显,则该确认针对的是初始数据包,而不是重新发送的数据包。
|
||||
|
||||
TCP 时间戳的另一个更晦涩的用例与 TCP [syn cookie][4] 功能有关。
|
||||
|
||||
##### 在服务器端建立 TCP 连接
|
||||
|
||||
当连接请求到达的速度快于服务器应用程序可以接受新的传入连接的速度时,连接积压最终将达到其极限。这可能是由于系统配置错误或应用程序中的错误引起的。当一个或多个客户端发送连接请求而不对 “SYN ACK” 响应做出反应时,也会发生这种情况。这将用不完整的连接填充连接队列。这些条目需要几秒钟才会超时。这被称为<ruby>“同步洪水攻击”<rt>syn flood attack</rt></ruby>。
|
||||
|
||||
##### TCP 时间戳和 TCP Syn Cookie
|
||||
|
||||
即使队列已满,某些 TCP 协议栈也允许继续接受新连接。发生这种情况时,Linux 内核将在系统日志中打印一条突出的消息:
|
||||
|
||||
> 端口 P 上可能发生了 SYN 泛洪。正在发送 Cookie。请检查 SNMP 计数器。
|
||||
|
||||
此机制将完全绕过连接队列。通常存储在连接队列中的信息被编码到 SYN/ACK 响应 TCP 序列号中。当 ACK 返回时,可以根据序列号重建队列条目。
|
||||
|
||||
序列号只有有限的空间来存储信息。 因此,使用 “TCP Syn Cookie” 机制建立的连接不能支持 TCP 选项。
|
||||
|
||||
但是,对两个对等点都通用的 TCP 选项可以存储在时间戳中。 ACK 数据包在时间戳回显字段中反映了该值,这也允许恢复已达成共识的 TCP 选项。否则,cookie 连接受标准的 64KB 接收窗口限制。
|
||||
|
||||
##### 常见误区 —— 时间戳不利于性能
|
||||
|
||||
不幸的是,一些指南建议禁用 TCP 时间戳,以减少内核访问时间戳时钟来获取当前时间所需的次数。这是不正确的。如前所述,RTT 估算是 TCP 的必要部分。因此,内核在接收/发送数据包时总是采用微秒级的时间戳。
|
||||
|
||||
在包处理步骤的其余部分中,Linux 会重用 RTT 估算所需的时钟时间戳。这还避免了将时间戳添加到传出 TCP 数据包的额外时钟访问。
|
||||
|
||||
整个时间戳选项在每个数据包中仅需要 10 个字节的 TCP 选项空间,这并没有显著减少可用于数据包有效负载的空间。
|
||||
|
||||
##### 常见误区 —— 时间戳是个安全问题
|
||||
|
||||
一些安全审计工具和 (较旧的) 博客文章建议禁用 TCP 时间戳,因为据称它们泄露了系统正常运行时间:这样一来,便可以估算系统/内核的补丁级别。这在过去是正确的:时间戳时钟基于不断增加的值,该值在每次系统引导时都以固定值开始。时间戳值可以估计机器已经运行了多长时间 (正常运行时间)。
|
||||
|
||||
从 Linux 4.12 开始,TCP 时间戳不再显示正常运行时间。发送的所有时间戳值都使用对等设备特定的偏移量,并且时间戳值也会每 49 天回绕一次。
|
||||
|
||||
换句话说,从地址 “A” 出发,或者终到地址 “A” 的连接看到的时间戳与到远程地址 “B” 的连接看到的时间戳不同。
|
||||
|
||||
运行 _sysctl net.ipv4.tcp_timestamps=2_ 可以禁用随机化偏移。这使得分析由诸如 _Wireshark_ 或 _tcpdump_ 之类的工具记录的数据包跟踪变得更容易 —— 从主机发送的数据包在其 TCP 选项时间戳中都具有相同的时钟基准。因此,对于正常操作,默认设置应保持不变。
|
||||
|
||||
### 选择性确认
|
||||
|
||||
如果丢失同一数据窗口中的多个数据包,TCP 将会出现问题。 这是因为 TCP 确认是累积的,但仅适用于按顺序到达的数据包。例如:
|
||||
|
||||
* 发送方发送段 s_1,s_2,s_3,... s_n
|
||||
* 发送方收到 s_2 的 ACK
|
||||
* 这意味着 s_1 和 s_2 都已收到,并且发送方不再需要保留这些段。
|
||||
* s_3 是否应该重新发送? s_4呢? s_n?
|
||||
|
||||
发送方等待 “重传超时” 或 “重复 ACK” 以使 s_2 到达。如果发生重传超时或到达 s_2 的多个重复 ACK,则发送方再次发送 s_3。
|
||||
|
||||
如果发送方收到对 s_n 的确认,则 s_3 是唯一丢失的数据包。这是理想的情况。仅发送单个丢失的数据包。
|
||||
|
||||
如果发送方收到的确认段小于 s_n,例如 s_4,则意味着丢失了多个数据包。
|
||||
发送方也需要重传下一个数据段。
|
||||
|
||||
##### 重传策略
|
||||
|
||||
可能只是重复相同的序列:重新发送下一个数据包,直到接收方指示它已处理了直至 s_n 的所有数据包为止。这种方法的问题在于,它需要一个 RTT,直到发送方知道接下来必须重新发送的数据包为止。尽管这种策略可以避免不必要的重传,但要等到 TCP 重新发送整个数据窗口后,它可能要花几秒钟甚至更长的时间。
|
||||
|
||||
另一种方法是一次重新发送几个数据包。当丢失了几个数据包时,此方法可使 TCP 恢复更快。在上面的示例中,TCP 重新发送了 s_3,s_4,s_5,...,但是只能确保已丢失 s_3。
|
||||
|
||||
从延迟的角度来看,这两种策略都不是最佳的。如果只有一个数据包需要重新发送,第一种策略是快速的,但是当多个数据包丢失时,它花费的时间太长。
|
||||
|
||||
第二种策略即使在必须重发多个数据包时也很快,但代价是会浪费带宽。此外,这样的 TCP 发送方在进行不必要的重传时可能已经发送了新数据。
|
||||
|
||||
通过可用信息,TCP 无法知道丢失了哪些数据包。这就是 TCP [选择性确认][5](SACK)的用武之地了。就像窗口缩放和时间戳一样,它是另一个可选的但非常有用的 TCP 特性。
|
||||
|
||||
##### SACK 选项
|
||||
|
||||
```
|
||||
TCP Sack-Permitted Option: Kind: 4, Length 2
|
||||
+---------+---------+
|
||||
| Kind=4 | Length=2|
|
||||
+---------+---------+
|
||||
```
|
||||
|
||||
支持此扩展的发送方在连接请求中包括 “允许 SACK” 选项。如果两个端点都支持扩展,则检测到数据流中丢失数据包的对等点可以将此信息通知发送方。
|
||||
|
||||
```
|
||||
TCP SACK Option: Kind: 5, Length: Variable
|
||||
+--------+--------+
|
||||
| Kind=5 | Length |
|
||||
+--------+--------+--------+--------+
|
||||
| Left Edge of 1st Block |
|
||||
+--------+--------+--------+--------+
|
||||
| Right Edge of 1st Block |
|
||||
+--------+--------+--------+--------+
|
||||
| |
|
||||
/ . . . /
|
||||
| |
|
||||
+--------+--------+--------+--------+
|
||||
| Left Edge of nth Block |
|
||||
+--------+--------+--------+--------+
|
||||
| Right Edge of nth Block |
|
||||
+--------+--------+--------+--------+
|
||||
```
|
||||
|
||||
如果接收方收到段 s_2 之后跟着的是 s_5 ... s_n,那么它在发送对 s_2 的确认时将包括一个 SACK 块:
|
||||
|
||||
```
|
||||
|
||||
+--------+-------+
|
||||
| Kind=5 | 10 |
|
||||
+--------+------+--------+-------+
|
||||
| Left edge: s_5 |
|
||||
+--------+--------+-------+------+
|
||||
| Right edge: s_n |
|
||||
+--------+-------+-------+-------+
|
||||
```
|
||||
|
||||
这告诉发送方到 s_2 的段都是按顺序到达的,但也让发送方知道段 s_5 至 s_n 也已收到。 然后,发送方可以重新发送这两个数据包,并继续发送新数据。
|
||||
|
||||
##### 神话般的无损网络
|
||||
|
||||
从理论上讲,如果连接不会丢包,那么 SACK 就没有任何优势。或者连接具有如此低的延迟,甚至等待一个完整的 RTT 都无关紧要。
|
||||
|
||||
在实践中,无损行为几乎是不可能保证的。
|
||||
即使网络及其所有交换机和路由器具有足够的带宽和缓冲区空间,数据包仍然可能丢失:
|
||||
|
||||
* 主机操作系统可能面临内存压力并丢弃数据包。请记住,一台主机可能同时处理数万个数据包流。
|
||||
* CPU 可能无法足够快地消耗掉来自网络接口的传入数据包。这会导致网络适配器本身中的数据包丢失。
|
||||
* 如果 TCP 时间戳不可用,即使一个非常小的 RTT 的连接也可能在丢失恢复期间暂时停止。
|
||||
|
||||
使用 SACK 不会增加 TCP 数据包的大小,除非连接遇到数据包丢失。因此,几乎没有理由禁用此功能。几乎所有的 TCP 协议栈都支持 SACK —— 它通常只在不进行 TCP 批量数据传输的低功耗物联网(IoT)类设备上才不存在。
|
||||
|
||||
当 Linux 系统接受来自此类设备的连接时,TCP 会自动为受影响的连接禁用 SACK。
|
||||
|
||||
### 总结
|
||||
|
||||
本文中研究的三个 TCP 扩展都与 TCP 性能有关,最好都保留其默认设置:enabled。
|
||||
|
||||
TCP 握手可确保仅使用双方都能理解的扩展,因此,永远不需要因为对等方可能不支持某个扩展而全局禁用它。
|
||||
|
||||
关闭这些扩展会导致严重的性能损失,尤其是在 TCP 窗口缩放和 SACK 的情况下。 可以禁用 TCP 时间戳而不会立即造成不利影响,但是现在没有令人信服的理由这样做了。
|
||||
启用它们还可以支持 TCP 选项,即使在 SYN cookie 生效时也是如此。
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://fedoramagazine.org/tcp-window-scaling-timestamps-and-sack/
|
||||
|
||||
作者:[Florian Westphal][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[gxlct008](https://github.com/gxlct008)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://fedoramagazine.org/author/strlen/
|
||||
[b]: https://github.com/lujun9972
|
||||
[1]: https://fedoramagazine.org/wp-content/uploads/2020/08/tcp-window-scaling-816x346.png
|
||||
[2]: https://en.wikipedia.org/wiki/Transmission_Control_Protocol#TCP_segment_structure
|
||||
[3]: https://www.rfc-editor.org/info/rfc7323
|
||||
[4]: https://en.wikipedia.org/wiki/SYN_cookies
|
||||
[5]: https://www.rfc-editor.org/info/rfc2018
|
@ -0,0 +1,284 @@
|
||||
[#]: collector: (lujun9972)
|
||||
[#]: translator: (robsean)
|
||||
[#]: reviewer: ( )
|
||||
[#]: publisher: ( )
|
||||
[#]: url: ( )
|
||||
[#]: subject: (Learn the basics of programming with C)
|
||||
[#]: via: (https://opensource.com/article/20/8/c-programming-cheat-sheet)
|
||||
[#]: author: (Seth Kenlon https://opensource.com/users/seth)
|
||||
|
||||
使用 C 语言学习基本的编程
|
||||
======
|
||||
我们将所有的 C 语言要素放置到一份易读的备忘录上。
|
||||
![备忘录封面图片][1]
|
||||
|
||||
1972 年,丹尼斯·里奇在贝尔实验室工作;几年前,他和他的团队成员刚刚发明了 Unix。在创造了一个经久不衰的操作系统(至今仍在使用)之后,他需要一种好的方法来给这些 Unix 计算机编程,以便它们可以执行新的任务。在现在看来这很奇怪,但在当时,编程语言相对较少;Fortran、Lisp、[Algol][2] 以及 B 语言都很流行,但对于贝尔实验室的研究员们想要做的事情来说,它们还远远不够。丹尼斯·里奇展现出一种后来被视为程序员典型特征的特质:创造了他自己的解决方案。他称之为 C 语言,在近 50 年后的今天,它仍在被广泛使用。
|
||||
|
||||
### 为什么你应该学习 C 语言
|
||||
|
||||
今天,有很多语言为程序员提供了比 C 语言更多的特性。最明显的一种是 C++,正如其名字所暗示的,它构建在 C 语言之上,并创造了一种优秀的面向对象语言。不过,其它许多语言的存在也都有充分的理由。计算机擅长始终如一的重复,因此任何可预见的东西都可以内置到一种语言中,对程序员来说这意味着更少的工作量。在 C++ 中用一行(`long x = long(n);`)就能把一个 `int` 转换为 `long`,为什么在 C 语言中要花两行去做同样的事呢?
|
||||
|
||||
然而,C 语言在今天仍然有用。
|
||||
|
||||
首先,C 语言是一种非常小巧、简单的语言。没有超出编程基础的高深概念,这很大程度上是因为 C 语言实实在在是现代编程语言的基础之一。例如,C 语言提供数组,但不提供字典(除非你自己写一个)。当你学习 C 语言时,你学习的是编程的基本构建块,这可以帮助你识别出如今的语言中那些经过改进和精心设计的构思。
|
||||
|
||||
因为 C 语言是一种最小化的编程语言,你的应用程序很可能会获得其它许多编程语言难以企及的性能提升。在考虑代码能执行多快时,人们很容易陷入速度竞赛,因此,询问自己是否_需要_为某项特定任务追求更快的速度很重要。与 Python 或 Java 相比,使用 C 语言时你无需为每一行代码的速度操心,C 语言程序本身就运行得很快。这也是 Linux 内核使用 C 语言编写的一个很好的理由。
|
||||
|
||||
最后,C 语言很容易入门,特别是,如果你正在运行 Linux ,你可能已经在运行 C 语言代码,因为 Linux 系统包含 GNU C 库(`glibc`)。为了编写和构建 C 语言程序, 你需要做的全部工作就是安装一个编译器,打开一个文本编辑器,开始编码。
|
||||
|
||||
### 开始学习 C 语言
|
||||
|
||||
如果你正在运行 Linux ,你可以使用你的软件包管理器安装一个 C 编译器。在 Fedora 或 RHEL 上:
|
||||
|
||||
|
||||
```
|
||||
$ sudo dnf install gcc
|
||||
```
|
||||
|
||||
在 Debian 及其衍生系统上:
|
||||
|
||||
|
||||
```
|
||||
$ sudo apt install build-essential
|
||||
```
|
||||
|
||||
在 macOS 上,你可以 [安装 Homebrew][3] ,并使用它来安装 [GCC][4]:
|
||||
|
||||
|
||||
```
|
||||
$ brew install gcc
|
||||
```
|
||||
|
||||
在 Windows 上,你可以使用 [MinGW][5] 安装一套最小的、包含 GCC 的 GNU 实用程序集。
|
||||
|
||||
在 Linux 或 macOS 上验证你已经安装的 GCC:
|
||||
|
||||
|
||||
```
|
||||
$ gcc --version
|
||||
gcc (GCC) x.y.z
|
||||
Copyright (C) 20XX Free Software Foundation, Inc.
|
||||
```
|
||||
|
||||
在 Windows 上, 提供 EXE 文件的完整路径:
|
||||
|
||||
|
||||
```
|
||||
PS> C:\MinGW\bin\gcc.exe --version
|
||||
gcc.exe (MinGW.org GCC Build-2) x.y.z
|
||||
Copyright (C) 20XX Free Software Foundation, Inc.
|
||||
```
|
||||
|
||||
### C 语法
|
||||
|
||||
C 语言不是一种脚本型的语言。它是一种编译型的语言,这意味着它由 C 编译器处理来产生一个二进制可执行文件。这不同于一种脚本型语言(像:[Bash][6] )或一种混合型语言(像:[Python][7] )。
|
||||
|
||||
在 C 语言中,你创建 _函数_ 来实施你渴望做到的任务。默认情况下,一个名称为 `main` 的函数将被执行。
|
||||
|
||||
这里是一个使用 C 语言写的简单的 "hello world" 程序:
|
||||
|
||||
|
||||
```
|
||||
#include <stdio.h>
|
||||
|
||||
int main() {
    printf("Hello world");
    return 0;
}
|
||||
```
|
||||
|
||||
第一行包含一个被称为 `stdio.h` (标准输入和输出) 的 _头文件_ ,它基本上是自由的、非常初级的 C 语言代码,你可以在你自己的程序中重复使用。创建一个由一个基本的输出语句构成的名称为 `main` 的函数。保存这些文本到一个被称为 `hello.c` 的文件中,然后使用 GCC 编译它:
|
||||
|
||||
|
||||
```
|
||||
$ gcc hello.c --output hello
|
||||
```
|
||||
|
||||
尝试运行你的 C 语言程序:
|
||||
|
||||
|
||||
```
|
||||
$ ./hello
|
||||
Hello world$
|
||||
```
|
||||
|
||||
#### 返回值
|
||||
|
||||
一个函数在执行后“返回”一些东西是 Unix 哲学的一部分:在成功时不返回任何东西,在失败时返回其它的一些东西(例如,一个错误信息)。这些返回代码通常使用数字(确切地说是整数)表示:0 表示一切正常,任何大于 0 的数字都表示某种不成功的状态。
|
||||
|
||||
Unix 和 Linux 被设计成在成功运行时保持沉默,这是很明智的。这样,你总是可以通过确认执行一系列命令后没有任何错误或警告,来默认它们成功了。类似地,C 语言中的函数在设计上也默认不出现错误。
|
||||
|
||||
你可以对程序做一处让它产生错误的小修改,来亲自验证这种设计:
|
||||
|
||||
|
||||
```
|
||||
#include <stdio.h>

int main() {
    printf("Hello world");
    return 1;
}
|
||||
```
|
||||
|
||||
编译它:
|
||||
|
||||
|
||||
```
|
||||
$ gcc hello.c --output failer
|
||||
```
|
||||
|
||||
现在,使用 Linux 内置的测试机制来运行它。`&&` 操作符仅在一个命令的第一部分成功时,才执行它的第二部分。例如:
|
||||
|
||||
|
||||
```
|
||||
$ echo "success" && echo "it worked"
|
||||
success
|
||||
it worked
|
||||
```
|
||||
|
||||
而 `||` 测试则在一个命令的第一部分_失败_时,才执行它的第二部分。
|
||||
|
||||
|
||||
```
|
||||
$ ls blah || echo "it did not work"
|
||||
ls: cannot access 'blah': No such file or directory
|
||||
it did not work
|
||||
```
|
||||
|
||||
现在试试你的程序。它在成功时返回的_不是_ 0,而是 1:
|
||||
|
||||
|
||||
```
|
||||
$ ./failer && echo "it worked"
Hello world
|
||||
```
|
||||
|
||||
这个程序成功地执行,但是没有触发第二个命令。
|
||||
|
||||
#### 变量和类型
|
||||
|
||||
在一些语言中,你可以在不具体指定变量所包含数据的_类型_的情况下创建变量。这些语言的解释器会对变量运行一些测试,以此来推断变量包含什么类型的数据。例如,Python 知道表达式 `var=1` 定义了一个整型数,当你把某个值与 `var` 相加时,结果显然也是整型数。它同样知道当你连接 `hello` 和 `world` 时,单词 `world` 是一个字符串。
|
||||
|
||||
C 语言不会为你做任何这些识别和调查;你必需定义你自己的变量类型。这里有几种变量类型,包括整型(int),字符型(char),浮点型(float),布尔型(Boolean)。
|
||||
|
||||
你可能也注意到这里没有字符串类型。与 Python、Java、Lua 以及其它许多编程语言不同,C 语言没有字符串类型,而是将字符串看作一个字符数组。
|
||||
|
||||
这里是一些简单的代码,它建立了一个 `char` 数组变量,然后使用 [printf][9] 将数组变量和一段简单的信息打印到你的屏幕上:
|
||||
|
||||
|
||||
```
|
||||
#include <stdio.h>
|
||||
|
||||
int main() {
    char var[6] = "hello";
    printf("Your string is: %s\r\n", var);
}
|
||||
```
|
||||
|
||||
你可能会注意到,这个代码示例给一个五个字母的单词分配了六个字符的空间。这是因为字符串的结尾有一个隐藏的终止符,它占用了数组中的一个字节。你可以通过编译和执行代码来运行它:
|
||||
|
||||
|
||||
```
|
||||
$ gcc hello.c --output hello
|
||||
$ ./hello
|
||||
Your string is: hello
|
||||
```
|
||||
|
||||
### 函数
|
||||
|
||||
和其它的编程语言一样,C 语言的函数可以接受参数。你可以通过定义希望函数接受的数据类型,来将参数从一个函数传递到另一个函数:
|
||||
|
||||
|
||||
```
|
||||
#include <stdio.h>
|
||||
|
||||
int printmsg(char a[]) {
    printf("String is: %s\r\n", a);
    return 0;
}

int main() {
    char a[6] = "hello";
    printmsg(a);
    return 0;
}
|
||||
```
|
||||
|
||||
像这样简单地把一个函数拆成两个并不是特别有用,但它演示了 `main` 会默认运行,以及如何在函数之间传递数据。
|
||||
|
||||
### 条件语句
|
||||
|
||||
在真实的编程中,你通常希望代码根据数据做出判断。这是用_条件_语句完成的,而 `if` 语句是其中最基础的一个。
|
||||
|
||||
为了使这个示例程序更具动态性,你可以包含 `string.h` 头文件,它包含了用于检查(顾名思义)字符串的代码。尝试使用 `string.h` 文件中的 `strlen` 函数,测试传递给 `printmsg` 函数的字符串长度是否大于 0:
|
||||
|
||||
|
||||
```
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
|
||||
int printmsg(char a[]) {
    size_t len = strlen(a);
    if (len > 0) {
        printf("String is: %s\r\n", a);
    }
    return 0;
}

int main() {
    char a[6] = "hello";
    printmsg(a);
    return 1;
}
|
||||
```
|
||||
|
||||
正如这个示例所实现的,该条件永远不会为假,因为所提供的字符串总是 "hello",它的长度总是大于 0。而要为 `echo` 命令这个不太严肃的重新实现画上句号,最后一步就是接受来自用户的输入。
|
||||
|
||||
### 命令参数
|
||||
|
||||
`stdio.h` 文件包含的代码在每次程序启动时提供两个参数: 一个是包含在命令 (`argc`) 中的项目总数,一个是包含每个项目 (`argv`) 的数组。例如, 假设你发出这个虚构的命令:
|
||||
|
||||
|
||||
```
|
||||
$ foo -i bar
|
||||
```
|
||||
|
||||
`argc` 是 3,`argv` 的内容是:
|
||||
|
||||
* `argv[0] = foo`
|
||||
* `argv[1] = -i`
|
||||
* `argv[2] = bar`
|
||||
|
||||
|
||||
|
||||
你能修改这个示例 C 语言程序,让它接受 `argv[2]` 作为要打印的字符串,而不是默认的 `hello` 吗?
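下面是一种可能的改法,仅作为参考的最小示意:它沿用了前文的 `printmsg` 函数,并假设用户会像上面虚构的 `foo -i bar` 命令那样,把目标字符串放在 `argv[2]` 的位置:

```
#include <stdio.h>
#include <string.h>

int printmsg(char a[]) {
    size_t len = strlen(a);
    if (len > 0) {
        printf("String is: %s\r\n", a);
    }
    return 0;
}

int main(int argc, char *argv[]) {
    /* 对于形如 `foo -i bar` 的命令,argv[2] 即为 "bar";
       参数不足时退回到默认的 "hello"。 */
    if (argc > 2) {
        printmsg(argv[2]);
    } else {
        printmsg("hello");
    }
    return 0;
}
```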
|
||||
|
||||
### 命令式编程语言
|
||||
|
||||
C 语言是一种命令式编程语言。它不是面向对象的,它没有类结构。使用 C 语言的经验可以教你很多关于如何处理数据和如何更好地管理你的代码运行时生成的数据。充分地使用 C 语言,你最后能够编写其它语言 (例如 Python 和 Lua) 能够使用的库。
|
||||
|
||||
为了学习更多关于 C 语言的知识,你需要实际使用它。在 `/usr/include/` 中查找有用的 C 语言头文件,并且看看你可以做些什么小任务来帮助自己学习 C 语言。在学习的过程中,可以使用 [Jim Hall][12] 为 FreeDOS 编写的 [C 语言备忘录][11]。它把所有的基本要素都放在一张双面的备忘录上,因此在你做练习时,可以随时查阅 C 语言语法的所有要素。
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://opensource.com/article/20/8/c-programming-cheat-sheet
|
||||
|
||||
作者:[Seth Kenlon][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[robsean](https://github.com/robsean)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://opensource.com/users/seth
|
||||
[b]: https://github.com/lujun9972
|
||||
[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/coverimage_cheat_sheet.png?itok=lYkNKieP (Cheat Sheet cover image)
|
||||
[2]: https://opensource.com/article/20/6/algol68
|
||||
[3]: https://opensource.com/article/20/6/homebrew-mac
|
||||
[4]: https://gcc.gnu.org/
|
||||
[5]: https://opensource.com/article/20/8/gnu-windows-mingw
|
||||
[6]: https://opensource.com/resources/what-bash
|
||||
[7]: https://opensource.com/resources/python
|
||||
[8]: http://www.opengroup.org/onlinepubs/009695399/functions/printf.html
|
||||
[9]: https://opensource.com/article/20/8/printf
|
||||
[10]: http://www.opengroup.org/onlinepubs/009695399/functions/strlen.html
|
||||
[11]: https://opensource.com/downloads/c-programming-cheat-sheet
|
||||
[12]: https://opensource.com/users/jim-hall
|
@ -0,0 +1,119 @@
|
||||
[#]: collector: (lujun9972)
|
||||
[#]: translator: (robsean)
|
||||
[#]: reviewer: ( )
|
||||
[#]: publisher: ( )
|
||||
[#]: url: ( )
|
||||
[#]: subject: (GNOME 3.38 is Here With Customizable App Grid, Performance Improvements and Tons of Other Changes)
|
||||
[#]: via: (https://itsfoss.com/gnome-3-38-release/)
|
||||
[#]: author: (Ankush Das https://itsfoss.com/author/ankush/)
|
||||
|
||||
GNOME 3.38 携可定制应用程序网格,性能改善和大量其它的更改而来
|
||||
======
|
||||
|
||||
[GNOME 3.36][1] 带来了大量急需的改善,同时也带来了性能上的重大提升。现在,在 6 个月后,我们终于迎来了带有一系列更改的 GNOME 3.38。
|
||||
|
||||
### GNOME 3.38 主要特色
|
||||
|
||||
这里是 GNOME 3.38 (代码名称:Orbis) 的主要亮点:
|
||||
|
||||
[更多 Linux 视频,请订阅我们的 YouTube 频道][2]
|
||||
|
||||
#### 可定制应用程序菜单
|
||||
|
||||
作为 GNOME 3.38 重大更改中的一部分,应用程序网格(或者说应用程序菜单)现在是可以定制的。
|
||||
|
||||
现在,你可以通过拖拽应用程序图标来创建文件夹,把图标移入/移出文件夹,或是放回应用程序网格中。你也可以在应用程序网格中随意地重新排列图标。
|
||||
|
||||
![][3]
|
||||
|
||||
此外,这些变化是未来一些设计更新的基础组成部分,因此,能提前看到我们可以期待的东西,实在令人兴奋。
|
||||
|
||||
#### 日历菜单更新
|
||||
|
||||
![][4]
|
||||
|
||||
随着最近一次的 GNOME 更新,通知区整洁了很多,但是现在随着 GNOME 3.38 的到来,你终于可以通过访问日历区正下方的日历事件来更方便地处理事情。
|
||||
|
||||
它不是一个主要的可见改造,但是它也有不少的改善。
|
||||
|
||||
#### 家长控制改善
|
||||
|
||||
你会注意到 GNOME 3.38 加入了家长控制服务。它支持与桌面、shell、设置以及其它各种组件集成,来帮助你限制用户可以访问的内容。
|
||||
|
||||
#### 重新启动按钮
|
||||
|
||||
一些细微的改善导致了巨大的变化,重新启动按钮正是其中的一个变化。先单击 “关闭电源” / “关机” 按钮,再单击 “重新启动” 按钮的操作来重新启动系统总是让人很烦闷。
|
||||
|
||||
因此,随着 GNOME 3.38 的到来,你终于会看到一个单独的“重新启动”按钮,这将节省你的点击次数,平复你烦闷的心情。
|
||||
|
||||
#### 屏幕录制改善
|
||||
|
||||
[GNOME shell 的内置屏幕录制][5] 现在是一项独立的系统服务,这可能会使录制屏幕成为一种平滑流畅的体验。
|
||||
|
||||
另外,窗口截屏也有一些改善,并修复了一些错误。
|
||||
|
||||
#### GNOME 应用程序更新
|
||||
|
||||
GNOME 计算器也收到了很多错误修复。除此之外,你还会发现 [epiphany GNOME 浏览器][6] 的一些重大改变。
|
||||
|
||||
GNOME Boxes 现在允许你从一个操作系统列表中选择将要运行的操作系统,GNOME 地图也有一些图形用户界面上的更改。
|
||||
|
||||
当然,不仅限于这些,你也将注意到 GNOME 控制中心、联系人、照片、Nautilus 以及其它一些软件包的细微更新和修复。
|
||||
|
||||
#### 性能和多显示器支持改善
|
||||
|
||||
这里有一大堆隐藏改善来全面地改善 GNOME 3.38 。 例如,[Mutter][7] 有一些重要的修复,它现在允许在两个显示器中使用不同的刷新频率。
|
||||
|
||||
![][8]
|
||||
|
||||
先前,如果一台显示器的刷新频率为 60 Hz,而另一台的刷新频率为 144 Hz ,那么刷新频率较慢的显示器将限制另外一台显示器的刷新频率。但是,随着在 GNOME 3.38 中的改善,它将能够处理多个显示器,而不会使显示器相互限制。
|
||||
|
||||
另外,[Phoronix][9] 报告的一些更改指出,在一些情况下,缩短大约 10% 的渲染时间。因此,巨大的性能优化是很确定的。
|
||||
|
||||
#### 各种各样的其它更改
|
||||
|
||||
* 电池百分比指示器
|
||||
* 在电源菜单中的重新启动选项
|
||||
* 新的欢迎导览
|
||||
* 指纹登录
|
||||
* 二维码扫描共享 Wi-Fi 热点
|
||||
* GNOME 浏览器的隐私和其它改善
|
||||
* GNOME 地图现在具有响应式界面,能根据屏幕大小调整尺寸
|
||||
* 重新修订的图标
|
||||
|
||||
|
||||
|
||||
你可以在它们的官方 [更改日志][10] 中找到一个详细的更改列表。
|
||||
|
||||
### 总结
|
||||
|
||||
GNOME 3.38 确实是一个令人赞叹的、改善 GNOME 用户体验的更新。尽管 GNOME 3.36 已经带来了性能上的很大改善,但针对 GNOME 3.38 的更多优化仍然是一件非常好的事。
|
||||
|
||||
GNOME 3.38 将在 Ubuntu 20.10 和 [Fedora 33][11] 中可用。Arch 和 Manjaro 用户应该很快就能获得。
|
||||
|
||||
我认为在正确的方向上有大量的更改。你觉得呢?
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://itsfoss.com/gnome-3-38-release/
|
||||
|
||||
作者:[Ankush Das][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[robsean](https://github.com/robsean)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://itsfoss.com/author/ankush/
|
||||
[b]: https://github.com/lujun9972
|
||||
[1]: https://itsfoss.com/gnome-3-36-release/
|
||||
[2]: https://www.youtube.com/c/itsfoss?sub_confirmation=1
|
||||
[3]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2020/09/gnome-app-arranger.jpg?resize=799%2C450&ssl=1
|
||||
[4]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2020/09/gnome-3-38-calendar-menu.png?resize=800%2C721&ssl=1
|
||||
[5]: https://itsfoss.com/gnome-screen-recorder/
|
||||
[6]: https://en.wikipedia.org/wiki/GNOME_Web
|
||||
[7]: https://en.wikipedia.org/wiki/Mutter_(software)
|
||||
[8]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2020/09/gnome-multi-monitor-refresh-rate.jpg?resize=800%2C369&ssl=1
|
||||
[9]: https://www.phoronix.com/scan.php?page=news_item&px=GNOME-3.38-Last-Min-Mutter
|
||||
[10]: https://help.gnome.org/misc/release-notes/3.38
|
||||
[11]: https://itsfoss.com/fedora-33/
|
@ -0,0 +1,101 @@
|
||||
[#]: collector: (lujun9972)
|
||||
[#]: translator: (geekpi)
|
||||
[#]: reviewer: ( )
|
||||
[#]: publisher: ( )
|
||||
[#]: url: ( )
|
||||
[#]: subject: (Simplify your web experience with this internet protocol alternative)
|
||||
[#]: via: (https://opensource.com/article/20/10/gemini-internet-protocol)
|
||||
[#]: author: (Seth Kenlon https://opensource.com/users/seth)
|
||||
|
||||
使用此互联网协议替代方案简化你的 Web 体验
|
||||
======
|
||||
用 Gemini 协议发现更安静、更简单的互联网新角落。
|
||||
![Person typing on a 1980's computer][1]
|
||||
|
||||
如果你上网很长时间了,或者见多识广,你可能还记得一个早期的文本共享协议,叫做 [Gopher][2]。Gopher 最终被 HTTP 协议所取代,当然,HTTP 协议是现代万维网的基础。对于很多人来说,“互联网”和“万维网”是一回事,因为很多人从未有意识地使用过 www 子域之外的东西。
|
||||
|
||||
但一直以来,都有各种网络协议在互联网络上共享信息。Telnet、FTP、SSH、Torrent、GNUnet 等等。最近,在这一系列的替代品中又多了一个,它叫 [Gemini][3]。
|
||||
|
||||
Gemini 协议以介于“水星计划”的初期实验与“阿波罗计划”之间的太空任务命名,旨在平和地处于 Gopher 和 HTTP 之间。无论如何,它的目的并不是要取代现代 Web,但它确实试图创造一个简化的网络,以及一个现代化的 Gopher。
|
||||
|
||||
它的发展虽然年轻,但意义重大,原因有很多。当然,人们会出于技术和哲学上的原因而质疑现代 Web,但它也确实普遍臃肿。当你真正想要的只是一个非常具体的问题的可靠答案时,谷歌搜索返回的无数结果会让人觉得过犹不及。
|
||||
|
||||
许多人使用 Gopher 就是因为这个原因:它的规模小到可以让小众的兴趣很容易找到。然而,Gopher 是一个旧的协议,它对编程、网络和浏览做出了一些假设,但这些假设已经不再适用了。 Gemini 的目标是将最好的网络带入一种类似于 Gopher 但易于编程的格式。一个简单的 Gemini 浏览器可以用几百行代码写成,并且有一个非常好的浏览器用 1600 行左右写成。这对于程序员、学生和极简主义者来说都是一个强大的功能。
|
||||
|
||||
### 如何浏览 Gemini
|
||||
|
||||
就像早期的网络一样,Gemini 的规模很小,有一个运行 Gemini 网站的已知服务器列表。就像浏览 HTTP 站点需要一个网页浏览器一样,访问 Gemini 站点也需要一个 Gemini 浏览器。已经有几个可用的,在 [Gemini 网站][4]上列出。
|
||||
|
||||
最简单的一个是 [AV-98][5] 客户端。它是用 Python 编写的,在终端中运行。要想试试的话,请下载它:
|
||||
|
||||
|
||||
```
|
||||
$ git clone https://tildegit.org/solderpunk/AV-98.git
|
||||
```
|
||||
|
||||
进入下载目录,运行 AV-98:
|
||||
|
||||
|
||||
```
|
||||
$ cd AV-98
|
||||
$ python3 ./main.py
|
||||
```
|
||||
|
||||
客户端是一个交互式的提示。它有有限的几个命令,主要的命令是简单的 `go`,后面跟着一个 Gemini 服务器地址。进入已知的 [Gemini 服务器][6]列表,选择一个看起来很有趣的服务器,然后尝试访问它:
|
||||
|
||||
|
||||
```
|
||||
AV-98> go gemini://example.club
|
||||
|
||||
Welcome to the example.club Gemini server!
|
||||
|
||||
Here are some folders of ASCII art:
|
||||
|
||||
[1] Penguins
|
||||
[2] Wildebeests
|
||||
[3] Demons
|
||||
```
|
||||
|
||||
导航是按照编号的链接来进行的。例如,要进入 Penguins 目录,输入 `1` 然后按回车键:
|
||||
|
||||
|
||||
```
|
||||
AV-98> 1
|
||||
|
||||
[1] Gentoo
|
||||
[2] Emperor
|
||||
[3] Little Blue
|
||||
```
|
||||
|
||||
要返回,输入 `back` 并按回车键:
|
||||
|
||||
|
||||
```
|
||||
AV-98> back
|
||||
```
|
||||
|
||||
更多命令,请输入 `help`。
|
||||
|
||||
### Gemini 作为你的 web 替代
|
||||
|
||||
Gemini 协议非常简单,初级和中级程序员都可以为其编写客户端,而且它是在互联网上分享内容的一种简单快捷的方式。虽然万维网的无处不在对广泛传播是有利的,但总有替代方案的空间。看看 Gemini,发现更安静、更简单的互联网的新角落。
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://opensource.com/article/20/10/gemini-internet-protocol
|
||||
|
||||
作者:[Seth Kenlon][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[geekpi](https://github.com/geekpi)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://opensource.com/users/seth
|
||||
[b]: https://github.com/lujun9972
|
||||
[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/1980s-computer-yearbook.png?itok=eGOYEKK- (Person typing on a 1980's computer)
|
||||
[2]: https://en.wikipedia.org/wiki/Gopher_%28protocol%29
|
||||
[3]: https://gemini.circumlunar.space/
|
||||
[4]: https://gemini.circumlunar.space/clients.html
|
||||
[5]: https://tildegit.org/solderpunk/AV-98
|
||||
[6]: https://portal.mozz.us/gemini/gemini.circumlunar.space/servers
|
@ -0,0 +1,111 @@
|
||||
[#]: collector: (lujun9972)
|
||||
[#]: translator: (geekpi)
|
||||
[#]: reviewer: ( )
|
||||
[#]: publisher: ( )
|
||||
[#]: url: ( )
|
||||
[#]: subject: (Integrate your calendar with Ansible to avoid schedule conflicts)
|
||||
[#]: via: (https://opensource.com/article/20/10/calendar-ansible)
|
||||
[#]: author: (Nicolas Leiva https://opensource.com/users/nicolas-leiva)
|
||||
|
||||
将你的日历与 Ansible 集成,以避免与日程冲突
|
||||
======
|
||||
通过将一个日历应用集成到 Ansible 中来确保你的自动化工作流计划不会与其他东西冲突。
|
||||
![Calendar close up snapshot][1]
|
||||
|
||||
“随时”是执行自动化工作流的好时机吗?答案可能是否定的,原因各不相同。
|
||||
|
||||
如果你希望避免同时进行更改,以最大限度地减少对关键业务流程的影响,并降低意外服务中断的风险,那么其他人不应该试图在你的自动化运行的同时进行更改。
|
||||
|
||||
在某些情况下,可能存在一个正在进行的计划维护窗口。 或者,可能有大型事件即将来临、一个关键的业务时间,或者假期,你或许不想在星期五晚上进行更改。
|
||||
|
||||
![Street scene with a large calendar and people walking][2]
|
||||
|
||||
([Curtis MacNewton][3], [CC BY-ND 2.0][4])
|
||||
|
||||
无论出于什么原因,你都希望将此信息传给你的自动化平台,防止周期性或临时性任务在特定时间段内执行。用变更管理的行话来说,我指的是为不应该发生变更活动的时段指定一个封锁窗口。
|
||||
|
||||
### Ansible 中的日历集成
|
||||
|
||||
如何在 [Ansible][5] 中实现这个功能?虽然它本身没有日历功能,但 Ansible 的可扩展性将允许它与任何具有 API 的日历应用集成。
|
||||
|
||||
目标是这样的:在执行任何自动化或变更活动之前,你要执行一个 `pre-task` ,它会检查日历中是否已经安排了某些事情(目前或最近),并确认你没有在一个阻塞的时间段中。
|
||||
|
||||
想象一下,你有一个名为 `calendar` 的虚构模块,它可以连接到一个远程日历,比如 Google 日历,以确定你指定的时间是否已经以其他方式被标记为繁忙。你可以写一个类似这样的 playbook:
|
||||
|
||||
|
||||
```
|
||||
- name: Check if timeslot is taken
  calendar:
    time: "{{ ansible_date_time.iso8601 }}"
  register: output
|
||||
```
|
||||
|
||||
Ansible 的事实变量 `ansible_date_time` 会被传递给 `calendar` 模块,以验证该时间是否可用,这样模块就可以注册响应(`output`),供后续任务使用。
|
||||
|
||||
如果你的日历是这样的:
|
||||
|
||||
![Google Calendar screenshot][6]
|
||||
|
||||
(Nicolas Leiva, [CC BY-SA 4.0][7])
|
||||
|
||||
那么这个任务的输出就会高亮这个时间段被占用的事实 (`busy: true`):
|
||||
|
||||
|
||||
```
|
||||
ok: [localhost] => {
    "output": {
        "busy": true,
        "changed": false,
        "failed": false,
        "msg": "The timeslot 2020-09-02T17:53:43Z is busy: true"
    }
}
|
||||
```
|
||||
|
||||
### 阻止任务运行
|
||||
|
||||
接下来,[Ansible Conditionals][8] 将帮助阻止所有之后任务的执行。一个简单的例子,你可以在下一个任务上使用 `when` 语句来强制它只有当上一个输出中的 `busy` 字段不是 `true` 时,它才会运行:
|
||||
|
||||
|
||||
```
|
||||
tasks:
  - shell: echo "Run this only when not busy!"
    when: not output.busy
|
||||
```
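把上面的检查任务和条件执行组合起来,一个最小的示意性 playbook 大致如下。注意,其中的 `calendar` 仍是文中假想的模块,仅用于说明思路:

```
---
- hosts: localhost
  tasks:
    - name: Check if timeslot is taken
      calendar:
        time: "{{ ansible_date_time.iso8601 }}"
      register: output

    - name: Run the change activity
      shell: echo "Run this only when not busy!"
      when: not output.busy
```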
|
||||
|
||||
### 总结
|
||||
|
||||
在[上一篇文章][9]中,我说过 Ansible 是一个将事物连接在一起的框架,它把不同的构建块相互连接,以协调端到端的自动化工作流。
|
||||
|
||||
这篇文章探讨了 playbook 如何与日历应用集成以检查可用性。然而,我只做了一些表面工作!例如,你的任务也可以阻止日历中的一个时间段,这里的发挥空间很大。
|
||||
|
||||
在我的下一篇文章中,我将深入 `calendar` 模块是如何构建的,以及其他编程语言如何与 Ansible 一起使用。如果你和我一样是 [Go][10] 的粉丝,请继续关注!
|
||||
|
||||
* * *
|
||||
|
||||
_这篇文章最初发表在 Medium 上,名为 [Ansible and Google Calendar integration for change management][11],采用 CC BY-SA 4.0 许可,经许可后转载。_
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://opensource.com/article/20/10/calendar-ansible
|
||||
|
||||
作者:[Nicolas Leiva][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[geekpi](https://github.com/geekpi)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://opensource.com/users/nicolas-leiva
|
||||
[b]: https://github.com/lujun9972
|
||||
[1]: https://opensource.com/sites/default/files/styles/image-full-size/public/lead-images/calendar.jpg?itok=jEKbhvDT (Calendar close up snapshot)
|
||||
[2]: https://opensource.com/sites/default/files/uploads/street-calendar.jpg (Street scene with a large calendar and people walking)
|
||||
[3]: https://www.flickr.com/photos/7841127@N02/4217116202
|
||||
[4]: https://creativecommons.org/licenses/by-nd/2.0/
|
||||
[5]: https://docs.ansible.com/ansible/latest/index.html
|
||||
[6]: https://opensource.com/sites/default/files/uploads/googlecalendarexample.png (Google Calendar screenshot)
|
||||
[7]: https://creativecommons.org/licenses/by-sa/4.0/
|
||||
[8]: https://docs.ansible.com/ansible/latest/user_guide/playbooks_conditionals.html
|
||||
[9]: https://medium.com/swlh/python-and-ansible-to-automate-a-network-security-workflow-28b9a44660c6
|
||||
[10]: https://golang.org/
|
||||
[11]: https://medium.com/swlh/ansible-and-google-calendar-integration-for-change-management-7c00553b3d5a
|
@ -0,0 +1,134 @@
|
||||
[#]: collector: (lujun9972)
|
||||
[#]: translator: (gxlct008)
|
||||
[#]: reviewer: ( )
|
||||
[#]: publisher: ( )
|
||||
[#]: url: ( )
|
||||
[#]: subject: (How to Install Deepin Desktop on Ubuntu 20.04 LTS)
|
||||
[#]: via: (https://itsfoss.com/install-deepin-ubuntu/)
|
||||
[#]: author: (Ankush Das https://itsfoss.com/author/ankush/)
|
||||
|
||||
如何在 Ubuntu 20.04 LTS 上安装 Deepin 桌面
|
||||
======
|
||||
|
||||
_**本教程向您展示在 Ubuntu 上安装 Deepin 桌面环境的正确步骤。还提到了移除步骤。**_
|
||||
|
||||
毫无疑问,Deepin 是一个 [漂亮的 Linux 发行版][1]。最近发布的 [Deepin version 20][2] 让它更加美观了。
|
||||
|
||||
现在,[Deepin Linux][3] 是基于 [Debian][4] 的,默认的存储库镜像太慢了。如果您更愿意使用 Ubuntu,可以选择 [UbuntuDDE Linux 发行版][5] 形式的 Ubuntu 的 Deepin 变体。它还不是 [官方的 Ubuntu 风格][6] 之一。
|
||||
|
||||
[重新安装新的发行版][7] 是一个麻烦,因为您会丢失数据,您将不得不在新安装的 UbuntuDDE 上重新安装您的应用程序。
|
||||
|
||||
一个更简单的选择是在现有的 Ubuntu 系统上安装 Deepin 桌面环境。毕竟,您可以轻松地在一个系统中安装多个 [桌面环境][8]。
|
||||
|
||||
不要烦恼,这很容易做到,如果您不喜欢,也可以恢复这些更改。让我来告诉你怎么做。
|
||||
|
||||
### 在 Ubuntu 20.04 上安装 Deepin 桌面
|
||||
|
||||
![][9]
|
||||
|
||||
|
||||
UbuntuDDE 团队已为他们的发行版创建了一个 PPA,您可以使用同一个 PPA 在 Ubuntu 20.04 上安装 Deepin 桌面。请记住,此 PPA 仅适用于 Ubuntu 20.04。建议先阅读 [在 Ubuntu 中使用 PPA][10] 的相关内容。
|
||||
|
||||
注意:这还不是 Deepin 20 版本
|
||||
|
||||
您将在此处使用 PPA 安装的 Deepin 桌面还不是新的 Deepin 桌面版本 20。它可能会在 Ubuntu 20.10 发布后出现,但是我们不能保证任何事情。
|
||||
|
||||
以下是您需要遵循的步骤:
|
||||
|
||||
**步骤 1**:您需要首先在终端上输入以下内容,来添加 [Ubuntu DDE Remix 团队的官方 PPA][11]:
|
||||
|
||||
```
|
||||
sudo add-apt-repository ppa:ubuntudde-dev/stable
|
||||
```
|
||||
|
||||
**步骤 2**:添加存储库以后,继而安装 Deepin 桌面。
|
||||
|
||||
```
|
||||
sudo apt install ubuntudde-dde
|
||||
```
|
||||
|
||||
![][12]
|
||||
|
||||
现在,安装将启动,一段时间后,将要求您选择<ruby>显示管理器<rt>display manager</rt></ruby>。
|
||||
|
||||
![][13]
|
||||
|
||||
如果需要深度桌面主题的锁屏,则需要选择 “**lightdm**”。如果不需要,您可以将其设置为 “**gdm3**”。
|
||||
|
||||
如果您看不到此选项,可以通过键入以下命令来获得它,然后选择您首选的显示管理器:
|
||||
|
||||
```
|
||||
sudo dpkg-reconfigure lightdm
|
||||
```
|
||||
|
||||
**步骤 3**: 完成后,您必须退出并通过选择 “**Deepin**” 会话再次登录,或者重新启动系统。
|
||||
|
||||
![][14]
|
||||
|
||||
|
||||
就是这样。马上在您的 Ubuntu 20.04 LTS 系统上享受深度体验吧!
|
||||
|
||||
![][15]
|
||||
|
||||
### 从 Ubuntu 20.04 删除 Deepin 桌面
|
||||
|
||||
如果您不喜欢这种体验,或者由于某些原因它有 bug,可以按照以下步骤将其删除。
|
||||
|
||||
**步骤 1**: 如果您已将 “lightdm” 设置为显示管理器,则需要在卸载 Deepin 之前将显示管理器设置为 “gdm3”。为此,请键入以下命令:
|
||||
|
||||
```
|
||||
sudo dpkg-reconfigure lightdm
|
||||
```
|
||||
|
||||
![Select gdm3 on this screen][13]
|
||||
|
||||
然后,选择 **gdm3** 继续。
|
||||
|
||||
完成此操作后,您只需输入以下命令即可完全删除 Deepin:
|
||||
|
||||
```
|
||||
sudo apt remove startdde ubuntudde-dde
|
||||
```
|
||||
|
||||
您只需重启即可回到原来的 Ubuntu 桌面。如果图标没有响应,只需打开终端(**CTRL + ALT + T**)并输入:
|
||||
|
||||
|
||||
```
|
||||
reboot
|
||||
```
|
||||
|
||||
**总结**
|
||||
|
||||
有不同的 [桌面环境选择][16] 是件好事。如果您真的喜欢 Deepin 桌面界面,那么这可能是在 Ubuntu 上体验 Deepin 的一种方式。
|
||||
|
||||
如果您有任何疑问或遇到任何问题,请在评论中告诉我。
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://itsfoss.com/install-deepin-ubuntu/
|
||||
|
||||
作者:[Ankush Das][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[gxlct008](https://github.com/gxlct008)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://itsfoss.com/author/ankush/
|
||||
[b]: https://github.com/lujun9972
|
||||
[1]: https://itsfoss.com/beautiful-linux-distributions/
|
||||
[2]: https://itsfoss.com/deepin-20-review/
|
||||
[3]: https://www.deepin.org/en/
|
||||
[4]: https://www.debian.org/
|
||||
[5]: https://itsfoss.com/ubuntudde/
|
||||
[6]: https://itsfoss.com/which-ubuntu-install/
|
||||
[7]: https://itsfoss.com/reinstall-ubuntu/
|
||||
[8]: https://itsfoss.com/what-is-desktop-environment/
|
||||
[9]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2020/10/ubuntu-20-with-deepin.jpg?resize=800%2C386&ssl=1
|
||||
[10]: https://itsfoss.com/ppa-guide/
|
||||
[11]: https://launchpad.net/~ubuntudde-dev/+archive/ubuntu/stable
|
||||
[12]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2020/10/deepin-desktop-install.png?resize=800%2C534&ssl=1
|
||||
[13]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2020/10/deepin-display-manager.jpg?resize=800%2C521&ssl=1
|
||||
[14]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2020/10/deepin-session-ubuntu.jpg?resize=800%2C414&ssl=1
|
||||
[15]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2020/10/ubuntu-20-with-deepin-1.png?resize=800%2C589&ssl=1
|
||||
[16]: https://itsfoss.com/best-linux-desktop-environments/
|
@ -0,0 +1,185 @@
|
||||
[#]: collector: (lujun9972)
|
||||
[#]: translator: (geekpi)
|
||||
[#]: reviewer: ( )
|
||||
[#]: publisher: ( )
|
||||
[#]: url: ( )
|
||||
[#]: subject: (How to Remove Physical Volume from a Volume Group in LVM)
|
||||
[#]: via: (https://www.2daygeek.com/linux-remove-delete-physical-volume-pv-from-volume-group-vg-in-lvm/)
|
||||
[#]: author: (Magesh Maruthamuthu https://www.2daygeek.com/author/magesh/)
|
||||
|
||||
如何从 LVM 的卷组中删除物理卷?
|
||||
======
|
||||
|
||||
如果 LVM 不再需要使用某个设备,你可以使用 vgreduce 命令从卷组中删除物理卷。
|
||||
|
||||
vgreduce 命令通过删除物理卷来缩小卷组的容量。
|
||||
|
||||
但要确保物理卷没有被任何逻辑卷使用,请使用 pvdisplay 命令。
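例如,可以像下面这样检查(输出仅为示意,字段值以实际系统为准;重点是查看该物理卷是否已有被分配出去的扩展):

```
# pvdisplay /dev/sdb1

  --- Physical volume ---
  PV Name               /dev/sdb1
  VG Name               myvg
  PV Size               50.00 GiB
  Allocated PE          1280
```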
|
||||
|
||||
如果物理卷仍在使用,你必须使用 pvmove 命令将数据转移到另一个物理卷。
|
||||
|
||||
数据转移后,它就可以从卷组中删除。
|
||||
|
||||
最后使用 pvremove 命令删除空物理卷上的 LVM 标签和 LVM 元数据。
|
||||
|
||||
* **第一部分:[如何在 Linux 中创建/配置 LVM(逻辑卷管理)][1]**。
|
||||
* **第二部分:[如何在 Linux 中扩展/增加 LVM 大小(逻辑卷调整)][2]**。
|
||||
* **第三部分:[如何在 Linux 中减少/缩小 LVM 大小(逻辑卷调整)][3]**。
|
||||
|
||||
|
||||
|
||||
![][4]
|
||||
|
||||
### 1) 将扩展移动到现有物理卷上
|
||||
|
||||
使用 pvs 命令检查是否使用了所需的物理卷(我们计划删除 LVM 中的 **“/dev/sdb1”** 磁盘)。
|
||||
|
||||
```
|
||||
# pvs -o+pv_used
|
||||
|
||||
PV VG Fmt Attr PSize PFree Used
|
||||
/dev/sda1 myvg lvm2 a- 75.00G 14.00G 61.00G
|
||||
/dev/sdb1 myvg lvm2 a- 50.00G 45.00G 5.00G
|
||||
/dev/sdc1 myvg lvm2 a- 17.15G 12.15G 5.00G
|
||||
```
|
||||
|
||||
如果使用了,请检查卷组中的其他物理卷是否有足够的空闲空间。
|
||||
|
||||
如果有的话,你可以在需要删除的设备上运行 pvmove 命令。扩展将被分配到其他设备上。
|
||||
|
||||
```
|
||||
# pvmove /dev/sdb1
|
||||
|
||||
/dev/sdb1: Moved: 2.0%
|
||||
…
|
||||
/dev/sdb1: Moved: 79.2%
|
||||
…
|
||||
/dev/sdb1: Moved: 100.0%
|
||||
```
|
||||
|
||||
当 pvmove 命令完成后。再次使用 pvs 命令检查物理卷是否有空闲。
|
||||
|
||||
```
|
||||
# pvs -o+pv_used
|
||||
|
||||
PV VG Fmt Attr PSize PFree Used
|
||||
/dev/sda1 myvg lvm2 a- 75.00G 9.00G 66.00G
|
||||
/dev/sdb1 myvg lvm2 a- 50.00G 50.00G 0
|
||||
/dev/sdc1 myvg lvm2 a- 17.15G 12.15G 5.00G
|
||||
```
|
||||
|
||||
如果它是空闲的,使用 vgreduce 命令从卷组中删除物理卷 /dev/sdb1。
|
||||
|
||||
```
|
||||
# vgreduce myvg /dev/sdb1
|
||||
Removed "/dev/sdb1" from volume group "myvg"
|
||||
```
|
||||
|
||||
最后,运行 pvremove 命令从 LVM 配置中删除磁盘。现在,磁盘已经完全从 LVM 中移除,可以用于其他用途。
|
||||
|
||||
```
|
||||
# pvremove /dev/sdb1
|
||||
Labels on physical volume "/dev/sdb1" successfully wiped.
|
||||
```
|
||||
|
||||
### 2) 移动扩展到新磁盘
|
||||
|
||||
如果卷组中的其他物理卷上没有足够的可用扩展,请使用以下步骤添加新的物理卷。
|
||||
|
||||
向存储团队申请新的 LUN。分配完毕后,运行以下命令来**[在 Linux 中发现新添加的 LUN 或磁盘][5]**。
|
||||
|
||||
```
|
||||
# ls /sys/class/scsi_host
|
||||
host0
|
||||
```
|
||||
|
||||
```
|
||||
# echo "- - -" > /sys/class/scsi_host/host0/scan
|
||||
```
|
||||
|
||||
```
|
||||
# fdisk -l
|
||||
```
|
||||
|
||||
操作系统中检测到磁盘后,使用 pvcreate 命令创建物理卷。
|
||||
|
||||
```
|
||||
# pvcreate /dev/sdd1
|
||||
Physical volume "/dev/sdd1" successfully created
|
||||
```
|
||||
|
||||
使用以下命令将新的物理卷 /dev/sdd1 添加到现有卷组 myvg 中。
|
||||
|
||||
```
|
||||
# vgextend myvg /dev/sdd1
Volume group "myvg" successfully extended
|
||||
```
|
||||
|
||||
现在,使用 pvs 命令查看你添加的新磁盘 **“/dev/sdd1”**。
|
||||
|
||||
```
|
||||
# pvs -o+pv_used
|
||||
|
||||
PV VG Fmt Attr PSize PFree Used
|
||||
/dev/sda1 myvg lvm2 a- 75.00G 14.00G 61.00G
|
||||
/dev/sdb1 myvg lvm2 a- 50.00G 0 50.00G
|
||||
/dev/sdc1 myvg lvm2 a- 17.15G 12.15G 5.00G
|
||||
/dev/sdd1 myvg lvm2 a- 60.00G 60.00G 0
|
||||
```
|
||||
|
||||
使用 pvmove 命令将数据从 /dev/sdb1 移动到 /dev/sdd1。
|
||||
|
||||
```
|
||||
# pvmove /dev/sdb1 /dev/sdd1
|
||||
|
||||
/dev/sdb1: Moved: 10.0%
|
||||
…
|
||||
/dev/sdb1: Moved: 79.7%
|
||||
…
|
||||
/dev/sdb1: Moved: 100.0%
|
||||
```
|
||||
|
||||
数据移动到新磁盘后。再次使用 pvs 命令检查物理卷是否空闲。
|
||||
|
||||
```
|
||||
# pvs -o+pv_used
|
||||
|
||||
PV VG Fmt Attr PSize PFree Used
|
||||
/dev/sda1 myvg lvm2 a- 75.00G 14.00G 61.00G
|
||||
/dev/sdb1 myvg lvm2 a- 50.00G 50.00G 0
|
||||
/dev/sdc1 myvg lvm2 a- 17.15G 12.15G 5.00G
|
||||
/dev/sdd1 myvg lvm2 a- 60.00G 10.00G 50.00G
|
||||
```
|
||||
|
||||
如果空闲,使用 vgreduce 命令从卷组中删除物理卷 /dev/sdb1。
|
||||
|
||||
```
|
||||
# vgreduce myvg /dev/sdb1
|
||||
Removed "/dev/sdb1" from volume group "myvg"
|
||||
```
|
||||
|
||||
最后,运行 pvremove 命令从 LVM 配置中删除磁盘。现在,磁盘已经完全从 LVM 中移除,可以用于其他用途。
|
||||
|
||||
```
|
||||
# pvremove /dev/sdb1
|
||||
Labels on physical volume "/dev/sdb1" successfully wiped.
|
||||
```
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://www.2daygeek.com/linux-remove-delete-physical-volume-pv-from-volume-group-vg-in-lvm/
|
||||
|
||||
作者:[Magesh Maruthamuthu][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[geekpi](https://github.com/geekpi)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://www.2daygeek.com/author/magesh/
|
||||
[b]: https://github.com/lujun9972
|
||||
[1]: https://www.2daygeek.com/create-lvm-storage-logical-volume-manager-in-linux/
|
||||
[2]: https://www.2daygeek.com/extend-increase-resize-lvm-logical-volume-in-linux/
|
||||
[3]: https://www.2daygeek.com/reduce-shrink-decrease-resize-lvm-logical-volume-in-linux/
|
||||
[4]: data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7
|
||||
[5]: https://www.2daygeek.com/scan-detect-luns-scsi-disks-on-redhat-centos-oracle-linux/
|
@ -0,0 +1,136 @@
|
||||
[#]: collector: (lujun9972)
|
||||
[#]: translator: (rakino)
|
||||
[#]: reviewer: ( )
|
||||
[#]: publisher: ( )
|
||||
[#]: url: ( )
|
||||
[#]: subject: (6 Essential Things To Do After Installing Manjaro Linux)
|
||||
[#]: via: (https://itsfoss.com/things-to-do-after-installing-manjaro/)
|
||||
[#]: author: (Dimitrios Savvopoulos https://itsfoss.com/author/dimitrios/)
|
||||
|
||||
安装 Manjaro Linux 后要做的 6 件事
|
||||
======
|
||||
|
||||
所以,你刚刚[全新安装了 Manjaro Linux][1],那么现在该做什么呢?
|
||||
|
||||
下面是我推荐你在安装后进行的一些步骤。
|
||||
|
||||
不过说实话,这些都是我在安装 Manjaro 后喜欢做的事,根据你的需求,步骤可能会有所不同。
|
||||
|
||||
### 推荐在安装完 Manjaro Linux 后去做的事
|
||||
|
||||
![][2]
|
||||
|
||||
我使用的是 Xfce 版的 Manjaro,但这些步骤也适用于 [Manjaro][3] 的其它桌面环境版本。
|
||||
|
||||
#### 1、设置最快的镜像
|
||||
|
||||
在更新系统之前,我建议先整理一下镜像列表。在刷新 Manjaro 系统和从软件仓库下载软件包的时候,优化后的镜像列表会对系统的性能产生明显的影响。
|
||||
|
||||
打开终端模拟器并输入以下命令:
|
||||
|
||||
```
|
||||
sudo pacman-mirrors --fasttrack
|
||||
```
|
||||
|
||||
![][4]
|
||||
|
||||
#### 2、更新系统
|
||||
|
||||
保持系统更新可以降低安全漏洞的发生机率,在安装新的软件之前也建议刷新系统的软件仓库。
|
||||
|
||||
你可以用下面的命令来[更新 Manjaro 系统][5]:
|
||||
|
||||
```
|
||||
sudo pacman -Syu
|
||||
```
|
||||
|
||||
![][6]
|
||||
|
||||
#### 3、启用 AUR,Snap 以及 Flatpak 支持
|
||||
|
||||
[<ruby>Arch 用户仓库<rt>Arch User Repository</rt></ruby>(AUR)][7]是用户选择[基于 Arch Linux 的系统][8]的一个主要理由。你可以在 AUR 中访问到大量的附加软件。
|
||||
|
||||
(译注:AUR 中的 PKGBUILD 均为用户上传且未经审核,使用者需要自负责任,在构建软件包前请注意检查其中内容是否合理。)
|
||||
|
||||
作为可选项,你可以直接在 Pamac 图形化软件包管理器中启用对 [Snap][9] 以及 [Flatpak][10] 的支持。
|
||||
|
||||
![][11]
|
||||
|
||||
#### 4、启用 TRIM(仅 SSD)
|
||||
|
||||
如果你的根分区已经安装在了 SSD 上,启用 [TRIM][12] 会是你在安装 Manjaro 后需要做的一件事。TRIM 会帮助清理 SSD 中的块,从而延长 SSD 的使用寿命。
|
||||
|
||||
要在 Manjaro 中启用 TRIM,请在终端中输入以下命令:
|
||||
|
||||
```
|
||||
sudo systemctl enable fstrim.timer
|
||||
```
|
||||
|
||||
![][13]
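启用后,可以用下面的命令确认该定时器已经被调度(输出因系统而异):

```
systemctl list-timers fstrim.timer
```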
|
||||
|
||||
#### 5、安装内核(高级用户)
|
||||
|
||||
我在 [Manjaro 评测][14]中提到的一个话题就是,你可以在图形界面中轻易地更换内核。
|
||||
|
||||
喜欢使用命令行?你也可以在终端中列出系统中已安装的内核以及安装新的内核。
|
||||
|
||||
列出已安装的内核:
|
||||
|
||||
```
|
||||
mhwd-kernel -li
|
||||
```
|
||||
|
||||
**安装新内核**(以最新的 5.8 版本内核为例)**:**
|
||||
|
||||
```
|
||||
sudo mhwd-kernel -i linux58
|
||||
```
|
||||
|
||||
![][15]
|
||||
|
||||
#### 6、安装微软 TrueType 字体(如果需要)
|
||||
|
||||
我经常在个人电脑上编辑工作文件,因此我需要 Times New Roman 或 Arial 等微软字体。
|
||||
|
||||
如果你也需要使用微软字体,可以从 [AUR][7] 中取得这个[软件包][16]。如果你想要在命令行中管理 AUR 软件包,可以选择安装一个 [AUR 助手][17]。
|
||||
|
||||
![][18]
|
||||
|
||||
#### 结论
|
||||
|
||||
如果你想在一个预配置、为桌面优化的发行版上享受 Arch Linux 的优点,[Manjaro 是一个很好的发行版][19]。虽然它预置了很多东西,但由于每个人的设置和需求不同,有几个步骤是不能提前完成的。
|
||||
|
||||
除开已经提到的步骤,还有哪一步对你来说是必不可少的?请在下面的评论中告诉我们。
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
via: https://itsfoss.com/things-to-do-after-installing-manjaro/
|
||||
|
||||
作者:[Dimitrios Savvopoulos][a]
|
||||
选题:[lujun9972][b]
|
||||
译者:[rakino](https://github.com/rakino)
|
||||
校对:[校对者ID](https://github.com/校对者ID)
|
||||
|
||||
本文由 [LCTT](https://github.com/LCTT/TranslateProject) 原创编译,[Linux中国](https://linux.cn/) 荣誉推出
|
||||
|
||||
[a]: https://itsfoss.com/author/dimitrios/
|
||||
[b]: https://github.com/lujun9972
|
||||
[1]: https://itsfoss.com/install-manjaro-linux/
|
||||
[2]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2020/10/things-to-do-after-installing-manjaro.jpg?resize=800%2C450&ssl=1
|
||||
[3]: https://manjaro.org
|
||||
[4]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2020/08/manjaro-fasttrack.png?resize=800%2C600&ssl=1
|
||||
[5]: https://itsfoss.com/update-arch-linux/
|
||||
[6]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2020/03/sudo-pacman-Syu.png?resize=800%2C504&ssl=1
|
||||
[7]: https://itsfoss.com/aur-arch-linux/
|
||||
[8]: https://itsfoss.com/arch-based-linux-distros/
|
||||
[9]: https://itsfoss.com/use-snap-packages-ubuntu-16-04/
|
||||
[10]: https://itsfoss.com/flatpak-guide/
|
||||
[11]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2020/08/pamac-2.png?resize=800%2C600&ssl=1
|
||||
[12]: https://en.wikipedia.org/wiki/Trim_(computing)
|
||||
[13]: https://i0.wp.com/itsfoss.com/wp-content/uploads/2020/08/fstrim.timer_.png?resize=800%2C600&ssl=1
|
||||
[14]: https://itsfoss.com/manjaro-linux-review/
|
||||
[15]: https://i2.wp.com/itsfoss.com/wp-content/uploads/2020/08/manjaro-cli-kernels.png?resize=800%2C600&ssl=1
|
||||
[16]: https://aur.archlinux.org/packages/ttf-ms-fonts
|
||||
[17]: https://itsfoss.com/best-aur-helpers/
|
||||
[18]: https://i1.wp.com/itsfoss.com/wp-content/uploads/2020/08/ttf-ms-fonts.png?resize=800%2C600&ssl=1
|
||||
[19]: https://itsfoss.com/why-use-manjaro-linux/
|