Merge pull request #48 from robertparley/master

版本更新
This commit is contained in:
暮晨 2023-09-17 18:58:17 +08:00 committed by GitHub
commit 71884112da
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
10 changed files with 18913 additions and 516 deletions

8
CHANGELOG.md Normal file → Executable file
View File

@@ -1 +1,9 @@
* 2018-12-16 更新至[30e05a5](https://github.com/satwikkansal/wtfpython/commit/30e05a5973930c38cdb59f1c02b85b19b22ac531)
* 2022-01-20 开始更新[cd4d7c0](https://github.com/satwikkansal/wtfpython/commit/cd4d7c0e340789bd001e5e9eae0e3c5bb7c7f7f1)
* 2022-02-09 更新至[cd4d7c0](https://github.com/satwikkansal/wtfpython/commit/cd4d7c0e340789bd001e5e9eae0e3c5bb7c7f7f1)
* 2022-11-15 更新至[ea1e228](https://github.com/satwikkansal/wtfpython/commit/ea1e228407f2f7efc297e6b773aabf376f6def8e)
* 2023-09-06 更新至[19d4b07](https://github.com/satwikkansal/wtfpython/commit/19d4b075152d93e5bc75c5d08279338a895cfa27)

2397
README.md Normal file → Executable file

File diff suppressed because it is too large Load Diff

24
irrelevant/insert_ids.py Normal file
View File

@@ -0,0 +1,24 @@
# Insert a random example-ID comment after every H3 heading of the README.
import uuid

new_file = []       # lines of the rewritten README, built up below
original_file = []  # raw lines read from the README
fname = "../README.md"
def generate_random_id_comment():
    """Return an HTML-style comment carrying a freshly generated example ID."""
    return f"<!-- Example ID: {uuid.uuid4()} --!>"
# Read the README, inject an ID comment after each example heading, and
# write the result back in place.
with open(fname, "r") as f:
    original_file = f.readlines()

for line in original_file:
    new_file.append(line)
    # Every example section starts with an H3 heading ("### ...").
    if line.strip().startswith("### "):
        # Bug fix: the generated comment needs its own trailing newline;
        # readlines() keeps "\n" on every other line, so without it the
        # comment merged with the following README line on "".join below.
        new_file.append(generate_random_id_comment() + "\n")

with open(fname, "w") as f:
    f.write("".join(new_file))

View File

@@ -0,0 +1,395 @@
"""
An inefficient monolithic piece of code that'll generate jupyter notebook
from the projects main README.
PS: If you are a recruiter, please don't judge me by this piece of code. I wrote it
in hurry. I know this is messy and can be simplified, but I don't want to change it
much because it just works.
Simplifictions and improvements through patches are more than welcome however :)
#TODOs
- CLI arguments for running this thing
- Add it to prepush hook
- Add support for skip comments, to skip examples that are not meant for notebook environment.
- Use templates?
"""
import json
import os
import pprint

# Path to the project README, relative to this script's own directory.
fpath = os.path.join(os.path.dirname( __file__ ), '..', 'README.md')

examples = []

# The globals
current_example = 1       # running id assigned to each parsed example
sequence_num = 1          # monotonically increasing notebook-cell sequence number
current_section_name = ""

# Line prefixes that mark an interactive/source statement (REPL or shell),
# as opposed to plain output lines.
STATEMENT_PREFIXES = ["...", ">>> ", "$ "]

# Markdown preamble (in Chinese) prepended to the hosted notebook explaining
# its read-only examples.  NOTE(review): punctuation in this literal looks
# stripped by the diff viewer — confirm against the repository before reuse.
HOSTED_NOTEBOOK_INSTRUCTIONS = """
## 托管笔记本指南
这只是通过 jupyter notebook 浏览 wtfpython 的实验性尝试因为一些示例是只读的
- 它们要么需要托管运行时不支持的 Python 版本
- 要么它们无法在此环境中复现
预期的输出已经存在于代码单元之后的折叠单元中 Google colab 提供 Python22.7 Python33.6默认运行环境 您可以在这些 Python2 特定示例之间切换 对于特定于其他次要版本的示例您可以简单地参考折叠的输出目前无法在托管笔记本中控制次要版本 您可以检查当前的活动版本
```py
>>> import sys
>>> sys.version
# Prints out Python version here.
```
话虽如此大多数示例都按预期工作 如果您遇到任何问题请随时查阅 wtfpython 上的原始内容并在 repo 中创建问题 祝君顺利
---
"""
def generate_code_block(statements, output):
    """
    Build the intermediate descriptor for a code cell.

    :param statements: The list of statements to execute.
    :type statements: list(str)
    :param output: Captured output lines belonging to the statements.
    """
    global sequence_num
    block = {
        "type": "code",
        "sequence_num": sequence_num,
        "statements": statements,
        "output": output,
    }
    # Each generated block consumes the next slot in the global ordering.
    sequence_num += 1
    return block
def generate_markdown_block(lines):
    """Build the intermediate descriptor for a markdown cell from *lines*."""
    global sequence_num
    block = {
        "type": "markdown",
        "sequence_num": sequence_num,
        "value": lines,
    }
    # Keep the global ordering in sync with generate_code_block.
    sequence_num += 1
    return block
def is_interactive_statement(line):
    """Return True if *line* (ignoring indentation) starts with a REPL/shell prompt."""
    stripped = line.lstrip()
    return any(stripped.startswith(prefix) for prefix in STATEMENT_PREFIXES)
def parse_example_parts(lines, title, current_line):
    """
    Parse the given lines and return a dictionary with two keys:
    build_up, which contains all the text before an H4 (explanation) is encountered,
    and
    explanation, which contains all the text after build_up until --- or another H3 is encountered.

    :param lines: iterator over the remaining README lines (consumed in place).
    :param title: the example's H3 title line, used to seed the first markdown block.
    :param current_line: the line at which parsing should start.
    :returns: (next_line, parts) — the first line NOT consumed, and the parts dict.
    """
    parts = {
        "build_up": [],
        "explanation": []
    }
    content = [title]
    statements_so_far = []
    output_so_far = []
    next_line = current_line
    # store build_up till an H4 (explanation) is encountered
    while not (next_line.startswith("#### ") or next_line.startswith('---')):
        # Watching out for the snippets
        if next_line.startswith("```py"):
            # It's a snippet, whatever found until now is text
            is_interactive = False
            output_encountered = False
            if content:
                parts["build_up"].append(generate_markdown_block(content))
                content = []
            next_line = next(lines)
            while not next_line.startswith("```"):
                if is_interactive_statement(next_line):
                    is_interactive = True
                    # A new prompt after collected output means a new code
                    # block starts: flush what we have so far.
                    if (output_so_far):
                        parts["build_up"].append(generate_code_block(statements_so_far, output_so_far))
                        statements_so_far, output_so_far = [], []
                    statements_so_far.append(next_line)
                else:
                    # can be either output or normal code
                    if is_interactive:
                        output_so_far.append(next_line)
                    elif output_encountered:
                        output_so_far.append(next_line)
                    else:
                        statements_so_far.append(next_line)
                next_line = next(lines)
            # Snippet is over
            parts["build_up"].append(generate_code_block(statements_so_far, output_so_far))
            statements_so_far, output_so_far = [], []
            next_line = next(lines)
        else:
            # It's a text, go on.
            content.append(next_line)
            next_line = next(lines)
    # Explanation encountered, save any content till now (if any)
    if content:
        parts["build_up"].append(generate_markdown_block(content))
    # Reset stuff
    content = []
    statements_so_far, output_so_far = [], []
    # store lines again until --- or another H3 is encountered
    while not (next_line.startswith("---") or
               next_line.startswith("### ")):
        if next_line.lstrip().startswith("```py"):
            # It's a snippet, whatever found until now is text
            is_interactive = False
            if content:
                parts["explanation"].append(generate_markdown_block(content))
                content = []
            next_line = next(lines)
            while not next_line.lstrip().startswith("```"):
                if is_interactive_statement(next_line):
                    is_interactive = True
                    if (output_so_far):
                        parts["explanation"].append(generate_code_block(statements_so_far, output_so_far))
                        statements_so_far, output_so_far = [], []
                    statements_so_far.append(next_line)
                else:
                    # can be either output or normal code
                    if is_interactive:
                        output_so_far.append(next_line)
                    else:
                        statements_so_far.append(next_line)
                next_line = next(lines)
            # Snippet is over
            parts["explanation"].append(generate_code_block(statements_so_far, output_so_far))
            statements_so_far, output_so_far = [], []
            next_line = next(lines)
        else:
            # It's a text, go on.
            content.append(next_line)
            next_line = next(lines)
    # All done
    if content:
        parts["explanation"].append(generate_markdown_block(content))
    return next_line, parts
def remove_from_beginning(tokens, line):
    """
    Strip a leading prompt token (e.g. ">>> ", "$ ") from *line*.

    :param tokens: candidate prefixes, each tested against the lstripped line.
    :param line: the raw line, possibly indented.
    :returns: the line with the first occurrence of each matching token removed.

    Bug fix: the previous ``line.replace(token, "")`` removed EVERY occurrence
    of the token, so a line such as ``>>> s = '>>> '`` lost the token inside
    its string literal too.  Passing count=1 removes only the leading one.
    """
    for token in tokens:
        if line.lstrip().startswith(token):
            line = line.replace(token, "", 1)
    return line
def inspect_and_sanitize_code_lines(lines):
    """
    Strip interpreter/shell prompt prefixes from a code block's lines.

    :param lines: A list of strings, each representing a line in the code block.
    :returns is_print_present, sanitized_lines: A boolean indicating whether a
        print statement/call appears in the original code, and the list of
        prompt-stripped lines.  The list may be empty if the input was empty.
    This function does not remove blank lines at the end of `lines`.
    """
    is_print_present = False
    sanitized = []
    for raw_line in lines:
        cleaned = remove_from_beginning(STATEMENT_PREFIXES, raw_line)
        if cleaned.startswith(("print ", "print(")):
            is_print_present = True
        sanitized.append(cleaned)
    return is_print_present, sanitized
def convert_to_cells(cell_contents, read_only):
    """
    Converts a list of intermediate block descriptors into Jupyter notebook cells.

    :param cell_contents: A list of dictionaries, each
    dictionary representing either a markdown or code cell. Each dictionary should have the following keys: "type", which is either "markdown" or "code",
    and "value". The value for type = 'markdown' is the content as string, whereas the value for type = 'code' is another dictionary with two keys,
    statements and output. The statements key contains all lines in between ```py\n``` (including) until ```\n```, while output contains all lines after
    ```.output py\n```.
    :type cell_contents: List[Dict]
    :param read_only (optional): If True then code cells are rendered as
    read-only markdown (code plus expected output) instead of runnable cells. Default False
    :type read_only (optional): bool
    :returns A list of Jupyter notebook cells built from `cell_contents`.
    Each converted code-cell has its metadata attribute "collapsed" set to true; markdown cells get empty metadata.
    """
    cells = []
    for stuff in cell_contents:
        if stuff["type"] == "markdown":
            # todo add metadata later
            cells.append(
                {
                    "cell_type": "markdown",
                    "metadata": {},
                    "source": stuff["value"]
                }
            )
        elif stuff["type"] == "code":
            if read_only:
                # Read-only examples can't run in the hosted notebook, so
                # render both the code and its expected output as fenced
                # markdown instead of an executable cell.
                # TODO: Fix
                cells.append(
                    {
                        "cell_type": "markdown",
                        "metadata": {},
                        "source": ["```py\n"] + stuff["statements"] + ["```\n"] + ["```py\n"] + stuff['output'] + ["```\n"]
                    }
                )
                continue
            is_print_present, sanitized_code = inspect_and_sanitize_code_lines(stuff["statements"])
            if is_print_present:
                # print output goes to stdout, so record it as a "stream" output.
                cells.append(
                    {
                        "cell_type": "code",
                        "metadata": {
                            "collapsed": True,
                        },
                        "execution_count": None,
                        "outputs": [{
                            "name": "stdout",
                            "output_type": "stream",
                            "text": stuff["output"]
                        }],
                        "source": sanitized_code
                    }
                )
            else:
                # Otherwise the output is a REPL result, i.e. "execute_result".
                cells.append(
                    {
                        "cell_type": "code",
                        "execution_count": None,
                        "metadata": {
                            "collapsed": True
                        },
                        "outputs": [{
                            "data": {
                                "text/plain": stuff["output"]
                            },
                            "output_type": "execute_result",
                            "metadata": {},
                            "execution_count": None
                        }],
                        "source": sanitized_code
                    }
                )
    return cells
def convert_to_notebook(pre_examples_content, parsed_json, post_examples_content):
    """
    Assemble the parsed examples into a Jupyter Notebook and write it to wtf.ipynb.

    :param pre_examples_content: markdown lines that precede the examples.
    :param parsed_json: list of parsed example dicts (with "parts", "read_only").
    :param post_examples_content: markdown lines that follow the examples.
    """
    result = {
        "cells": [],
        "metadata": {},
        "nbformat": 4,
        "nbformat_minor": 2
    }
    notebook_path = "wtf.ipynb"
    # Leading README content always renders as editable markdown.
    result["cells"] += convert_to_cells([generate_markdown_block(pre_examples_content)], False)
    for example in parsed_json:
        parts = example["parts"]
        build_up = parts.get("build_up")
        explanation = parts.get("explanation")
        read_only = example.get("read_only")
        if build_up:
            result["cells"] += convert_to_cells(build_up, read_only)
        if explanation:
            result["cells"] += convert_to_cells(explanation, read_only)
    result["cells"] += convert_to_cells([generate_markdown_block(post_examples_content)], False)
    #pprint.pprint(result, indent=2)
    with open(notebook_path, "w") as f:
        json.dump(result, f, indent=2)
# Main driver: stream the README line by line, split it into the content
# before the examples, the examples themselves, and the content after them,
# then emit the notebook.
with open(fpath, 'r+', encoding="utf-8") as f:
    lines = iter(f.readlines())
    line = next(lines)
    result = []
    pre_examples_phase = True
    pre_stuff = []
    post_stuff = []
    try:
        while True:
            if line.startswith("## "):
                pre_examples_phase = False
                # A section is encountered
                current_section_name = line.replace("## ", "").strip()
                section_text = []
                line = next(lines)
                # Until a new section is encountered
                while not (line.startswith("## ") or line.startswith("# ")):
                    # check if it's a H3
                    if line.startswith("### "):
                        # An example is encountered
                        title_line = line
                        line = next(lines)
                        read_only = False
                        # Skip blank lines and HTML comments between the title
                        # and the example body; a "read-only" marker comment
                        # flags examples that can't run in the hosted notebook.
                        while line.strip() == "" or line.startswith('<!--'):
                            #TODO: Capture example ID here using regex.
                            if '<!-- read-only -->' in line:
                                read_only = True
                            line = next(lines)
                        example_details = {
                            "id": current_example,
                            "title": title_line.replace("### ", ""),
                            "section": current_section_name,
                            "read_only": read_only
                        }
                        line, example_details["parts"] = parse_example_parts(lines, title_line, line)
                        result.append(example_details)
                        current_example += 1
                    else:
                        # NOTE(review): section_text is collected but never
                        # used afterwards — presumably leftover; confirm.
                        section_text.append(line)
                        line = next(lines)
            else:
                if pre_examples_phase:
                    pre_stuff.append(line)
                else:
                    post_stuff.append(line)
                line = next(lines)
    except StopIteration as e:
        # End of README reached: finalize.
        #pprint.pprint(result, indent=2)
        pre_stuff.append(HOSTED_NOTEBOOK_INSTRUCTIONS)
        # False sorts before True, so runnable examples come before read-only ones.
        result.sort(key = lambda x: x["read_only"])
        convert_to_notebook(pre_stuff, result, post_stuff)

View File

@@ -0,0 +1,7 @@
## Generating the notebook
- Expand the relative links in README.md to absolute ones
- Remove the TOC in README.md (because Google colab generates its own anyway)
- Reorder the examples, so that the ones that work are upfront.
- Run the `notebook_generator.py`, it will generate a notebook named `wtf.ipynb`
- Revert the README.md changes (optional)

View File

@@ -0,0 +1,152 @@
Skipping lines?
a
Well, something is fishy...
a
Time for some hash brownies!
f
Evaluation time discrepancy
f
Modifying a dictionary while iterating over it
c
Deleting a list item while iterating over it
c
Backslashes at the end of string
f
Brace yourself!
t*
"this" is love :heart:
t*
Okay Python, Can you make me fly?
t*
`goto`, but why?
t*
Let's meet Friendly Language Uncle For Life
t*
Inpinity
t*
Strings can be tricky sometimes
f*
`+=` is faster
m
Let's make a giant string!
m
Yes, it exists!
t
`is` is not what it is!
f
`is not ...` is not `is (not ...)`
f
The function inside loop sticks to the same output
f
Loop variables leaking out of local scope!
c
A tic-tac-toe where X wins in the first attempt!
f
Beware of default mutable arguments!
c
Same operands, different story!
c
Mutating the immutable!
f
Using a variable not defined in scope
c
The disappearing variable from outer scope
f
Return return everywhere!
f
When True is actually False
f
Be careful with chained operations
c
Name resolution ignoring class scope
c
From filled to None in one instruction...
f
Explicit typecast of strings
m
Class attributes and instance attributes
f
Catching the Exceptions!
f
Midnight time doesn't exist?
f
What's wrong with booleans?
f
Needle in a Haystack
c
Teleportation
a*
yielding None
f
The surprising comma
f
For what?
f
not knot!
f
Subclass relationships
f*
Mangling time!
t*
Deep down, we're all the same.
f*
Half triple-quoted strings
f
Implicit key type conversion
f*
Stubborn `del` operator
c*
Let's see if you can guess this?
f
Minor Ones
m

View File

@@ -0,0 +1,53 @@
"""
This script parses the README.md and generates the table
`CONTRIBUTORS.md`.
No longer works since we've moved on contributors to CONTRIBUTORS.md entirely.
"""
import pprint
import re
import requests
# Matches attribution lines like
#   "Suggested by @handle in [this](https://github.com/satwikkansal/wtfpython/issues/123) issue"
# capturing the handle and the issue number.
# Raw strings are used so backslashes reach the regex engine untouched; the
# previous plain strings made "\S", "\d", "\/" invalid escape sequences
# (DeprecationWarning, and a SyntaxWarning on newer Pythons).
regex = (r"[sS]uggested by @(\S+) in \[this\]\(https://github\.com/satwikkansal"
         r"/wtf[pP]ython/issues/(\d+)\) issue")

fname = "README.md"
contribs = {}  # handle -> list of issue numbers (as strings)

table_header = """
| Contributor | Github | Issues |
|-------------|--------|--------|
"""

table_row = '| {} | [{}](https://github.com/{}) | {} |'
# Bug fix: the issue link previously read "https:/github.com" (missing slash),
# producing broken links in the generated table.
issue_format = '[#{}](https://github.com/satwikkansal/wtfpython/issues/{})'

rows_so_far = []

github_rest_api = "https://api.github.com/users/{}"
# Collect all attribution matches from the README, grouped by GitHub handle.
with open(fname, 'r') as f:
    file_content = f.read()
    matches = re.findall(regex, file_content)
    for match in matches:
        # match is (handle, issue_number); de-duplicate issue numbers per handle.
        if contribs.get(match[0]) and match[1] not in contribs[match[0]]:
            contribs[match[0]].append(match[1])
        else:
            contribs[match[0]] = [match[1]]

for handle, issues in contribs.items():
    issue_string = ', '.join([issue_format.format(i, i) for i in issues])
    # Look up the contributor's display name via the GitHub REST API.
    resp = requests.get(github_rest_api.format(handle))
    name = handle
    if resp.status_code == 200:
        # NOTE(review): the fetched display name is only printed; `name`
        # stays as the handle in the table — possibly intended to be
        # `name = resp.json()['name']`. Confirm before relying on output.
        pprint.pprint(resp.json()['name'])
    else:
        print(handle, resp.content)
    rows_so_far.append(table_row.format(name,
                                        handle,
                                        handle,
                                        issue_string))

print(table_header + "\n".join(rows_so_far))

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,151 @@
# -*- coding: utf-8 -*-
"""
This inefficient module would parse the README.md in the initial version of
WTFPython, and enable me to categorize and reorder a hell lot of examples with
the help of the file `add_categories` (part of which is automatically
generated).

After the refactor, this module no longer works without the necessary updates
to the code.
"""
# Py2/Py3 compatibility: fall back to input() where raw_input is absent.
try:
    raw_input          # Python 2
except NameError:
    raw_input = input  # Python 3

fname = "README.md"
snippets = []  # parsed examples: dicts with title/description/explanation
# Scan the README and split every "### " example into its description
# (text up to the H4 explanation heading) and its explanation (text up to
# the next "---" or H3 heading).
with open(fname, 'r') as f:
    lines = iter(f.readlines())
    # Bug fix: `lines.next()` is Python-2-only and crashed under Python 3
    # despite the raw_input shim above; the next() builtin works on both.
    line = next(lines)
    # Guard state for the final flush below: without it, a README with no
    # "### " heading made the except-handler raise NameError on `title`.
    title = None
    description = []
    explanation = []
    try:
        while True:
            # check if it's a H3
            if line.startswith("### "):
                title = line.replace("### ", "")
                description = []
                next_line = next(lines)
                # store lines till an H4 (explanation) is encountered
                while not next_line.startswith("#### "):
                    description.append(next_line)
                    next_line = next(lines)
                explanation = []
                # store lines again until --- or another H3 is encountered
                while not (next_line.startswith("---") or
                           next_line.startswith("### ")):
                    explanation.append(next_line)
                    next_line = next(lines)
                # Store the results finally
                snippets.append({
                    "title": title,
                    "description": '\n'.join(description),
                    "explanation": '\n'.join(explanation)
                })
                line = next_line
            else:
                line = next(lines)
    except StopIteration:
        # End of file: flush the example that was in flight, if any.
        # NOTE(review): like the original, this may re-append the last
        # example when the file ends outside an example body — confirm
        # whether de-duplication is wanted.
        if title is not None:
            snippets.append({
                "title": title,
                "description": '\n'.join(description),
                "explanation": '\n'.join(explanation)
            })
'''
# Create a file
file_content = "\n\n".join([snip["title"] for snip in snippets])
with open("add_categories", "w") as f:
f.write(file_content)
'''
# Load the manual categorization file: it contains repeating triples of
# (title line, category letter, blank separator).  A trailing "*" on the
# category marks the example as new.
snips_by_title = {}

with open("add_categories", "r") as f:
    content = iter(f.readlines())
    try:
        while True:
            # Bug fix: `content.next()` is Python-2-only; next() works on both.
            title = next(content)
            cat = next(content).strip()
            # Bug fix: `cat[-1] == "*"` raised IndexError on an empty
            # category line; endswith handles that safely.
            is_new = cat.endswith("*")
            cat = cat.replace('*', '')
            snips_by_title[title] = {
                "category": cat,
                "is_new": is_new
            }
            next(content)  # skip the blank separator line
    except StopIteration:
        pass
# Attach the manually assigned category metadata to every parsed snippet
# (mutates the snippet dicts in place).
for snip in snippets:
    meta = snips_by_title[snip["title"]]
    snip["category"] = meta["category"]
    snip["is_new"] = meta["is_new"]

# Bucket the snippets by their category letter.
snips_by_cat = {}
for snip in snippets:
    snips_by_cat.setdefault(snip["category"], []).append(snip)
# Markdown skeleton for a single example.
# NOTE(review): blank lines inside these literals may have been stripped by
# the diff viewer — confirm against the repository before reuse.
snippet_template = """
### ▶ {title}{is_new}
{description}
{explanation}
---
"""

# Markdown skeleton for a whole category section.
category_template = """
---
## {category}
{content}
"""
result = ""
category_names = {
"a": "Appearances are Deceptive!",
"t": "The Hiddent treasures",
"f": "Strain your Brain",
"c": "Be careful of these",
"m": "Miscallaneous"
}
categories_in_order = ["a", "t", "f", "c", "m"]
# Interactive reordering: for each category, print the snippets with their
# indices, then read one index per snippet from stdin to fix the order in
# which they are emitted.
for category in categories_in_order:
    snips = snips_by_cat[category]
    for i, snip in enumerate(snips):
        print(i, ":", snip["title"])
    content = ""
    for _ in snips:
        # The user types the index of the snippet that should come next.
        snip = snips[int(raw_input())]
        is_new = " *" if snip["is_new"] else ""
        # Collapse double blank lines inside each part before templating.
        content += snippet_template.format(title=snip["title"].strip(),
                                           is_new=is_new,
                                           description=snip["description"].strip().replace("\n\n", "\n"),
                                           explanation=snip["explanation"].strip().replace("\n\n", "\n"))
    result += category_template.format(category=category_names[category], content=content.replace("\n\n\n", "\n\n"))

with open("generated.md", "w") as f:
    f.write(result.replace("\n\n\n", "\n\n"))

print("Done!")

13878
irrelevant/wtf.ipynb Executable file

File diff suppressed because it is too large Load Diff