
Commit e2fbace
Chapter 11
1 parent a15e127 commit e2fbace

3 files changed: +73 -0 lines changed

C11_download_all_XKCD_comics.py

Lines changed: 35 additions & 0 deletions
# Downloads every single XKCD comic.

import os
import requests
import bs4

url = 'http://xkcd.com'               # Starting url
os.makedirs('xkcd', exist_ok=True)    # Store comics in ./xkcd

while not url.endswith('#'):
    # Download the page
    print('Downloading page %s...' % url)
    res = requests.get(url)
    res.raise_for_status()

    soup = bs4.BeautifulSoup(res.text, "html.parser")
    comicElem = soup.select('#comic img')
    if not comicElem:
        print('Could not find the comic image.')
    else:
        comicUrl = 'http:' + comicElem[0].get('src')
        # Download the image
        print('Downloading image %s...' % comicUrl)
        res = requests.get(comicUrl)
        res.raise_for_status()
        imageFile = open(os.path.join('xkcd', os.path.basename(comicUrl)), 'wb')
        for chunk in res.iter_content(100000):
            imageFile.write(chunk)
        imageFile.close()

    # Get the Prev button's url.
    prevLink = soup.select('a[rel="prev"]')[0]
    url = 'http://xkcd.com' + prevLink.get('href')

print('Done.')
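
The image-saving step above (raise_for_status, then writing the response body in chunks via iter_content) is the part most worth reusing. Below is a minimal sketch of the same pattern pulled out into a standalone helper; the function name download_image and its arguments are illustrative and not part of this commit.

# Minimal sketch of the chunked-download pattern used above.
# The helper name download_image is hypothetical, not part of the commit.
import os
import requests

def download_image(url, dest_dir='xkcd'):
    os.makedirs(dest_dir, exist_ok=True)          # Make sure the target folder exists.
    res = requests.get(url)
    res.raise_for_status()                        # Stop on a failed HTTP request.
    path = os.path.join(dest_dir, os.path.basename(url))
    with open(path, 'wb') as image_file:
        for chunk in res.iter_content(100000):    # Write the body in 100 KB chunks.
            image_file.write(chunk)
    return path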

C11_feeling_lucky.py

Lines changed: 21 additions & 0 deletions
#! python3
# Opens several Google search results.

import requests
import webbrowser
import bs4
import sys

print('Googling...')
res = requests.get('http://google.com/search?q=' + ' '.join(sys.argv[1:]))

res.raise_for_status()

# Retrieve top search result links.
soup = bs4.BeautifulSoup(res.text, "html.parser")

# Open a browser tab for each result.
linkElems = soup.select('.r a')
numOpen = min(5, len(linkElems))
for i in range(numOpen):
    webbrowser.open('http://google.com' + linkElems[i].get('href'))
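
One caveat: the '.r a' selector matches the result-link markup Google served when this was written, so linkElems can come back empty if that markup changes, and the loop then opens nothing. A minimal sketch of a guard around the loop, reusing the same linkElems variable as above:

# Minimal sketch of a guard, assuming linkElems from the script above.
if not linkElems:
    print('No result links found; the .r a selector may be out of date.')
else:
    for i in range(min(5, len(linkElems))):
        webbrowser.open('http://google.com' + linkElems[i].get('href'))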

C11_maplt.py

Lines changed: 17 additions & 0 deletions
#! python3
# mapIt.py - Launches a map in the browser using an address from the
# command line or clipboard.

import webbrowser
import sys
import pyperclip

if len(sys.argv) > 1:
    # Get address from command line.
    address = ' '.join(sys.argv[1:])
else:
    # Get address from clipboard.
    address = pyperclip.paste()

webbrowser.open('https://www.google.com/maps/place/' + address)
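
This script hands the raw address straight to the browser and relies on it to encode spaces and punctuation in the URL. Below is a minimal sketch of doing that encoding explicitly with the standard library; urllib.parse.quote_plus is not used in the commit, and the address shown is only an example.

# Minimal sketch: URL-encode the address before opening the map.
# quote_plus and the example address are illustrative, not part of the commit.
import urllib.parse
import webbrowser

address = '870 Valencia St, San Francisco, CA 94110'   # Example address.
webbrowser.open('https://www.google.com/maps/place/' + urllib.parse.quote_plus(address))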
