Skip to content

Navigation Menu

Sign in
Appearance settings

Search code, repositories, users, issues, pull requests...

Provide feedback

We read every piece of feedback, and take your input very seriously.

Saved searches

Use saved searches to filter your results more quickly

Sign up
Appearance settings

Commit e450c2e

Browse files
committed
Update
1 parent 748a28f commit e450c2e

File tree

6 files changed

+113
-13
lines changed

6 files changed

+113
-13
lines changed

‎.vscode/settings.json‎

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,3 @@
1+ {
2+     "python.pythonPath": "/usr/bin/python"
3+ }

‎instapic.py‎

Lines changed: 49 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,49 @@
#!/usr/bin/env python
"""Instagram post image downloader.

Prompts for an Instagram post URL, reads the page's Open Graph
``og:image`` meta tag, and downloads the referenced image into the
current working directory.

Dependencies (pip / pip3): wget, beautifulsoup4, lxml
"""

from bs4 import BeautifulSoup
import wget

try:  # Python 3
    from urllib.request import urlopen
except ImportError:  # Python 2 fallback -- only ImportError means "wrong version"
    from urllib2 import urlopen
    input = raw_input  # noqa: F821 -- raw_input exists on Python 2 only

## User input (ANSI escapes colour the prompt green, then reset)
url = input("\033[1;32mEnter a Instagram Post URL : \033[1;m")

insta_post = urlopen(url)
bs = BeautifulSoup(insta_post, "lxml")

## Instagram exposes the post image URL via the "og:image" meta tag
metatag = bs.find("meta", {"property": "og:image"})

if metatag is not None:
    print(metatag["content"])
    print("\n")

    print("Image Started Downloading.......")

    ## Download the image via wget; the filename is derived from the URL
    filename = wget.download(metatag["content"])
    print("\n")

    print("Done")
    print("\n")
else:
    ## No og:image tag found -- likely not a public post page, or the
    ## page layout changed
    print("Error")
File renamed without changes.

‎og.py‎ renamed to ‎test/og.py‎

File renamed without changes.

‎test/weblink.py‎

Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,19 @@
#!/usr/bin/env python
"""Print every absolute http(s) link found on a fixed example page.

Fetches https://example.com through cfscrape (a Cloudflare-aware
``requests`` wrapper), parses the HTML, and prints the ``href`` of
every anchor whose target starts with ``http://`` or ``https://``.

Dependencies (pip / pip3): cfscrape, beautifulsoup4, lxml
"""

## bs4 replaces the unmaintained, Python-2-only BeautifulSoup 3 package
from bs4 import BeautifulSoup
#import urllib2
import cfscrape
import re

#html_page = urllib2.urlopen("https://example.com")

# Get the text at the set URL
scraper = cfscrape.create_scraper()

url = "https://example.com"
cfurl = scraper.get(url).content
## Explicit parser avoids bs4's "no parser specified" warning
soup = BeautifulSoup(cfurl, "lxml")

for link in soup.findAll('a', attrs={'href': re.compile("^(http|https)://")}):
    ## Print Output -- print() with one argument works on Python 2 and 3
    print(link.get('href'))

‎weblink.py‎

Lines changed: 42 additions & 13 deletions
Original file line numberDiff line numberDiff line change
#!/usr/bin/env python
"""Prompt for a URL and print every absolute http(s) link on that page."""

## Install Python pip Modules
# Python2 (pip):  pip install cfscrape beautifulsoup4 lxml
# Python3 (pip):  pip3 install cfscrape beautifulsoup4 lxml

import sys

## The parsing dependencies are identical on both Python major versions,
## so import them once, unconditionally -- a missing package now raises a
## clear ImportError instead of a later NameError.
from bs4 import BeautifulSoup
import cfscrape
import re

## Python-version shim: only the input() builtin differs between 2 and 3.
VER = 3 if sys.version_info >= (3, 0) else 2
if VER == 2:
    input = raw_input  # noqa: F821 -- raw_input exists on Python 2 only

## User input (ANSI escapes colour the prompt green, then reset)
url = input("\033[1;32mEnter a URL : \033[1;m")

## cfscrape wraps requests and solves Cloudflare's anti-bot challenge
scraper = cfscrape.create_scraper()
cfurl = scraper.get(url).content
soup = BeautifulSoup(cfurl, "lxml")

## Print every anchor whose href is an absolute http(s) URL
for link in soup.findAll('a', attrs={'href': re.compile("^(http|https)://")}):
    urls = link.get("href")
    print(urls)

0 commit comments

Comments
(0)

AltStyle によって変換されたページ (->オリジナル) /