Below you see the product of my first baby steps in programming. The purpose of the script is twofold:

1. Take the HTML of a specific website, process it, and return relevant info such as document id, text, and headline.
2. Generate a count of the words in all the articles.

The script works and does what it is supposed to; however, I cannot help but feel that I am missing a lot in terms of performance.
import re
import pandas as pd
from urllib.request import urlopen as uReq
from sklearn.feature_extraction.text import CountVectorizer

TAG_RE = re.compile(r'<[^>]+>')

def RemoveTags(text):
    """Remove all html tags"""
    return TAG_RE.sub('', text)

ESCAPES_RE = re.compile(r'\\.')

def RemoveEscapes(text):
    """Remove extra escape characters from encoding"""
    return ESCAPES_RE.sub('', text)

def ReadFromLink(link):
    """Read html from link and return raw html"""
    with uReq(link) as response:
        # str() on the raw bytes keeps escape sequences as literal text,
        # which RemoveEscapes strips out later
        html = str(response.read())
    return html.lower()

def ArticleRaw(html):
    """Find articles in html"""
    return re.findall(r'<doc>.*?</doc>', html)

def GetDocID(html):
    """Find document ids in html"""
    docids = re.findall(r'<docid>(.*?)</docid>', html)
    return [int(docid.strip()) for docid in docids]

def GetHeadline(html):
    """Find headlines in html"""
    headlines = re.findall(r'<headline>(.*?)</headline>', html)
    return [RemoveEscapes(RemoveTags(headline)) for headline in headlines]

def GetMainText(html):
    """Find maintext in html"""
    maintexts = re.findall(r'<text>(.*?)</text>', html)
    maintexts = [RemoveEscapes(RemoveTags(maintext)) for maintext in maintexts]
    # collapse runs of whitespace into single spaces
    return [' '.join(maintext.split()) for maintext in maintexts]

link = "http://..."  # the actual URL is not shown here
html = ReadFromLink(link)

ArticlesDict = {
    "docid": GetDocID(html),
    "raw_article": ArticleRaw(html),
    "headline": GetHeadline(html),
    "maintext": GetMainText(html),
}

def CountFeatures(documents):
    """Build a document-term matrix of word counts"""
    # Stem first?
    vector = CountVectorizer()
    x = vector.fit_transform(documents)
    df_features = pd.DataFrame(x.toarray(), columns=vector.get_feature_names())
    return df_features

df_features = CountFeatures(ArticlesDict['maintext'])
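For the second goal, the per-word totals can be read straight off that matrix; a minimal sketch building on the df_features frame above:

# Sketch: collapse the document-term matrix into one total per word
word_counts = df_features.sum(axis=0).sort_values(ascending=False)
print(word_counts.head(10))  # the ten most frequent words across all articles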
1 Answer
If I may suggest, using a tool like Beautiful Soup can greatly help you work with HTML elements in a simple way:
http://www.pythonforbeginners.com/python-on-the-web/web-scraping-with-beautifulsoup/
Here is a very brief example of how it operates:
from bs4 import BeautifulSoup
import requests

r = requests.get("http://any_url_you_want.com")
data = r.text
soup = BeautifulSoup(data, 'html.parser')  # name a parser explicitly

for text in soup.find_all('text'):
    # Here you do whatever you want with text, e.g.
    print(text.get_text())
You can adapt your methods to use these functions for whichever tags you need.
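For instance, here is a rough sketch of what your extraction could look like (assuming the pages really use <doc>, <docid>, <headline> and <text> tags as your regexes suggest; parse_articles is just an illustrative name):

from bs4 import BeautifulSoup

def parse_articles(html):
    """Sketch: pull docid, headline and maintext out of each <doc> element."""
    soup = BeautifulSoup(html, 'html.parser')
    articles = []
    for doc in soup.find_all('doc'):
        # assumes every <doc> contains these child tags
        articles.append({
            'docid': int(doc.find('docid').get_text().strip()),
            'headline': doc.find('headline').get_text().strip(),
            # get_text() drops nested tags; split/join collapses whitespace
            'maintext': ' '.join(doc.find('text').get_text().split()),
        })
    return articles

This also keeps each article's fields together in one dict, instead of relying on four separate lists staying in sync.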
Also check out this article; it explains quite well what you can do with it and is accessible for beginners.
- There is now also github.com/kennethreitz/requests-html :) – hjpotter92, Mar 3, 2018
- I've tried it out and it works! Thank you very much for the feedback. – Daniel Hansen, Mar 7, 2018
- Very happy to hear :) I will also take a look at requests-html, thanks hjpotter92 for the suggestion. – A. Romeu, Mar 7, 2018