Skip to content

Navigation Menu

Sign in
Appearance settings

Search code, repositories, users, issues, pull requests...

Provide feedback

We read every piece of feedback, and take your input very seriously.

Saved searches

Use saved searches to filter your results more quickly

Sign up
Appearance settings

Commit c80b5e7

Browse files
style: format code with autopep8
Format code with autopep8 This commit fixes the style issues introduced in 78af4d5 according to the output from Autopep8. Details: https://app.deepsource.com/gh/avinashkranjan/Amazing-Python-Scripts/transform/f6709fad-8cc1-4138-98d2-2d7671be2d41/
1 parent 78af4d5 commit c80b5e7

File tree

1 file changed

+167
-157
lines changed

1 file changed

+167
-157
lines changed

‎Fetch_Contributions/fetch_contributions.py

Lines changed: 167 additions & 157 deletions
Original file line numberDiff line numberDiff line change
@@ -8,163 +8,173 @@
88

99

1010
class Fetch_PullRequests:
    """Fetch the pull requests made by a user in a GitHub organization.

    The data is scraped from the organization's public GitHub web pages
    (no API token required) and written out as a markdown table.
    """

    def __init__(self, username, organization, filename):
        """
        :param username: GitHub username whose pull requests are fetched
        :param organization: GitHub organisation name
        :param filename: output filename (without extension); optional
        """
        self.ORG_URL = f"https://github.com/orgs/{organization}/repositories"
        self.URL = f"https://github.com/{organization}"
        self.organization = organization
        self.username = username
        self.filename = filename

    def _list_of_repositories(self):
        """List the repositories of the organisation.

        Returns
        -------
        list
            repository names (whitespace-stripped, de-duplicated)
        """
        page = requests.get(self.ORG_URL)
        tree = html.fromstring(page.content)
        # GitHub paginates the repository list; the pager element exposes the
        # total page count. The element is absent when there is only one page.
        number_of_pages = tree.xpath(
            '//*[@id="org-repositories"]/div/div/div[2]/div/em/@data-total-pages')

        # One XPath literal reused for every page instead of duplicating it.
        repo_xpath = (
            '//*[contains(concat( " ", @class, " " ), concat( " ", "wb-break-all", " " ))]'
            '//*[contains(concat( " ", @class, " " ), concat( " ", "d-inline-block", " " ))]/text()')

        repositories = []
        if len(number_of_pages) == 0:
            repositories.extend(tree.xpath(repo_xpath))
        else:
            for number in range(1, int(number_of_pages[0]) + 1):
                page_ = requests.get(self.ORG_URL + f"?page={number}")
                tree = html.fromstring(page_.content)
                repositories.extend(tree.xpath(repo_xpath))

        return list(pd.Series(list(set(repositories))).str.strip().values)

    def _extract_pullrequests(self, repo):
        """Fetch the pull requests of a repo authored by the user.

        Parameters
        ----------
        repo: str
            repository name

        Returns
        -------
        pandas.DataFrame
            columns - "Title to PR", "Link of PR", "Status(Merged/Closed/Open)"
        """
        # Accumulators for the three columns of the result frame.
        titles = []
        links = []
        statuses = []

        # XPath selectors reused on every result page.
        link_xpath = (
            '//*[contains(concat( " ", @class, " " ), concat( " ", "markdown-title", " " ))]/@href')
        status_xpath = (
            '//*[contains(concat( " ", @class, " " ), concat( " ", "pl-3", " " ))]/span/@aria-label')

        url = self.URL + f"/{repo}/pulls?q=is%3Apr+author%3A{self.username}"
        page = requests.get(url)
        tree = html.fromstring(page.content)
        # Determine how many pages of search results exist (absent => one).
        number_of_pages = tree.xpath(
            '//*[@id="repo-content-pjax-container"]/div/div[6]/div/em/@data-total-pages')

        if len(number_of_pages) == 0:
            # Titles may contain text inside <code> tags, so BeautifulSoup is
            # used to get the full rendered text rather than a bare XPath.
            soup = BeautifulSoup(page.text, 'html.parser')
            for tag in soup.find_all('a', attrs={'class': 'markdown-title'}):
                titles.append(tag.text.strip())
            links.extend(tree.xpath(link_xpath))
            statuses.extend(tree.xpath(status_xpath))
        else:
            for number in range(1, int(number_of_pages[0]) + 1):
                url = self.URL + \
                    f"/{repo}/pulls?page={number}&q=is%3Apr+author%3A{self.username}"
                page = requests.get(url)
                tree = html.fromstring(page.content)
                soup = BeautifulSoup(page.text, 'html.parser')
                for tag in soup.find_all('a', attrs={'class': 'markdown-title'}):
                    titles.append(tag.text.strip())
                links.extend(tree.xpath(link_xpath))
                statuses.extend(tree.xpath(status_xpath))

        # Build the dataframe from the collected columns.
        dataframe = pd.DataFrame.from_dict({
            "Title to PR": titles,
            "Link of PR": links,
            "Status(Merged/Closed/Open)": statuses,
        })

        # Normalise the status text, e.g. "Merged pull request" -> "Merged".
        dataframe['Status(Merged/Closed/Open)'] = dataframe[
            'Status(Merged/Closed/Open)'].astype(str).str.replace(
            " pull request", "", regex=False)
        # An empty frame has a non-object dtype; cast so the string
        # concatenation below still works.
        if dataframe['Link of PR'].dtype != 'O':
            dataframe['Link of PR'] = dataframe['Link of PR'].astype(str)
        # Scraped hrefs are site-relative; prefix the host to make them usable.
        dataframe['Link of PR'] = 'https://github.com' + \
            dataframe['Link of PR']

        return dataframe

    def get_pullrequests(self):
        """Collect the user's pull requests across all repositories and
        store them as a markdown table.

        Returns
        -------
        str
            confirmation that the markdown file was stored, or a message
            saying no pull requests were found.
        """
        # BUGFIX: DataFrame.append was removed in pandas 2.0 -- collect the
        # per-repository frames and concatenate them in one go instead.
        frames = [self._extract_pullrequests(repo)
                  for repo in self._list_of_repositories()]
        dataframe = pd.concat(
            frames, ignore_index=True) if frames else pd.DataFrame()

        # BUGFIX: an empty DataFrame still renders a non-empty (header-only)
        # markdown string, so the old `len(markdown) > 0` check could never
        # report "no pull requests". Test for emptiness explicitly.
        if dataframe.empty:
            return "No pull requests found !!"

        # Creating the markdown file.
        with open(f"{self.filename}.md", "w") as markdown_file:
            markdown_file.write(dataframe.to_markdown())

        return "Markdown File is successfully stored"
151157

152158

153159
if __name__ == "__main__":
    # Command-line front end: collect the user, the organisation and an
    # optional output filename, then fetch and store the pull-request table.
    parser = argparse.ArgumentParser()
    parser.add_argument("-u", "--username", action="store_true")
    parser.add_argument(
        "user", type=str, help="The name of the user to get the pull requests")
    parser.add_argument("-o", "--organization", action="store_true")
    parser.add_argument("organization_name", type=str,
                        help="the organisation where user made the pull requests")
    parser.add_argument("-f", "--file", nargs="?")
    parser.add_argument("filename", type=str, nargs="?",
                        help="filename to store the markdown table")
    args = parser.parse_args()

    # Fall back to a default name when no filename was supplied.
    file_name = args.filename if args.filename else "Markdown_file"

    if not (args.username and args.organization):
        print("Please pass atleast two arguments: '--username', '--organisation'")
    else:
        fetcher = Fetch_PullRequests(
            args.user, args.organization_name, file_name)
        print(fetcher.get_pullrequests())

0 commit comments

Comments
(0)

AltStyle によって変換されたページ (->オリジナル) /