Skip to content

Navigation Menu

Sign in
Appearance settings

Search code, repositories, users, issues, pull requests...

Provide feedback

We read every piece of feedback, and take your input very seriously.

Saved searches

Use saved searches to filter your results more quickly

Sign up
Appearance settings
This repository was archived by the owner on Jun 29, 2024. It is now read-only.

Commit b1a69c3

Browse files
Add files via upload
1 parent b5be041 commit b1a69c3

File tree

4 files changed

+94
-0
lines changed

4 files changed

+94
-0
lines changed

‎task1.py‎

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,17 @@
1+
import requests

# OpenWeatherMap API key.
# NOTE(review): secrets should not be hard-coded — load from an environment
# variable or config file instead; left in place only to preserve behavior.
api_key = '78c68b12139482d7e0784209bd15f555'

user_input = input("Enter city: ")

# Fetch current weather for the requested city (imperial units -> Fahrenheit).
# A timeout prevents the script from hanging forever on a stalled connection.
weather_data = requests.get(
    f"https://api.openweathermap.org/data/2.5/weather?q={user_input}&units=imperial&APPID={api_key}",
    timeout=10)

# Parse the JSON payload once instead of re-parsing the response body on
# every access (the original called .json() three separate times).
payload = weather_data.json()

if payload['cod'] == '404':
    print("No City Found")
else:
    weather = payload['weather'][0]['main']
    temp = round(payload['main']['temp'])

    print(f"The weather in {user_input} is: {weather}")
    # Fix: the original printed "oF"; the intended unit label is "°F".
    print(f"The temperature in {user_input} is: {temp}°F")

‎task2.py‎

Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,28 @@
1+
import requests
from bs4 import BeautifulSoup

# Get the HTML text. A timeout keeps a stalled server from hanging the script.
url = "https://quotes.toscrape.com/"
response = requests.get(url, timeout=10)
text = response.text

# Parse the text with Beautiful Soup
soup = BeautifulSoup(text, "lxml")

# Extract the unique author names on this page.
authors = soup.find_all("small", class_="author")
author_set = set(author.text.strip() for author in authors)

# Extract quotes
quotes = soup.find_all("span", class_="text")
quote_list = [quote.text.strip() for quote in quotes]

# Extract top ten tags
top_tags = soup.find("div", class_="tags-box")
tags = top_tags.find_all("a")
tag_list = [tag.text.strip() for tag in tags]


def get_page_authors(page_url):
    """Return the set of unique author names found on *page_url*.

    Implements the previously-empty stub using the same scraping pattern
    as the page-level code above (``<small class="author">`` elements).
    """
    page_response = requests.get(page_url, timeout=10)
    page_soup = BeautifulSoup(page_response.text, "lxml")
    page_authors = page_soup.find_all("small", class_="author")
    return set(author.text.strip() for author in page_authors)

‎task3.py‎

Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,32 @@
1+
import nltk
from nltk.tokenize import word_tokenize

# Download the tokenizer model that word_tokenize requires.
nltk.download('punkt')

# Predefined chatbot responses, keyed by trigger phrase.
responses = {
    "hi": "Hello! How can I assist you?",
    "how are you": "I'm just a program, so I don't have feelings, but I'm here to help!",
    # Add more predefined responses here
}


def preprocess_input(user_input):
    """Lower-case *user_input* and tokenize it into a list of words."""
    tokens = word_tokenize(user_input.lower())
    # Additional preprocessing steps if needed
    return tokens


def chatbot_response(user_input):
    """Return the canned response whose trigger shares a word with the input.

    Fix: the original tested substring containment (``token in query``), so a
    single-letter token like "i" falsely matched the key "hi".  Matching is
    now done on whole words via set intersection.
    """
    token_set = set(preprocess_input(user_input))
    for query, response in responses.items():
        # Compare whole words, not substrings.
        if token_set & set(query.split()):
            return response
    return "I didn't understand. Can you please rephrase?"


if __name__ == "__main__":
    # Simple REPL loop; type "exit" to quit.
    while True:
        user_query = input("You: ")
        if user_query.lower() == "exit":
            print("Chatbot: Goodbye!")
            break
        print("Chatbot:", chatbot_response(user_query))

‎task4.py‎

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,17 @@
1+
import os

from PyPDF2 import PdfFileReader, PdfFileWriter


def split_pdf(input_pdf, output_folder):
    """Split *input_pdf* into one single-page PDF per page.

    Page N of the input is written to ``<output_folder>/page_N.pdf``
    (1-indexed).  The output folder is created if it does not exist —
    the original crashed with FileNotFoundError on a missing directory.

    NOTE(review): PdfFileReader/PdfFileWriter are the legacy PyPDF2 1.x API
    (renamed PdfReader/PdfWriter and removed in PyPDF2 3.x) — kept here to
    match whatever version this project pins; confirm before upgrading.
    """
    pdf_reader = PdfFileReader(input_pdf)
    total_pages = pdf_reader.numPages

    # Robustness fix: ensure the destination directory exists so the
    # open() below cannot fail on a missing path.
    os.makedirs(output_folder, exist_ok=True)

    for page_num in range(total_pages):
        # Fresh writer per page so each output file holds exactly one page.
        pdf_writer = PdfFileWriter()
        pdf_writer.addPage(pdf_reader.getPage(page_num))

        output_file = f"{output_folder}/page_{page_num + 1}.pdf"
        with open(output_file, "wb") as output_pdf:
            pdf_writer.write(output_pdf)

        print(f"Page {page_num + 1} saved as {output_file}")

0 commit comments

Comments
(0)

Page converted by AltStyle (→ original) /