Skip to content

Navigation Menu

Sign in
Appearance settings

Search code, repositories, users, issues, pull requests...

Provide feedback

We read every piece of feedback, and take your input very seriously.

Saved searches

Use saved searches to filter your results more quickly

Sign up
Appearance settings
This repository was archived by the owner on Jun 29, 2024. It is now read-only.

Commit e08c54e

Browse files
Add internship tasks
Python internship tasks
1 parent b5be041 commit e08c54e

File tree

6 files changed

+4458
-0
lines changed

6 files changed

+4458
-0
lines changed

‎CHETANKALE/Task 1/Weather App.py‎

Lines changed: 80 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,80 @@
1+
import requests
2+
import matplotlib.pyplot as plt
3+
from datetime import datetime
4+
5+
# WeatherAPI credentials and base endpoint.
# NOTE(review): hardcoded API key committed to source control — should be
# loaded from an environment variable or a config file instead.
API_KEY = '7136d46d4edf4c9bbcf124053242205'
BASE_URL = 'http://api.weatherapi.com/v1/'
7+
8+
def get_weather_data(city):
    """Fetch current conditions and a 5-day forecast for *city* from WeatherAPI.

    Returns a ``(current_weather, forecast)`` tuple of parsed JSON dicts,
    or ``(None, None)`` when either request fails.
    """
    # Pass the city via ``params`` so it is URL-encoded (spaces, accents, ...);
    # interpolating it straight into the URL produced malformed requests.
    params = {'key': API_KEY, 'q': city}
    try:
        # Timeouts keep the script from hanging forever on a dead host.
        current_weather_response = requests.get(
            f"{BASE_URL}current.json", params=params, timeout=10)
        forecast_response = requests.get(
            f"{BASE_URL}forecast.json", params={**params, 'days': 5}, timeout=10)
    except requests.RequestException as exc:
        print(f"Error fetching weather data: {exc}")
        return None, None

    if current_weather_response.status_code == 200 and forecast_response.status_code == 200:
        return current_weather_response.json(), forecast_response.json()

    print(f"Error fetching weather data. Status codes: {current_weather_response.status_code}, {forecast_response.status_code}")
    if current_weather_response.status_code != 200:
        print(f"Current weather response: {current_weather_response.text}")
    if forecast_response.status_code != 200:
        print(f"Forecast response: {forecast_response.text}")
    return None, None
26+
27+
def display_current_weather(current_weather):
    """Pretty-print the current-conditions section of a WeatherAPI response."""
    loc = current_weather['location']
    now = current_weather['current']
    report = [
        "Current Weather:",
        f"Location: {loc['name']}, {loc['country']}",
        f"Temperature: {now['temp_c']}°C",
        f"Weather: {now['condition']['text']}",
        f"Humidity: {now['humidity']}%",
        f"Wind Speed: {now['wind_kph']} kph",
        "-" * 40,
    ]
    print("\n".join(report))
37+
38+
def display_forecast(forecast):
    """Print one "date: temp, condition" line per day of the 5-day forecast."""
    print("5-Day Forecast:")
    for entry in forecast['forecast']['forecastday']:
        details = entry['day']
        print(f"{entry['date']}: {details['avgtemp_c']}°C, {details['condition']['text']}")
    print("-" * 40)
48+
49+
def plot_temperature_trends(forecast):
    """Plot the forecast's daily average temperatures as a line chart."""
    days = forecast['forecast']['forecastday']
    dates = [datetime.strptime(entry['date'], '%Y-%m-%d') for entry in days]
    temps = [entry['day']['avgtemp_c'] for entry in days]

    plt.figure(figsize=(10, 5))
    plt.plot(dates, temps, marker='o', linestyle='-', color='b')
    plt.title('Temperature Trends Over Next 5 Days')
    plt.xlabel('Date')
    plt.ylabel('Temperature (°C)')
    plt.grid(True)
    plt.show()
67+
68+
def main():
    """Prompt for a city, fetch its weather, and show current/forecast/plot."""
    city = input("Enter the city name: ")
    current_weather, forecast = get_weather_data(city)

    # Guard clause: bail out early if either request failed.
    if not (current_weather and forecast):
        print("Could not retrieve weather data.")
        return

    display_current_weather(current_weather)
    display_forecast(forecast)
    plot_temperature_trends(forecast)


if __name__ == "__main__":
    main()

‎CHETANKALE/Task 2/Web Scraper.py‎

Lines changed: 70 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,70 @@
1+
import requests
2+
from bs4 import BeautifulSoup
3+
import pandas as pd
4+
import json
5+
6+
def fetch_html(url):
    """Fetch HTML content from a given URL.

    Returns the response body as text, or ``None`` on a non-200 status
    or a network error.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3"
    }
    try:
        # A timeout keeps the scraper from hanging forever on a dead host.
        response = requests.get(url, headers=headers, timeout=15)
    except requests.RequestException as exc:
        print(f"Failed to fetch the page. Error: {exc}")
        return None
    if response.status_code == 200:
        return response.text
    print(f"Failed to fetch the page. Status code: {response.status_code}")
    return None
17+
18+
def parse_html(html):
    """Parse HTML and extract the first ``wikitable`` as tabular data.

    Returns ``(headers, data)`` where ``headers`` is the list of column
    titles and ``data`` is a list of per-row value lists, or
    ``(None, None)`` if no wikitable is present.
    """
    soup = BeautifulSoup(html, 'html.parser')
    table = soup.find('table', {'class': 'wikitable'})
    if not table:
        print("Table not found")
        return None, None

    rows = table.find_all('tr')
    if not rows:
        return [], []

    # Bug fix: take headers from the first row only. ``table.find_all('th')``
    # also picked up row-label <th> cells in the body (Wikipedia marks the
    # first column as <th>), producing more headers than columns and making
    # ``pd.DataFrame(data, columns=headers)`` fail.
    headers = [header.text.strip() for header in rows[0].find_all('th')]

    data = []
    for row in rows[1:]:  # Skip the header row.
        # Include <th> cells so the row-label first column is not dropped.
        cells = row.find_all(['th', 'td'])
        row_data = [cell.text.strip() for cell in cells]
        if row_data:
            data.append(row_data)
    return headers, data
37+
38+
def save_to_csv(headers, data, filename):
    """Save tabular data to a CSV file.

    ``headers`` are the column names; ``data`` is a list of row lists.
    """
    df = pd.DataFrame(data, columns=headers)
    df.to_csv(filename, index=False)
    # Bug fix: the confirmation message never interpolated the filename.
    print(f"Data saved to {filename}")
43+
44+
def save_to_json(headers, data, filename):
    """Save tabular data to a JSON file as a list of row dicts.

    Each row list is zipped with ``headers`` into one dict per row.
    """
    data_dict = [dict(zip(headers, row)) for row in data]
    with open(filename, 'w') as json_file:
        json.dump(data_dict, json_file, indent=4)
    # Bug fix: the confirmation message never interpolated the filename.
    print(f"Data saved to {filename}")
50+
51+
def main():
    """Scrape the UN population table from Wikipedia and save it as CSV + JSON."""
    url = "https://en.wikipedia.org/wiki/List_of_countries_by_population_(United_Nations)"
    html = fetch_html(url)

    # Guard clauses: stop at the first failed stage.
    if not html:
        print("Failed to retrieve HTML content.")
        return

    headers, data = parse_html(html)
    if not (headers and data):
        print("No data found to save.")
        return

    save_to_csv(headers, data, 'countries_population.csv')
    save_to_json(headers, data, 'countries_population.json')


if __name__ == "__main__":
    main()

0 commit comments

Comments
(0)

AltStyle によって変換されたページ (->オリジナル) /