This repository was archived by the owner on Jun 29, 2024. It is now read-only.

Commit 9c656a1

Merge commit: 2 parents 170b7a3 + a91d3ec

File tree

12 files changed: +647 additions, -0 deletions

Calc.py

Lines changed: 30 additions & 0 deletions
# A simple Python program to perform basic arithmetic: addition, subtraction, multiplication, division
print('Please select a number to perform an arithmetic operation')
print('1. Addition')
print('2. Subtraction')
print('3. Multiplication')
print('4. Division')
print('5. Exit')
a = int(input('Enter the number of the operation to perform: '))

def ari(a, var1, var2):
    if a == 1:
        print(var1 + var2)
    elif a == 2:
        print(var1 - var2)
    elif a == 3:
        print(var1 * var2)
    elif a == 4:
        if var2 == 0:
            print('Cannot divide by zero')
        else:
            print(var1 / var2)

# Read the two operands and dispatch
if 0 < a < 5:
    var1 = int(input('Enter first number: '))
    var2 = int(input('Enter second number: '))
    ari(a, var1, var2)
elif a == 5:
    exit()
else:
    print('Invalid option')
    print('Please select 1/2/3/4/5 only')
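
One fragility worth noting: int(input(...)) raises ValueError on non-numeric input, so a single typo crashes the menu. A minimal sketch of a guard, using a hypothetical read_int helper that is not part of this commit:

# Sketch: re-prompt on bad input instead of crashing.
# read_int is a hypothetical helper, not part of the committed file.
def read_int(prompt):
    while True:
        try:
            return int(input(prompt))
        except ValueError:
            print('Please enter a whole number')

a = read_int('Enter the number of the operation to perform: ')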

TASK-10.py

Lines changed: 43 additions & 0 deletions
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt

# Load the Iris dataset bundled with Seaborn
iris = sns.load_dataset("iris")
numeric_iris = iris.drop(columns='species')  # corr() needs numeric columns only

# Display the first few rows of the dataset
print("First few rows of the dataset:")
print(iris.head())

# Summary statistics
print("\nSummary statistics:")
print(iris.describe())

# Check for missing values
print("\nMissing values:")
print(iris.isnull().sum())

# Visualizations
# Pairplot (pairplot creates its own figure, so set the title on that figure)
g = sns.pairplot(iris, hue="species")
g.fig.suptitle("Pairplot of Iris Dataset", y=1.02)
plt.show()

# Boxplot
plt.figure(figsize=(10, 6))
sns.boxplot(data=iris, orient="h")
plt.title("Boxplot of Iris Dataset")
plt.show()

# Histograms (DataFrame.hist creates its own figure, so pass figsize directly)
iris.hist(figsize=(10, 6))
plt.suptitle("Histograms of Iris Dataset")
plt.show()

# Correlation heatmap
plt.figure(figsize=(8, 6))
sns.heatmap(numeric_iris.corr(), annot=True, cmap="coolwarm")
plt.title("Correlation Heatmap of Iris Dataset")
plt.show()
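
If this script runs without a display (e.g. on a server or in CI), plt.show() has nothing to render. A small variation, assuming the non-interactive Agg backend, saves the figures to files instead; the filenames are illustrative:

# Sketch: save figures instead of displaying them (headless environments).
import matplotlib
matplotlib.use("Agg")  # select the non-interactive backend before drawing
import matplotlib.pyplot as plt
import seaborn as sns

iris = sns.load_dataset("iris")
g = sns.pairplot(iris, hue="species")
g.savefig("pairplot.png", dpi=150)  # PairGrid exposes savefig directly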

TASK-11.py

Lines changed: 40 additions & 0 deletions
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error

# Fetch the Boston housing dataset from the original CMU source
# (each record spans two physical rows in the file, hence the hstack below)
data_url = "http://lib.stat.cmu.edu/datasets/boston"
raw_df = pd.read_csv(data_url, sep=r"\s+", skiprows=22, header=None)
data = np.hstack([raw_df.values[::2, :], raw_df.values[1::2, :2]])
target = raw_df.values[1::2, 2]

# Split the dataset into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(data, target, test_size=0.2, random_state=42)

# Create and train the linear regression model
model = LinearRegression()
model.fit(X_train, y_train)

# Make predictions on the training and testing sets
y_train_pred = model.predict(X_train)
y_test_pred = model.predict(X_test)

# Calculate the mean squared error for training and testing sets
train_mse = mean_squared_error(y_train, y_train_pred)
test_mse = mean_squared_error(y_test, y_test_pred)

print("Train MSE:", train_mse)
print("Test MSE:", test_mse)

# Plot residuals (prediction minus actual) against predicted values
plt.scatter(y_train_pred, y_train_pred - y_train, c='blue', marker='o', label='Training data')
plt.scatter(y_test_pred, y_test_pred - y_test, c='green', marker='s', label='Test data')
plt.xlabel('Predicted values')
plt.ylabel('Residuals')
plt.legend(loc='upper left')
plt.hlines(y=0, xmin=min(y_train_pred.min(), y_test_pred.min()), xmax=max(y_train_pred.max(), y_test_pred.max()), color='red')
plt.title('Residuals plot')
plt.show()
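
MSE is in squared target units and hard to read on its own; R-squared is a common, scale-free companion. A short sketch continuing from the variables above, using sklearn.metrics.r2_score (not in the original script):

# Sketch: report R^2 alongside MSE for a scale-free fit summary.
from sklearn.metrics import r2_score

print("Train R^2:", r2_score(y_train, y_train_pred))
print("Test R^2:", r2_score(y_test, y_test_pred))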

TASK-12.py

Lines changed: 66 additions & 0 deletions
from PIL import Image
import os

def get_size_format(b, factor=1024, suffix="B"):
    """
    Scale bytes to its proper byte format.
    e.g.: 1253656 => '1.20MB', 1253656678 => '1.17GB'
    """
    for unit in ["", "K", "M", "G", "T", "P", "E", "Z"]:
        if b < factor:
            return f"{b:.2f}{unit}{suffix}"
        b /= factor
    return f"{b:.2f}Y{suffix}"

def compress_img(image_name, new_size_ratio=0.9, quality=90, width=None, height=None, to_jpg=True):
    try:
        # Load the image into memory
        img = Image.open(image_name)

        # Print the original image dimensions
        print("[*] Image shape:", img.size)

        # Get the original image size in bytes
        image_size = os.path.getsize(image_name)
        print("[*] Size before compression:", get_size_format(image_size))

        if width and height:
            # If width and height are set, resize to those exact dimensions
            img = img.resize((width, height), Image.LANCZOS)
        elif new_size_ratio < 1.0:
            # If the resizing ratio is below 1.0, scale width & height by it to reduce image size
            img = img.resize((int(img.size[0] * new_size_ratio), int(img.size[1] * new_size_ratio)), Image.LANCZOS)

        # Split the filename and extension
        filename, ext = os.path.splitext(image_name)

        # Make a new filename appending "_compressed" to the original file name
        if to_jpg:
            # Change the extension to JPEG
            new_filename = f"{filename}_compressed.jpg"
            # Ensure the image is in RGB mode for JPEG (JPEG has no alpha channel)
            if img.mode in ("RGBA", "LA"):
                img = img.convert("RGB")
        else:
            # Retain the same extension as the original image
            new_filename = f"{filename}_compressed{ext}"

        # Save the compressed image
        img.save(new_filename, optimize=True, quality=quality)

        # Print the new image dimensions
        print("[+] New Image shape:", img.size)

        # Get the new image size in bytes
        new_image_size = os.path.getsize(new_filename)
        print("[*] Size after compression:", get_size_format(new_image_size))
        print(f"[*] Compressed image saved as: {new_filename}")

    except FileNotFoundError:
        print("Error: The file was not found.")
    except OSError as e:
        print(f"Error: {e}")

# Example usage:
input_image = input("Enter the path to the image: ")
compress_img(input_image, new_size_ratio=0.8, quality=80, width=800, height=600)
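
Note that passing width=800 and height=600 forces every image to a 4:3 shape, which can distort it. A usage sketch that relies on the ratio path instead, preserving the original proportions ("photo.png" is an illustrative path, not part of the commit):

# Sketch: compress by ratio only, keeping the aspect ratio intact.
compress_img("photo.png", new_size_ratio=0.8, quality=80)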

TASK-5.py

Lines changed: 65 additions & 0 deletions
import requests
import datetime

# Your OpenWeatherMap API key
API_KEY = '69e7dd8a8069d4066de2a18ea5996e36'
BASE_URL = 'http://api.openweathermap.org/data/2.5/'

# Function to get current weather data
def get_current_weather(city):
    url = f"{BASE_URL}weather?q={city}&appid={API_KEY}&units=metric"
    response = requests.get(url)
    return response.json()

# Function to get weather forecast data
def get_forecast(city):
    url = f"{BASE_URL}forecast?q={city}&appid={API_KEY}&units=metric"
    response = requests.get(url)
    return response.json()

# Function to display weather data
def display_weather_data(city):
    current_weather = get_current_weather(city)
    forecast = get_forecast(city)

    # The current-weather endpoint returns "cod" as an int, the forecast endpoint as a string
    if current_weather.get("cod") != 200 or forecast.get("cod") != "200":
        print("Failed to retrieve data. Please check the city name or API key.")
        return

    # Current weather
    print(f"\nCurrent weather in {city.capitalize()}:")
    print(f"Temperature: {current_weather['main']['temp']}°C")
    print(f"Weather: {current_weather['weather'][0]['description']}")
    print(f"Humidity: {current_weather['main']['humidity']}%")
    print(f"Wind Speed: {current_weather['wind']['speed']} m/s")

    # Forecast
    print(f"\n5-Day Forecast for {city.capitalize()}:")
    for item in forecast['list']:
        timestamp = item['dt']
        date_time = datetime.datetime.fromtimestamp(timestamp)
        if date_time.hour == 12:  # Show data for 12 PM each day
            temp = item['main']['temp']
            weather = item['weather'][0]['description']
            print(f"{date_time.strftime('%Y-%m-%d %H:%M:%S')}: {temp}°C, {weather}")

    # Temperature trends (average temperature per day)
    temp_trends = {}
    for item in forecast['list']:
        date = datetime.datetime.fromtimestamp(item['dt']).date()
        if date not in temp_trends:
            temp_trends[date] = []
        temp_trends[date].append(item['main']['temp'])

    print(f"\nTemperature Trends in {city.capitalize()}:")
    for date, temps in temp_trends.items():
        avg_temp = sum(temps) / len(temps)
        print(f"{date}: {avg_temp:.2f}°C")

# Main function
def main():
    city = input("Enter the city name: ")
    display_weather_data(city)

if __name__ == "__main__":
    main()
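
Building the query string with an f-string works for simple names but breaks on cities containing spaces or non-ASCII characters. requests can URL-encode parameters itself via params=; a sketch of get_current_weather rewritten that way (same endpoint and key):

# Sketch: let requests handle URL encoding of the query parameters.
def get_current_weather(city):
    response = requests.get(
        f"{BASE_URL}weather",
        params={"q": city, "appid": API_KEY, "units": "metric"},
    )
    return response.json()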

TASK-6.py

Lines changed: 97 additions & 0 deletions
import requests
from bs4 import BeautifulSoup
import pandas as pd
import json
from urllib.parse import urljoin

# Function to check robots.txt for scraping permission
# (coarse check: refuses any site whose robots.txt disallows any rooted path)
def check_robots_txt(base_url):
    robots_url = urljoin(base_url, '/robots.txt')
    response = requests.get(robots_url)
    if response.status_code == 200:
        robots_txt = response.text
        if "Disallow: /" in robots_txt:
            return False
        return True
    return False

# Function to list all tables in the HTML
def list_tables(soup):
    tables = soup.find_all("table")
    if not tables:
        raise Exception("No tables found on the webpage.")
    table_summaries = []
    for i, table in enumerate(tables):
        summary = table.attrs.get("summary", f"Table {i+1}")
        table_summaries.append(summary)
    return tables, table_summaries

# Function to extract data from the selected table
def extract_data(table):
    data = []
    headers = [header.text.strip() for header in table.find_all("th")]
    rows = table.find_all("tr")[1:]  # Skip the header row
    for row in rows:
        cells = row.find_all("td")
        row_data = [cell.text.strip() for cell in cells]
        data.append(row_data)
    return headers, data

# Main function to perform web scraping
def main():
    # Read the URL from the user
    base_url = input("Enter the URL of the website to scrape: ")

    # Check if scraping is allowed
    if not check_robots_txt(base_url):
        print("It is not possible to perform web scraping on this website.")
        return

    # Send a GET request to fetch the raw HTML content
    response = requests.get(base_url)
    if response.status_code != 200:
        raise Exception(f"Failed to load page {base_url}")

    # Parse the content with BeautifulSoup
    soup = BeautifulSoup(response.content, "html.parser")

    # List all tables
    try:
        tables, table_summaries = list_tables(soup)
    except Exception as e:
        print(f"Error during table listing: {e}")
        return

    # Display the tables to the user and ask for a selection
    print("Tables found on the webpage:")
    for i, summary in enumerate(table_summaries):
        print(f"{i + 1}: {summary}")

    try:
        table_index = int(input("Enter the number of the table you want to scrape: ")) - 1
        if table_index < 0 or table_index >= len(tables):
            raise ValueError("Invalid table number selected.")
    except ValueError as e:
        print(f"Error during table selection: {e}")
        return

    # Extract data from the selected table
    try:
        headers, data = extract_data(tables[table_index])
    except Exception as e:
        print(f"Error during data extraction: {e}")
        return

    # Convert to DataFrame
    df = pd.DataFrame(data, columns=headers)

    # Save to CSV
    df.to_csv("scraped_data.csv", index=False)

    # Save to JSON
    df.to_json("scraped_data.json", orient="records")

    print("Data has been scraped and saved to scraped_data.csv and scraped_data.json")

if __name__ == "__main__":
    main()
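
The substring test for "Disallow: /" is very coarse: it refuses any site whose robots.txt disallows any rooted path for any agent, and it ignores per-agent rules entirely. The standard library ships a real parser in urllib.robotparser; a sketch of a stricter, per-URL check:

# Sketch: a per-URL robots.txt check using the standard-library parser.
from urllib.parse import urljoin
from urllib.robotparser import RobotFileParser

def can_scrape(base_url, path="/"):
    rp = RobotFileParser()
    rp.set_url(urljoin(base_url, "/robots.txt"))
    rp.read()  # fetch and parse robots.txt
    return rp.can_fetch("*", urljoin(base_url, path))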

TASK-7.py

Lines changed: 26 additions & 0 deletions
import nltk
from nltk.chat.util import Chat, reflections

# Define pairs of regex patterns and candidate responses
pairs = [
    (r'hi|hello|hey', ['Hello!', 'Hey there!', 'Hi! How can I help you?']),
    (r'how are you?', ['I\'m doing well, thank you!', 'I\'m good, thanks for asking!']),
    (r'what\'s your name\??', ['I\'m a chatbot!', 'You can call me ChatBot.']),
    (r'(.*) your name\??', ['I\'m a chatbot!', 'You can call me ChatBot.']),
    # Add more patterns and responses as needed
]

# Create a Chatbot instance
chatbot = Chat(pairs, reflections)

print("Welcome! Type 'quit' to end the conversation.")

# Start the conversation loop
while True:
    user_input = input("You: ")
    if user_input.lower() == 'quit':
        print("ChatBot: Bye! Have a great day!")
        break
    else:
        response = chatbot.respond(user_input)
        print("ChatBot:", response)
