From 74388b24669b2b4b9037cb15dc50133bcc9c00c3 Mon Sep 17 00:00:00 2001
From: "pixeebot[bot]" <104101892+pixeebot[bot]@users.noreply.github.com>
Date: Sat, 2 Nov 2024 03:42:12 +0000
Subject: [PATCH] Automatically Close Resources

---
 .../google-search-newsletter.py           | 18 +++---
 Image-Captioning/image-captioning.py      | 20 +++----
 Lyrics_Genius_API/lyrics.py               |  1 -
 PDFToWord/main.py                         |  1 -
 Social-Media-Links-From-Website/script.py | 58 +++++++++----------
 Tarot Reader/tarot_card_reader.py         |  7 +--
 Text_Summary/text_summary.py              |  4 +-
 Video to PDF/script.py                    |  9 ++-
 chat_bot/_Project.py                      | 12 ++--
 9 files changed, 58 insertions(+), 72 deletions(-)

diff --git a/Google-Search-Newsletter/google-search-newsletter.py b/Google-Search-Newsletter/google-search-newsletter.py
index 038223cee1..dc3ee18972 100644
--- a/Google-Search-Newsletter/google-search-newsletter.py
+++ b/Google-Search-Newsletter/google-search-newsletter.py
@@ -75,16 +75,14 @@ def scrape_news():
 
     # get all elements containing links for each news title
     all_links = browser.find_elements_by_xpath('//g-card/div/div/div[2]/a')
-
-    # open file for writing
-    file = open(newsletter_file, 'w')
-
-    # loop over each title and link, print each to the file
-    for heading, link in zip(all_headings, all_links):
-        file.write('\n\n')
-        file.write(heading.text)
-        file.write('\n')
-        file.write(link.get_attribute('href'))
+    with open(newsletter_file, 'w') as file:
+
+        # loop over each title and link, print each to the file
+        for heading, link in zip(all_headings, all_links):
+            file.write('\n\n')
+            file.write(heading.text)
+            file.write('\n')
+            file.write(link.get_attribute('href'))
 
     browser.close()
     print('Done. Search results exported to "newsletter.txt"')
diff --git a/Image-Captioning/image-captioning.py b/Image-Captioning/image-captioning.py
index 039519f354..1c5303a579 100644
--- a/Image-Captioning/image-captioning.py
+++ b/Image-Captioning/image-captioning.py
@@ -188,18 +188,14 @@ def data_generator(train_description, encoding_train, word_to_idx, max_len, batc
             yield [[np.array(x1), np.array(x2)], np.array(y)]
             x1, x2, y = [], [], []
             n = 0
-
-
-# WORD EMBEDDINGS
-# The text data should be embedded before passing to RNN/LSTM layer
-f = open("files/glove.6B.50d.txt", encoding='utf8')
-embedding_index = {}
-
-for line in f:
-    values = line.split()
-    word = values[0]
-    word_embedding = np.array(values[1:], dtype='float')
-    embedding_index[word] = word_embedding
+with open("files/glove.6B.50d.txt", encoding='utf8') as f:
+    embedding_index = {}
+
+    for line in f:
+        values = line.split()
+        word = values[0]
+        word_embedding = np.array(values[1:], dtype='float')
+        embedding_index[word] = word_embedding
 
 
 def get_embedding_matrix(vocab_size=2574):
diff --git a/Lyrics_Genius_API/lyrics.py b/Lyrics_Genius_API/lyrics.py
index dc0fcc82d8..7b279605b0 100644
--- a/Lyrics_Genius_API/lyrics.py
+++ b/Lyrics_Genius_API/lyrics.py
@@ -2,7 +2,6 @@
 
 # File for writing the Lyrics
 filename = input('Enter a filename: ') or 'Lyrics.txt'
-file = open(filename, "w+")
 
 # Acquire a Access Token to connect with Genius API
 genius = lg.Genius(
diff --git a/PDFToWord/main.py b/PDFToWord/main.py
index ae2e973f9b..14cca33a7e 100644
--- a/PDFToWord/main.py
+++ b/PDFToWord/main.py
@@ -5,7 +5,6 @@
 # Take PDF's path as input
 pdf = input("Enter the path to your file: ")
 assert os.path.exists(pdf), "File not found at, "+str(pdf)
-f = open(pdf, 'r+')
 
 # Ask for custom name for the word doc
 doc_name_choice = input(
diff --git a/Social-Media-Links-From-Website/script.py b/Social-Media-Links-From-Website/script.py
index 56a3e9d13d..ba39fbc219 100644
--- a/Social-Media-Links-From-Website/script.py
+++ b/Social-Media-Links-From-Website/script.py
@@ -57,36 +57,34 @@
 
 # Reading sites from csv file and writing output to a new csv file
 elif val == 2:
-
-    # Taking file path as input and opening it
-    csv_file = open(input("\nEnter file path : "))
-
-    # Iterating through links in csv file
-    for link in csv_file:
-        link = str(link).strip()
-        r = req.get(link)
-        # print(r)
-
-        if r.status_code == 200:
-            found_links = {
-                "url": link,
-                "instagram": "",
-                "facebook": "",
-                "twitter": "",
-                "linkedin": "",
-                "youtube": "",
-            }
-            all_links = re.findall(
-                r"\b((?:https?://)?(?:(?:www\.)?(?:[\da-z\.-]+)\.(?:[a-z]{2,6})|(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)|(?:(?:[0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|(?:[0-9a-fA-F]{1,4}:){1,7}:|(?:[0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|(?:[0-9a-fA-F]{1,4}:){1,5}(?::[0-9a-fA-F]{1,4}){1,2}|(?:[0-9a-fA-F]{1,4}:){1,4}(?::[0-9a-fA-F]{1,4}){1,3}|(?:[0-9a-fA-F]{1,4}:){1,3}(?::[0-9a-fA-F]{1,4}){1,4}|(?:[0-9a-fA-F]{1,4}:){1,2}(?::[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:(?:(?::[0-9a-fA-F]{1,4}){1,6})|:(?:(?::[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(?::[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(?:ffff(?::0{1,4}){0,1}:){0,1}(?:(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])|(?:[0-9a-fA-F]{1,4}:){1,4}:(?:(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])))(?::[0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])?(?:/[\w\.-]*)*/?)\b",
-                r.text)
-            # print(all_links)
-            for i in all_links:
-                for j in media_links:
-                    if i.find(j)> 0:
-                        found_links[j] = i
-            output_link_format.append(found_links)
-        else:
-            print(link, "did not return status code 200")
+    with open(input("\nEnter file path : ")) as csv_file:
+
+        # Iterating through links in csv file
+        for link in csv_file:
+            link = str(link).strip()
+            r = req.get(link)
+            # print(r)
+
+            if r.status_code == 200:
+                found_links = {
+                    "url": link,
+                    "instagram": "",
+                    "facebook": "",
+                    "twitter": "",
+                    "linkedin": "",
+                    "youtube": "",
+                }
+                all_links = re.findall(
+                    r"\b((?:https?://)?(?:(?:www\.)?(?:[\da-z\.-]+)\.(?:[a-z]{2,6})|(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)|(?:(?:[0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|(?:[0-9a-fA-F]{1,4}:){1,7}:|(?:[0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|(?:[0-9a-fA-F]{1,4}:){1,5}(?::[0-9a-fA-F]{1,4}){1,2}|(?:[0-9a-fA-F]{1,4}:){1,4}(?::[0-9a-fA-F]{1,4}){1,3}|(?:[0-9a-fA-F]{1,4}:){1,3}(?::[0-9a-fA-F]{1,4}){1,4}|(?:[0-9a-fA-F]{1,4}:){1,2}(?::[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:(?:(?::[0-9a-fA-F]{1,4}){1,6})|:(?:(?::[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(?::[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(?:ffff(?::0{1,4}){0,1}:){0,1}(?:(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])|(?:[0-9a-fA-F]{1,4}:){1,4}:(?:(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])))(?::[0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])?(?:/[\w\.-]*)*/?)\b",
+                    r.text)
+                # print(all_links)
+                for i in all_links:
+                    for j in media_links:
+                        if i.find(j)> 0:
+                            found_links[j] = i
+                output_link_format.append(found_links)
+            else:
+                print(link, "did not return status code 200")
 
 # print(output_link_format)
 
 # Generating output csv file
diff --git a/Tarot Reader/tarot_card_reader.py b/Tarot Reader/tarot_card_reader.py
index dc3a695d32..a848739ee0 100644
--- a/Tarot Reader/tarot_card_reader.py
+++ b/Tarot Reader/tarot_card_reader.py
@@ -1,9 +1,6 @@
 import random
-
-
-# get tarot deck of 78 cards and 'predictions' from tarot.txt
-file_handle = open("./Tarot Reader/tarot.txt", "r")
-cards = file_handle.readlines()
+with open("./Tarot Reader/tarot.txt", "r") as file_handle:
+    cards = file_handle.readlines()
 tarot_deck = []
 for card in cards:
     tarot_deck.append(card)
diff --git a/Text_Summary/text_summary.py b/Text_Summary/text_summary.py
index c7d90c8d5c..930f051619 100644
--- a/Text_Summary/text_summary.py
+++ b/Text_Summary/text_summary.py
@@ -19,8 +19,8 @@ def read_article(file_name):
     :param file_name: Path of text file (line 12)
     :return: sentences
     """
-    file = open(file_name, 'r', encoding="utf-8")
-    filedata = file.readlines()
+    with open(file_name, 'r', encoding="utf-8") as file:
+        filedata = file.readlines()
     article = filedata[0].split(". ")
 
     sentences = []
diff --git a/Video to PDF/script.py b/Video to PDF/script.py
index f9cf68c5f2..8359a6bc20 100644
--- a/Video to PDF/script.py
+++ b/Video to PDF/script.py
@@ -60,12 +60,11 @@ def text_to_pdf(file):
     pdf.add_page()
     pdf.set_font("Arial", size=12)
     effective_page_width = pdf.w - 2*pdf.l_margin
+    with open(file, "r") as f:
 
-    f = open(file, "r")
-
-    for x in f:
-        pdf.multi_cell(effective_page_width, 0.15, x)
-        pdf.ln(0.5)
+        for x in f:
+            pdf.multi_cell(effective_page_width, 0.15, x)
+            pdf.ln(0.5)
 
     pdf.output("../Video to PDF/my_pdf.pdf")
 
diff --git a/chat_bot/_Project.py b/chat_bot/_Project.py
index 4cb15fca90..8e9298c2d2 100644
--- a/chat_bot/_Project.py
+++ b/chat_bot/_Project.py
@@ -99,10 +99,10 @@ def topic_1():
         top = 'd1_technology.txt'
 
     global a
-    a = open(top, 'r')
+    with open(top, 'r') as a:
 
-    global doc
-    doc = a.readlines()
+        global doc
+        doc = a.readlines()
 
     frame_topic.pack_forget()
     frame_chat.pack()
@@ -174,10 +174,10 @@ def write_file():
     """
 
     global b
-    b = open(top, 'a')
+    with open(top, 'a') as b:
 
-    b.write(chat_raw)
-    b.write('\n')
+        b.write(chat_raw)
+        b.write('\n')
 
     button_write.place_forget()
     feed_answer()
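
Note for reviewers: every hunk above applies the same transformation. A bare open() relies on garbage collection to release the file handle, and leaks it outright if an exception fires before an explicit close() is reached; a with statement closes the handle on both normal and exceptional exit. A minimal sketch of the before/after shape this patch applies follows; the filename and payload are placeholders, not taken from any of the patched scripts:

    # Before: if write() raises, close() is never reached and the handle
    # stays open until the garbage collector happens to reclaim it.
    f = open("example.txt", "w")
    f.write("hello\n")
    f.close()

    # After: the context manager calls f.close() when the block exits,
    # whether it exits normally or via an exception, as if the body were
    # wrapped in try/finally.
    with open("example.txt", "w") as f:
        f.write("hello\n")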