diff --git a/Cryptography/README.md b/Cryptography/README.md new file mode 100644 index 0000000000..6d62a3f69f --- /dev/null +++ b/Cryptography/README.md @@ -0,0 +1,25 @@ +# CRYPTOGRAPHY + + +## DESCRIPTION +The objective of this project is to encode and decode messages using a common key. The user enters a message, selects the mode (encode or decode), and supplies a key; the same key must be used to encode and to decode a given message. + + +## PROJECT REQUISITES +To build this project we will use basic Python concepts along with the Tkinter and base64 libraries. + +- Tkinter is Python's standard GUI library. +- The base64 module provides functions to encode binary data to ASCII characters and to decode those ASCII characters back to binary data. + + +## PROJECT STRUCTURE +These are the steps to build the message encode-decode Python project: + +- Import modules +- Create the display window +- Define functions +- Define labels and buttons + + +## AUTHOR NAME +[ANUSHKA CHITRANSHI](https://github.com/codebuzzer01) diff --git a/Cryptography/crypto.py b/Cryptography/crypto.py new file mode 100644 index 0000000000..0383740e56 --- /dev/null +++ b/Cryptography/crypto.py @@ -0,0 +1,117 @@ +from tkinter import Tk +from tkinter import Label +from tkinter import BOTTOM +from tkinter import StringVar +from tkinter import Entry +from tkinter import Button +import base64 + +# initialize window +root = Tk() +root.geometry('500x300') +root.resizable(0, 0) + +# title of the window +root.title("Cryptography World") + +# label +Label(root, text='ENCODE DECODE', font='arial 20 bold').pack() +Label(root, text='By Anushka Chitranshi', font='arial 20 bold').pack(side=BOTTOM) + +# define variables +Text = StringVar() +private_key = StringVar() +mode = StringVar() +Result = StringVar() + + +# function to encode +def Encode(key, message): + """Encode the message.""" + enc = [] + for i in range(len(message)): + key_c = key[i % len(key)] + enc.append(chr((ord(message[i]) + ord(key_c)) % 256)) + return base64.urlsafe_b64encode("".join(enc).encode()).decode() + + +# function to decode +def Decode(key, message): + """Decode the message.""" + dec = [] + message = base64.urlsafe_b64decode(message).decode() + for i in range(len(message)): + key_c = key[i % len(key)] + dec.append(chr((256 + ord(message[i]) - ord(key_c)) % 256)) + return "".join(dec) + + +# function to set mode +def Mode(): + """Take mode of cryptography.""" + if mode.get() == 'e': + Result.set(Encode(private_key.get(), Text.get())) + elif mode.get() == 'd': + Result.set(Decode(private_key.get(), Text.get())) + else: + Result.set('Invalid Mode') + + +# Function to exit window +def Exit(): + """Exit the window.""" + root.destroy() + + +# Function to reset +def Reset(): + """Reset the screen.""" + Text.set("") + private_key.set("") + mode.set("") + Result.set("") + + +Label( + root, font='arial 12 bold', text='MESSAGE' +).place(x=60, y=60) +Entry( + root, font='arial 10', textvariable=Text, bg='ghost white' +).place(x=290, y=60) + + +# key +Label(root, font='arial 12 bold', text='KEY').place(x=60, y=90) +Entry( + root, font='arial 10', textvariable=private_key, bg='ghost white' +).place(x=290, y=90) + +# mode +Label( + root, font='arial 12 bold', text='MODE(e-encode, d-decode)' +).place(x=60, y=120) +Entry( + root, +
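# mode entry accepts 'e' (encode) or 'd' (decode); any other value shows 'Invalid Mode' +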
font='arial 10', textvariable=mode, bg='ghost white' +).place(x=290, y=120) + +# result +Entry( + root, font='arial 10 bold', textvariable=Result, bg='ghost white' +).place(x=290, y=150) + +# result button +Button( + root, font='arial 10 bold', text='RESULT', padx=2, bg='LightGray', command=Mode +).place(x=60, y=150) + +# reset button +Button( + root, font='anson', text='RESET', width=6, command=Reset, bg='Green', padx=2 +).place(x=80, y=190) + +# exit button +Button( + root, font='anson', text='EXIT', width=6, command=Exit, bg='Red', padx=2, pady=2 +).place(x=180, y=190) +root.mainloop() diff --git a/Gender-Age Detection/README.md b/Gender-Age Detection/README.md new file mode 100644 index 0000000000..713d2156df --- /dev/null +++ b/Gender-Age Detection/README.md @@ -0,0 +1,26 @@ +# GENDER-AGE DETECTION + +## DESCRIPTION + +The objective of this project is to automatically identify the gender and approximate age of a person from an image. + +## PROJECT REQUISITES + +To build this project we will use + +- Deep Learning to accurately identify the gender and age of a person from a single image of a face. +- We will use this [dataset](https://www.kaggle.com/ttungl/adience-benchmark-gender-and-age-classification) +- The predicted gender may be ‘Male’ or ‘Female’, and the predicted age may be one of the following 8 ranges: (0 – 2), (4 – 6), (8 – 12), (15 – 20), (25 – 32), (38 – 43), (48 – 53), (60 – 100). + +## PROJECT STRUCTURE + +These are the steps to build the Gender-Age Detection Python project: + +- Detect faces +- Classify into Male/Female +- Classify into one of the 8 age ranges +- Put the results on the image and display it + +## AUTHOR NAME + +[ANUSHKA CHITRANSHI](https://github.com/codebuzzer01) diff --git a/Gender-Age Detection/age_deploy.prototxt b/Gender-Age Detection/age_deploy.prototxt new file mode 100644 index 0000000000..9570d5c8a4 --- /dev/null +++ b/Gender-Age Detection/age_deploy.prototxt @@ -0,0 +1,175 @@ +name: "CaffeNet" +input: "data" +input_dim: 1 +input_dim: 3 +input_dim: 227 +input_dim: 227 +layers { + name: "conv1" + type: CONVOLUTION + bottom: "data" + top: "conv1" + convolution_param { + num_output: 96 + kernel_size: 7 + stride: 4 + } +} +layers { + name: "relu1" + type: RELU + bottom: "conv1" + top: "conv1" +} +layers { + name: "pool1" + type: POOLING + bottom: "conv1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layers { + name: "norm1" + type: LRN + bottom: "pool1" + top: "norm1" + lrn_param { + local_size: 5 + alpha: 0.0001 + beta: 0.75 + } +} +layers { + name: "conv2" + type: CONVOLUTION + bottom: "norm1" + top: "conv2" + convolution_param { + num_output: 256 + pad: 2 + kernel_size: 5 + } +} +layers { + name: "relu2" + type: RELU + bottom: "conv2" + top: "conv2" +} +layers { + name: "pool2" + type: POOLING + bottom: "conv2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layers { + name: "norm2" + type: LRN + bottom: "pool2" + top: "norm2" + lrn_param { + local_size: 5 + alpha: 0.0001 + beta: 0.75 + } +} +layers { + name: "conv3" + type: CONVOLUTION + bottom: "norm2" + top: "conv3" + convolution_param { + num_output: 384 + pad: 1 + kernel_size: 3 + } +} +layers{ + name: "relu3" + type: RELU + bottom: "conv3" + top: "conv3" +} +layers { + name: "pool5" + type: POOLING + bottom: "conv3" + top: "pool5" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layers { + name: "fc6" + type: INNER_PRODUCT + bottom: "pool5" + top: "fc6" + inner_product_param { + num_output: 512
+ } +} +layers { + name: "relu6" + type: RELU + bottom: "fc6" + top: "fc6" +} +layers { + name: "drop6" + type: DROPOUT + bottom: "fc6" + top: "fc6" + dropout_param { + dropout_ratio: 0.5 + } +} +layers { + name: "fc7" + type: INNER_PRODUCT + bottom: "fc6" + top: "fc7" + inner_product_param { + num_output: 512 + } +} +layers { + name: "relu7" + type: RELU + bottom: "fc7" + top: "fc7" +} +layers { + name: "drop7" + type: DROPOUT + bottom: "fc7" + top: "fc7" + dropout_param { + dropout_ratio: 0.5 + } +} +layers { + name: "fc8" + type: INNER_PRODUCT + bottom: "fc7" + top: "fc8" + inner_product_param { + num_output: 8 + } +} +layers { + name: "prob" + type: SOFTMAX + bottom: "fc8" + top: "prob" +} \ No newline at end of file diff --git a/Gender-Age Detection/age_net.caffemodel b/Gender-Age Detection/age_net.caffemodel new file mode 100644 index 0000000000..8391af479b Binary files /dev/null and b/Gender-Age Detection/age_net.caffemodel differ diff --git a/Gender-Age Detection/gad.py b/Gender-Age Detection/gad.py new file mode 100644 index 0000000000..413bd700fd --- /dev/null +++ b/Gender-Age Detection/gad.py @@ -0,0 +1,75 @@ +import cv2 +import math +import argparse + +def highlightFace(net, frame, conf_threshold=0.7): + frameOpencvDnn=frame.copy() + frameHeight=frameOpencvDnn.shape[0] + frameWidth=frameOpencvDnn.shape[1] + blob=cv2.dnn.blobFromImage(frameOpencvDnn, 1.0, (300, 300), [104, 117, 123], True, False) + + net.setInput(blob) + detections=net.forward() + faceBoxes=[] + for i in range(detections.shape[2]): + confidence=detections[0,0,i,2] + if confidence>conf_threshold: + x1=int(detections[0,0,i,3]*frameWidth) + y1=int(detections[0,0,i,4]*frameHeight) + x2=int(detections[0,0,i,5]*frameWidth) + y2=int(detections[0,0,i,6]*frameHeight) + faceBoxes.append([x1,y1,x2,y2]) + cv2.rectangle(frameOpencvDnn, (x1,y1), (x2,y2), (0,255,0), int(round(frameHeight/150)), 8) + return frameOpencvDnn,faceBoxes + + +parser=argparse.ArgumentParser() +parser.add_argument('--image') + +args=parser.parse_args() + +faceProto="opencv_face_detector.pbtxt" +faceModel="opencv_face_detector_uint8.pb" +ageProto="age_deploy.prototxt" +ageModel="age_net.caffemodel" +genderProto="gender_deploy.prototxt" +genderModel="gender_net.caffemodel" + +MODEL_MEAN_VALUES=(78.4263377603, 87.7689143744, 114.895847746) +ageList=['(0-2)', '(4-6)', '(8-12)', '(15-20)', '(25-32)', '(38-43)', '(48-53)', '(60-100)'] +genderList=['Male','Female'] + +faceNet=cv2.dnn.readNet(faceModel,faceProto) +ageNet=cv2.dnn.readNet(ageModel,ageProto) +genderNet=cv2.dnn.readNet(genderModel,genderProto) + +video=cv2.VideoCapture(args.image if args.image else 0) +padding=20 +while cv2.waitKey(1)<0: + hasFrame,frame=video.read() + if not hasFrame: + cv2.waitKey() + break + + resultImg,faceBoxes=highlightFace(faceNet,frame) + if not faceBoxes: + print("No face detected") + + for faceBox in faceBoxes: + face=frame[max(0,faceBox[1]-padding): + min(faceBox[3]+padding,frame.shape[0]-1),max(0,faceBox[0]-padding) + :min(faceBox[2]+padding, frame.shape[1]-1)] + + blob=cv2.dnn.blobFromImage(face, 1.0, (227,227), MODEL_MEAN_VALUES, swapRB=False) + genderNet.setInput(blob) + genderPreds=genderNet.forward() + gender=genderList[genderPreds[0].argmax()] + print(f'Gender: {gender}') + + ageNet.setInput(blob) + agePreds=ageNet.forward() + age=ageList[agePreds[0].argmax()] + print(f'Age: {age[1:-1]} years') + + cv2.putText(resultImg, f'{gender}, {age}', (faceBox[0], faceBox[1]-10), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0,255,255), 2, cv2.LINE_AA) + cv2.imshow("Detecting age and 
gender", resultImg) diff --git a/Gender-Age Detection/gender_deploy.prototxt b/Gender-Age Detection/gender_deploy.prototxt new file mode 100644 index 0000000000..65fd69caf8 --- /dev/null +++ b/Gender-Age Detection/gender_deploy.prototxt @@ -0,0 +1,175 @@ +name: "CaffeNet" +input: "data" +input_dim: 10 +input_dim: 3 +input_dim: 227 +input_dim: 227 +layers { + name: "conv1" + type: CONVOLUTION + bottom: "data" + top: "conv1" + convolution_param { + num_output: 96 + kernel_size: 7 + stride: 4 + } +} +layers { + name: "relu1" + type: RELU + bottom: "conv1" + top: "conv1" +} +layers { + name: "pool1" + type: POOLING + bottom: "conv1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layers { + name: "norm1" + type: LRN + bottom: "pool1" + top: "norm1" + lrn_param { + local_size: 5 + alpha: 0.0001 + beta: 0.75 + } +} +layers { + name: "conv2" + type: CONVOLUTION + bottom: "norm1" + top: "conv2" + convolution_param { + num_output: 256 + pad: 2 + kernel_size: 5 + } +} +layers { + name: "relu2" + type: RELU + bottom: "conv2" + top: "conv2" +} +layers { + name: "pool2" + type: POOLING + bottom: "conv2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layers { + name: "norm2" + type: LRN + bottom: "pool2" + top: "norm2" + lrn_param { + local_size: 5 + alpha: 0.0001 + beta: 0.75 + } +} +layers { + name: "conv3" + type: CONVOLUTION + bottom: "norm2" + top: "conv3" + convolution_param { + num_output: 384 + pad: 1 + kernel_size: 3 + } +} +layers{ + name: "relu3" + type: RELU + bottom: "conv3" + top: "conv3" +} +layers { + name: "pool5" + type: POOLING + bottom: "conv3" + top: "pool5" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layers { + name: "fc6" + type: INNER_PRODUCT + bottom: "pool5" + top: "fc6" + inner_product_param { + num_output: 512 + } +} +layers { + name: "relu6" + type: RELU + bottom: "fc6" + top: "fc6" +} +layers { + name: "drop6" + type: DROPOUT + bottom: "fc6" + top: "fc6" + dropout_param { + dropout_ratio: 0.5 + } +} +layers { + name: "fc7" + type: INNER_PRODUCT + bottom: "fc6" + top: "fc7" + inner_product_param { + num_output: 512 + } +} +layers { + name: "relu7" + type: RELU + bottom: "fc7" + top: "fc7" +} +layers { + name: "drop7" + type: DROPOUT + bottom: "fc7" + top: "fc7" + dropout_param { + dropout_ratio: 0.5 + } +} +layers { + name: "fc8" + type: INNER_PRODUCT + bottom: "fc7" + top: "fc8" + inner_product_param { + num_output: 2 + } +} +layers { + name: "prob" + type: SOFTMAX + bottom: "fc8" + top: "prob" +} \ No newline at end of file diff --git a/Gender-Age Detection/gender_net.caffemodel b/Gender-Age Detection/gender_net.caffemodel new file mode 100644 index 0000000000..87862497f4 Binary files /dev/null and b/Gender-Age Detection/gender_net.caffemodel differ diff --git a/Gender-Age Detection/girl1.jpg b/Gender-Age Detection/girl1.jpg new file mode 100644 index 0000000000..b2285277f6 Binary files /dev/null and b/Gender-Age Detection/girl1.jpg differ diff --git a/Gender-Age Detection/girl2.jpg b/Gender-Age Detection/girl2.jpg new file mode 100644 index 0000000000..0961996f26 Binary files /dev/null and b/Gender-Age Detection/girl2.jpg differ diff --git a/Gender-Age Detection/kid1.jpg b/Gender-Age Detection/kid1.jpg new file mode 100644 index 0000000000..0c9a06ea27 Binary files /dev/null and b/Gender-Age Detection/kid1.jpg differ diff --git a/Gender-Age Detection/man1.jpg b/Gender-Age Detection/man1.jpg new file mode 100644 index 0000000000..d67d6339e4 Binary files /dev/null and b/Gender-Age 
Detection/man1.jpg differ diff --git a/Gender-Age Detection/minion.jpg b/Gender-Age Detection/minion.jpg new file mode 100644 index 0000000000..e1dfb3ac2c Binary files /dev/null and b/Gender-Age Detection/minion.jpg differ diff --git a/Gender-Age Detection/opencv_face_detector.pbtxt b/Gender-Age Detection/opencv_face_detector.pbtxt new file mode 100644 index 0000000000..d2dde7d4cd --- /dev/null +++ b/Gender-Age Detection/opencv_face_detector.pbtxt @@ -0,0 +1,2362 @@ +node { + name: "data" + op: "Placeholder" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } +} +node { + name: "data_bn/FusedBatchNorm" + op: "FusedBatchNorm" + input: "data:0" + input: "data_bn/gamma" + input: "data_bn/beta" + input: "data_bn/mean" + input: "data_bn/std" + attr { + key: "epsilon" + value { + f: 1.00099996416e-05 + } + } +} +node { + name: "data_scale/Mul" + op: "Mul" + input: "data_bn/FusedBatchNorm" + input: "data_scale/mul" +} +node { + name: "data_scale/BiasAdd" + op: "BiasAdd" + input: "data_scale/Mul" + input: "data_scale/add" +} +node { + name: "SpaceToBatchND/block_shape" + op: "Const" + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + int_val: 1 + int_val: 1 + } + } + } +} +node { + name: "SpaceToBatchND/paddings" + op: "Const" + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + dim { + size: 2 + } + } + int_val: 3 + int_val: 3 + int_val: 3 + int_val: 3 + } + } + } +} +node { + name: "Pad" + op: "SpaceToBatchND" + input: "data_scale/BiasAdd" + input: "SpaceToBatchND/block_shape" + input: "SpaceToBatchND/paddings" +} +node { + name: "conv1_h/Conv2D" + op: "Conv2D" + input: "Pad" + input: "conv1_h/weights" + attr { + key: "dilations" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } + attr { + key: "padding" + value { + s: "VALID" + } + } + attr { + key: "strides" + value { + list { + i: 1 + i: 2 + i: 2 + i: 1 + } + } + } +} +node { + name: "conv1_h/BiasAdd" + op: "BiasAdd" + input: "conv1_h/Conv2D" + input: "conv1_h/bias" +} +node { + name: "BatchToSpaceND" + op: "BatchToSpaceND" + input: "conv1_h/BiasAdd" +} +node { + name: "conv1_bn_h/FusedBatchNorm" + op: "FusedBatchNorm" + input: "BatchToSpaceND" + input: "conv1_bn_h/gamma" + input: "conv1_bn_h/beta" + input: "conv1_bn_h/mean" + input: "conv1_bn_h/std" + attr { + key: "epsilon" + value { + f: 1.00099996416e-05 + } + } +} +node { + name: "conv1_scale_h/Mul" + op: "Mul" + input: "conv1_bn_h/FusedBatchNorm" + input: "conv1_scale_h/mul" +} +node { + name: "conv1_scale_h/BiasAdd" + op: "BiasAdd" + input: "conv1_scale_h/Mul" + input: "conv1_scale_h/add" +} +node { + name: "Relu" + op: "Relu" + input: "conv1_scale_h/BiasAdd" +} +node { + name: "conv1_pool/MaxPool" + op: "MaxPool" + input: "Relu" + attr { + key: "ksize" + value { + list { + i: 1 + i: 3 + i: 3 + i: 1 + } + } + } + attr { + key: "padding" + value { + s: "SAME" + } + } + attr { + key: "strides" + value { + list { + i: 1 + i: 2 + i: 2 + i: 1 + } + } + } +} +node { + name: "layer_64_1_conv1_h/Conv2D" + op: "Conv2D" + input: "conv1_pool/MaxPool" + input: "layer_64_1_conv1_h/weights" + attr { + key: "dilations" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } + attr { + key: "padding" + value { + s: "SAME" + } + } + attr { + key: "strides" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } +} +node { + name: "layer_64_1_bn2_h/FusedBatchNorm" + op: "BiasAdd" + input: "layer_64_1_conv1_h/Conv2D" + input: "layer_64_1_conv1_h/Conv2D_bn_offset" +} +node { + 
name: "layer_64_1_scale2_h/Mul" + op: "Mul" + input: "layer_64_1_bn2_h/FusedBatchNorm" + input: "layer_64_1_scale2_h/mul" +} +node { + name: "layer_64_1_scale2_h/BiasAdd" + op: "BiasAdd" + input: "layer_64_1_scale2_h/Mul" + input: "layer_64_1_scale2_h/add" +} +node { + name: "Relu_1" + op: "Relu" + input: "layer_64_1_scale2_h/BiasAdd" +} +node { + name: "layer_64_1_conv2_h/Conv2D" + op: "Conv2D" + input: "Relu_1" + input: "layer_64_1_conv2_h/weights" + attr { + key: "dilations" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } + attr { + key: "padding" + value { + s: "SAME" + } + } + attr { + key: "strides" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } +} +node { + name: "add" + op: "Add" + input: "layer_64_1_conv2_h/Conv2D" + input: "conv1_pool/MaxPool" +} +node { + name: "layer_128_1_bn1_h/FusedBatchNorm" + op: "FusedBatchNorm" + input: "add" + input: "layer_128_1_bn1_h/gamma" + input: "layer_128_1_bn1_h/beta" + input: "layer_128_1_bn1_h/mean" + input: "layer_128_1_bn1_h/std" + attr { + key: "epsilon" + value { + f: 1.00099996416e-05 + } + } +} +node { + name: "layer_128_1_scale1_h/Mul" + op: "Mul" + input: "layer_128_1_bn1_h/FusedBatchNorm" + input: "layer_128_1_scale1_h/mul" +} +node { + name: "layer_128_1_scale1_h/BiasAdd" + op: "BiasAdd" + input: "layer_128_1_scale1_h/Mul" + input: "layer_128_1_scale1_h/add" +} +node { + name: "Relu_2" + op: "Relu" + input: "layer_128_1_scale1_h/BiasAdd" +} +node { + name: "layer_128_1_conv_expand_h/Conv2D" + op: "Conv2D" + input: "Relu_2" + input: "layer_128_1_conv_expand_h/weights" + attr { + key: "dilations" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } + attr { + key: "padding" + value { + s: "SAME" + } + } + attr { + key: "strides" + value { + list { + i: 1 + i: 2 + i: 2 + i: 1 + } + } + } +} +node { + name: "layer_128_1_conv1_h/Conv2D" + op: "Conv2D" + input: "Relu_2" + input: "layer_128_1_conv1_h/weights" + attr { + key: "dilations" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } + attr { + key: "padding" + value { + s: "SAME" + } + } + attr { + key: "strides" + value { + list { + i: 1 + i: 2 + i: 2 + i: 1 + } + } + } +} +node { + name: "layer_128_1_bn2/FusedBatchNorm" + op: "BiasAdd" + input: "layer_128_1_conv1_h/Conv2D" + input: "layer_128_1_conv1_h/Conv2D_bn_offset" +} +node { + name: "layer_128_1_scale2/Mul" + op: "Mul" + input: "layer_128_1_bn2/FusedBatchNorm" + input: "layer_128_1_scale2/mul" +} +node { + name: "layer_128_1_scale2/BiasAdd" + op: "BiasAdd" + input: "layer_128_1_scale2/Mul" + input: "layer_128_1_scale2/add" +} +node { + name: "Relu_3" + op: "Relu" + input: "layer_128_1_scale2/BiasAdd" +} +node { + name: "layer_128_1_conv2/Conv2D" + op: "Conv2D" + input: "Relu_3" + input: "layer_128_1_conv2/weights" + attr { + key: "dilations" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } + attr { + key: "padding" + value { + s: "SAME" + } + } + attr { + key: "strides" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } +} +node { + name: "add_1" + op: "Add" + input: "layer_128_1_conv2/Conv2D" + input: "layer_128_1_conv_expand_h/Conv2D" +} +node { + name: "layer_256_1_bn1/FusedBatchNorm" + op: "FusedBatchNorm" + input: "add_1" + input: "layer_256_1_bn1/gamma" + input: "layer_256_1_bn1/beta" + input: "layer_256_1_bn1/mean" + input: "layer_256_1_bn1/std" + attr { + key: "epsilon" + value { + f: 1.00099996416e-05 + } + } +} +node { + name: "layer_256_1_scale1/Mul" + op: "Mul" + input: "layer_256_1_bn1/FusedBatchNorm" + input: "layer_256_1_scale1/mul" +} +node { + name: 
"layer_256_1_scale1/BiasAdd" + op: "BiasAdd" + input: "layer_256_1_scale1/Mul" + input: "layer_256_1_scale1/add" +} +node { + name: "Relu_4" + op: "Relu" + input: "layer_256_1_scale1/BiasAdd" +} +node { + name: "SpaceToBatchND_1/paddings" + op: "Const" + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + dim { + size: 2 + } + } + int_val: 1 + int_val: 1 + int_val: 1 + int_val: 1 + } + } + } +} +node { + name: "layer_256_1_conv_expand/Conv2D" + op: "Conv2D" + input: "Relu_4" + input: "layer_256_1_conv_expand/weights" + attr { + key: "dilations" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } + attr { + key: "padding" + value { + s: "SAME" + } + } + attr { + key: "strides" + value { + list { + i: 1 + i: 2 + i: 2 + i: 1 + } + } + } +} +node { + name: "conv4_3_norm/l2_normalize" + op: "L2Normalize" + input: "Relu_4:0" + input: "conv4_3_norm/l2_normalize/Sum/reduction_indices" +} +node { + name: "conv4_3_norm/mul_1" + op: "Mul" + input: "conv4_3_norm/l2_normalize" + input: "conv4_3_norm/mul" +} +node { + name: "conv4_3_norm_mbox_loc/Conv2D" + op: "Conv2D" + input: "conv4_3_norm/mul_1" + input: "conv4_3_norm_mbox_loc/weights" + attr { + key: "dilations" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } + attr { + key: "padding" + value { + s: "SAME" + } + } + attr { + key: "strides" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } +} +node { + name: "conv4_3_norm_mbox_loc/BiasAdd" + op: "BiasAdd" + input: "conv4_3_norm_mbox_loc/Conv2D" + input: "conv4_3_norm_mbox_loc/bias" +} +node { + name: "flatten/Reshape" + op: "Flatten" + input: "conv4_3_norm_mbox_loc/BiasAdd" +} +node { + name: "conv4_3_norm_mbox_conf/Conv2D" + op: "Conv2D" + input: "conv4_3_norm/mul_1" + input: "conv4_3_norm_mbox_conf/weights" + attr { + key: "dilations" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } + attr { + key: "padding" + value { + s: "SAME" + } + } + attr { + key: "strides" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } +} +node { + name: "conv4_3_norm_mbox_conf/BiasAdd" + op: "BiasAdd" + input: "conv4_3_norm_mbox_conf/Conv2D" + input: "conv4_3_norm_mbox_conf/bias" +} +node { + name: "flatten_6/Reshape" + op: "Flatten" + input: "conv4_3_norm_mbox_conf/BiasAdd" +} +node { + name: "Pad_1" + op: "SpaceToBatchND" + input: "Relu_4" + input: "SpaceToBatchND/block_shape" + input: "SpaceToBatchND_1/paddings" +} +node { + name: "layer_256_1_conv1/Conv2D" + op: "Conv2D" + input: "Pad_1" + input: "layer_256_1_conv1/weights" + attr { + key: "dilations" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } + attr { + key: "padding" + value { + s: "VALID" + } + } + attr { + key: "strides" + value { + list { + i: 1 + i: 2 + i: 2 + i: 1 + } + } + } +} +node { + name: "layer_256_1_bn2/FusedBatchNorm" + op: "BiasAdd" + input: "layer_256_1_conv1/Conv2D" + input: "layer_256_1_conv1/Conv2D_bn_offset" +} +node { + name: "BatchToSpaceND_1" + op: "BatchToSpaceND" + input: "layer_256_1_bn2/FusedBatchNorm" +} +node { + name: "layer_256_1_scale2/Mul" + op: "Mul" + input: "BatchToSpaceND_1" + input: "layer_256_1_scale2/mul" +} +node { + name: "layer_256_1_scale2/BiasAdd" + op: "BiasAdd" + input: "layer_256_1_scale2/Mul" + input: "layer_256_1_scale2/add" +} +node { + name: "Relu_5" + op: "Relu" + input: "layer_256_1_scale2/BiasAdd" +} +node { + name: "layer_256_1_conv2/Conv2D" + op: "Conv2D" + input: "Relu_5" + input: "layer_256_1_conv2/weights" + attr { + key: "dilations" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } + 
attr { + key: "padding" + value { + s: "SAME" + } + } + attr { + key: "strides" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } +} +node { + name: "add_2" + op: "Add" + input: "layer_256_1_conv2/Conv2D" + input: "layer_256_1_conv_expand/Conv2D" +} +node { + name: "layer_512_1_bn1/FusedBatchNorm" + op: "FusedBatchNorm" + input: "add_2" + input: "layer_512_1_bn1/gamma" + input: "layer_512_1_bn1/beta" + input: "layer_512_1_bn1/mean" + input: "layer_512_1_bn1/std" + attr { + key: "epsilon" + value { + f: 1.00099996416e-05 + } + } +} +node { + name: "layer_512_1_scale1/Mul" + op: "Mul" + input: "layer_512_1_bn1/FusedBatchNorm" + input: "layer_512_1_scale1/mul" +} +node { + name: "layer_512_1_scale1/BiasAdd" + op: "BiasAdd" + input: "layer_512_1_scale1/Mul" + input: "layer_512_1_scale1/add" +} +node { + name: "Relu_6" + op: "Relu" + input: "layer_512_1_scale1/BiasAdd" +} +node { + name: "layer_512_1_conv_expand_h/Conv2D" + op: "Conv2D" + input: "Relu_6" + input: "layer_512_1_conv_expand_h/weights" + attr { + key: "dilations" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } + attr { + key: "padding" + value { + s: "SAME" + } + } + attr { + key: "strides" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } +} +node { + name: "layer_512_1_conv1_h/Conv2D" + op: "Conv2D" + input: "Relu_6" + input: "layer_512_1_conv1_h/weights" + attr { + key: "dilations" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } + attr { + key: "padding" + value { + s: "SAME" + } + } + attr { + key: "strides" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } +} +node { + name: "layer_512_1_bn2_h/FusedBatchNorm" + op: "BiasAdd" + input: "layer_512_1_conv1_h/Conv2D" + input: "layer_512_1_conv1_h/Conv2D_bn_offset" +} +node { + name: "layer_512_1_scale2_h/Mul" + op: "Mul" + input: "layer_512_1_bn2_h/FusedBatchNorm" + input: "layer_512_1_scale2_h/mul" +} +node { + name: "layer_512_1_scale2_h/BiasAdd" + op: "BiasAdd" + input: "layer_512_1_scale2_h/Mul" + input: "layer_512_1_scale2_h/add" +} +node { + name: "Relu_7" + op: "Relu" + input: "layer_512_1_scale2_h/BiasAdd" +} +node { + name: "layer_512_1_conv2_h/convolution/SpaceToBatchND" + op: "SpaceToBatchND" + input: "Relu_7" + input: "layer_512_1_conv2_h/convolution/SpaceToBatchND/block_shape" + input: "layer_512_1_conv2_h/convolution/SpaceToBatchND/paddings" +} +node { + name: "layer_512_1_conv2_h/convolution" + op: "Conv2D" + input: "layer_512_1_conv2_h/convolution/SpaceToBatchND" + input: "layer_512_1_conv2_h/weights" + attr { + key: "dilations" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } + attr { + key: "padding" + value { + s: "VALID" + } + } + attr { + key: "strides" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } +} +node { + name: "layer_512_1_conv2_h/convolution/BatchToSpaceND" + op: "BatchToSpaceND" + input: "layer_512_1_conv2_h/convolution" + input: "layer_512_1_conv2_h/convolution/BatchToSpaceND/block_shape" + input: "layer_512_1_conv2_h/convolution/BatchToSpaceND/crops" +} +node { + name: "add_3" + op: "Add" + input: "layer_512_1_conv2_h/convolution/BatchToSpaceND" + input: "layer_512_1_conv_expand_h/Conv2D" +} +node { + name: "last_bn_h/FusedBatchNorm" + op: "FusedBatchNorm" + input: "add_3" + input: "last_bn_h/gamma" + input: "last_bn_h/beta" + input: "last_bn_h/mean" + input: "last_bn_h/std" + attr { + key: "epsilon" + value { + f: 1.00099996416e-05 + } + } +} +node { + name: "last_scale_h/Mul" + op: "Mul" + input: "last_bn_h/FusedBatchNorm" + input: "last_scale_h/mul" +} +node { + name: 
"last_scale_h/BiasAdd" + op: "BiasAdd" + input: "last_scale_h/Mul" + input: "last_scale_h/add" +} +node { + name: "last_relu" + op: "Relu" + input: "last_scale_h/BiasAdd" +} +node { + name: "conv6_1_h/Conv2D" + op: "Conv2D" + input: "last_relu" + input: "conv6_1_h/weights" + attr { + key: "dilations" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } + attr { + key: "padding" + value { + s: "SAME" + } + } + attr { + key: "strides" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } +} +node { + name: "conv6_1_h/BiasAdd" + op: "BiasAdd" + input: "conv6_1_h/Conv2D" + input: "conv6_1_h/bias" +} +node { + name: "conv6_1_h/Relu" + op: "Relu" + input: "conv6_1_h/BiasAdd" +} +node { + name: "conv6_2_h/Conv2D" + op: "Conv2D" + input: "conv6_1_h/Relu" + input: "conv6_2_h/weights" + attr { + key: "dilations" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } + attr { + key: "padding" + value { + s: "SAME" + } + } + attr { + key: "strides" + value { + list { + i: 1 + i: 2 + i: 2 + i: 1 + } + } + } +} +node { + name: "conv6_2_h/BiasAdd" + op: "BiasAdd" + input: "conv6_2_h/Conv2D" + input: "conv6_2_h/bias" +} +node { + name: "conv6_2_h/Relu" + op: "Relu" + input: "conv6_2_h/BiasAdd" +} +node { + name: "conv7_1_h/Conv2D" + op: "Conv2D" + input: "conv6_2_h/Relu" + input: "conv7_1_h/weights" + attr { + key: "dilations" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } + attr { + key: "padding" + value { + s: "SAME" + } + } + attr { + key: "strides" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } +} +node { + name: "conv7_1_h/BiasAdd" + op: "BiasAdd" + input: "conv7_1_h/Conv2D" + input: "conv7_1_h/bias" +} +node { + name: "conv7_1_h/Relu" + op: "Relu" + input: "conv7_1_h/BiasAdd" +} +node { + name: "Pad_2" + op: "SpaceToBatchND" + input: "conv7_1_h/Relu" + input: "SpaceToBatchND/block_shape" + input: "SpaceToBatchND_1/paddings" +} +node { + name: "conv7_2_h/Conv2D" + op: "Conv2D" + input: "Pad_2" + input: "conv7_2_h/weights" + attr { + key: "dilations" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } + attr { + key: "padding" + value { + s: "VALID" + } + } + attr { + key: "strides" + value { + list { + i: 1 + i: 2 + i: 2 + i: 1 + } + } + } +} +node { + name: "conv7_2_h/BiasAdd" + op: "BiasAdd" + input: "conv7_2_h/Conv2D" + input: "conv7_2_h/bias" +} +node { + name: "BatchToSpaceND_2" + op: "BatchToSpaceND" + input: "conv7_2_h/BiasAdd" +} +node { + name: "conv7_2_h/Relu" + op: "Relu" + input: "BatchToSpaceND_2" +} +node { + name: "conv8_1_h/Conv2D" + op: "Conv2D" + input: "conv7_2_h/Relu" + input: "conv8_1_h/weights" + attr { + key: "dilations" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } + attr { + key: "padding" + value { + s: "SAME" + } + } + attr { + key: "strides" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } +} +node { + name: "conv8_1_h/BiasAdd" + op: "BiasAdd" + input: "conv8_1_h/Conv2D" + input: "conv8_1_h/bias" +} +node { + name: "conv8_1_h/Relu" + op: "Relu" + input: "conv8_1_h/BiasAdd" +} +node { + name: "conv8_2_h/Conv2D" + op: "Conv2D" + input: "conv8_1_h/Relu" + input: "conv8_2_h/weights" + attr { + key: "dilations" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } + attr { + key: "padding" + value { + s: "SAME" + } + } + attr { + key: "strides" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } +} +node { + name: "conv8_2_h/BiasAdd" + op: "BiasAdd" + input: "conv8_2_h/Conv2D" + input: "conv8_2_h/bias" +} +node { + name: "conv8_2_h/Relu" + op: "Relu" + input: "conv8_2_h/BiasAdd" +} +node { + name: 
"conv9_1_h/Conv2D" + op: "Conv2D" + input: "conv8_2_h/Relu" + input: "conv9_1_h/weights" + attr { + key: "dilations" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } + attr { + key: "padding" + value { + s: "SAME" + } + } + attr { + key: "strides" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } +} +node { + name: "conv9_1_h/BiasAdd" + op: "BiasAdd" + input: "conv9_1_h/Conv2D" + input: "conv9_1_h/bias" +} +node { + name: "conv9_1_h/Relu" + op: "Relu" + input: "conv9_1_h/BiasAdd" +} +node { + name: "conv9_2_h/Conv2D" + op: "Conv2D" + input: "conv9_1_h/Relu" + input: "conv9_2_h/weights" + attr { + key: "dilations" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } + attr { + key: "padding" + value { + s: "SAME" + } + } + attr { + key: "strides" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } +} +node { + name: "conv9_2_h/BiasAdd" + op: "BiasAdd" + input: "conv9_2_h/Conv2D" + input: "conv9_2_h/bias" +} +node { + name: "conv9_2_h/Relu" + op: "Relu" + input: "conv9_2_h/BiasAdd" +} +node { + name: "conv9_2_mbox_loc/Conv2D" + op: "Conv2D" + input: "conv9_2_h/Relu" + input: "conv9_2_mbox_loc/weights" + attr { + key: "dilations" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } + attr { + key: "padding" + value { + s: "SAME" + } + } + attr { + key: "strides" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } +} +node { + name: "conv9_2_mbox_loc/BiasAdd" + op: "BiasAdd" + input: "conv9_2_mbox_loc/Conv2D" + input: "conv9_2_mbox_loc/bias" +} +node { + name: "flatten_5/Reshape" + op: "Flatten" + input: "conv9_2_mbox_loc/BiasAdd" +} +node { + name: "conv9_2_mbox_conf/Conv2D" + op: "Conv2D" + input: "conv9_2_h/Relu" + input: "conv9_2_mbox_conf/weights" + attr { + key: "dilations" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } + attr { + key: "padding" + value { + s: "SAME" + } + } + attr { + key: "strides" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } +} +node { + name: "conv9_2_mbox_conf/BiasAdd" + op: "BiasAdd" + input: "conv9_2_mbox_conf/Conv2D" + input: "conv9_2_mbox_conf/bias" +} +node { + name: "flatten_11/Reshape" + op: "Flatten" + input: "conv9_2_mbox_conf/BiasAdd" +} +node { + name: "conv8_2_mbox_loc/Conv2D" + op: "Conv2D" + input: "conv8_2_h/Relu" + input: "conv8_2_mbox_loc/weights" + attr { + key: "dilations" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } + attr { + key: "padding" + value { + s: "SAME" + } + } + attr { + key: "strides" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } +} +node { + name: "conv8_2_mbox_loc/BiasAdd" + op: "BiasAdd" + input: "conv8_2_mbox_loc/Conv2D" + input: "conv8_2_mbox_loc/bias" +} +node { + name: "flatten_4/Reshape" + op: "Flatten" + input: "conv8_2_mbox_loc/BiasAdd" +} +node { + name: "conv8_2_mbox_conf/Conv2D" + op: "Conv2D" + input: "conv8_2_h/Relu" + input: "conv8_2_mbox_conf/weights" + attr { + key: "dilations" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } + attr { + key: "padding" + value { + s: "SAME" + } + } + attr { + key: "strides" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } +} +node { + name: "conv8_2_mbox_conf/BiasAdd" + op: "BiasAdd" + input: "conv8_2_mbox_conf/Conv2D" + input: "conv8_2_mbox_conf/bias" +} +node { + name: "flatten_10/Reshape" + op: "Flatten" + input: "conv8_2_mbox_conf/BiasAdd" +} +node { + name: "conv7_2_mbox_loc/Conv2D" + op: "Conv2D" + input: "conv7_2_h/Relu" + input: "conv7_2_mbox_loc/weights" + attr { + key: "dilations" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } + attr { + key: 
"padding" + value { + s: "SAME" + } + } + attr { + key: "strides" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } +} +node { + name: "conv7_2_mbox_loc/BiasAdd" + op: "BiasAdd" + input: "conv7_2_mbox_loc/Conv2D" + input: "conv7_2_mbox_loc/bias" +} +node { + name: "flatten_3/Reshape" + op: "Flatten" + input: "conv7_2_mbox_loc/BiasAdd" +} +node { + name: "conv7_2_mbox_conf/Conv2D" + op: "Conv2D" + input: "conv7_2_h/Relu" + input: "conv7_2_mbox_conf/weights" + attr { + key: "dilations" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } + attr { + key: "padding" + value { + s: "SAME" + } + } + attr { + key: "strides" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } +} +node { + name: "conv7_2_mbox_conf/BiasAdd" + op: "BiasAdd" + input: "conv7_2_mbox_conf/Conv2D" + input: "conv7_2_mbox_conf/bias" +} +node { + name: "flatten_9/Reshape" + op: "Flatten" + input: "conv7_2_mbox_conf/BiasAdd" +} +node { + name: "conv6_2_mbox_loc/Conv2D" + op: "Conv2D" + input: "conv6_2_h/Relu" + input: "conv6_2_mbox_loc/weights" + attr { + key: "dilations" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } + attr { + key: "padding" + value { + s: "SAME" + } + } + attr { + key: "strides" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } +} +node { + name: "conv6_2_mbox_loc/BiasAdd" + op: "BiasAdd" + input: "conv6_2_mbox_loc/Conv2D" + input: "conv6_2_mbox_loc/bias" +} +node { + name: "flatten_2/Reshape" + op: "Flatten" + input: "conv6_2_mbox_loc/BiasAdd" +} +node { + name: "conv6_2_mbox_conf/Conv2D" + op: "Conv2D" + input: "conv6_2_h/Relu" + input: "conv6_2_mbox_conf/weights" + attr { + key: "dilations" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } + attr { + key: "padding" + value { + s: "SAME" + } + } + attr { + key: "strides" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } +} +node { + name: "conv6_2_mbox_conf/BiasAdd" + op: "BiasAdd" + input: "conv6_2_mbox_conf/Conv2D" + input: "conv6_2_mbox_conf/bias" +} +node { + name: "flatten_8/Reshape" + op: "Flatten" + input: "conv6_2_mbox_conf/BiasAdd" +} +node { + name: "fc7_mbox_loc/Conv2D" + op: "Conv2D" + input: "last_relu" + input: "fc7_mbox_loc/weights" + attr { + key: "dilations" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } + attr { + key: "padding" + value { + s: "SAME" + } + } + attr { + key: "strides" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } +} +node { + name: "fc7_mbox_loc/BiasAdd" + op: "BiasAdd" + input: "fc7_mbox_loc/Conv2D" + input: "fc7_mbox_loc/bias" +} +node { + name: "flatten_1/Reshape" + op: "Flatten" + input: "fc7_mbox_loc/BiasAdd" +} +node { + name: "mbox_loc" + op: "ConcatV2" + input: "flatten/Reshape" + input: "flatten_1/Reshape" + input: "flatten_2/Reshape" + input: "flatten_3/Reshape" + input: "flatten_4/Reshape" + input: "flatten_5/Reshape" + input: "mbox_loc/axis" +} +node { + name: "fc7_mbox_conf/Conv2D" + op: "Conv2D" + input: "last_relu" + input: "fc7_mbox_conf/weights" + attr { + key: "dilations" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } + attr { + key: "padding" + value { + s: "SAME" + } + } + attr { + key: "strides" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } +} +node { + name: "fc7_mbox_conf/BiasAdd" + op: "BiasAdd" + input: "fc7_mbox_conf/Conv2D" + input: "fc7_mbox_conf/bias" +} +node { + name: "flatten_7/Reshape" + op: "Flatten" + input: "fc7_mbox_conf/BiasAdd" +} +node { + name: "mbox_conf" + op: "ConcatV2" + input: "flatten_6/Reshape" + input: "flatten_7/Reshape" + input: "flatten_8/Reshape" + input: 
"flatten_9/Reshape" + input: "flatten_10/Reshape" + input: "flatten_11/Reshape" + input: "mbox_conf/axis" +} +node { + name: "mbox_conf_reshape" + op: "Reshape" + input: "mbox_conf" + input: "reshape_before_softmax" +} +node { + name: "mbox_conf_softmax" + op: "Softmax" + input: "mbox_conf_reshape" + attr { + key: "axis" + value { + i: 2 + } + } +} +node { + name: "mbox_conf_flatten" + op: "Flatten" + input: "mbox_conf_softmax" +} +node { + name: "PriorBox_0" + op: "PriorBox" + input: "conv4_3_norm/mul_1" + input: "data" + attr { + key: "aspect_ratio" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 1 + } + } + float_val: 2.0 + } + } + } + attr { + key: "clip" + value { + b: false + } + } + attr { + key: "flip" + value { + b: true + } + } + attr { + key: "max_size" + value { + i: 60 + } + } + attr { + key: "min_size" + value { + i: 30 + } + } + attr { + key: "offset" + value { + f: 0.5 + } + } + attr { + key: "step" + value { + f: 8.0 + } + } + attr { + key: "variance" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 4 + } + } + float_val: 0.10000000149 + float_val: 0.10000000149 + float_val: 0.20000000298 + float_val: 0.20000000298 + } + } + } +} +node { + name: "PriorBox_1" + op: "PriorBox" + input: "last_relu" + input: "data" + attr { + key: "aspect_ratio" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 2 + } + } + float_val: 2.0 + float_val: 3.0 + } + } + } + attr { + key: "clip" + value { + b: false + } + } + attr { + key: "flip" + value { + b: true + } + } + attr { + key: "max_size" + value { + i: 111 + } + } + attr { + key: "min_size" + value { + i: 60 + } + } + attr { + key: "offset" + value { + f: 0.5 + } + } + attr { + key: "step" + value { + f: 16.0 + } + } + attr { + key: "variance" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 4 + } + } + float_val: 0.10000000149 + float_val: 0.10000000149 + float_val: 0.20000000298 + float_val: 0.20000000298 + } + } + } +} +node { + name: "PriorBox_2" + op: "PriorBox" + input: "conv6_2_h/Relu" + input: "data" + attr { + key: "aspect_ratio" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 2 + } + } + float_val: 2.0 + float_val: 3.0 + } + } + } + attr { + key: "clip" + value { + b: false + } + } + attr { + key: "flip" + value { + b: true + } + } + attr { + key: "max_size" + value { + i: 162 + } + } + attr { + key: "min_size" + value { + i: 111 + } + } + attr { + key: "offset" + value { + f: 0.5 + } + } + attr { + key: "step" + value { + f: 32.0 + } + } + attr { + key: "variance" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 4 + } + } + float_val: 0.10000000149 + float_val: 0.10000000149 + float_val: 0.20000000298 + float_val: 0.20000000298 + } + } + } +} +node { + name: "PriorBox_3" + op: "PriorBox" + input: "conv7_2_h/Relu" + input: "data" + attr { + key: "aspect_ratio" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 2 + } + } + float_val: 2.0 + float_val: 3.0 + } + } + } + attr { + key: "clip" + value { + b: false + } + } + attr { + key: "flip" + value { + b: true + } + } + attr { + key: "max_size" + value { + i: 213 + } + } + attr { + key: "min_size" + value { + i: 162 + } + } + attr { + key: "offset" + value { + f: 0.5 + } + } + attr { + key: "step" + value { + f: 64.0 + } + } + attr { + key: "variance" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 4 + } + } + float_val: 0.10000000149 + float_val: 0.10000000149 + float_val: 0.20000000298 + 
float_val: 0.20000000298 + } + } + } +} +node { + name: "PriorBox_4" + op: "PriorBox" + input: "conv8_2_h/Relu" + input: "data" + attr { + key: "aspect_ratio" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 1 + } + } + float_val: 2.0 + } + } + } + attr { + key: "clip" + value { + b: false + } + } + attr { + key: "flip" + value { + b: true + } + } + attr { + key: "max_size" + value { + i: 264 + } + } + attr { + key: "min_size" + value { + i: 213 + } + } + attr { + key: "offset" + value { + f: 0.5 + } + } + attr { + key: "step" + value { + f: 100.0 + } + } + attr { + key: "variance" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 4 + } + } + float_val: 0.10000000149 + float_val: 0.10000000149 + float_val: 0.20000000298 + float_val: 0.20000000298 + } + } + } +} +node { + name: "PriorBox_5" + op: "PriorBox" + input: "conv9_2_h/Relu" + input: "data" + attr { + key: "aspect_ratio" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 1 + } + } + float_val: 2.0 + } + } + } + attr { + key: "clip" + value { + b: false + } + } + attr { + key: "flip" + value { + b: true + } + } + attr { + key: "max_size" + value { + i: 315 + } + } + attr { + key: "min_size" + value { + i: 264 + } + } + attr { + key: "offset" + value { + f: 0.5 + } + } + attr { + key: "step" + value { + f: 300.0 + } + } + attr { + key: "variance" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 4 + } + } + float_val: 0.10000000149 + float_val: 0.10000000149 + float_val: 0.20000000298 + float_val: 0.20000000298 + } + } + } +} +node { + name: "mbox_priorbox" + op: "ConcatV2" + input: "PriorBox_0" + input: "PriorBox_1" + input: "PriorBox_2" + input: "PriorBox_3" + input: "PriorBox_4" + input: "PriorBox_5" + input: "mbox_loc/axis" +} +node { + name: "detection_out" + op: "DetectionOutput" + input: "mbox_loc" + input: "mbox_conf_flatten" + input: "mbox_priorbox" + attr { + key: "background_label_id" + value { + i: 0 + } + } + attr { + key: "code_type" + value { + s: "CENTER_SIZE" + } + } + attr { + key: "confidence_threshold" + value { + f: 0.00999999977648 + } + } + attr { + key: "keep_top_k" + value { + i: 200 + } + } + attr { + key: "nms_threshold" + value { + f: 0.449999988079 + } + } + attr { + key: "num_classes" + value { + i: 2 + } + } + attr { + key: "share_location" + value { + b: true + } + } + attr { + key: "top_k" + value { + i: 400 + } + } +} +node { + name: "reshape_before_softmax" + op: "Const" + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 3 + } + } + int_val: 0 + int_val: -1 + int_val: 2 + } + } + } +} +library { +} \ No newline at end of file diff --git a/Gender-Age Detection/opencv_face_detector_uint8.pb b/Gender-Age Detection/opencv_face_detector_uint8.pb new file mode 100644 index 0000000000..124bdd1acc Binary files /dev/null and b/Gender-Age Detection/opencv_face_detector_uint8.pb differ diff --git a/Gender-Age Detection/woman1.jpg b/Gender-Age Detection/woman1.jpg new file mode 100644 index 0000000000..f575ffb921 Binary files /dev/null and b/Gender-Age Detection/woman1.jpg differ diff --git a/Gender-Age Detection/woman3.jpg b/Gender-Age Detection/woman3.jpg new file mode 100644 index 0000000000..48acfafae6 Binary files /dev/null and b/Gender-Age Detection/woman3.jpg differ diff --git a/Instagram Follow- NotFollow/README.md b/Instagram Follow- NotFollow/README.md new file mode 100644 index 0000000000..40c4b4a1c9 --- /dev/null +++ b/Instagram Follow- NotFollow/README.md @@ -0,0 +1,13 
@@ +# Instagram Unfollowers Bot + +## Install Chrome Driver + +- https://chromedriver.storage.googleapis.com/index.html?path=90.0.4430.24/ + +## Install Selenium + +- pip3 install selenium + +## OUTPUT: + +Screenshot 2021-04-23 at 3 20 37 PM diff --git a/Instagram Follow- NotFollow/main.py b/Instagram Follow- NotFollow/main.py new file mode 100644 index 0000000000..f256a23032 --- /dev/null +++ b/Instagram Follow- NotFollow/main.py @@ -0,0 +1,100 @@ +from selenium import webdriver +from time import sleep +import datetime +from prettytable import PrettyTable + +start = datetime.datetime.now() + + +table = PrettyTable() +column_names = ["Non-Followers"] + + +class InstaBot: + """Log in to Instagram and list accounts that don't follow back.""" + + def __init__(self, username, pw): + self.driver = webdriver.Chrome() + self.username = username + self.driver.get("https://instagram.com") + sleep(2) + self.driver.find_element_by_xpath( + "//input[@name=\"username\"]").send_keys(username) + self.driver.find_element_by_xpath( + "//input[@name=\"password\"]").send_keys(pw) + self.driver.find_element_by_xpath("//button[@type=\"submit\"]").click() + sleep(4) + self.url = self.driver.current_url + if self.url == "https://www.instagram.com/accounts/onetap/?next=%2F": + self.driver.find_element_by_xpath( + "//button[contains(text(), 'Not Now')]").click() + sleep(4) + self.driver.find_element_by_xpath( + "//button[contains(text(), 'Not Now')]").click() + else: + sleep(2) + self.driver.find_element_by_xpath( + "//button[contains(text(), 'Not Now')]").click() + sleep(1) + + def get_unfollowers(self): + """Print the followed accounts that don't follow back.""" + self.driver.find_element_by_xpath( + "//a[contains(@href,'/{}')]".format(self.username)).click() + sleep(2) + self.driver.find_element_by_xpath( + "//a[contains(@href,'/following')]").click() + following = self._get_names() + sleep(2) + self.driver.find_element_by_xpath( + "//a[contains(@href,'/followers')]").click() + sleep(2) + followers = self._get_names() + + notfollowingback = [ + user for user in following if user not in followers] + + table.add_column(column_names[0], notfollowingback) + + print(table) + + def _get_names(self): + """Collect account names from the currently open followers/following dialog.""" + + sleep(2) + scroll_box = self.driver.find_element_by_xpath( + '/html/body/div[5]/div/div/div[2]') + last_ht, ht = 0, 1 + + # Keep scrolling till you can't go down any further + while last_ht != ht: + last_ht = ht + sleep(1) + ht = self.driver.execute_script( + """ + arguments[0].scrollTo(0, arguments[0].scrollHeight); + return arguments[0].scrollHeight; + """, scroll_box) + + # Gets the list of accounts + links = scroll_box.find_elements_by_tag_name('a') + names = [name.text for name in links if name.text != ''] + + sleep(1) + + # Closes the box + close_btn = self.driver.find_element_by_xpath( + '/html/body/div[5]/div/div/div[1]/div/div[2]') + close_btn.click() + + return names + + +usr_name = input("Enter Username : ") +password = input("Enter Password : ") + +my_bot = InstaBot(usr_name, password) +my_bot.get_unfollowers() diff --git a/LeetCode-Scrapper/README.md b/LeetCode-Scrapper/README.md new file mode 100644 index 0000000000..4e73f59dde --- /dev/null +++ b/LeetCode-Scrapper/README.md @@ -0,0 +1,31 @@ +# LeetCode Scraper +This Python script lets the user scrape 'n' LeetCode problems of any category/difficulty from [Leetcode](https://leetcode.com/problemset/all). The script saves the details of each scraped problem to a separate PDF.
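+ +A sample session looks like this (the chromedriver path and answers shown are illustrative): + +``` +Enter Chrome Driver path: +/path/to/chromedriver +Choose difficulty level from + Easy + Medium + Hard + + : Easy +Enter the number of problems to be scraped : 2 +```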
+ +## Prerequisites: +Install the required packages by running the following command in your terminal (make sure you're in the project directory): + +` pip3 install -r requirements.txt ` + +To run this script, you also need Selenium installed and a Chrome webdriver configured in your $PATH. You can download the Chrome driver from https://chromedriver.chromium.org/downloads. Then, just enter the Chrome driver path as asked in the prompt. + +## Running the script: +After installing all the requirements, run this command in your terminal: + +` python3 ques.py ` + +## Output: +This script will generate 'n' PDFs in the same folder, each storing one problem's information: the problem title, problem statement, test cases, and problem link. diff --git a/LeetCode-Scrapper/ques.py b/LeetCode-Scrapper/ques.py new file mode 100644 index 0000000000..3cdd2ccf2b --- /dev/null +++ b/LeetCode-Scrapper/ques.py @@ -0,0 +1,111 @@ +from selenium import webdriver +from selenium.webdriver.common.desired_capabilities import DesiredCapabilities +from selenium.webdriver.support.ui import WebDriverWait +from selenium.webdriver.support import expected_conditions as EC +from selenium.webdriver.common.by import By +from selenium.common.exceptions import NoSuchElementException +from selenium.common.exceptions import TimeoutException +import os +from fpdf import FPDF + +options = webdriver.ChromeOptions() +options.add_argument("--headless") + + +capa = DesiredCapabilities.CHROME +capa["pageLoadStrategy"] = "none" + +print("Enter Chrome Driver path: ") +input_driver_path = input() +driver = webdriver.Chrome(input_driver_path) +# the base url of the leetcode problem set page +baseurl = "https://leetcode.com/problemset/all" +wait = WebDriverWait(driver, 15) + +# the difficulty levels of the problems +problem_difficulty = {"Easy": "?difficulty=Easy", "Medium": "?difficulty=Medium", "Hard": "?difficulty=Hard"} + +def get_problem(category, no_of_problems): + + prblm_info = {} + try: + # check for network problems or any other issue + driver.get(baseurl + '/' + category) + wait.until(EC.element_to_be_clickable((By.XPATH, "//*[@id='question-app']/div/div[2]/div[2]/div[2]/table/tbody[1]/tr[1]"))) + except TimeoutException as exception: + print("Couldn't fetch problem. Network issue or page slow to render. 
Try again") + os._exit(-1) + + for problem_index in range(1, no_of_problems + 1): + #set problem name + problem_name = driver.find_element_by_xpath("//*[@id='question-app']/div/div[2]/div[2]/div[2]/table/tbody[1]/tr[{}]/td[3]".format(problem_index)).text + #set problem url + problem_url = driver.find_element_by_xpath("//*[@id='question-app']/div/div[2]/div[2]/div[2]/table/tbody[1]/tr[{}]/td[3]/div/a".format(problem_index)).get_attribute('href') + print(problem_name," ",problem_url) + prblm_info[problem_name] = problem_url + return prblm_info + +def get_description(problem_url,problem_name): + try: + #check if the element is founded, and located in the correct format + driver.get(problem_url) + wait.until(EC.element_to_be_clickable((By.XPATH, "//*[@id='app']/div/div[2]/div/div/div[1]/div/div[1]/div[1]/div/div[2]/div/div[2]/div/p[1]"))) + problem_title= problem_name + problem_statement = driver.find_element_by_xpath("//*[@id='app']/div/div[2]/div/div/div[1]/div/div[1]/div[1]/div/div[2]/div/div[2]/div/p[1]").text + problem_test_cases = driver.find_element_by_xpath("//*[@id='app']/div/div[2]/div/div/div[1]/div/div[1]/div[1]/div/div[2]/div/div[2]/div/pre[1]").text + + + if (problem_test_cases.find("Output") == -1): + problem_test_cases = "Input\n" + problem_test_cases + problem_test_cases+="\nOutput\n" + problem_test_cases += driver.find_element_by_xpath("//*[@id='problem-statement']/pre[2]").text + + else: + driver.execute_script("window.stop();") + problem={'title':problem_title,'statement':problem_statement,'test_case':problem_test_cases,'url':problem_url} + return problem + + except NoSuchElementException as e: + print("Couldn't scrap the element, Unable to locate it") + problem=None + except TimeoutException as exception: + print("Couldn't scrap the element, Unable to locate it") + problem=None + +def to_pdf(problem): + pdf = FPDF() + pdf.add_page() + pdf.set_font("Arial", size = 15) + #set title + title=problem["title"].encode('latin-1', 'replace').decode('latin-1') + #set statement + statement=problem["statement"].encode('latin-1', 'replace').decode('latin-1') + #set test cases + test_case=problem["test_case"].encode('latin-1', 'replace').decode('latin-1') + #set url + url=problem["url"] + pdf.cell(200, 10, txt =title, ln = 1, align = 'C') + pdf.multi_cell(200, 10, txt =statement, align = 'L') + pdf.multi_cell(200, 10, txt =test_case, align = 'L') + pdf.write(5, 'Problem_Link: ') + pdf.write(5,url,url) + title = title.rstrip() + pdf.output("./LeetCode-Scrapper/"+title+".pdf") + + +def main(): + category=input("Choose difficulty level from \n Easy \n Medium \n Hard \n\n : ") + no_of_problems=int(input("Enter the number of problems to be scrapped : ")) + info = get_problem(problem_difficulty[category], no_of_problems) + for name, url in info.items(): + problem=get_description(url,name) + if(problem is not None ): + to_pdf(problem) + else: + pass + +if __name__ == '__main__': + main() + +#Close the driver path +driver.close() diff --git a/LeetCode-Scrapper/requirements.txt b/LeetCode-Scrapper/requirements.txt new file mode 100644 index 0000000000..b0ce67411f --- /dev/null +++ b/LeetCode-Scrapper/requirements.txt @@ -0,0 +1,4 @@ +fpdf==1.7.2 +requests==2.24.0 +selenium==3.141.0 +urllib3==1.25.11 \ No newline at end of file diff --git a/Malaria/Images/cell1.jpg b/Malaria/Images/cell1.jpg new file mode 100644 index 0000000000..04ebd896ad Binary files /dev/null and b/Malaria/Images/cell1.jpg differ diff --git a/Malaria/Models/malaria.h5 b/Malaria/Models/malaria.h5 new file mode 100644 index 
0000000000..80dfec146e Binary files /dev/null and b/Malaria/Models/malaria.h5 differ diff --git a/Malaria/main.py b/Malaria/main.py new file mode 100644 index 0000000000..0c7b038c96 --- /dev/null +++ b/Malaria/main.py @@ -0,0 +1,25 @@ +# import important libraries +import os +from PIL import Image +import tensorflow as tf +import numpy as np + +# Configure environment variables so a TensorFlow model built on another machine can run +os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' +os.environ['TF_XLA_FLAGS'] = '--tf_xla_enable_xla_devices' + +# reading image +val_path = input("Enter image path name e.g. Images/cell1.jpg: ") +img = Image.open(val_path) +# configuring the input image according to the model's requirements +img = img.resize((36, 36)) +img = np.asarray(img) +img = img.reshape((1, 36, 36, 3)) +img = img.astype(np.float64) +model_path = "./Malaria/Models/malaria.h5" +model = tf.keras.models.load_model(model_path) +pred = np.argmax(model.predict(img)[0]) +if pred == 1: + print("Infected Cell") +else: + print("Healthy Cell") diff --git a/Malaria/model_training.py b/Malaria/model_training.py new file mode 100644 index 0000000000..94e1581965 --- /dev/null +++ b/Malaria/model_training.py @@ -0,0 +1,93 @@ +# importing the libraries for loading data and visualisation +import os +import cv2 +import numpy as np +from PIL import Image +# import for train-test-split +from sklearn.model_selection import train_test_split +# import for One Hot Encoding +from keras.utils import to_categorical +# importing libraries for Model +from tensorflow.keras.models import Sequential +from tensorflow.keras.layers import Conv2D, MaxPooling2D +from tensorflow.keras.layers import Dense, Flatten, Dropout, BatchNormalization + +# loading the data of images and setting their labels +data = [] +labels = [] + +Parasitized = os.listdir("../input/malaria/cell_images/Parasitized/") + +for a in Parasitized: + try: + imageP = cv2.imread("../input/malaria/cell_images/Parasitized/" + a) + image_from_arrayP = Image.fromarray(imageP, 'RGB') + size_imageP = image_from_arrayP.resize((36, 36)) + data.append(np.array(size_imageP)) + labels.append(0) + except AttributeError: + # skip images that OpenCV could not read + print("") + +Uninfected = os.listdir("../input/malaria/cell_images/Uninfected/") + +for b in Uninfected: + try: + imageU = cv2.imread("../input/malaria/cell_images/Uninfected/" + b) + image_from_arrayU = Image.fromarray(imageU, 'RGB') + size_imageU = image_from_arrayU.resize((36, 36)) + data.append(np.array(size_imageU)) + labels.append(1) + except AttributeError: + # skip images that OpenCV could not read + print("") + +# Creating a single numpy array of all the images and labels +data1 = np.array(data) +labels1 = np.array(labels) +print('Cells : {} and labels : {}'.format(data1.shape, labels1.shape)) + +# let's shuffle the data and labels before splitting them into training and testing sets +n = np.arange(data1.shape[0]) +np.random.shuffle(n) +data2 = data1[n] +labels2 = labels1[n] + +# Splitting the dataset into the Training set and Test set +X_train, X_valid, y_train, y_valid = train_test_split(data2, + labels2, test_size=0.2, random_state=0) +X_trainF = X_train.astype('float32') +X_validF = X_valid.astype('float32') +y_trainF = to_categorical(y_train) +y_validF = to_categorical(y_valid) + +classifier = Sequential() +# CNN layers +classifier.add(Conv2D(32, kernel_size=(3, 3), + input_shape=(36, 36, 3), activation='relu')) +classifier.add(MaxPooling2D(pool_size=(2, 2))) +classifier.add(BatchNormalization(axis=-1)) +classifier.add(Dropout(0.5)) # Dropout prevents overfitting +classifier.add(Conv2D(32, kernel_size=(3, 3), +
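# input_shape is only required on the first layer; later layers infer their input shape +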
input_shape=(36, 36, 3), activation='relu')) +classifier.add(MaxPooling2D(pool_size=(2, 2))) +classifier.add(BatchNormalization(axis=-1)) +classifier.add(Dropout(0.5)) +classifier.add(Flatten()) +classifier.add(Dense(units=128, activation='relu')) +classifier.add(BatchNormalization(axis=-1)) +classifier.add(Dropout(0.5)) +classifier.add(Dense(units=2, activation='softmax')) +classifier.compile(optimizer='adam', + loss='categorical_crossentropy', metrics=['accuracy']) +history = classifier.fit(X_trainF, y_trainF, + batch_size=120, epochs=15, + verbose=1, validation_data=(X_validF, y_validF)) +classifier.summary() + +# validation-set predictions, kept for offline inspection +y_pred = classifier.predict(X_validF) +y_predF = np.argmax(y_pred, axis=1) +y_valid_one = np.argmax(y_validF, axis=1) +classifier.save("./Malaria/Models/malaria.h5") diff --git a/Malaria/requirements.txt b/Malaria/requirements.txt new file mode 100644 index 0000000000..7284323a30 --- /dev/null +++ b/Malaria/requirements.txt @@ -0,0 +1,7 @@ +numpy==1.19.2 +pandas==1.1.2 +h5py==2.10.0 +tensorflow==2.4.1 +pillow==7.2.0 +opencv-python==4.5.1.48 +scikit-learn>=0.18 diff --git a/NSE Stocks GUI/README.md b/NSE Stocks GUI/README.md new file mode 100644 index 0000000000..3ad00ee34e --- /dev/null +++ b/NSE Stocks GUI/README.md @@ -0,0 +1,38 @@ +# NSE Stock Data + +Running this script lets the user browse NSE stock data scraped from the [NSE Website](https://www.nseindia.com), filtered by their choice of the available categories. + +## Setup instructions + +To run this script, you need Python and pip installed on your system. Once they are installed, run the following command from the project folder to install the requirements: + +``` +pip install -r requirements.txt +``` + +Because this script uses Selenium, you will also need to install the Chrome webdriver from [this link](https://sites.google.com/a/chromium.org/chromedriver/downloads). + +After satisfying all the requirements, open a terminal in the project folder and run + +``` +python stocks.py +``` + +or + +``` +python3 stocks.py +``` + +depending on your Python version. Make sure you run the command from the same virtual environment in which the required modules are installed. 
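If you are not sure the webdriver is set up correctly, the short sketch below can confirm that Selenium can launch Chrome and reach NSE before you start the GUI. It is a sanity check rather than part of the project, and it assumes the same Selenium 3-style `webdriver.Chrome(driver_path)` constructor that stocks.py uses:

```
from selenium import webdriver

driver_path = input('Enter path for chromedriver: ')
driver = webdriver.Chrome(driver_path)  # same constructor style as stocks.py
try:
    driver.get('https://www.nseindia.com')
    print('Loaded:', driver.title)  # a non-empty title means the setup works
finally:
    driver.quit()
```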
+ +## Output + +The user can select different categories from the GUI, as shown in the sample screenshot below: + +![Screenshot of the GUI](https://i.postimg.cc/FRnNzQMP/nse.png) + + +## Author + +[Ayush Jain](https://github.com/Ayushjain2205) diff --git a/NSE Stocks GUI/requirements.txt b/NSE Stocks GUI/requirements.txt new file mode 100644 index 0000000000..b74b115ce7 --- /dev/null +++ b/NSE Stocks GUI/requirements.txt @@ -0,0 +1,2 @@ +beautifulsoup4 +selenium \ No newline at end of file diff --git a/NSE Stocks GUI/stocks.py b/NSE Stocks GUI/stocks.py new file mode 100644 index 0000000000..0981c1181d --- /dev/null +++ b/NSE Stocks GUI/stocks.py @@ -0,0 +1,119 @@ +from bs4 import BeautifulSoup +import tkinter as tk +from tkinter import ttk +from selenium import webdriver +import time + +driver_path = input('Enter path for chromedriver: ') + +# categories and the ids of their NSE result tables +most_active = {'Most Active equities - Main Board': 'mae_mainboard_tableC', 'Most Active equities - SME': 'mae_sme_tableC', 'Most Active equities - ETFs': 'mae_etf_tableC', + 'Most Active equities - Price Spurts': 'mae_pricespurts_tableC', 'Most Active equities - Volume Spurts': 'mae_volumespurts_tableC'} +top_20 = {'NIFTY 50 Top 20 Gainers': 'topgainer-Table', 'NIFTY 50 Top 20 Losers': 'toplosers-Table'} + +# build the request URL based on the user's category choice +def generate_url(): + category_choice = category.get() + if category_choice in most_active: + page = 'most-active-equities' + else: + page = 'top-gainers-loosers'  # 'loosers' is the slug NSE actually uses + url = 'https://www.nseindia.com/market-data/{}'.format(page) + return url + +# scrape the stock data from the generated URL +def scraper(): + url = generate_url() + driver = webdriver.Chrome(driver_path) + driver.get(url) + + # wait for the results to load (a fixed sleep; see the explicit-wait sketch after this diff) + time.sleep(5) + html = driver.page_source + + # start scraping the resulting html + soup = BeautifulSoup(html, 'html.parser') + + # pick the table id for the chosen category + category_choice = category.get() + if category_choice in most_active: + category_div = most_active[category_choice] + else: + category_div = top_20[category_choice] + + # find the table to scrape + results = soup.find("table", {"id": category_div}) + rows = results.findChildren('tr') + + table_data = [] + row_values = [] + # append the stock data into a list, row by row + for row in rows: + cells = row.findChildren(['th', 'td']) + for cell in cells: + value = cell.text.strip() + value = " ".join(value.split()) + row_values.append(value) + table_data.append(row_values) + row_values = [] + + # format the stock data into fixed-width columns + stocks_data = "" + for stock in table_data: + single_record = "" + for cell in stock: + format_cell = "{:<20}" + single_record += format_cell.format(cell[:20]) + single_record += "\n" + stocks_data += single_record + + # add the formatted data into the tkinter GUI + query_label.config(state=tk.NORMAL) + query_label.delete(1.0, "end") + query_label.insert(1.0, stocks_data) + query_label.config(state=tk.DISABLED) + driver.close() + +# create the tkinter window +window = tk.Tk() +window.title('NSE Stock data') +window.geometry('1200x1000') +window.configure(bg='white') + +style = ttk.Style() +style.configure('my.TButton', font=('Helvetica', 16)) +style.configure('my.TFrame', background='white') + +# title label +ttk.Label(window, text="NSE Stock market data", + background='white', foreground="SpringGreen2", + font=("Helvetica", 30, 'bold')).grid(row=0, column=1) 
+ +# label +ttk.Label(window, text="Select Market data to get:", background='white', + font=("Helvetica", 15)).grid(column=0, + row=5, padx=10, pady=25) + +# combobox creation +category = ttk.Combobox( + window, width=60, state='readonly', font="Helvetica 15") + +submit_btn = ttk.Button(window, text="Get Stock Data!", style='my.TButton', command=scraper) + +# add the combobox drop-down list +category['values'] = ('Most Active equities - Main Board', 'Most Active equities - SME', 'Most Active equities - ETFs', 'Most Active equities - Price Spurts', + 'Most Active equities - Volume Spurts', 'NIFTY 50 Top 20 Gainers', 'NIFTY 50 Top 20 Losers') + +category.grid(column=1, row=5, padx=10) +category.current(0) + +submit_btn.grid(row=5, column=3, pady=5, padx=15, ipadx=5) + +frame = ttk.Frame(window, style='my.TFrame') +frame.place(relx=0.50, rely=0.12, relwidth=0.98, relheight=0.90, anchor="n") + +# read-only text widget that displays the scraped stock data +query_label = tk.Text(frame, width=500, bg="alice blue") +query_label.grid(row=7, columnspan=2) + +window.mainloop()
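A note on the fixed `time.sleep(5)` in `scraper()`: NSE's market-data pages render their tables with JavaScript, so a slow connection can outlast the sleep. A more robust pattern is the explicit wait the LeetCode scraper above already uses (`WebDriverWait` with `expected_conditions`). The sketch below assumes the table ids in `most_active`/`top_20` appear once the page has rendered, and `wait_for_table` is a helper name introduced here for illustration:

```
# a minimal sketch, not part of stocks.py: wait for the chosen results table
# to be present instead of sleeping for a fixed five seconds
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

def wait_for_table(driver, category_div, timeout=15):
    """Block until the table with id category_div is present, then return the page HTML."""
    WebDriverWait(driver, timeout).until(
        EC.presence_of_element_located((By.ID, category_div))
    )
    return driver.page_source
```

Inside `scraper()`, moving the `category_div` lookup up and calling `html = wait_for_table(driver, category_div)` would then replace the `time.sleep(5)` and `driver.page_source` pair.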

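On the Malaria side, Malaria/main.py walks through the preprocessing inline (resize to 36x36, reshape to a one-image batch, cast to float). For reuse, those steps can be folded into a single function. The sketch below assumes the same malaria.h5 model and the same label convention (1 = infected) as main.py, and `predict_cell` is a name introduced here for illustration:

```
# a minimal sketch, assuming the 36x36 RGB input and malaria.h5 model from Malaria/main.py
import numpy as np
import tensorflow as tf
from PIL import Image

def predict_cell(image_path, model_path="./Malaria/Models/malaria.h5"):
    """Return 'Infected Cell' or 'Healthy Cell' for a single cell image."""
    img = Image.open(image_path).convert("RGB")  # guard against grayscale/RGBA inputs
    batch = np.asarray(img.resize((36, 36)), dtype=np.float64).reshape((1, 36, 36, 3))
    model = tf.keras.models.load_model(model_path)
    pred = np.argmax(model.predict(batch)[0])
    return "Infected Cell" if pred == 1 else "Healthy Cell"

print(predict_cell(input("Enter image path name, e.g. Images/cell1.jpg: ")))
```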
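Finally, a note on the `encode('latin-1', 'replace')` calls in `to_pdf`: FPDF 1.7.2, as pinned in LeetCode-Scrapper/requirements.txt, ships only latin-1 core fonts, so any non-latin characters in a problem statement are replaced with `?`. If you need them preserved, pyfpdf can register a Unicode TrueType font instead. The sketch below assumes a DejaVuSans.ttf file is available next to the script; the font path is an assumption, not part of the project:

```
# a minimal sketch: register a Unicode font so to_pdf could skip the latin-1 round-trip
from fpdf import FPDF

pdf = FPDF()
pdf.add_page()
# uni=True enables UTF-8 text; DejaVuSans.ttf must exist at this (assumed) path
pdf.add_font('DejaVu', '', 'DejaVuSans.ttf', uni=True)
pdf.set_font('DejaVu', size=15)
pdf.multi_cell(200, 10, txt='Unicode text survives: 日本語, кириллица', align='L')
pdf.output('unicode-test.pdf')
```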