{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Importing the Modules"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# install the required libraries first if needed:\n",
    "# pip3 install numpy pandas scikit-learn\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "from sklearn import datasets"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Loading the Data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# load the wine dataset that ships with scikit-learn\n",
    "wine = datasets.load_wine()"
   ]
  },
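  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optional sketch (not part of the original run): load_wine() returns a Bunch\n",
    "# object exposing .data, .target, .feature_names and .target_names to inspect.\n",
    "print(wine.feature_names)\n",
    "print(wine.target_names)\n",
    "print(wine.data.shape)"
   ]
  },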
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Features and Labels"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# X holds the features of the data\n",
    "X = wine.data\n",
    "# y holds the labels: the target classes we want to predict\n",
    "y = wine.target"
   ]
  },
 | 66 | + },  | 
 | 67 | + {  | 
 | 68 | + "cell_type": "markdown",  | 
 | 69 | + "metadata": {},  | 
 | 70 | + "source": [  | 
 | 71 | + "# Train_Test_Split"  | 
 | 72 | + ]  | 
 | 73 | + },  | 
  {
   "cell_type": "raw",
   "metadata": {},
   "source": [
    "Here we split the data into a training part and a test part.\n",
    "X_train and y_train contain only the training features and labels. For example, train_size = 0.8\n",
    "puts 80 percent of the samples into training and the remaining 20 percent into the test set (X_test, y_test)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.model_selection import train_test_split\n",
    "# with no explicit train_size/test_size, 25% of the samples are held out for testing\n",
    "X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)"
   ]
  },
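  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optional sketch: confirm the default 75/25 split described above.\n",
    "print(X_train.shape, X_test.shape)"
   ]
  },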
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Standardization"
   ]
  },
  {
   "cell_type": "raw",
   "metadata": {},
   "source": [
    "Here we apply a scaling technique. The most common one is StandardScaler, which rescales each\n",
    "feature to zero mean and unit variance (if you need values squeezed into the [0, 1] range instead,\n",
    "use MinMaxScaler). Scaling is one of the most important preprocessing steps for SVMs."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.preprocessing import StandardScaler\n",
    "sc = StandardScaler()\n",
    "# fit the scaler on the training data only, then reuse it on the test data,\n",
    "# so no information from the test set leaks into preprocessing\n",
    "X_train = sc.fit_transform(X_train)\n",
    "X_test = sc.transform(X_test)"
   ]
  },
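  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optional sketch: after fitting, each training feature should have mean ~0 and std ~1.\n",
    "print(X_train.mean(axis=0).round(2))\n",
    "print(X_train.std(axis=0).round(2))"
   ]
  },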
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# SVM (Support Vector Machine)"
   ]
  },
  {
   "cell_type": "raw",
   "metadata": {},
   "source": [
    "We import the Support Vector Machine from scikit-learn; here we work with SVC (Support Vector Classifier).\n",
    "C is the most important parameter: also known as the penalty parameter of the error term, it controls\n",
    "regularization and thus how strict the decision boundary is. random_state seeds the random number\n",
    "generator so results are reproducible. kernel ('rbf', 'linear', 'poly', 'sigmoid') lets the model handle\n",
    "non-linear structure in the features, and degree applies only to the polynomial kernel."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "SVC(C=1, cache_size=200, class_weight=None, coef0=0.0,\n",
       "  decision_function_shape='ovr', degree=3, gamma='auto_deprecated',\n",
       "  kernel='rbf', max_iter=-1, probability=False, random_state=0,\n",
       "  shrinking=True, tol=0.001, verbose=False)"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from sklearn.svm import SVC\n",
    "clf = SVC(C=1, random_state=0)\n",
    "clf.fit(X_train, y_train)"
   ]
  },
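  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optional sketch of the kernel/degree parameters described above: a polynomial-kernel\n",
    "# SVC (degree only matters when kernel='poly'); the hyperparameter values are illustrative.\n",
    "poly_clf = SVC(C=1, kernel='poly', degree=3, random_state=0)\n",
    "poly_clf.fit(X_train, y_train)\n",
    "print(poly_clf.score(X_test, y_test))"
   ]
  },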
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Predicting"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "y_pred = clf.predict(X_test)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Misclassification"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# count how many test samples were misclassified\n",
    "(y_pred != y_test).sum()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Accuracy"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "1.0"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from sklearn.metrics import accuracy_score\n",
    "accuracy_score(y_test, y_pred)"
   ]
  },
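  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optional sketch: a fuller picture than a single accuracy number, using\n",
    "# scikit-learn's confusion matrix and per-class report.\n",
    "from sklearn.metrics import classification_report, confusion_matrix\n",
    "print(confusion_matrix(y_test, y_pred))\n",
    "print(classification_report(y_test, y_pred, target_names=wine.target_names))"
   ]
  },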
  {
   "cell_type": "raw",
   "metadata": {
    "collapsed": true
   },
   "source": [
    "Next I'm going to discuss the Support Vector Machine further:\n",
    "2. Support Vector Regression\n",
    "Thanks! If you are interested, follow me on GitHub.\n",
    "\n",
    "Regards"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}