|
| 1 | +import pandas as pd |
| 2 | +import os |
| 3 | +from sklearn import tree, ensemble, model_selection, preprocessing, decomposition, manifold, feature_selection, svm |
| 4 | +import seaborn as sns |
| 5 | +import numpy as np |
| 6 | + |
| 7 | +import sys |
| 8 | +sys.path.append("E:/New Folder/utils") |
| 9 | + |
| 10 | +import classification_utils as cutils |
| 11 | +import common_utils as utils |
| 12 | + |
# Kaggle "Don't Overfit II" data directory.
# NOTE(review): `dir` shadows the builtin of the same name; later sections
# of this script read this module-level name, so it is kept as-is.
dir = 'C:/Users/Algorithmica/Downloads/dont-overfit-ii'
train = pd.read_csv(os.path.join(dir, 'train.csv'))
print(train.info())
print(train.columns)

#filter unique value features
# Drop the first two columns ('id' and 'target'); the rest are the features.
train1 = train.iloc[:,2:]
y = train['target'].astype(int)

#filter zero-variance features
variance = feature_selection.VarianceThreshold()
# NOTE(review): `train2` is fitted here but the grid searches below run on
# the unfiltered `train1` — confirm which matrix is intended. The fitted
# `variance` transformer is reused on the test set at the bottom.
train2 = variance.fit_transform(train1)
| 25 | + |
# Embedded feature selection via a tuned random forest: fit the best
# forest found by grid search, then keep only the features whose
# importance exceeds the mean importance.
rf_grid = {
    'max_depth': [1, 2, 3, 4, 5, 6, 7, 8],
    'n_estimators': [1, 101, 201],
}
rf_estimator = ensemble.RandomForestClassifier()
rf_final_estimator = cutils.grid_search_best_model(rf_estimator, rf_grid, train1, y)
embedded_selector = feature_selection.SelectFromModel(
    rf_final_estimator, prefit=True, threshold='mean')
train3 = embedded_selector.transform(train1)
# Visualize the 50 most important features of the tuned forest.
utils.plot_feature_importances(rf_final_estimator, train1, cutoff=50)
| 32 | + |
# Same embedded-selection recipe, but with extremely randomized trees.
et_grid = {
    'max_depth': [1, 2, 3, 4, 5, 6, 7, 8],
    'n_estimators': [1, 101, 201],
}
et_estimator = ensemble.ExtraTreesClassifier()
et_final_estimator = cutils.grid_search_best_model(et_estimator, et_grid, train1, y)
embedded_selector = feature_selection.SelectFromModel(
    et_final_estimator, prefit=True, threshold='mean')
train3 = embedded_selector.transform(train1)
# Visualize the 50 most important features of the tuned extra-trees model.
utils.plot_feature_importances(et_final_estimator, train1, cutoff=50)
| 39 | + |
# Same embedded-selection recipe with gradient boosting; the grid also
# tunes the learning rate.
gb_grid = {
    'max_depth': [1, 2, 3],
    'n_estimators': [50, 150, 250],
    'learning_rate': [0.001, 0.1, 1.0],
}
gb_estimator = ensemble.GradientBoostingClassifier()
gb_final_estimator = cutils.grid_search_best_model(gb_estimator, gb_grid, train1, y)
embedded_selector = feature_selection.SelectFromModel(
    gb_final_estimator, prefit=True, threshold='mean')
X_train1 = embedded_selector.transform(train1)
# Plot all feature importances (no cutoff) for the boosted model.
utils.plot_feature_importances(gb_final_estimator, train1)
| 46 | + |
# Kernel-SVM baseline over the full feature set, tuned over gamma and C.
kernel_svm_estimator = svm.SVC(kernel='rbf')
kernel_svm_grid = {'gamma':[0.01, 0.1, 1, 2, 5, 10], 'C':[0.001, 0.01, 0.1, 0.5] }
final_estimator = cutils.grid_search_best_model(kernel_svm_estimator, kernel_svm_grid, train1, y)
# BUG FIX: the original then ran
#   feature_selection.SelectFromModel(final_estimator, prefit=True, threshold='mean')
#   utils.plot_feature_importances(final_estimator, train1)
# SelectFromModel requires the fitted estimator to expose `coef_` or
# `feature_importances_`; an RBF-kernel SVC exposes neither (coef_ exists
# only for linear kernels), so both calls raised ValueError. Embedded
# selection is not applicable to a non-linear SVM, so those lines were
# removed; use the tree-based selectors above for feature selection.
| 53 | + |
# BUG FIX: the original split `train_pca`, a name never defined anywhere in
# this script (NameError at runtime). Use the variance-filtered feature
# matrix `train2` built above instead.
X_train, X_eval, y_train, y_eval = model_selection.train_test_split(
    train2, y, test_size=0.1, random_state=1)

# Class balance of the target (competition data is imbalanced-ish).
sns.countplot(x='target', data=train)

# Tune an RBF-kernel SVM on the training split only.
kernel_svm_estimator = svm.SVC(kernel='rbf')
kernel_svm_grid = {'gamma':[0.01, 0.1, 1, 2, 5, 10], 'C':[0.001, 0.01, 0.1, 0.5] }
final_estimator = cutils.grid_search_best_model(
    kernel_svm_estimator, kernel_svm_grid, X_train, y_train)

# Held-out accuracy of the tuned model.
print(final_estimator.score(X_eval, y_eval))
| 63 | + |
# Score the test set and write the submission file.
test = pd.read_csv(os.path.join(dir, 'test.csv'))
print(test.info())
print(test.columns)

# Drop the non-feature 'id' column (test has no 'target' column).
test1 = test.iloc[:,1:]
# Apply the SAME variance filter fitted on the training data — never refit
# a transformer on test data.
test2 = variance.transform(test1)
# BUG FIX: the original called `lpca.transform(test2)`, but `lpca` was never
# defined anywhere in this script (NameError), and no PCA was ever fitted on
# the training side. Predict directly on the variance-filtered features,
# matching the matrix the final model was trained on.
test['target'] = final_estimator.predict(test2)
test.to_csv(os.path.join(dir, 'submission.csv'), columns=['id', 'target'], index=False)
0 commit comments