I built a modelPipeline function which runs multiple classifiers and returns the fitted pipelines and the scores of each classifier as a DataFrame.

How can I use GridSearchCV in the modelPipeline below? Is it possible to use GridSearchCV with multiple classifiers in a Pipeline?
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import BernoulliNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.pipeline import Pipeline
from xgboost import XGBClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import sklearn.metrics as skm
import pandas as pd
rs = {'random_state': 42}
# Train-test Split
X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                    test_size=0.33,
                                                    random_state=42)
# Classification - Model Pipeline
def modelPipeline(X_train, X_test, y_train, y_test):
    log_reg = LogisticRegression(**rs)
    nb = BernoulliNB()
    knn = KNeighborsClassifier()
    svm = SVC(**rs)
    mlp = MLPClassifier(max_iter=500, **rs)
    dt = DecisionTreeClassifier(**rs)
    et = ExtraTreesClassifier(**rs)
    rf = RandomForestClassifier(**rs)
    xgb = XGBClassifier(**rs, verbosity=0)

    clfs = [
        ('Logistic Regression', log_reg),
        ('Naive Bayes', nb),
        ('K-Nearest Neighbors', knn),
        ('SVM', svm),
        ('MLP', mlp),
        ('Decision Tree', dt),
        ('Extra Trees', et),
        ('Random Forest', rf),
        ('XGBoost', xgb)
    ]

    pipelines = []
    score_rows = []  # collect rows in a list; DataFrame.append was removed in pandas 2.0

    for clf_name, clf in clfs:
        pipeline = Pipeline(steps=[
            ('scaler', StandardScaler()),
            ('classifier', clf)
        ])
        pipeline.fit(X_train, y_train)
        y_pred = pipeline.predict(X_test)

        pipelines.append(pipeline)
        score_rows.append({
            'Model': clf_name,
            'F1_Score': skm.f1_score(y_test, y_pred),
            'Precision': skm.precision_score(y_test, y_pred),
            'Recall': skm.recall_score(y_test, y_pred),
            'Accuracy': skm.accuracy_score(y_test, y_pred),
            'ROC_AUC': skm.roc_auc_score(y_test, y_pred)
        })

    scores_df = pd.DataFrame(score_rows)
    return pipelines, scores_df
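The function is then called with the split data, e.g.:

pipelines, scores_df = modelPipeline(X_train, X_test, y_train, y_test)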
Answer:
GridSearchCV can be given a list of classifiers to choose from for the final step in a pipeline. It won't do exactly what you have in your code though: most notably, the fitted models do not get saved by GridSearchCV, just the scores (and the finally chosen refit-on-all-data model, if refit != False).
from sklearn.dummy import DummyClassifier
from sklearn.model_selection import GridSearchCV

pipe = Pipeline(steps=[
    ('scaler', StandardScaler()),
    ('classifier', DummyClassifier()),  # placeholder; the grid overrides this step
])

params = {
    'classifier': [log_reg, nb, knn, svm, mlp, dt, et, rf, xgb],
}

scoring = ['f1', 'precision', 'recall', 'accuracy', 'roc_auc']
search = GridSearchCV(pipe, params, scoring=scoring, refit=False)
(Multiple metrics require setting refit to either False, one of the scoring entries, or a custom callable.)
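To get something comparable to your scores_df, fit the search and pull the per-metric means out of cv_results_. A minimal sketch, assuming the scoring list above (the column names follow scikit-learn's mean_test_<metric> convention):

search.fit(X_train, y_train)

# cv_results_ holds one row per candidate classifier
results = pd.DataFrame(search.cv_results_)
scores_df = results[['param_classifier', 'mean_test_f1',
                     'mean_test_precision', 'mean_test_recall',
                     'mean_test_accuracy', 'mean_test_roc_auc']]

Note that these are cross-validated scores on the training data, not scores on your held-out test set.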
Answer:
From your comments on my other answer, perhaps you just want to tune each model separately? (In that case you could have simplified the example to a single classifier, since each model is tuned independently.)
So, e.g.
log_reg_params = {'C': [0.1, 1, 10]}
...
xgb_params = {
    'learning_rate': [0.05, 0.1, 0.2],
    'max_depth': [1, 2, 3, 5, 8],
    'reg_lambda': [0, 1, 10],
}

clfs = [
    ('Logistic Regression', log_reg, log_reg_params),
    ('Naive Bayes', nb, nb_params),
    ...
    ('XGBoost', xgb, xgb_params),
]
for clf_name, clf, param_grid in clfs:
    pipeline = Pipeline(steps=[
        ('scaler', StandardScaler()),
        ('classifier', clf),
    ])
    # prefix each hyperparameter name with the pipeline step it belongs to
    search = GridSearchCV(pipeline, {f'classifier__{name}': values
                                     for name, values in param_grid.items()})
    search.fit(X_train, y_train)
    ...
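The elided loop body could then evaluate each tuned model on the held-out test set and collect rows, much like the original modelPipeline. A sketch under that assumption (with the default refit=True, search.best_estimator_ is the best pipeline refit on all of X_train; best_pipelines and score_rows would be initialized before the loop):

    best_pipe = search.best_estimator_
    y_pred = best_pipe.predict(X_test)
    best_pipelines.append(best_pipe)
    score_rows.append({
        'Model': clf_name,
        'Best_Params': search.best_params_,
        'F1_Score': skm.f1_score(y_test, y_pred),
        'Accuracy': skm.accuracy_score(y_test, y_pred),
    })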