19.1 분류 파이프라인

import pandas as pd
from sklearn.experimental import (
    enable_iterative_imputer,
)
from sklearn import (
    ensemble,
    impute,
    model_selection,    
    preprocessing,
    tree,
)
from sklearn.base import (
    BaseEstimator,
    TransformerMixin,
)
from sklearn.ensemble import (
    RandomForestClassifier,
)
from sklearn.pipeline import Pipeline
def tweak_titanic(df, drop_cols=(
    "name",
    "ticket",
    "home.dest",
    "boat",
    "body",
    "cabin",
)):
    """Prepare the raw titanic3 frame for modeling.

    Drops identifier-like and leaky columns, then one-hot encodes the
    remaining categorical columns, dropping the first level of each
    (``drop_first=True``) to avoid redundant dummy columns.

    Parameters
    ----------
    df : pd.DataFrame
        Raw frame as read from the titanic3 Excel file.
    drop_cols : sequence of str, optional
        Columns to remove before encoding. Defaults to the book's
        original hard-coded list, so existing callers are unaffected.

    Returns
    -------
    pd.DataFrame
        Fully numeric frame (the ``survived`` label is retained).
    """
    return df.drop(columns=list(drop_cols)).pipe(
        pd.get_dummies, drop_first=True
    )

class TitanicTransformer(
    BaseEstimator, TransformerMixin
):
    """Stateless transformer: raw titanic3 frame -> numeric features.

    Wraps :func:`tweak_titanic` so the cleanup step can run as the
    first stage of a scikit-learn ``Pipeline``.
    """

    def transform(self, X):
        """Clean *X* and return features only.

        Assumes *X* is the raw DataFrame read from the titanic3 Excel
        file (it must contain the columns ``tweak_titanic`` drops and
        a ``survived`` column, which is removed here so only features
        remain).
        """
        X = tweak_titanic(X)
        X = X.drop(columns="survived")
        return X

    def fit(self, X, y=None):
        """No-op fit; returns ``self``.

        ``y=None`` follows the scikit-learn transformer convention so
        ``fit(X)`` also works outside a supervised pipeline (the
        original required ``y``; passing it still works).
        """
        return self
# Full classification pipeline: clean the raw frame, impute missing
# values iteratively, standardize, then fit a random forest.
pipe = Pipeline(
    steps=[
        ("titan", TitanicTransformer()),
        ("impute", impute.IterativeImputer()),
        ("std", preprocessing.StandardScaler()),
        ("rf", RandomForestClassifier()),
    ]
)
from sklearn.model_selection import (
    train_test_split,
)

# titanic3 dataset hosted by Vanderbilt Biostatistics (the book's
# companion data source).
url = "https://biostat.app.vumc.org/wiki/pub/Main/DataSets/titanic3.xls"

df = pd.read_excel(url)
orig_df = df

# NOTE(review): the original transcript imported train_test_split a
# second time here; the duplicate import was removed.

# Hold out 30% for testing; random_state pins the split so the book's
# reported scores are reproducible.
X_train2, X_test2, y_train2, y_test2 = train_test_split(
    orig_df,
    orig_df.survived,
    test_size=0.3,
    random_state=42,
)

# Fit the whole pipeline on the raw frame (the "titan" step does the
# cleanup) and evaluate accuracy on the held-out split.
pipe.fit(X_train2, y_train2)
pipe.score(X_test2, y_test2)
# Book output: 0.7938931297709924
# Grid-search two hyperparameters of the random-forest step; the
# "rf__" prefix routes each parameter to the pipeline step named "rf".
params = {
  "rf__max_features": [0.4, "auto"],
  "rf__n_estimators": [15, 200],
}

# 3-fold cross-validated search over the whole pipeline.
grid = model_selection.GridSearchCV(
    pipe, cv=3, param_grid=params
)

grid.fit(orig_df, orig_df.survived)
# Book output below: the repr of the fitted GridSearchCV as printed by
# the REPL. It is truncated by the original transcript ("sample_poster..."
# and the bare `nan`) and is NOT executable code.
GridSearchCV(cv=3, error_score='raise-deprecating',
             estimator=Pipeline(memory=None,
                                steps=[('titan', TitanicTransformer()),
                                       ('impute',
                                        IterativeImputer(add_indicator=False,
                                                         estimator=None,
                                                         imputation_order='ascending',
                                                         initial_strategy='mean',
                                                         max_iter=10,
                                                         max_value=None,
                                                         min_value=None,
                                                         missing_values=nan,
                                                         n_nearest_features=None,
                                                         random_state=None,
                                                         sample_poster...
                                                               min_samples_leaf=1,
                                                               min_samples_split=2,
                                                               min_weight_fraction_leaf=0.0,
                                                               n_estimators=10,
                                                               n_jobs=None,
                                                               oob_score=False,
                                                               random_state=None,
                                                               verbose=0,
                                                               warm_start=False))],
                                verbose=False),
             iid='warn', n_jobs=None,
             param_grid={'rf__max_features': [0.4, 'auto'],
                         'rf__n_estimators': [15, 200]},
             pre_dispatch='2*n_jobs', refit=True, return_train_score=False,
             scoring=None, verbose=0)
grid.best_params_
# Book output: the best parameter combination found by the search.
{'rf__max_features': 'auto', 'rf__n_estimators': 200}
# NOTE(review): pipe is refit here with its DEFAULT hyperparameters,
# not grid.best_params_ — presumably why the score below differs from
# the earlier 0.79; confirm against the book text.
pipe.fit(X_train2, y_train2)
pipe.score(X_test2, y_test2)
# Book output:
0.7608142493638677
from sklearn import metrics

# AUC computed from hard class predictions (predict), not
# probabilities (predict_proba) — this understates the usual ROC AUC.
metrics.roc_auc_score(
  y_test2, pipe.predict(X_test2)
)
# Book output:
0.7429601648351648

19.2 회귀 파이프라인

from sklearn.pipeline import Pipeline
from sklearn.linear_model import LinearRegression

# Regression pipeline: standardize features, then ordinary least
# squares. Scaling changes the coefficients' units but not the fit
# quality of plain linear regression.
reg_pipe = Pipeline(
  [
    (
      "std",
      preprocessing.StandardScaler(),
    ),
    ("lr", LinearRegression()),
  ]
)

# NOTE(review): bos_X_train / bos_y_train etc. are not defined in this
# excerpt — presumably train/test splits of the Boston housing data
# created earlier in the book; confirm against the surrounding chapter.
reg_pipe.fit(bos_X_train, bos_y_train)
reg_pipe.score(bos_X_test, bos_y_test)
# Book output (R^2 on the test split):
0.7112260057484932
# Access a fitted step by name to inspect the learned model.
reg_pipe.named_steps["lr"].intercept_
# Book output:
23.01581920903956
reg_pipe.named_steps["lr"].coef_
# Book output (coefficients on the standardized features):
array([-1.10834602,  0.80843998,  0.34313466,  0.81386426, -1.79804295,
        2.913858  , -0.29893918, -2.94251148,  2.09419303, -1.44706731,
       -2.05232232,  1.02375187, -3.88579002])
metrics.mean_squared_error(
  bos_y_test, reg_pipe.predict(bos_X_test)
)
# Book output (test-set MSE):
21.517444231177215

19.3 PCA 파이프라인

from sklearn.decomposition import PCA


# Unsupervised pipeline: standardize, then PCA. Scaling first matters —
# PCA directions are otherwise dominated by large-variance features.
pca_pipe = Pipeline(
  [
    (
      "std",
      preprocessing.StandardScaler(),
    ),
    ("pca", PCA()),
  ]
)

# NOTE(review): X is not defined in this excerpt — presumably the
# Titanic feature matrix built earlier in the book; confirm.
X_pca = pca_pipe.fit_transform(X)
# Inspect the fitted PCA step by name.
pca_pipe.named_steps["pca"].explained_variance_ratio_
# Book output (variance explained per component):
array([0.23922833, 0.21616853, 0.1923158 , 0.10464906, 0.08154797,
       0.0727221 , 0.05130716, 0.04206107])
pca_pipe.named_steps["pca"].components_[0]
# Book output (loadings of the first principal component):
array([-0.63274156,  0.39602149,  0.00653646,  0.11500362,  0.5815031 ,
       -0.19764926, -0.20422289, -0.10304598])