from sklearn.tree import DecisionTreeClassifier

dt = DecisionTreeClassifier(
  random_state=42, max_depth=3
)
dt.fit(X_train, y_train)
DecisionTreeClassifier(class_weight=None, criterion='gini', max_depth=3,
                       max_features=None, max_leaf_nodes=None,
                       min_impurity_decrease=0.0, min_impurity_split=None,
                       min_samples_leaf=1, min_samples_split=2,
                       min_weight_fraction_leaf=0.0, presort=False,
                       random_state=42, splitter='best')
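With max_depth=3 the fitted tree is small enough to read directly. A minimal sketch using scikit-learn's export_text (available from version 0.21), assuming X holds the training feature columns:

from sklearn.tree import export_text

# print the fitted tree's split rules as plain text
print(
  export_text(dt, feature_names=list(X.columns))
)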

13.3 LIME

from lime import lime_tabular
explainer = lime_tabular.LimeTabularExplainer(
  X_train.values,
  feature_names=X.columns,
  class_names=["died", "survived"],
)

exp = explainer.explain_instance(
  X_train.iloc[-1].values, dt.predict_proba
)
fig = exp.as_pyplot_figure()
fig.tight_layout()
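The explanation object can also be inspected or exported directly; a minimal sketch, assuming the exp object above (the HTML filename is arbitrary):

# list the (feature, weight) pairs behind the figure
print(exp.as_list())

# save the interactive version as standalone HTML
exp.save_to_file("lime_explanation.html")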
# copy an instance so we can perturb a single feature
data = X_train.iloc[-2].values.copy()
dt.predict_proba(
  [data]
)
array([[0.48062016, 0.51937984]])
data[5] = 1  # flip sex_male (column 5)
dt.predict_proba([data])
array([[0.87954545, 0.12045455]])

13.4 Interpreting Tree-Based Models

from sklearn import ensemble

rf5 = ensemble.RandomForestClassifier(
    **{
        "max_features": "auto",
        "min_samples_leaf": 0.1,
        "n_estimators": 200,
        "random_state": 42,
    }
)
rf5.fit(X_train, y_train)
RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini',
                       max_depth=None, max_features='auto', max_leaf_nodes=None,
                       min_impurity_decrease=0.0, min_impurity_split=None,
                       min_samples_leaf=0.1, min_samples_split=2,
                       min_weight_fraction_leaf=0.0, n_estimators=200,
                       n_jobs=None, oob_score=False, random_state=42, verbose=0,
                       warm_start=False)
from treeinterpreter import (
  treeinterpreter as ti,
)

instances = X.iloc[:2]
prediction, bias, contribs = ti.predict(
  rf5, instances
)

i = 0

print("Instance", i)
print("Prediction", prediction[i])
print("Bias (trainset mean)", bias[i])
print("Feature contributions:")

for c, feature in zip(
  contribs[i], instances.columns
):
  print(" {} {}".format(feature, c))
Instance 0
Prediction [0.82046191 0.17953809]
Bias (trainset mean) [0.63887555 0.36112445]
Feature contributions:
 pclass [ 0.02865085 -0.02865085]
 age [ 0.01115629 -0.01115629]
 sibsp [ 0.00528926 -0.00528926]
 parch [ 0.00656872 -0.00656872]
 fare [ 0.04126856 -0.04126856]
 sex_male [ 0.07660626 -0.07660626]
 embarked_Q [0. 0.]
 embarked_S [ 0.01204643 -0.01204643]
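The decomposition is exact: the bias plus the per-feature contributions recovers the prediction. A minimal sanity check with numpy:

import numpy as np

# bias + summed feature contributions reconstructs the prediction
assert np.allclose(
  prediction, bias + contribs.sum(axis=1)
)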

13.5 Partial Dependence Plots

rf5 = ensemble.RandomForestClassifier(
  **{
    "max_features": "auto",
    "min_samples_leaf": 0.1,
    "n_estimators": 200,
    "random_state": 42,
  }
)
rf5.fit(X_train, y_train)
RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini',
                       max_depth=None, max_features='auto', max_leaf_nodes=None,
                       min_impurity_decrease=0.0, min_impurity_split=None,
                       min_samples_leaf=0.1, min_samples_split=2,
                       min_weight_fraction_leaf=0.0, n_estimators=200,
                       n_jobs=None, oob_score=False, random_state=42, verbose=0,
                       warm_start=False)
from pdpbox import pdp

feat_name = "age"
p = pdp.pdp_isolate(
  rf5, X, X.columns, feat_name
)

fig, _ = pdp.pdp_plot(
  p, feat_name, plot_lines=True, figsize=(30, 15)
)
features = ["fare", "sex_male"]

p = pdp.pdp_interact(
  rf5, X, X.columns, features
)

fig, _ = pdp.pdp_interact_plot(p, features)
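If pdpbox is unavailable, recent scikit-learn releases include an equivalent display; a minimal sketch, assuming scikit-learn >= 1.0 and the same rf5 and feature names as above:

from sklearn.inspection import (
  PartialDependenceDisplay,
)

# one-way PDP for age plus a two-way fare/sex_male interaction
PartialDependenceDisplay.from_estimator(
  rf5, X, ["age", ("fare", "sex_male")]
)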

13.6 Surrogate Models

from sklearn import svm, tree

sv = svm.SVC()
sv.fit(X_train, y_train)

sur_dt = tree.DecisionTreeClassifier()
sur_dt.fit(X_test, sv.predict(X_test))

for col, val in sorted(
  zip(X_test.columns, sur_dt.feature_importances_),
  key=lambda x: x[1],
  reverse=True,
)[:7]:
  print(f"{col:10}{val:10.3f}")
sex_male       0.723
pclass         0.070
age            0.062
sibsp          0.061
embarked_S     0.056
fare           0.022
parch          0.005
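A surrogate is only useful to the extent that it reproduces the black-box model's predictions. A quick fidelity check, scoring the tree against the SVC's labels rather than the true labels:

from sklearn import metrics

# agreement between surrogate and SVC on the data the
# surrogate was fit to (should be close to 1.0)
metrics.accuracy_score(
  sv.predict(X_test), sur_dt.predict(X_test)
)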

13.7 Shapley

rf5.predict_proba(X_test.iloc[[20]])
array([[0.60129761, 0.39870239]])
import shap

shap.initjs()

s = shap.TreeExplainer(rf5)

shap_vals = s.shap_values(X_test)
target_idx = 1

shap.force_plot(
  s.expected_value[target_idx],
  shap_vals[target_idx][20, :],
  feature_names=X_test.columns,
)
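SHAP values are additive, so the base value plus this instance's SHAP values should recover the probability predicted above; a minimal check, assuming the list-of-arrays output shown here:

import numpy as np

# expected_value + summed SHAP values = predict_proba output
np.isclose(
  s.expected_value[target_idx]
  + shap_vals[target_idx][20, :].sum(),
  rf5.predict_proba(X_test.iloc[[20]])[0, target_idx],
)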
shap.initjs()

shap.force_plot(
  s.expected_value[1],
  shap_vals[1],
  feature_names=X_test.columns,
)
import matplotlib.pyplot as plt

fig, ax = plt.subplots(figsize=(10, 8))
shap.summary_plot(shap_vals[0], X_test)
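Beyond single instances, a dependence plot shows how a feature's SHAP values vary with the feature itself across the test set; a minimal sketch for age on the survival class:

# SHAP values for age, colored by an automatically
# selected interacting feature
shap.dependence_plot(
  "age", shap_vals[1], X_test
)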