MLflow autolog

MLflow with autologging and custom metrics.

from sklearn.metrics import precision_recall_fscore_support
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
from sklearn.datasets import make_classification
from sklearn.metrics import accuracy_score
import mlflow

# Point MLflow at the locally running tracking server.
mlflow.set_tracking_uri("http://127.0.0.1:5000")
mlflow.set_experiment("experiment-001")

# ------------------------------------------------ #

# Synthetic binary classification dataset.
X, y = make_classification(
    n_samples=250, n_features=10, n_informative=5,
    n_redundant=3, random_state=42, shuffle=True
)
print(X.shape, y.shape)

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.20, random_state=42
)
print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)

# ------------------------------------------------ #

%%time
with mlflow.start_run():
    # exclusive=False lets autologged content land in this user-created run.
    mlflow.sklearn.autolog(exclusive=False)

    n_estimators = 50
    max_depth = 5
    mlflow.log_param("max_depth", max_depth)
    mlflow.log_param("n_estimators", n_estimators)

    model = RandomForestClassifier(
        random_state=42, max_depth=max_depth, n_estimators=n_estimators
    )
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    # y_proba = model.predict_proba(X_test)

    # Store the raw test labels and predictions as a JSON artifact.
    mlflow.log_dict(
        {
            "y_test": [int(x) for x in y_test],
            "y_pred": [int(x) for x in y_pred]
        },
        "ytest-ypred.json"
    )

    # Custom test-set metrics, logged on top of what autolog records.
    test_acc = accuracy_score(y_test, y_pred)
    test_precision, test_recall, test_f1, _ = precision_recall_fscore_support(
        y_test, y_pred, average="binary"
    )
    mlflow.log_metric("test_accuracy", test_acc)
    mlflow.log_metric("test_precision", test_precision)
    mlflow.log_metric("test_recall", test_recall)
    mlflow.log_metric("test_f1_score", test_f1)

    print("test_accuracy:", test_acc)
    print("test_precision:", test_precision)
    print("test_recall:", test_recall)
    print("test_f1_score:", test_f1)

mlflow.sklearn.autolog(disable=True)
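Once the run has finished, the logged values can be read back from the tracking server. A minimal sketch of that check, assuming the same tracking URI and experiment name as above (this read-back step is not part of the original snippet):

import mlflow

mlflow.set_tracking_uri("http://127.0.0.1:5000")

# Fetch the most recent run of the experiment and inspect the
# manually logged test metrics (search_runs returns a pandas DataFrame).
experiment = mlflow.get_experiment_by_name("experiment-001")
runs = mlflow.search_runs(
    experiment_ids=[experiment.experiment_id],
    order_by=["start_time DESC"],
    max_results=1,
)
print(runs[["run_id", "metrics.test_accuracy", "metrics.test_f1_score"]])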

October 31, 2022 · 1 min

Model training duration

Model training evaluation using a Matplotlib/seaborn scatter plot, with point colors set by a condition and a custom color palette.

import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 5))

# Simulated user-defined (a) vs. actual (b) runtimes in seconds.
a = pd.Series(np.random.randint(60, 180, 25))
b = pd.Series(np.random.randint(55, 160, 25))
x_min = min(min(a), min(b))
y_max = max(max(a), max(b))

# Left panel: actual vs. user-defined runtime, with an identity line.
sns.scatterplot(x=a, y=b, ax=ax1)
ax1.plot([x_min, y_max], [x_min, y_max], ":", color="grey")
ax1.set_title("Model training runtime (Experiment #2)", size=16)
ax1.set_xlabel("User-defined runtime (sec.)", size=14)
ax1.set_ylabel("Actual runtime (sec.)", size=14)

# Right panel: runtime difference, colored by whether the run came in
# at or below the user-defined time (cond == 1, green).
data = pd.DataFrame({"a": a, "diff": (b - a), "cond": ((b - a) <= 0) * 1})
sns.scatterplot(
    x="a", y="diff", data=data, ax=ax2, hue="cond",
    palette={0: "tab:orange", 1: "tab:green"}, legend=False
)
ax2.axhline(y=0, linestyle=":", color="grey")
ax2.set_title("Runtime difference in seconds (lower is better)", size=16)
ax2.set_ylabel("Runtime difference (sec.)", size=14)
ax2.set_xlabel("User-defined runtime (sec.)", size=14)
plt.show()

Output: [two-panel figure: "Model training runtime (Experiment #2)" and "Runtime difference in seconds (lower is better)"]
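The same data frame can also be summarized numerically as a cross-check on the plot. A small sketch over the simulated data above (not part of the original post):

# Count the runs that finished at or below the user-defined runtime,
# and report the average gap between actual and user-defined time.
faster = int(data["cond"].sum())
print(f"{faster} of {len(data)} runs at or below the user-defined runtime")
print("mean runtime difference (sec.):", round(data["diff"].mean(), 2))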

September 22, 2022 · 1 min