RamAnanth1 committed
Commit 9f47792 · 1 Parent(s): 8ac91ff

Update app.py

Files changed (1):
  1. app.py +50 -109

app.py CHANGED
@@ -6,8 +6,7 @@ from sklearn.model_selection import train_test_split
 import matplotlib.cm as cm
 from sklearn.utils import shuffle
 from sklearn.utils import check_random_state
-from sklearn.cluster import MiniBatchKMeans
-from sklearn.cluster import KMeans
+from sklearn.linear_model import BayesianRidge
 
 theme = gr.themes.Monochrome(
     primary_hue="indigo",
@@ -17,126 +16,68 @@ theme = gr.themes.Monochrome(
 
 description = f"""
 ## Description
-This demo can be used to evaluate the ability of k-means initialization strategies to make the algorithm's convergence robust, as measured by the relative standard deviation of the inertia of the clustering (i.e. the sum of squared distances to the nearest cluster center).
-The dataset used for evaluation is a 2D grid of widely spaced, isotropic Gaussian clusters.
-The Inertia plot shows the best inertia reached for each combination of the model (KMeans or MiniBatchKMeans) and either random or k-means++ initialization.
-The Cluster Allocation plot demonstrates a single run of the MiniBatchKMeans estimator using random initialization.
-The demo is based on the [scikit-learn docs](https://scikit-learn.org/stable/auto_examples/cluster/plot_kmeans_stability_low_dim_dense.html#sphx-glr-auto-examples-cluster-plot-kmeans-stability-low-dim-dense-py)
+This demo computes a Bayesian Ridge Regression of sinusoids.
+The demo is based on the [scikit-learn docs](https://scikit-learn.org/stable/auto_examples/linear_model/plot_bayesian_ridge_curvefit.html#sphx-glr-auto-examples-linear-model-plot-bayesian-ridge-curvefit-py)
 """
 
-# k-means models can do several random inits so as to be able to trade
-# CPU time for convergence robustness
-n_init_range = np.array([1, 5, 10, 15, 20])
-
-# Dataset generation parameters
-scale = 0.1
-
-def make_data(random_state, n_samples_per_center, grid_size, scale):
-    random_state = check_random_state(random_state)
-    centers = np.array([[i, j] for i in range(grid_size) for j in range(grid_size)])
-    n_clusters_true, n_features = centers.shape
-
-    noise = random_state.normal(
-        scale=scale, size=(n_samples_per_center, centers.shape[1])
-    )
-
-    X = np.concatenate([c + noise for c in centers])
-    y = np.concatenate([[i] * n_samples_per_center for i in range(n_clusters_true)])
-    return shuffle(X, y, random_state=random_state)
-
-def quant_evaluation(n_runs, n_samples_per_center, grid_size):
-    n_clusters = grid_size**2
-
-    plt.figure()
-    plots = []
-    legends = []
-
-    cases = [
-        (KMeans, "k-means++", {}, "^-"),
-        (KMeans, "random", {}, "o-"),
-        (MiniBatchKMeans, "k-means++", {"max_no_improvement": 3}, "x-"),
-        (MiniBatchKMeans, "random", {"max_no_improvement": 3, "init_size": 500}, "d-"),
-    ]
-
-    for factory, init, params, format in cases:
-        print("Evaluation of %s with %s init" % (factory.__name__, init))
-        inertia = np.empty((len(n_init_range), n_runs))
-
-        for run_id in range(n_runs):
-            X, y = make_data(run_id, n_samples_per_center, grid_size, scale)
-            for i, n_init in enumerate(n_init_range):
-                km = factory(
-                    n_clusters=n_clusters,
-                    init=init,
-                    random_state=run_id,
-                    n_init=n_init,
-                    **params,
-                ).fit(X)
-                inertia[i, run_id] = km.inertia_
-        p = plt.errorbar(
-            n_init_range, inertia.mean(axis=1), inertia.std(axis=1), fmt=format
-        )
-        plots.append(p[0])
-        legends.append("%s with %s init" % (factory.__name__, init))
-
-    plt.xlabel("n_init")
-    plt.ylabel("inertia")
-    plt.legend(plots, legends)
-    plt.title("Mean inertia for various k-means init across %d runs" % n_runs)
-    return plt
-
-def qual_evaluation(random_state, n_samples_per_center, grid_size):
-    n_clusters = grid_size**2
-    X, y = make_data(random_state, n_samples_per_center, grid_size, scale)
-    km = MiniBatchKMeans(
-        n_clusters=n_clusters, init="random", n_init=1, random_state=random_state
-    ).fit(X)
-
-    plt.figure()
-    for k in range(n_clusters):
-        my_members = km.labels_ == k
-        color = cm.nipy_spectral(float(k) / n_clusters, 1)
-        plt.plot(X[my_members, 0], X[my_members, 1], ".", c=color)
-        cluster_center = km.cluster_centers_[k]
-        plt.plot(
-            cluster_center[0],
-            cluster_center[1],
-            "o",
-            markerfacecolor=color,
-            markeredgecolor="k",
-            markersize=6,
-        )
-    plt.title(
-        "Example cluster allocation with a single random init\nwith MiniBatchKMeans"
-    )
-    return plt
+def func(x):
+    return np.sin(2 * np.pi * x)
+
+size = 25
+rng = np.random.RandomState(1234)
+x_train = rng.uniform(0.0, 1.0, size)
+y_train = func(x_train) + rng.normal(scale=0.1, size=size)
+x_test = np.linspace(0.0, 1.0, 100)
+
+n_order = 3
+X_train = np.vander(x_train, n_order + 1, increasing=True)
+X_test = np.vander(x_test, n_order + 1, increasing=True)
+reg = BayesianRidge(tol=1e-6, fit_intercept=False, compute_score=True)
+
+def curve_fit():
+    fig, axes = plt.subplots(1, 2, figsize=(8, 4))
+    for i, ax in enumerate(axes):
+        # Bayesian ridge regression with different initial value pairs
+        if i == 0:
+            init = [1 / np.var(y_train), 1.0]  # Default values
+        elif i == 1:
+            init = [1.0, 1e-3]
+        reg.set_params(alpha_init=init[0], lambda_init=init[1])
+        reg.fit(X_train, y_train)
+        ymean, ystd = reg.predict(X_test, return_std=True)
+
+        ax.plot(x_test, func(x_test), color="blue", label="sin($2\\pi x$)")
+        ax.scatter(x_train, y_train, s=50, alpha=0.5, label="observation")
+        ax.plot(x_test, ymean, color="red", label="predict mean")
+        ax.fill_between(
+            x_test, ymean - ystd, ymean + ystd, color="pink", alpha=0.5, label="predict std"
+        )
+        ax.set_ylim(-1.3, 1.3)
+        ax.legend()
+        title = "$\\alpha$_init$={:.2f},\\ \\lambda$_init$={}$".format(init[0], init[1])
+        if i == 0:
+            title += " (Default)"
+        ax.set_title(title, fontsize=12)
+        text = "$\\alpha={:.1f}$\n$\\lambda={:.3f}$\n$L={:.1f}$".format(
+            reg.alpha_, reg.lambda_, reg.scores_[-1]
+        )
+        ax.text(0.05, -1.0, text, fontsize=12)
+    return fig
 
 with gr.Blocks(theme=theme) as demo:
     gr.Markdown('''
-    <h1 style='text-align: center'>Empirical evaluation of the impact of k-means initialization 📊</h1>
+    <h1 style='text-align: center'>Curve Fitting with Bayesian Ridge Regression 📈</h1>
     ''')
     gr.Markdown(description)
 
-    gr.Markdown('''
-    ### Dataset Generation Parameters
-    ''')
-    with gr.Row():
-        n_runs = gr.Slider(minimum=1, maximum=10, step=1, value=5, label="Number of Evaluation Runs")
-        random_state = gr.Slider(minimum=0, maximum=2000, step=5, value=0, label="Random state")
-
-    with gr.Row():
-        n_samples_per_center = gr.Slider(minimum=50, maximum=200, step=10, value=100, label="Number of Samples per Center")
-        grid_size = gr.Slider(minimum=1, maximum=8, step=1, value=3, label="Grid Size")
-
     with gr.Row():
-        run_button = gr.Button('Evaluate Inertia')
-        run_button_qual = gr.Button('Generate Cluster Allocations')
+        run_button = gr.Button('Fit the Curve')
     with gr.Row():
-        plot_inertia = gr.Plot()
-        plot_vis = gr.Plot()
-    run_button.click(fn=quant_evaluation, inputs=[n_runs, n_samples_per_center, grid_size], outputs=plot_inertia)
-    run_button_qual.click(fn=qual_evaluation, inputs=[random_state, n_samples_per_center, grid_size], outputs=plot_vis)
+        plot_result = gr.Plot()
+    run_button.click(fn=curve_fit, inputs=[], outputs=[plot_result])
 
 demo.launch()
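
For readers who want to try the new example outside the Gradio UI, here is a minimal standalone sketch of the computation the updated app.py performs. It is assembled from the added lines above (func, the Vandermonde features, and the BayesianRidge parameters all come from the diff); the print line and the single hyperparameter pair are illustrative choices of ours, the two-panel plot styling is omitted, and alpha_init/lambda_init assume scikit-learn >= 0.22.

```python
import numpy as np
from sklearn.linear_model import BayesianRidge

def func(x):
    return np.sin(2 * np.pi * x)

# Noisy sinusoid training data, as in the diff
rng = np.random.RandomState(1234)
x_train = rng.uniform(0.0, 1.0, 25)
y_train = func(x_train) + rng.normal(scale=0.1, size=25)
x_test = np.linspace(0.0, 1.0, 100)

# Cubic polynomial features via a Vandermonde matrix:
# column j holds x**j for j = 0..n_order
n_order = 3
X_train = np.vander(x_train, n_order + 1, increasing=True)
X_test = np.vander(x_test, n_order + 1, increasing=True)

# Fit with the non-default initial hyperparameters from the second panel
reg = BayesianRidge(tol=1e-6, fit_intercept=False, compute_score=True)
reg.set_params(alpha_init=1.0, lambda_init=1e-3)
reg.fit(X_train, y_train)

# Posterior predictive mean and standard deviation
ymean, ystd = reg.predict(X_test, return_std=True)
print(reg.alpha_, reg.lambda_, reg.scores_[-1])
```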
 
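A note on the design of the new curve_fit: the two subplots compare BayesianRidge under its default initial hyperparameters (alpha_init = 1/Var(y_train), lambda_init = 1) against a hand-picked pair (1.0, 1e-3); per the linked scikit-learn example, the latter yields a fit closer to the true sinusoid on this data. The in-plot text reports the learned precisions alpha_ and lambda_ and, as L, the final entry of reg.scores_, which with compute_score=True is the log marginal likelihood at the last iteration.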