Alignment-Lab-AI committed
Commit 1251d29 (verified) · 1 parent: e1f32a6

Upload sweep9.py with huggingface_hub
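The file was pushed with the huggingface_hub client. A minimal sketch of such an upload, assuming the standard HfApi.upload_file call (the repo id below is hypothetical):

    from huggingface_hub import HfApi

    api = HfApi()
    api.upload_file(
        path_or_fileobj="sweep9.py",
        path_in_repo="sweep9.py",
        repo_id="Alignment-Lab-AI/some-repo",  # hypothetical repo id
        commit_message="Upload sweep9.py with huggingface_hub",
    )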

Files changed (1): sweep9.py (+388, -0)
sweep9.py (new file, 388 lines):
import transformers
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
import numpy as np
import matplotlib.pyplot as plt
import json
from tqdm.auto import tqdm
import random
from scipy.signal import savgol_filter
import wandb
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
from itertools import product
import pandas as pd
import multiprocessing as mp
from functools import partial
import logging

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

class Config:
    def __init__(self):
        self.model_name = "Qwen/Qwen2-0.5B"
        self.data_dir = "dataset_chunks"
        self.max_length = 1024
        self.batch_size = 32
        self.num_seeds = 10000
        self.num_lr_steps = 10000
        self.min_lr = 1e-8
        self.max_lr = 10
        self.hidden_dim_ratio = 0.5
        self.dropout = 0.1
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.num_workers = mp.cpu_count()

class ImprovedAutoencoder(nn.Module):
    def __init__(self, input_dim, hidden_dim, num_layers, dropout):
        super().__init__()
        self.encoder = nn.ModuleList([
            nn.Linear(input_dim if i == 0 else hidden_dim, hidden_dim, dtype=torch.bfloat16)
            for i in range(num_layers)
        ])
        self.decoder = nn.ModuleList([
            nn.Linear(hidden_dim, hidden_dim if i < num_layers - 1 else input_dim, dtype=torch.bfloat16)
            for i in range(num_layers)
        ])
        # One LayerNorm per hidden activation: num_layers for the encoder plus
        # num_layers - 1 for the decoder; the final decoder output is left unnormalized.
        self.layer_norms = nn.ModuleList([
            nn.LayerNorm(hidden_dim, dtype=torch.bfloat16)
            for _ in range(num_layers * 2 - 1)
        ])
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        for enc, norm in zip(self.encoder, self.layer_norms[:len(self.encoder)]):
            x = F.relu(norm(enc(x)))
            x = self.dropout(x)
        for dec, norm in zip(self.decoder[:-1], self.layer_norms[len(self.encoder):]):
            x = F.relu(norm(dec(x)))
            x = self.dropout(x)
        x = self.decoder[-1](x)
        return x

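# Illustrative only (never called in the sweep): the autoencoder maps hidden
# states of shape (batch, seq_len, input_dim) back to the same shape, with the
# bottleneck width set by hidden_dim. The dimensions below are hypothetical.
def _example_autoencoder_shapes():
    ae = ImprovedAutoencoder(input_dim=896, hidden_dim=448, num_layers=4, dropout=0.1)
    x = torch.randn(2, 8, 896, dtype=torch.bfloat16)
    y = ae(x)
    assert y.shape == x.shape
    return y
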
class TokenizedDataset(torch.utils.data.Dataset):
    def __init__(self, file_paths):
        self.data = []
        for file_path in tqdm(file_paths, desc="Loading data chunks"):
            chunk_data = torch.load(file_path)
            logger.info(f"Loaded data from {file_path}")
            logger.info(f"Type of loaded data: {type(chunk_data)}")

            if isinstance(chunk_data, dict):  # Handle dictionary format (if present)
                logger.info(f"Keys in the dictionary: {chunk_data.keys()}")
                logger.info(f"Shape of input_ids: {chunk_data['input_ids'].shape}")
                self.data.append(chunk_data)
            elif isinstance(chunk_data, transformers.tokenization_utils_base.BatchEncoding):
                logger.info(f"Keys in the BatchEncoding: {chunk_data.keys()}")
                logger.info(f"Shape of input_ids: {chunk_data['input_ids'].shape}")
                self.data.append(chunk_data)  # Handle BatchEncoding format
            else:
                logger.warning(f"Unexpected data type: {type(chunk_data)}")

        logger.info(f"Loaded {len(self.data)} chunks of data")

    def __len__(self):
        return sum(len(chunk['input_ids']) for chunk in self.data)

    def __getitem__(self, idx):
        for chunk in self.data:
            if idx < len(chunk['input_ids']):
                return {k: v[idx] for k, v in chunk.items()}
            idx -= len(chunk['input_ids'])
        raise IndexError("Index out of range")

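# Illustrative only (never called here): a sketch of how a "*_tokenized.pt"
# chunk consumed by TokenizedDataset might be produced. The texts, chunk index,
# and output directory are hypothetical; the saved object is the BatchEncoding
# returned by the tokenizer, which is one of the formats __init__ above accepts.
def _example_save_tokenized_chunk(tokenizer, texts, chunk_idx=0, out_dir="dataset_chunks"):
    encoded = tokenizer(texts, padding="max_length", truncation=True,
                        max_length=1024, return_tensors="pt")
    os.makedirs(out_dir, exist_ok=True)
    torch.save(encoded, os.path.join(out_dir, f"chunk_{chunk_idx}_tokenized.pt"))
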
def set_seed(seed):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

def load_data(config):
    logger.info(f"Looking for data in directory: {config.data_dir}")
    chunk_files = [f for f in os.listdir(config.data_dir) if f.endswith('_tokenized.pt')]
    logger.info(f"Found {len(chunk_files)} chunk files: {chunk_files}")

    if not chunk_files:
        raise ValueError(f"No tokenized data files found in {config.data_dir}")

    chunk_files.sort(key=lambda x: int(x.split('_')[1]))
    chunk_files = [os.path.join(config.data_dir, f) for f in chunk_files]

    dataset = TokenizedDataset(chunk_files[:1])  # Load only the first chunk for now
    logger.info(f"Created dataset with {len(dataset)} samples")

    return DataLoader(dataset, batch_size=config.batch_size, shuffle=True, num_workers=config.num_workers)

def extract_hidden_states(batch, model):
    # hidden_states[0] is the embedding output; hidden_states[-1] is the final layer.
    with torch.no_grad():
        outputs = model(**batch, output_hidden_states=True)
    return outputs.hidden_states[0], outputs.hidden_states[-1]

class KLDivergenceLoss(nn.Module):
    def forward(self, pred, target):
        pred = F.log_softmax(pred, dim=-1)
        target = F.softmax(target, dim=-1)
        return F.kl_div(pred, target, reduction='batchmean', log_target=False)

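# Illustrative only (never called here): KLDivergenceLoss treats the last
# dimension of both hidden-state tensors as unnormalized logits and computes
# KL(softmax(target) || softmax(pred)), summed and divided by the batch size.
# The shapes below are hypothetical.
def _example_kl_loss():
    loss_fn = KLDivergenceLoss()
    pred = torch.randn(2, 8, 896)
    target = torch.randn(2, 8, 896)
    return loss_fn(pred, target)  # scalar, non-negative tensor
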
def lr_finder(model, autoencoder, loss_fn, optimizer, train_loader, config):
    model.eval()
    autoencoder.train()
    log_lrs, losses = [], []
    best_loss, best_lr = float('inf'), None

    pbar = tqdm(total=config.num_lr_steps, desc="LR Finder")
    for batch_idx, batch in enumerate(train_loader):
        if batch_idx >= config.num_lr_steps:
            break

        lr = config.min_lr * (config.max_lr / config.min_lr) ** (batch_idx / (config.num_lr_steps - 1))
        optimizer.param_groups[0]['lr'] = lr

        batch = {k: v.to(config.device) for k, v in batch.items()}
        first_states, last_states = extract_hidden_states(batch, model)

        optimizer.zero_grad()
        reconstructed = autoencoder(first_states)
        loss = loss_fn(reconstructed, last_states)
        loss.backward()
        optimizer.step()

        if loss < best_loss:
            best_loss = loss.item()
            best_lr = lr

        log_lrs.append(lr)
        losses.append(loss.item())

        pbar.update(1)
        pbar.set_postfix({"Loss": f"{loss.item():.4f}", "LR": f"{lr:.2e}"})

    pbar.close()
    return log_lrs, losses, best_lr, best_loss

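# Illustrative only (never called here): both lr_finder and run_experiment sweep
# the learning rate exponentially from min_lr to max_lr over num_steps, i.e.
#   lr_i = min_lr * (max_lr / min_lr) ** (i / (num_steps - 1)).
def _example_lr_schedule(min_lr=1e-8, max_lr=10.0, num_steps=5):
    # With these defaults this yields roughly [1e-8, 1.8e-6, 3.2e-4, 5.6e-2, 10.0].
    return [min_lr * (max_lr / min_lr) ** (i / (num_steps - 1)) for i in range(num_steps)]
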
def run_experiment(config, model, train_loader, num_layers, seed):
    set_seed(seed)
    input_dim = model.config.hidden_size
    hidden_dim = int(input_dim * config.hidden_dim_ratio)
    autoencoder = ImprovedAutoencoder(input_dim, hidden_dim, num_layers, config.dropout).to(config.device)

    loss_fn = KLDivergenceLoss()
    optimizer = optim.AdamW(autoencoder.parameters(), lr=config.min_lr)

    log_lrs, losses = [], []
    best_loss, best_lr = float('inf'), None

    pbar = tqdm(total=config.num_lr_steps, desc=f"LR Finder (Layers: {num_layers}, Seed: {seed})")
    for batch_idx, batch in enumerate(train_loader):
        if batch_idx >= config.num_lr_steps:
            break

        lr = config.min_lr * (config.max_lr / config.min_lr) ** (batch_idx / (config.num_lr_steps - 1))
        optimizer.param_groups[0]['lr'] = lr

        batch = {k: v.to(config.device) for k, v in batch.items()}
        first_states, last_states = extract_hidden_states(batch, model)

        optimizer.zero_grad()
        reconstructed = autoencoder(first_states)
        loss = loss_fn(reconstructed, last_states)
        loss.backward()
        optimizer.step()

        if loss < best_loss:
            best_loss = loss.item()
            best_lr = lr

        log_lrs.append(lr)
        losses.append(loss.item())

        # Log to wandb at every step
        wandb.log({
            "loss": loss.item(),
            "lr": lr,
            "batch_idx": batch_idx,
            "num_layers": num_layers,
            "seed": seed,
            "best_loss": best_loss,
            "best_lr": best_lr
        })

        pbar.update(1)
        pbar.set_postfix({"Loss": f"{loss.item():.4f}", "LR": f"{lr:.2e}"})

    pbar.close()

    result = {
        'seed': seed,
        'num_layers': num_layers,
        'hidden_dim_ratio': config.hidden_dim_ratio,
        'dropout': config.dropout,
        'final_loss': losses[-1],
        'final_lr': log_lrs[-1],
        'best_lr': best_lr,
        'best_loss': best_loss
    }

    logger.info(f"Experiment completed: {result}")
    return result

def main():
    # NOTE: this definition of main() is shadowed by the second definition of
    # main() near the end of the file, which is the one invoked by the
    # __main__ guard there; this per-run-logging variant is kept for reference.
    config = Config()
    wandb.init(project="qwen-autoencoder-lr-finder", config=config.__dict__)

    logger.info("Loading Qwen model and tokenizer...")
    model = AutoModelForCausalLM.from_pretrained(config.model_name, torch_dtype=torch.bfloat16).to(config.device)
    tokenizer = AutoTokenizer.from_pretrained(config.model_name)

    logger.info("Loading data...")
    train_loader = load_data(config)

    logger.info("Starting experiments...")
    results = []
    for num_layers in range(4, 9):
        for seed in range(1, config.num_seeds + 1):
            # Start a new wandb run for each experiment
            with wandb.init(project="qwen-autoencoder-lr-finder",
                            config=config.__dict__,
                            group=f"layers_{num_layers}",
                            name=f"seed_{seed}",
                            job_type="experiment",
                            reinit=True):

                result = run_experiment(config, model, train_loader, num_layers, seed)
                results.append(result)

                # Save results after each experiment
                with open('lr_finder_results.jsonl', 'a') as f:
                    json.dump(result, f)
                    f.write('\n')

                # Log final results to wandb
                wandb.log(result)

    logger.info("Creating visualizations...")
    plot_results(results)
    create_heatmap(results)
    create_parallel_coordinates_plot(results)
    create_3d_scatter(results)

    logger.info("Experiment completed. Check WandB for detailed results and visualizations.")

def run_experiments_sequential(config, model, train_loader):
    results = []
    for num_layers in tqdm(range(4, 9), desc="Number of Layers"):
        for seed in tqdm(range(1, config.num_seeds + 1), desc="Seeds", leave=False):
            result = run_experiment(config, model, train_loader, num_layers, seed)
            results.append(result)
    return results

def plot_results(results):
    fig, axs = plt.subplots(3, 2, figsize=(20, 30))
    fig.suptitle('Learning Rate Finder Results')

    for i, num_layers in enumerate(range(4, 9)):
        layer_results = [r for r in results if r['num_layers'] == num_layers]
        best_lrs = [r['best_lr'] for r in layer_results]
        best_losses = [r['best_loss'] for r in layer_results]

        axs[i // 2, i % 2].scatter(best_lrs, best_losses, alpha=0.5)
        axs[i // 2, i % 2].set_xlabel('Best Learning Rate')
        axs[i // 2, i % 2].set_ylabel('Best Loss')
        axs[i // 2, i % 2].set_title(f'{num_layers} Layers')
        axs[i // 2, i % 2].set_xscale('log')
        axs[i // 2, i % 2].set_yscale('log')

    plt.tight_layout()
    wandb.log({"lr_loss_relationships": wandb.Image(plt)})
    plt.close()

def create_heatmap(results):
    layer_counts = len(set(r['num_layers'] for r in results))
    seed_counts = len(set(r['seed'] for r in results))

    heatmap_data = np.zeros((layer_counts, seed_counts))
    for r in results:
        layer_idx = r['num_layers'] - 4
        seed_idx = r['seed'] - 1
        heatmap_data[layer_idx, seed_idx] = r['best_loss']

    plt.figure(figsize=(20, 10))
    plt.imshow(heatmap_data, aspect='auto', cmap='viridis')
    plt.colorbar(label='Best Loss')
    plt.xlabel('Seed')
    plt.ylabel('Number of Layers')
    plt.title('Heatmap of Best Loss across Layers and Seeds')
    plt.tight_layout()
    wandb.log({"loss_heatmap": wandb.Image(plt)})
    plt.close()

def create_parallel_coordinates_plot(results):
    df = pd.DataFrame(results)

    plt.figure(figsize=(20, 10))
    pd.plotting.parallel_coordinates(df, 'num_layers', colormap='viridis')
    plt.title('Parallel Coordinates Plot of Hyperparameters')
    plt.tight_layout()
    wandb.log({"parallel_coordinates": wandb.Image(plt)})
    plt.close()

def create_3d_scatter(results):
    fig = plt.figure(figsize=(15, 15))
    ax = fig.add_subplot(111, projection='3d')

    for num_layers in range(4, 9):
        layer_results = [r for r in results if r['num_layers'] == num_layers]
        x = [r['best_lr'] for r in layer_results]
        y = [r['best_loss'] for r in layer_results]
        z = [r['seed'] for r in layer_results]
        ax.scatter(x, y, z, label=f'{num_layers} Layers')

    ax.set_xlabel('Best Learning Rate')
    ax.set_ylabel('Best Loss')
    ax.set_zlabel('Seed')
    ax.set_xscale('log')
    ax.set_yscale('log')
    ax.legend()
    plt.title('3D Scatter Plot of Best LR, Loss, and Seed')
    plt.tight_layout()
    wandb.log({"3d_scatter": wandb.Image(plt)})
    plt.close()

def main():
    mp.set_start_method('spawn')
    config = Config()
    wandb.init(project="qwen-autoencoder-lr-finder", config=config.__dict__)

    logger.info("Loading Qwen model and tokenizer...")
    model = AutoModelForCausalLM.from_pretrained(config.model_name, torch_dtype=torch.bfloat16).to(config.device)
    tokenizer = AutoTokenizer.from_pretrained(config.model_name)

    logger.info("Loading data...")
    train_loader = load_data(config)

    logger.info("Starting experiments...")
    results = run_experiments_sequential(config, model, train_loader)

    logger.info("Saving results...")
    with open('lr_finder_results.jsonl', 'w') as f:
        for result in results:
            json.dump(result, f)
            f.write('\n')

    logger.info("Creating visualizations...")
    plot_results(results)
    create_heatmap(results)
    create_parallel_coordinates_plot(results)
    create_3d_scatter(results)

    logger.info("Experiment completed. Check WandB for detailed results and visualizations.")
    wandb.finish()

if __name__ == "__main__":
    main()