import torch
from labml.configs import option

from labml import experiment, tracker
from labml_helpers.train_valid import BatchIndex
from labml_nn.optimizers.sophia import Sophia
from labml_nn.transformers.basic.autoregressive_experiment import Configs as TransformerAutoRegressionConfigs


class Configs(TransformerAutoRegressionConfigs):
    """
    ## Configurations

    This inherits from [`Configs`](autoregressive_experiment.html)
    """

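    # Sophia re-estimates the diagonal Hessian only once every `hess_interval`
    # training steps (the paper uses k = 10), so the extra cost stays small.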
    hess_interval: int = 10

    optimizer: Sophia

    def step(self, batch: any, batch_idx: BatchIndex):
        """
        ### Training or validation step
        """

        # Set training/eval mode
        self.model.train(self.mode.is_train)

        # Move data to the device
        data, target = batch[0].to(self.device), batch[1].to(self.device)

        if isinstance(self.optimizer, Sophia) and self.mode.is_train and batch_idx.idx % self.hess_interval == 0:
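            # On every `hess_interval`-th training step we refresh Sophia's diagonal
            # Hessian estimate instead of taking a parameter step. This follows the
            # Gauss-Newton-Bartlett (GNB) estimator from the Sophia paper: sample
            # labels from the model's own output distribution, back-propagate the
            # cross-entropy loss on those sampled labels, and let `update_hessian`
            # turn the resulting squared mini-batch gradients into the Hessian
            # estimate, assuming it follows the paper's GNB recipe.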
            # Do not capture model outputs here
            with self.mode.update(is_log_activations=False):
                # Get model outputs.
                # It returns a tuple with states when using RNNs;
                # that is not implemented yet. 😜
                output, *_ = self.model(data)

            # Sample labels from the model's output distribution
            samp_dist = torch.distributions.Categorical(logits=output)
            y_sample = samp_dist.sample()

            # Calculate and log the loss on the sampled labels
            loss = self.loss_func(output, y_sample)
            tracker.add("loss.hess.", loss)

            # Calculate gradients
            loss.backward()
            # Clip gradients
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=self.grad_norm_clip)
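            # `data.numel()` is the number of tokens in the batch (batch size x
            # sequence length). It is passed as the batch size $B$ of the GNB
            # estimator, presumably so `update_hessian` can scale the squared
            # gradients to $B \cdot \hat{g} \odot \hat{g}$ before updating its
            # exponential moving average of the Hessian diagonal.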
            # Update Hessian estimate
            self.optimizer.update_hessian(data.numel())
            # Clear the gradients
            self.optimizer.zero_grad()
        else:
            # Update global step (number of tokens processed) when in training mode
            if self.mode.is_train:
                tracker.add_global_step(data.shape[0] * data.shape[1])

            # Whether to capture model outputs
            with self.mode.update(is_log_activations=batch_idx.is_last and self.is_log_model_activations):
                # Get model outputs.
                # It returns a tuple with states when using RNNs;
                # that is not implemented yet. 😜
                output, *_ = self.model(data)

            # Calculate and log loss
            loss = self.loss_func(output, target)
            tracker.add("loss.", loss)

            # Calculate and log accuracy
            self.accuracy(output, target)
            self.accuracy.track()

            # Other metrics
            self.other_metrics(output, target)

            # Train the model
            if self.mode.is_train:
                # Calculate gradients
                loss.backward()
                # Clip gradients
                torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=self.grad_norm_clip)
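                # For Sophia, `step()` applies the momentum (EMA of gradients)
                # preconditioned by the stored Hessian-diagonal estimate, with a
                # per-coordinate clip so no single update can get too large; see
                # the optimizer implementation for the exact update rule.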
                # Take optimizer step
                self.optimizer.step()
                # Log the model parameters and gradients on last batch of every epoch
                if batch_idx.is_last and self.is_log_model_params_grads:
                    tracker.add('model', self.model)
                # Clear the gradients
                self.optimizer.zero_grad()

        # Save the tracked metrics
        tracker.save()


def main():
    # Create experiment
    experiment.create(name="transformer")
    # Create configs
    conf = Configs()
    # Override configurations
    experiment.configs(conf, {
        # Use character level tokenizer
        'tokenizer': 'character',
        # Prompt separator is blank
        'prompt_separator': '',
        # Starting prompt for sampling
        'prompt': 'It is ',
        # Use Tiny Shakespeare dataset
        'text': 'tiny_shakespeare',

        # Use a context size of $512$
        'seq_len': 512,
        # Train for $32$ epochs
        'epochs': 32,
        # Batch size of $16$
        'batch_size': 16,
        # Switch between training and validation $10$ times
        # per epoch
        'inner_iterations': 10,

        # Model size
        'd_model': 256,
        'transformer.n_heads': 16,
        'transformer.ffn.d_ff': 1024,

        # Use the [Sophia optimizer](../../optimizers/sophia.html)
        'optimizer.optimizer': 'Sophia',
        'optimizer.learning_rate': 3e-4,
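        # `rho` is Sophia's $\rho$ parameter: it scales the Hessian diagonal in
        # the denominator of the update and so controls how aggressively each
        # coordinate's step is clipped.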
        'optimizer.rho': 0.03,
    })

    # Set models for saving and loading
    experiment.add_pytorch_models({'model': conf.model})

    # Start the experiment
    with experiment.start():
        # Run training
        conf.run()


#
if __name__ == '__main__':
    main()