diff --git a/readme.md b/readme.md
index b3ed8c376..2eb5d9144 100644
--- a/readme.md
+++ b/readme.md
@@ -82,7 +82,7 @@ The speed/FPS test includes the time of post-processing with no jit/data precisi
 ## Demo
 
     # install requirements
-    pip install pycocotools numpy opencv-python tqdm tensorboard tensorboardX pyyaml webcolors
+    pip install pycocotools numpy opencv-python tqdm tensorboard tensorboardX pyyaml webcolors wandb
     pip install torch==1.4.0
     pip install torchvision==0.5.0
diff --git a/train.py b/train.py
index 04133cb0c..0e9780f97 100644
--- a/train.py
+++ b/train.py
@@ -10,6 +10,7 @@
 import numpy as np
 import torch
 import yaml
+import wandb
 from tensorboardX import SummaryWriter
 from torch import nn
 from torch.utils.data import DataLoader
@@ -173,6 +174,7 @@ def freeze_backbone(m):
     else:
         use_sync_bn = False
 
+    wandb.init()  # initialize a wandb run
     writer = SummaryWriter(opt.log_path + f'/{datetime.datetime.now().strftime("%Y%m%d-%H%M%S")}/')
 
     # warp the model with loss function, to reduce the memory usage on gpu0 and speedup
@@ -248,7 +250,13 @@ def freeze_backbone(m):
                     # log learning_rate
                     current_lr = optimizer.param_groups[0]['lr']
                     writer.add_scalar('learning_rate', current_lr, step)
-
+                    wandb.log({"step": step,
+                               "epoch": epoch,
+                               "train-loss": loss,
+                               "train-reg-loss": reg_loss,
+                               "train-cls-loss": cls_loss,
+                               "lr": current_lr
+                               })
                     step += 1
 
                     if step % opt.save_interval == 0 and step > 0:
@@ -296,6 +304,12 @@ def freeze_backbone(m):
                 writer.add_scalars('Regression_loss', {'val': reg_loss}, step)
                 writer.add_scalars('Classfication_loss', {'val': cls_loss}, step)
 
+                wandb.log({"step": step,
+                           "val-loss": loss,
+                           "val-reg-loss": reg_loss,
+                           "val-cls-loss": cls_loss,
+                           "epoch": epoch})
+
                 if loss + opt.es_min_delta < best_loss:
                     best_loss = loss
                     best_epoch = epoch
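
For reference, a minimal standalone sketch of the logging pattern this patch adds (the project name, loop, and dummy loss below are hypothetical; the diff itself calls wandb.init() with no arguments). Note that wandb.log also accepts a step= keyword, which keeps the x-axis consistent without storing "step" as a metric of its own, and that calling .item() on a loss tensor logs a plain float:

    # Standalone sketch, not part of the diff: dummy loop and loss values,
    # illustrating the same per-step logging the patch performs in train.py.
    import torch
    import wandb

    wandb.init(project="efficientdet")  # hypothetical project name

    for step in range(100):
        loss = torch.rand(1)  # stand-in for the real training loss tensor
        # .item() converts the tensor to a float before logging; step= ties
        # the metric to the global step instead of logging "step" as a key.
        wandb.log({"train-loss": loss.item()}, step=step)

    wandb.finish()  # flush and close the run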