WandB

WandB Tutorial

Experiment

# config takes a plain dict, e.g. args.__dict__ for an argparse.Namespace (assumed here);
# wandb.config is only available after init() returns, so tags must use local values
wandb.init(project="ddpm", name="exp1", entity="semyu0102-viclab", config=args.__dict__, tags=["batch=32", f"lr={args.lr}"], id=...)
# Rename the run or extend the config after init:
# wandb.run.name = wandb.run.id
# wandb.run.name = "exp1"
# wandb.run.save()
# wandb.config.update({"epochs": 4, "batch_size": 32})
wandb.watch(model, criterion=criterion, log='all', log_freq=100)  # track gradients and parameters every 100 batches
wandb.log({"train_loss": train_loss, "val_loss": val_loss}, step=epoch)  # log a dict of metrics per epoch (metric names are just examples)
wandb.finish()  # close the run (important in notebooks or long-lived processes)
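
Put together, a minimal run looks like the sketch below; args, model, criterion and the train_one_epoch/validate helpers are placeholders, not part of the original snippet.

import wandb

args = parse_args()                               # hypothetical argparse helper
wandb.init(project="ddpm", name="exp1", config=vars(args))
wandb.watch(model, criterion=criterion, log='all', log_freq=100)

for epoch in range(args.epochs):
    train_loss = train_one_epoch(model)           # hypothetical training step
    val_loss = validate(model)                    # hypothetical validation step
    wandb.log({"train_loss": train_loss, "val_loss": val_loss}, step=epoch)

wandb.finish()
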
# Save the trained checkpoint as a versioned artifact
artifact = wandb.Artifact('model', type='model')
artifact.add_file("model/resnet50.pt")
wandb.log_artifact(artifact)
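
A logged artifact can be pulled back into a later run; a minimal sketch, assuming the 'model:latest' alias and a PyTorch model of the same architecture constructed elsewhere:

import torch
import wandb

run = wandb.init(project="ddpm", job_type="inference")
artifact = run.use_artifact("model:latest")       # 'latest' points at the newest logged version
artifact_dir = artifact.download()                # local directory containing resnet50.pt
model.load_state_dict(torch.load(f"{artifact_dir}/resnet50.pt"))
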
# Log sample predictions as images
images = []
images.append(wandb.Image(img[0], caption="Pred: {} Truth: {}".format(pred[0].item(), target[0])))
# img[0] : np array or PIL or matplotlib.figure.Figure ...
wandb.log({"Image": images})  # roughly 100 images per log call is the practical limit
wandb.log({"gradients": wandb.Histogram(sequence)})
# sequence : np array ...
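
In practice the image list is usually filled inside the evaluation loop and logged once per epoch; a short sketch, where val_loader, model and the 16-image cap are assumptions:

images = []
for img, target in val_loader:                    # hypothetical DataLoader
    pred = model(img).argmax(dim=1)
    images.append(wandb.Image(img[0], caption=f"Pred: {pred[0].item()} Truth: {target[0]}"))
    if len(images) >= 16:                         # stay well below the ~100-image limit
        break
wandb.log({"Image": images}, step=epoch)
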

Sweep

wandb sweep sweep.yaml                    # prints the sweep_id
wandb agent <sweep_id> --count <number of runs>

sweep.yaml:
program: main.py
method: bayes
name: ddpm-sweep
description: test ddpm sweep
project: ddpm
entity: semyu0102-viclab
metric:
  name: val_loss
  goal: minimize
parameters:
  learning_rate: 
    min: 0.0001
    max: 0.1
  optimizer:
    values: ["adam", "sgd"]
  epochs:
    value: 5
  parameter1:
    distribution: normal
    mu: 100
    sigma: 10
early_terminate:
  type: hyperband
  min_iter: 3
command:
  - python
  - ${program}      # resolves to the program key above (main.py)
  - ${args}         # expands to --learning_rate=... --optimizer=... --epochs=...
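
On the training-script side, the agent injects the sampled hyperparameters into wandb.config after init(); the script just has to log the metric named under metric in the YAML (val_loss). A minimal sketch, with train_one_epoch/evaluate as hypothetical helpers:

import wandb

def main():
    run = wandb.init(project="ddpm")              # inside an agent run, config is pre-filled by the sweep
    lr = wandb.config.learning_rate
    optimizer_name = wandb.config.optimizer

    for epoch in range(wandb.config.epochs):
        train_loss = train_one_epoch(lr, optimizer_name)   # hypothetical helpers
        val_loss = evaluate()
        # the key must match metric.name in sweep.yaml for bayes/hyperband to use it
        wandb.log({"train_loss": train_loss, "val_loss": val_loss}, step=epoch)

    wandb.finish()

if __name__ == "__main__":
    main()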