Computer Vision News - April 2023
MONAI Generative Models

    num_res_blocks=1,
    num_head_channels=256,
)
model.to(device)

Parameters: DDPMScheduler and Adam Optimiser

scheduler = DDPMScheduler(num_train_timesteps=1000)
optimizer = torch.optim.Adam(params=model.parameters(), lr=2.5e-5)
inferer = DiffusionInferer(scheduler)

Loading pre-trained model

model = torch.hub.load(
    "marksgraham/pretrained_generative_models:v0.2", model="ddpm_2d", verbose=True
).to(device)
model.eval()

Inferencing on validation cases

mask = mask.to(device)
val_image_masked = val_image_masked.to(device)
# timesteps = torch.Tensor((999,)).to(noise.device).long()
val_image_inpainted = torch.randn((1, 1, 64, 64)).to(device)
scheduler.set_timesteps(num_inference_steps=1000)
progress_bar = tqdm(scheduler.timesteps)

num_resample_steps = 4
with torch.no_grad():
    with autocast(enabled=True):
        for t in progress_bar:
            for u in range(num_resample_steps):
                # get the known portion at t-1
                if t > 0:
                    noise = torch.randn((1, 1, 64, 64)).to(device)
                    timesteps_prev = torch.Tensor((t - 1,)).to(noise.device).long()
                    val_image_inpainted_prev_known = scheduler.add_noise(
                        original_samples=val_image_masked, noise=noise, timesteps=timesteps_prev
                    )
                else:
                    val_image_inpainted_prev_known = val_image_masked

                # perform a denoising step to get the unknown portion at t-1
                if t > 0:
                    timesteps = torch.Tensor((t,)).to(noise.device).long()
                    model_output = model(val_image_inpainted, timesteps=timesteps)
                    val_image_inpainted_prev_unknown, _ = scheduler.step(
                        model_output, t, val_image_inpainted
                    )
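The denoising loop is cut off by the page break just after the scheduler.step call. In a RePaint-style inpainting loop such as this one, each step normally finishes by merging the known portion (noised ground truth) and the unknown portion (model estimate) through the inpainting mask and, between resample passes, re-noising the result back one step. The sketch below illustrates that continuation using the variable names from the snippet above; it is a minimal reconstruction of the idea, not the article's verbatim code, and it assumes the scheduler exposes a betas tensor, as MONAI's DDPMScheduler does.

# (continues inside the `for u in range(num_resample_steps)` loop above)

                # combine the known portion (noised ground truth) with the
                # unknown portion (model estimate) using the inpainting mask
                val_image_inpainted = torch.where(
                    mask == 1, val_image_inpainted_prev_known, val_image_inpainted_prev_unknown
                )

                # RePaint-style resampling: diffuse x_{t-1} one step forward again
                # before the next resample pass (exact beta indexing depends on the
                # scheduler's convention)
                if t > 0 and u < (num_resample_steps - 1):
                    noise = torch.randn((1, 1, 64, 64)).to(device)
                    beta = scheduler.betas[t - 1]
                    val_image_inpainted = (
                        torch.sqrt(1 - beta) * val_image_inpainted
                        + torch.sqrt(beta) * noise
                    )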
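For readers following along from this page only: the code starts mid-way through the model definition and relies on imports made earlier in the tutorial. A minimal setup consistent with the visible arguments might look as follows; the import paths are those of the standalone MONAI Generative Models package (the generative project) at the time of writing, and all constructor arguments other than num_res_blocks and num_head_channels are illustrative assumptions, not values taken from the article.

import torch
from torch.cuda.amp import autocast
from tqdm import tqdm

from generative.inferers import DiffusionInferer
from generative.networks.nets import DiffusionModelUNet
from generative.networks.schedulers import DDPMScheduler

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# 2D diffusion UNet; only num_res_blocks and num_head_channels appear on this
# page -- the remaining arguments are assumed for illustration
model = DiffusionModelUNet(
    spatial_dims=2,
    in_channels=1,
    out_channels=1,
    num_channels=(128, 256, 256),
    attention_levels=(False, True, True),
    num_res_blocks=1,
    num_head_channels=256,
)
model.to(device)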