Mariam-Elz committed on
Commit 09dbe55 · verified · 1 Parent(s): 5b9ca85

Upload imagedream/ldm/modules/diffusionmodules/util.py with huggingface_hub

imagedream/ldm/modules/diffusionmodules/util.py CHANGED
# adapted from
# https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py
# and
# https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
# and
# https://github.com/openai/guided-diffusion/blob/0ba878e517b276c45d1195eb29f6f5f72659a05b/guided_diffusion/nn.py
#
# thanks!


import math
import importlib

import torch
import torch.nn as nn
import numpy as np
from einops import repeat

def instantiate_from_config(config):
    if "target" not in config:
        if config == "__is_first_stage__":
            return None
        elif config == "__is_unconditional__":
            return None
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", dict()))


def get_obj_from_str(string, reload=False):
    # Resolve a dotted path like "package.module.ClassName" to the object itself.
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)

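A minimal sketch of the config convention these two helpers implement; the config below is hypothetical, and torch.nn.Linear stands in for an arbitrary importable target:

    # Any importable dotted path works as "target"; "params" are its kwargs.
    cfg = {"target": "torch.nn.Linear", "params": {"in_features": 4, "out_features": 8}}
    layer = instantiate_from_config(cfg)  # equivalent to torch.nn.Linear(4, 8)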
def make_beta_schedule(
    schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3
):
    if schedule == "linear":
        betas = (
            torch.linspace(
                linear_start**0.5, linear_end**0.5, n_timestep, dtype=torch.float64
            )
            ** 2
        )
    elif schedule == "cosine":
        timesteps = (
            torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s
        )
        alphas = timesteps / (1 + cosine_s) * np.pi / 2
        alphas = torch.cos(alphas).pow(2)
        alphas = alphas / alphas[0]
        betas = 1 - alphas[1:] / alphas[:-1]
        # clamp on the tensor directly; np.clip on a torch.Tensor is
        # version-dependent and can break the .numpy() call below
        betas = betas.clamp(0, 0.999)
    elif schedule == "sqrt_linear":
        betas = torch.linspace(
            linear_start, linear_end, n_timestep, dtype=torch.float64
        )
    elif schedule == "sqrt":
        betas = (
            torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)
            ** 0.5
        )
    else:
        raise ValueError(f"schedule '{schedule}' unknown.")
    return betas.numpy()

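For orientation, a sketch of the default linear schedule; the endpoint values follow from the defaults above:

    betas = make_beta_schedule("linear", 1000)
    print(betas.shape, betas[0], betas[-1])  # (1000,) ~1.0e-04 ~2.0e-02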
def enforce_zero_terminal_snr(betas):
    # Rescale a beta schedule so the terminal timestep has zero SNR, following
    # "Common Diffusion Noise Schedules and Sample Steps are Flawed"
    # (https://arxiv.org/abs/2305.08891), Algorithm 1.
    betas = torch.tensor(betas) if not isinstance(betas, torch.Tensor) else betas
    # Convert betas to alphas_bar_sqrt.
    alphas = 1 - betas
    alphas_bar = alphas.cumprod(0)
    alphas_bar_sqrt = alphas_bar.sqrt()
    # Store old values.
    alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone()
    alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone()
    # Shift so the last timestep is zero.
    alphas_bar_sqrt -= alphas_bar_sqrt_T
    # Scale so the first timestep keeps its old value.
    alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T)
    # Convert alphas_bar_sqrt back to betas.
    alphas_bar = alphas_bar_sqrt**2
    alphas = alphas_bar[1:] / alphas_bar[:-1]
    alphas = torch.cat([alphas_bar[0:1], alphas])
    betas = 1 - alphas
    return betas

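A sketch checking that the rescaling really drives the terminal cumulative alpha to zero:

    betas = torch.tensor(make_beta_schedule("linear", 1000))
    betas_zsnr = enforce_zero_terminal_snr(betas)
    alphas_bar = (1 - betas_zsnr).cumprod(0)
    print(alphas_bar[-1].item())  # ~0.0: no signal remains at the last step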
def make_ddim_timesteps(
    ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True
):
    if ddim_discr_method == "uniform":
        c = num_ddpm_timesteps // num_ddim_timesteps
        ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c)))
    elif ddim_discr_method == "quad":
        ddim_timesteps = (
            (np.linspace(0, np.sqrt(num_ddpm_timesteps * 0.8), num_ddim_timesteps)) ** 2
        ).astype(int)
    else:
        raise NotImplementedError(
            f'There is no ddim discretization method called "{ddim_discr_method}"'
        )

    # assert ddim_timesteps.shape[0] == num_ddim_timesteps
    # add one to get the final alpha values right (the ones from first scale to data during sampling)
    steps_out = ddim_timesteps + 1
    if verbose:
        print(f"Selected timesteps for ddim sampler: {steps_out}")
    return steps_out

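Illustrative sketch: 50 uniformly spaced DDIM steps drawn from a 1000-step DDPM chain:

    steps = make_ddim_timesteps("uniform", 50, 1000, verbose=False)
    print(steps[0], steps[1], steps[-1])  # 1 21 981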
def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):
    # select alphas for computing the variance schedule
    alphas = alphacums[ddim_timesteps]
    alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist())

    # according to the formula provided in https://arxiv.org/abs/2010.02502
    sigmas = eta * np.sqrt(
        (1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev)
    )
    if verbose:
        print(
            f"Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}"
        )
        print(
            f"For the chosen value of eta, which is {eta}, "
            f"this results in the following sigma_t schedule for ddim sampler {sigmas}"
        )
    return sigmas, alphas, alphas_prev

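A sketch tying the schedule helpers together; with eta=0 the DDIM update is deterministic, so every sigma comes out zero:

    betas = make_beta_schedule("linear", 1000)
    alphacums = np.cumprod(1.0 - betas)
    steps = make_ddim_timesteps("uniform", 50, 1000, verbose=False)
    sigmas, alphas, alphas_prev = make_ddim_sampling_parameters(
        alphacums, steps, eta=0.0, verbose=False
    )
    print(sigmas.max())  # 0.0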
def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function,
    which defines the cumulative product of (1-beta) over time from t = [0,1].
    :param num_diffusion_timesteps: the number of betas to produce.
    :param alpha_bar: a lambda that takes an argument t from 0 to 1 and
                      produces the cumulative product of (1-beta) up to that
                      part of the diffusion process.
    :param max_beta: the maximum beta to use; use values lower than 1 to
                     prevent singularities.
    """
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return np.array(betas)

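A sketch using the squared-cosine alpha_bar from improved-diffusion, the usual companion to this helper:

    betas = betas_for_alpha_bar(
        1000, lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
    )
    print(betas.shape, betas.max())  # (1000,), capped at 0.999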
def extract_into_tensor(a, t, x_shape):
    # Gather per-timestep values from `a` and reshape them to broadcast over `x_shape`.
    b, *_ = t.shape
    out = a.gather(-1, t)
    return out.reshape(b, *((1,) * (len(x_shape) - 1)))

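Sketch: pulling one schedule value per batch element, shaped so it broadcasts over an image batch:

    a = torch.linspace(0, 1, 1000)   # e.g. a precomputed schedule
    t = torch.tensor([0, 999])       # one timestep index per batch element
    out = extract_into_tensor(a, t, (2, 3, 64, 64))
    print(out.shape)  # torch.Size([2, 1, 1, 1])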
def checkpoint(func, inputs, params, flag):
    """
    Evaluate a function without caching intermediate activations, allowing for
    reduced memory at the expense of extra compute in the backward pass.
    :param func: the function to evaluate.
    :param inputs: the argument sequence to pass to `func`.
    :param params: a sequence of parameters `func` depends on but does not
                   explicitly take as arguments.
    :param flag: if False, disable gradient checkpointing.
    """
    if flag:
        args = tuple(inputs) + tuple(params)
        return CheckpointFunction.apply(func, len(inputs), *args)
    else:
        return func(*inputs)


class CheckpointFunction(torch.autograd.Function):
    @staticmethod
    def forward(ctx, run_function, length, *args):
        ctx.run_function = run_function
        ctx.input_tensors = list(args[:length])
        ctx.input_params = list(args[length:])

        with torch.no_grad():
            output_tensors = ctx.run_function(*ctx.input_tensors)
        return output_tensors

    @staticmethod
    def backward(ctx, *output_grads):
        ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors]
        with torch.enable_grad():
            # Fixes a bug where the first op in run_function modifies the
            # Tensor storage in place, which is not allowed for detach()'d
            # Tensors.
            shallow_copies = [x.view_as(x) for x in ctx.input_tensors]
            output_tensors = ctx.run_function(*shallow_copies)
        input_grads = torch.autograd.grad(
            output_tensors,
            ctx.input_tensors + ctx.input_params,
            output_grads,
            allow_unused=True,
        )
        del ctx.input_tensors
        del ctx.input_params
        del output_tensors
        return (None, None) + input_grads

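Sketch: wrapping a hypothetical two-layer block so its activations are recomputed during backward instead of being stored:

    block = nn.Sequential(nn.Linear(16, 16), nn.GELU(), nn.Linear(16, 16))
    x = torch.randn(4, 16, requires_grad=True)
    y = checkpoint(block, (x,), block.parameters(), flag=True)
    y.sum().backward()  # re-runs `block` here rather than caching activations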
def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):
    """
    Create sinusoidal timestep embeddings.
    :param timesteps: a 1-D Tensor of N indices, one per batch element.
                      These may be fractional.
    :param dim: the dimension of the output.
    :param max_period: controls the minimum frequency of the embeddings.
    :return: an [N x dim] Tensor of positional embeddings.
    """
    if not repeat_only:
        half = dim // 2
        freqs = torch.exp(
            -math.log(max_period)
            * torch.arange(start=0, end=half, dtype=torch.float32)
            / half
        ).to(device=timesteps.device)
        args = timesteps[:, None].float() * freqs[None]
        embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
        if dim % 2:
            # Pad with a zero column when dim is odd.
            embedding = torch.cat(
                [embedding, torch.zeros_like(embedding[:, :1])], dim=-1
            )
    else:
        embedding = repeat(timesteps, "b -> b d", d=dim)
    return embedding

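Sketch: embedding a batch of timesteps into a model channel width (320 is an arbitrary example):

    t = torch.tensor([0, 500, 999])
    emb = timestep_embedding(t, dim=320)
    print(emb.shape)  # torch.Size([3, 320])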
def zero_module(module):
    """
    Zero out the parameters of a module and return it.
    """
    for p in module.parameters():
        p.detach().zero_()
    return module


def scale_module(module, scale):
    """
    Scale the parameters of a module and return it.
    """
    for p in module.parameters():
        p.detach().mul_(scale)
    return module


def mean_flat(tensor):
    """
    Take the mean over all non-batch dimensions.
    """
    return tensor.mean(dim=list(range(1, len(tensor.shape))))

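Sketch: zero_module is the usual way to initialize an output layer so a freshly added branch contributes nothing at the start of training:

    out_proj = zero_module(nn.Conv2d(64, 64, 3, padding=1))
    x = torch.randn(1, 64, 8, 8)
    print(out_proj(x).abs().sum().item())  # 0.0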
def normalization(channels):
    """
    Make a standard normalization layer.
    :param channels: number of input channels.
    :return: an nn.Module for normalization.
    """
    return GroupNorm32(32, channels)


# PyTorch 1.7 has SiLU, but we support PyTorch 1.5.
class SiLU(nn.Module):
    def forward(self, x):
        return x * torch.sigmoid(x)


class GroupNorm32(nn.GroupNorm):
    def forward(self, x):
        # Normalize in float32 for numerical stability, then cast back.
        return super().forward(x.float()).type(x.dtype)

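Sketch: GroupNorm32 returns the caller's dtype while doing the math in float32, which is what makes it safe under float16 training:

    norm = normalization(64)
    x = torch.randn(2, 64, 8, 8, dtype=torch.float16)
    print(norm(x).dtype)  # torch.float16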
def conv_nd(dims, *args, **kwargs):
    """
    Create a 1D, 2D, or 3D convolution module.
    """
    if dims == 1:
        return nn.Conv1d(*args, **kwargs)
    elif dims == 2:
        return nn.Conv2d(*args, **kwargs)
    elif dims == 3:
        return nn.Conv3d(*args, **kwargs)
    raise ValueError(f"unsupported dimensions: {dims}")


def linear(*args, **kwargs):
    """
    Create a linear module.
    """
    return nn.Linear(*args, **kwargs)


def avg_pool_nd(dims, *args, **kwargs):
    """
    Create a 1D, 2D, or 3D average pooling module.
    """
    if dims == 1:
        return nn.AvgPool1d(*args, **kwargs)
    elif dims == 2:
        return nn.AvgPool2d(*args, **kwargs)
    elif dims == 3:
        return nn.AvgPool3d(*args, **kwargs)
    raise ValueError(f"unsupported dimensions: {dims}")

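Sketch: `dims` selects the convolution rank, so the same UNet code can target 1D, 2D, or 3D data:

    conv = conv_nd(2, 3, 8, kernel_size=3, padding=1)  # an ordinary nn.Conv2d
    print(conv(torch.randn(1, 3, 32, 32)).shape)  # torch.Size([1, 8, 32, 32])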
class HybridConditioner(nn.Module):
    def __init__(self, c_concat_config, c_crossattn_config):
        super().__init__()
        self.concat_conditioner = instantiate_from_config(c_concat_config)
        self.crossattn_conditioner = instantiate_from_config(c_crossattn_config)

    def forward(self, c_concat, c_crossattn):
        c_concat = self.concat_conditioner(c_concat)
        c_crossattn = self.crossattn_conditioner(c_crossattn)
        return {"c_concat": [c_concat], "c_crossattn": [c_crossattn]}

def noise_like(shape, device, repeat=False):
    repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(
        shape[0], *((1,) * (len(shape) - 1))
    )
    noise = lambda: torch.randn(shape, device=device)
    return repeat_noise() if repeat else noise()

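Sketch: repeat=True draws a single noise sample and tiles it across the batch dimension:

    n = noise_like((4, 3, 8, 8), device="cpu", repeat=True)
    print(torch.equal(n[0], n[3]))  # True: all batch elements share the noise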
# dummy replace
def convert_module_to_f16(l):
    """
    Convert primitive modules to float16.
    """
    if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):
        l.weight.data = l.weight.data.half()
        if l.bias is not None:
            l.bias.data = l.bias.data.half()


def convert_module_to_f32(l):
    """
    Convert primitive modules to float32, undoing convert_module_to_f16().
    """
    if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):
        l.weight.data = l.weight.data.float()
        if l.bias is not None:
            l.bias.data = l.bias.data.float()
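Sketch: these converters are written to be walked over a module tree with nn.Module.apply:

    model = nn.Sequential(nn.Conv2d(3, 8, 3), SiLU())
    model.apply(convert_module_to_f16)
    print(model[0].weight.dtype)  # torch.float16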
 