CoreloneH committed on
Commit fddee3b · 1 Parent(s): 27c7fab
Files changed (3)
  1. .gitignore +182 -0
  2. models/unet_2d_blocks_custom.py +34 -31
  3. requirements.txt +4 -4
.gitignore ADDED
@@ -0,0 +1,182 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# UV
+# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+#uv.lock
+
+# poetry
+# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+# in version control.
+# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
+.pdm.toml
+.pdm-python
+.pdm-build/
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+#.idea/
+
+# Ruff stuff:
+.ruff_cache/
+
+# PyPI configuration file
+.pypirc
+
+# User config files
+.vscode/
+output/
+
+ckpts/
+outputs/
+temps/
models/unet_2d_blocks_custom.py CHANGED
@@ -22,7 +22,7 @@ from torch import nn
 from diffusers.utils import is_torch_version, logging
 # from diffusers.models.attention import AdaGroupNorm
 from diffusers.models.attention_processor import Attention, AttnAddedKVProcessor, AttnAddedKVProcessor2_0
-from diffusers.models.dual_transformer_2d import DualTransformer2DModel
+# from diffusers.models.dual_transformer_2d import DualTransformer2DModel
 from diffusers.models.resnet import Downsample2D, FirDownsample2D, FirUpsample2D, KDownsample2D, KUpsample2D, ResnetBlock2D, Upsample2D
 # from diffusers.models.transformer_2d import Transformer2DModel
 from models.transformer_2d_custom import Transformer2DModel
@@ -582,16 +582,17 @@ class UNetMidBlock2DCrossAttn(nn.Module):
                     )
                 )
             else:
-                attentions.append(
-                    DualTransformer2DModel(
-                        num_attention_heads,
-                        in_channels // num_attention_heads,
-                        in_channels=in_channels,
-                        num_layers=1,
-                        cross_attention_dim=cross_attention_dim,
-                        norm_num_groups=resnet_groups,
-                    )
-                )
+                # attentions.append(
+                #     DualTransformer2DModel(
+                #         num_attention_heads,
+                #         in_channels // num_attention_heads,
+                #         in_channels=in_channels,
+                #         num_layers=1,
+                #         cross_attention_dim=cross_attention_dim,
+                #         norm_num_groups=resnet_groups,
+                #     )
+                # )
+                pass
             resnets.append(
                 ResnetBlock2D(
                     in_channels=in_channels,
@@ -951,16 +952,17 @@ class CrossAttnDownBlock2D(nn.Module):
                    )
                )
            else:
-                attentions.append(
-                    DualTransformer2DModel(
-                        num_attention_heads,
-                        out_channels // num_attention_heads,
-                        in_channels=out_channels,
-                        num_layers=1,
-                        cross_attention_dim=cross_attention_dim,
-                        norm_num_groups=resnet_groups,
-                    )
-                )
+                # attentions.append(
+                #     DualTransformer2DModel(
+                #         num_attention_heads,
+                #         out_channels // num_attention_heads,
+                #         in_channels=out_channels,
+                #         num_layers=1,
+                #         cross_attention_dim=cross_attention_dim,
+                #         norm_num_groups=resnet_groups,
+                #     )
+                # )
+                pass
         self.attentions = nn.ModuleList(attentions)
         self.resnets = nn.ModuleList(resnets)
 
@@ -2140,16 +2142,17 @@ class CrossAttnUpBlock2D(nn.Module):
                    )
                )
            else:
-                attentions.append(
-                    DualTransformer2DModel(
-                        num_attention_heads,
-                        out_channels // num_attention_heads,
-                        in_channels=out_channels,
-                        num_layers=1,
-                        cross_attention_dim=cross_attention_dim,
-                        norm_num_groups=resnet_groups,
-                    )
-                )
+                # attentions.append(
+                #     DualTransformer2DModel(
+                #         num_attention_heads,
+                #         out_channels // num_attention_heads,
+                #         in_channels=out_channels,
+                #         num_layers=1,
+                #         cross_attention_dim=cross_attention_dim,
+                #         norm_num_groups=resnet_groups,
+                #     )
+                # )
+                pass
         self.attentions = nn.ModuleList(attentions)
         self.resnets = nn.ModuleList(resnets)
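Note: the diffusers models package was reorganized in later releases, so under the new diffusers==0.29.0 pin the old diffusers.models.dual_transformer_2d import path apparently no longer resolves; this commit comments out the import and all three DualTransformer2DModel branches. The bare `pass` means a block constructed with dual_cross_attention=True now silently appends no attention module. A minimal version-tolerant sketch (hypothetical, not part of the commit; the guarded import and the make_dual_transformer helper are assumptions):

try:
    # Import path used by the diffusers 0.19.x line; assumed gone or
    # relocated in newer releases, hence the guard.
    from diffusers.models.dual_transformer_2d import DualTransformer2DModel
except ImportError:
    DualTransformer2DModel = None  # not available in this diffusers version

def make_dual_transformer(num_attention_heads, channels, cross_attention_dim, resnet_groups):
    # Hypothetical helper: build the dual-transformer branch, or fail loudly.
    # With dual_cross_attention=True, silently skipping the append would leave
    # fewer attentions than resnets in the block, so raising beats `pass`.
    if DualTransformer2DModel is None:
        raise NotImplementedError(
            "dual_cross_attention=True needs DualTransformer2DModel, "
            "which this diffusers version does not provide"
        )
    return DualTransformer2DModel(
        num_attention_heads,
        channels // num_attention_heads,
        in_channels=channels,
        num_layers=1,
        cross_attention_dim=cross_attention_dim,
        norm_num_groups=resnet_groups,
    )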
requirements.txt CHANGED
@@ -8,7 +8,7 @@ ftfy
 # Training
 bs4==0.0.1 # Needed for text cleaning
 bson==0.5.10
-diffusers==0.19.3 # diffusers[torch]==0.19.3 in control
+diffusers==0.29.0 # diffusers[torch]==0.19.3 in control
 einops==0.6.0
 ftfy==6.1.1 # Needed for text cleaning
 kornia==0.6.12
@@ -30,7 +30,7 @@ httpx==0.23.3
 opencv-python
 open_clip_torch
 protobuf==3.20.0
-huggingface_hub==0.25.2 --force-reinstall
 
-open_clip_torch
-git+https://github.com/openai/CLIP.git
+git+https://github.com/openai/CLIP.git
+
+huggingface_hub==0.30.2
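Note: besides bumping the pin, this also drops `--force-reinstall` from the requirement line and a duplicate open_clip_torch entry; that flag is a pip command-line option rather than one of the per-requirement options pip accepts inside a requirements file. A quick sanity check of the new pins (a sketch, not part of the commit):

# Verify the versions pinned in requirements.txt are the ones installed.
import diffusers
import huggingface_hub

assert diffusers.__version__ == "0.29.0", diffusers.__version__
assert huggingface_hub.__version__ == "0.30.2", huggingface_hub.__version__
print("pins OK:", diffusers.__version__, huggingface_hub.__version__)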