Commit 405a1fa

yhZhai and sayakpaul authored
fix: enable unet_3d_condition to support time_cond_proj_dim (#7364)
Co-authored-by: Sayak Paul <[email protected]>
1 parent 3028089 commit 405a1fa
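This commit threads a new `time_cond_proj_dim` argument through `UNet3DConditionModel` down to its `TimestepEmbedding`, so the timestep embedding can absorb an extra conditioning vector (e.g. a guidance-scale embedding in LCM-style distillation); previously the embedding had no `cond_proj` layer to consume one. A minimal, hypothetical sketch of how the new argument might be used; the tiny config and random tensors below are illustrative stand-ins, not values from any real checkpoint:

```python
import torch
from diffusers import UNet3DConditionModel

# Hypothetical tiny config so the model fits in memory; real checkpoints are far larger.
unet = UNet3DConditionModel(
    block_out_channels=(32, 32, 32, 32),
    cross_attention_dim=32,
    attention_head_dim=8,
    time_cond_proj_dim=256,  # the parameter this commit exposes
)

sample = torch.randn(1, 4, 2, 32, 32)           # (batch, channels, num_frames, height, width)
encoder_hidden_states = torch.randn(1, 77, 32)  # stand-in for text-encoder features
timestep_cond = torch.randn(1, 256)             # stand-in condition, dim = time_cond_proj_dim

out = unet(
    sample,
    timestep=10,
    encoder_hidden_states=encoder_hidden_states,
    timestep_cond=timestep_cond,  # consumed by the new cond_proj layer
).sample
print(out.shape)  # torch.Size([1, 4, 2, 32, 32])
```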

File tree

1 file changed: +4 -0 lines changed

src/diffusers/models/unets/unet_3d_condition.py

@@ -91,6 +91,8 @@ class UNet3DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin)
         cross_attention_dim (`int`, *optional*, defaults to 1024): The dimension of the cross attention features.
         attention_head_dim (`int`, *optional*, defaults to 64): The dimension of the attention heads.
         num_attention_heads (`int`, *optional*): The number of attention heads.
+        time_cond_proj_dim (`int`, *optional*, defaults to `None`):
+            The dimension of `cond_proj` layer in the timestep embedding.
     """

     _supports_gradient_checkpointing = False
@@ -123,6 +125,7 @@ def __init__(
         cross_attention_dim: int = 1024,
         attention_head_dim: Union[int, Tuple[int]] = 64,
         num_attention_heads: Optional[Union[int, Tuple[int]]] = None,
+        time_cond_proj_dim: Optional[int] = None,
     ):
         super().__init__()

@@ -174,6 +177,7 @@ def __init__(
             timestep_input_dim,
             time_embed_dim,
             act_fn=act_fn,
+            cond_proj_dim=time_cond_proj_dim,
         )

         self.transformer_in = TransformerTemporalModel(
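For reference, the layer this diff wires up can be exercised in isolation. When `cond_proj_dim` is set, `TimestepEmbedding` adds a bias-free linear projection of the condition to the sinusoidal timestep features before running its MLP. A short sketch using the dims from this file's default config (`block_out_channels[0] = 320`, `time_embed_dim = 1280`); the 256-dim condition tensor is a made-up stand-in:

```python
import torch
from diffusers.models.embeddings import TimestepEmbedding, Timesteps

# Mirrors what UNet3DConditionModel now builds when time_cond_proj_dim=256.
time_proj = Timesteps(num_channels=320, flip_sin_to_cos=True, downscale_freq_shift=0)
time_embedding = TimestepEmbedding(
    in_channels=320, time_embed_dim=1280, act_fn="silu", cond_proj_dim=256
)

t_emb = time_proj(torch.tensor([10]))       # sinusoidal features, shape (1, 320)
timestep_cond = torch.randn(1, 256)         # stand-in conditioning vector
emb = time_embedding(t_emb, timestep_cond)  # adds cond_proj(timestep_cond) to t_emb, then MLP
print(emb.shape)                            # torch.Size([1, 1280])
```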
