Skip to content

ValueError: not enough values to unpack (expected 2, got 1) #29

@Hakurei-Reimu-Gensokyo

Description

@Hakurei-Reimu-Gensokyo

I ran the FLUX demo, but it raised the following error:
ValueError: not enough values to unpack (expected 2, got 1)

Passing `txt_ids` 3d torch.Tensor is deprecated.Please remove the batch dimension and pass it as a 2d torch Tensor
Passing `img_ids` 3d torch.Tensor is deprecated.Please remove the batch dimension and pass it as a 2d torch Tensor
---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
Cell In[1], line 26
     18 ################################################
     19 
     20 # recommend not using batch operations for sd3, as cpu memory could be exceeded.
     21 prompts = [
     22     # "A photo of a puppy wearing a hat.",
     23     "A capybara holding a sign that reads Hello World.",
     24 ]
---> 26 images = pipe(
     27     prompts,
     28     num_inference_steps=15,
     29     guidance_scale=4.5,
     30 ).images
     32 for batch, image in enumerate(images):
     33     image.save(f'{batch}-flux-dev.png')

File [~/miniconda3/lib/python3.12/site-packages/torch/utils/_contextlib.py:116](https://a233301-9e7b-63840ca9.bjc1.seetacloud.com:8443/jupyter/lab/tree/autodl-tmp/FlowEdit/~/miniconda3/lib/python3.12/site-packages/torch/utils/_contextlib.py#line=115), in context_decorator.<locals>.decorate_context(*args, **kwargs)
    113 @functools.wraps(func)
    114 def decorate_context(*args, **kwargs):
    115     with ctx_factory():
--> 116         return func(*args, **kwargs)

File [~/miniconda3/lib/python3.12/site-packages/attention_map_diffusers/modules.py:528](https://a233301-9e7b-63840ca9.bjc1.seetacloud.com:8443/jupyter/lab/tree/autodl-tmp/FlowEdit/~/miniconda3/lib/python3.12/site-packages/attention_map_diffusers/modules.py#line=527), in FluxPipeline_call(self, prompt, prompt_2, height, width, num_inference_steps, timesteps, guidance_scale, num_images_per_prompt, generator, latents, prompt_embeds, pooled_prompt_embeds, output_type, return_dict, joint_attention_kwargs, callback_on_step_end, callback_on_step_end_tensor_inputs, max_sequence_length)
    525 # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
    526 timestep = t.expand(latents.shape[0]).to(latents.dtype)
--> 528 noise_pred = self.transformer(
    529     hidden_states=latents,
    530     timestep=timestep / 1000,
    531     guidance=guidance,
    532     pooled_projections=pooled_prompt_embeds,
    533     encoder_hidden_states=prompt_embeds,
    534     txt_ids=text_ids,
    535     img_ids=latent_image_ids,
    536     joint_attention_kwargs=self.joint_attention_kwargs,
    537     return_dict=False,
    538     ##################################################
    539     height=2 * (int(height) // (self.vae_scale_factor * 2)) // 2,
    540     ##################################################
    541 )[0]
    543 # compute the previous noisy sample x_t -> x_t-1
    544 latents_dtype = latents.dtype

File [~/miniconda3/lib/python3.12/site-packages/torch/nn/modules/module.py:1751](https://a233301-9e7b-63840ca9.bjc1.seetacloud.com:8443/jupyter/lab/tree/autodl-tmp/FlowEdit/~/miniconda3/lib/python3.12/site-packages/torch/nn/modules/module.py#line=1750), in Module._wrapped_call_impl(self, *args, **kwargs)
   1749     return self._compiled_call_impl(*args, **kwargs)  # type: ignore[misc]
   1750 else:
-> 1751     return self._call_impl(*args, **kwargs)

File [~/miniconda3/lib/python3.12/site-packages/torch/nn/modules/module.py:1762](https://a233301-9e7b-63840ca9.bjc1.seetacloud.com:8443/jupyter/lab/tree/autodl-tmp/FlowEdit/~/miniconda3/lib/python3.12/site-packages/torch/nn/modules/module.py#line=1761), in Module._call_impl(self, *args, **kwargs)
   1757 # If we don't have any hooks, we want to skip the rest of the logic in
   1758 # this function, and just call forward.
   1759 if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks
   1760         or _global_backward_pre_hooks or _global_backward_hooks
   1761         or _global_forward_hooks or _global_forward_pre_hooks):
-> 1762     return forward_call(*args, **kwargs)
   1764 result = None
   1765 called_always_called_hooks = set()

File [~/miniconda3/lib/python3.12/site-packages/attention_map_diffusers/modules.py:1074](https://a233301-9e7b-63840ca9.bjc1.seetacloud.com:8443/jupyter/lab/tree/autodl-tmp/FlowEdit/~/miniconda3/lib/python3.12/site-packages/attention_map_diffusers/modules.py#line=1073), in FluxTransformer2DModelForward(self, hidden_states, encoder_hidden_states, pooled_projections, timestep, img_ids, txt_ids, guidance, joint_attention_kwargs, controlnet_block_samples, controlnet_single_block_samples, return_dict, controlnet_blocks_repeat, height, width)
   1071     img_ids = img_ids[0]
   1073 ids = torch.cat((txt_ids, img_ids), dim=0)
-> 1074 image_rotary_emb = self.pos_embed(ids)
   1076 for index_block, block in enumerate(self.transformer_blocks):
   1077     if torch.is_grad_enabled() and self.gradient_checkpointing:

File [~/miniconda3/lib/python3.12/site-packages/torch/nn/modules/module.py:1751](https://a233301-9e7b-63840ca9.bjc1.seetacloud.com:8443/jupyter/lab/tree/autodl-tmp/FlowEdit/~/miniconda3/lib/python3.12/site-packages/torch/nn/modules/module.py#line=1750), in Module._wrapped_call_impl(self, *args, **kwargs)
   1749     return self._compiled_call_impl(*args, **kwargs)  # type: ignore[misc]
   1750 else:
-> 1751     return self._call_impl(*args, **kwargs)

File [~/miniconda3/lib/python3.12/site-packages/torch/nn/modules/module.py:1762](https://a233301-9e7b-63840ca9.bjc1.seetacloud.com:8443/jupyter/lab/tree/autodl-tmp/FlowEdit/~/miniconda3/lib/python3.12/site-packages/torch/nn/modules/module.py#line=1761), in Module._call_impl(self, *args, **kwargs)
   1757 # If we don't have any hooks, we want to skip the rest of the logic in
   1758 # this function, and just call forward.
   1759 if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks
   1760         or _global_backward_pre_hooks or _global_backward_hooks
   1761         or _global_forward_hooks or _global_forward_pre_hooks):
-> 1762     return forward_call(*args, **kwargs)
   1764 result = None
   1765 called_always_called_hooks = set()

File [~/miniconda3/lib/python3.12/site-packages/diffusers/models/transformers/transformer_flux.py:65](https://a233301-9e7b-63840ca9.bjc1.seetacloud.com:8443/jupyter/lab/tree/autodl-tmp/FlowEdit/~/miniconda3/lib/python3.12/site-packages/diffusers/models/transformers/transformer_flux.py#line=64), in EmbedND.forward(self, ids)
     62 def forward(self, ids: torch.Tensor) -> torch.Tensor:
     63     n_axes = ids.shape[-1]
     64     emb = torch.cat(
---> 65         [rope(ids[..., i], self.axes_dim[i], self.theta) for i in range(n_axes)],
     66         dim=-3,
     67     )
     68     return emb.unsqueeze(1)

File [~/miniconda3/lib/python3.12/site-packages/diffusers/models/transformers/transformer_flux.py:44](https://a233301-9e7b-63840ca9.bjc1.seetacloud.com:8443/jupyter/lab/tree/autodl-tmp/FlowEdit/~/miniconda3/lib/python3.12/site-packages/diffusers/models/transformers/transformer_flux.py#line=43), in rope(pos, dim, theta)
     41 scale = torch.arange(0, dim, 2, dtype=torch.float64, device=pos.device) / dim
     42 omega = 1.0 / (theta**scale)
---> 44 batch_size, seq_length = pos.shape
     45 out = torch.einsum("...n,d->...nd", pos, omega)
     46 cos_out = torch.cos(out)

ValueError: not enough values to unpack (expected 2, got 1)

Metadata

Metadata

Assignees

No one assigned

    Labels

    No labels

    Projects

    No projects

    Milestone

    No milestone

    Relationships

    None yet

    Development

    No branches or pull requests

    Issue actions