-
Notifications
You must be signed in to change notification settings - Fork 363
Description
Hello.
Does anyone else encounter this problem?
As the title says...
[Comfy3D] Zero123Plus Diffusion Model
headdim should be in [64, 96, 128].
I'm just using default workflow - zero123Plus_to_instantMesh
LOG
got prompt !!! Exception during processing !!! headdim should be in [64, 96, 128]. Traceback (most recent call last): File "F:\COMFYUI\ComfyUI_windows_portable\ComfyUI\execution.py", line 518, in execute output_data, output_ui, has_subgraph, has_pending_tasks = await get_output_data(prompt_id, unique_id, obj, input_data_all, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb, v3_data=v3_data) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "F:\COMFYUI\ComfyUI_windows_portable\ComfyUI\execution.py", line 329, in get_output_data return_values = await _async_map_node_over_list(prompt_id, unique_id, obj, input_data_all, obj.FUNCTION, allow_interrupt=True, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb, v3_data=v3_data) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "F:\COMFYUI\ComfyUI_windows_portable\ComfyUI\execution.py", line 303, in _async_map_node_over_list await process_inputs(input_dict, i) File "F:\COMFYUI\ComfyUI_windows_portable\ComfyUI\execution.py", line 291, in process_inputs result = f(**inputs) ^^^^^^^^^^^ File "F:\COMFYUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\utils\_contextlib.py", line 120, in decorate_context return func(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^^ File "F:\COMFYUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI-3D-Pack\nodes.py", line 2457, in run_model output_image = zero123plus_pipe( ^^^^^^^^^^^^^^^^^ File "F:\COMFYUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\utils\_contextlib.py", line 120, in decorate_context return func(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^^ File "F:\COMFYUI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI-3D-Pack\Gen_3D_Modules\Zero123Plus\pipeline.py", line 
365, in __call__ encoded = self.vision_encoder(image_2, output_hidden_states=False) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "F:\COMFYUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1775, in _wrapped_call_impl return self._call_impl(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "F:\COMFYUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1786, in _call_impl return forward_call(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "F:\COMFYUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\transformers\utils\generic.py", line 918, in wrapper output = func(self, *args, **kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "F:\COMFYUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\transformers\models\clip\modeling_clip.py", line 1146, in forward vision_outputs: BaseModelOutputWithPooling = self.vision_model( ^^^^^^^^^^^^^^^^^^ File "F:\COMFYUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1775, in _wrapped_call_impl return self._call_impl(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "F:\COMFYUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1786, in _call_impl return forward_call(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "F:\COMFYUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\transformers\models\clip\modeling_clip.py", line 745, in forward encoder_outputs: BaseModelOutput = self.encoder( ^^^^^^^^^^^^^ File "F:\COMFYUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1775, in _wrapped_call_impl return self._call_impl(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "F:\COMFYUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1786, in _call_impl return forward_call(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File 
"F:\COMFYUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\transformers\models\clip\modeling_clip.py", line 549, in forward layer_outputs = encoder_layer( ^^^^^^^^^^^^^^ File "F:\COMFYUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\transformers\modeling_layers.py", line 94, in __call__ return super().__call__(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "F:\COMFYUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1775, in _wrapped_call_impl return self._call_impl(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "F:\COMFYUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1786, in _call_impl return forward_call(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "F:\COMFYUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\transformers\models\clip\modeling_clip.py", line 397, in forward hidden_states, attn_weights = self.self_attn( ^^^^^^^^^^^^^^^ File "F:\COMFYUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1775, in _wrapped_call_impl return self._call_impl(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "F:\COMFYUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1786, in _call_impl return forward_call(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "F:\COMFYUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\transformers\models\clip\modeling_clip.py", line 333, in forward attn_output, attn_weights = attention_interface( ^^^^^^^^^^^^^^^^^^^^ File "F:\COMFYUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\transformers\integrations\sdpa_attention.py", line 96, in sdpa_attention_forward attn_output = torch.nn.functional.scaled_dot_product_attention( ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "F:\COMFYUI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\sageattention\core.py", line 82, in sageattn 
assert headdim in [64, 96, 128], "headdim should be in [64, 96, 128]." ^^^^^^^^^^^^^^^^^^^^^^^^ AssertionError: headdim should be in [64, 96, 128].