error occurred when executing DownloadAndLoadChatGLM3: Torch not compiled with CUDA enabled File "/Users/kienmy/Desktop/AI/ComfyUI/execution.py", line 316, in execute output_data, output_ui, has_subgraph = get_output_data(obj, input_data_all, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/kienmy/Desktop/AI/ComfyUI/execution.py", line 191, in get_output_data return_values = _map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/kienmy/Desktop/AI/ComfyUI/execution.py", line 168, in _map_node_over_list process_inputs(input_dict, i) File "/Users/kienmy/Desktop/AI/ComfyUI/execution.py", line 157, in process_inputs results.append(getattr(obj, func)(**inputs)) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/kienmy/Desktop/AI/ComfyUI/custom_nodes/ComfyUI-KwaiKolorsWrapper/nodes.py", line 186, in loadmodel text_encoder.quantize(8) File "/Users/kienmy/Desktop/AI/ComfyUI/custom_nodes/ComfyUI-KwaiKolorsWrapper/kolors/models/modeling_chatglm.py", line 852, in quantize quantize(self.encoder, weight_bit_width) File "/Users/kienmy/Desktop/AI/ComfyUI/custom_nodes/ComfyUI-KwaiKolorsWrapper/kolors/models/quantization.py", line 157, in quantize weight=layer.self_attention.query_key_value.weight.to(torch.cuda.current_device()), ^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages/torch/cuda/__init__.py", line 936, in current_device _lazy_init() File "/Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages/torch/cuda/__init__.py", line 306, in _lazy_init raise AssertionError("Torch not compiled with CUDA enabled") E găp 
lỗi này là lỗi gì ạ (Nó chạy đến ChatGLM3 model)
Hãy theo dõi kênh để sớm nhận được thông báo những video sớm nhất và quy trình làm việc MIỄN PHÍ:
ruclips.net/channel/UCDpFsNIdAio-s7Sjxkz_8OA
error occurred when executing DownloadAndLoadChatGLM3:
Torch not compiled with CUDA enabled
File "/Users/kienmy/Desktop/AI/ComfyUI/execution.py", line 316, in execute
output_data, output_ui, has_subgraph = get_output_data(obj, input_data_all, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/kienmy/Desktop/AI/ComfyUI/execution.py", line 191, in get_output_data
return_values = _map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/kienmy/Desktop/AI/ComfyUI/execution.py", line 168, in _map_node_over_list
process_inputs(input_dict, i)
File "/Users/kienmy/Desktop/AI/ComfyUI/execution.py", line 157, in process_inputs
results.append(getattr(obj, func)(**inputs))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/kienmy/Desktop/AI/ComfyUI/custom_nodes/ComfyUI-KwaiKolorsWrapper/nodes.py", line 186, in loadmodel
text_encoder.quantize(8)
File "/Users/kienmy/Desktop/AI/ComfyUI/custom_nodes/ComfyUI-KwaiKolorsWrapper/kolors/models/modeling_chatglm.py", line 852, in quantize
quantize(self.encoder, weight_bit_width)
File "/Users/kienmy/Desktop/AI/ComfyUI/custom_nodes/ComfyUI-KwaiKolorsWrapper/kolors/models/quantization.py", line 157, in quantize
weight=layer.self_attention.query_key_value.weight.to(torch.cuda.current_device()),
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages/torch/cuda/__init__.py", line 936, in current_device
_lazy_init()
File "/Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages/torch/cuda/__init__.py", line 306, in _lazy_init
raise AssertionError("Torch not compiled with CUDA enabled")
Em gặp lỗi này là lỗi gì ạ? (Nó chạy đến ChatGLM3 model)
@@32_dotrungkien71 mời bạn tham gia nhóm zalo cộng đồng của gia đình Mới Mẻ để cùng nhau giải đáp những vấn đề chi tiết hơn bạn nhé.