You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
Traceback (most recent call last):
File "/mnt/data/Tune-A-Video/train_tuneavideo.py", line 367, in <module>
main(**OmegaConf.load(args.config))
File "/mnt/data/Tune-A-Video/train_tuneavideo.py", line 123, in main
unet.enable_xformers_memory_efficient_attention()
File "/home/admin1/.local/lib/python3.10/site-packages/diffusers/modeling_utils.py", line 215, in enable_xformers_memory_efficient_attention
self.set_use_memory_efficient_attention_xformers(True)
File "/home/admin1/.local/lib/python3.10/site-packages/diffusers/modeling_utils.py", line 203, in set_use_memory_efficient_attention_xformers
fn_recursive_set_mem_eff(module)
File "/home/admin1/.local/lib/python3.10/site-packages/diffusers/modeling_utils.py", line 199, in fn_recursive_set_mem_eff
fn_recursive_set_mem_eff(child)
File "/home/admin1/.local/lib/python3.10/site-packages/diffusers/modeling_utils.py", line 199, in fn_recursive_set_mem_eff
fn_recursive_set_mem_eff(child)
File "/home/admin1/.local/lib/python3.10/site-packages/diffusers/modeling_utils.py", line 199, in fn_recursive_set_mem_eff
fn_recursive_set_mem_eff(child)
File "/home/admin1/.local/lib/python3.10/site-packages/diffusers/modeling_utils.py", line 196, in fn_recursive_set_mem_eff
module.set_use_memory_efficient_attention_xformers(valid)
File "/home/admin1/.local/lib/python3.10/site-packages/diffusers/modeling_utils.py", line 203, in set_use_memory_efficient_attention_xformers
fn_recursive_set_mem_eff(module)
File "/home/admin1/.local/lib/python3.10/site-packages/diffusers/modeling_utils.py", line 199, in fn_recursive_set_mem_eff
fn_recursive_set_mem_eff(child)
File "/home/admin1/.local/lib/python3.10/site-packages/diffusers/modeling_utils.py", line 196, in fn_recursive_set_mem_eff
module.set_use_memory_efficient_attention_xformers(valid)
File "/mnt/data/Tune-A-Video/tuneavideo/models/attention.py", line 226, in set_use_memory_efficient_attention_xformers
raise e
File "/mnt/data/Tune-A-Video/tuneavideo/models/attention.py", line 220, in set_use_memory_efficient_attention_xformers
_ = xformers.ops.memory_efficient_attention(
File "/usr/local/lib/python3.10/dist-packages/xformers/ops/fmha/__init__.py", line 223, in memory_efficient_attention
return _memory_efficient_attention(
File "/usr/local/lib/python3.10/dist-packages/xformers/ops/fmha/__init__.py", line 321, in _memory_efficient_attention
return _memory_efficient_attention_forward(
File "/usr/local/lib/python3.10/dist-packages/xformers/ops/fmha/__init__.py", line 337, in _memory_efficient_attention_forward
op = _dispatch_fw(inp, False)
File "/usr/local/lib/python3.10/dist-packages/xformers/ops/fmha/dispatch.py", line 120, in _dispatch_fw
return _run_priority_list(
File "/usr/local/lib/python3.10/dist-packages/xformers/ops/fmha/dispatch.py", line 63, in _run_priority_list
raise NotImplementedError(msg)
NotImplementedError: No operator found for memory_efficient_attention_forward with inputs:
query : shape=(1, 2, 1, 40) (torch.float32)
key : shape=(1, 2, 1, 40) (torch.float32)
value : shape=(1, 2, 1, 40) (torch.float32)
attn_bias : <class 'NoneType'>
p : 0.0 decoderF is not supported because:
xFormers wasn't build with CUDA support
attn_bias type is <class 'NoneType'>
operator wasn't built - see python -m xformers.info for more info flshattF is not supported because:
xFormers wasn't build with CUDA support
dtype=torch.float32 (supported: {torch.float16, torch.bfloat16})
operator wasn't built - see python -m xformers.info for more info tritonflashattF is not supported because:
xFormers wasn't build with CUDA support
dtype=torch.float32 (supported: {torch.float16, torch.bfloat16})
operator wasn't built - see python -m xformers.info for more info
triton is not available
Only work on pre-MLIR triton for now cutlassF is not supported because:
xFormers wasn't build with CUDA support
operator wasn't built - see python -m xformers.info for more info smallkF is not supported because:
max(query.shape[-1] != value.shape[-1]) > 32
xFormers wasn't build with CUDA support
operator wasn't built - see python -m xformers.info for more info
unsupported embed per head: 40
Traceback (most recent call last):
File "/home/admin1/.local/bin/accelerate", line 8, in <module>
sys.exit(main())
File "/home/admin1/.local/lib/python3.10/site-packages/accelerate/commands/accelerate_cli.py", line 47, in main
args.func(args)
File "/home/admin1/.local/lib/python3.10/site-packages/accelerate/commands/launch.py", line 1017, in launch_command
simple_launcher(args)
File "/home/admin1/.local/lib/python3.10/site-packages/accelerate/commands/launch.py", line 637, in simple_launcher
raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)
subprocess.CalledProcessError: Command '['/usr/bin/python3', 'train_tuneavideo.py', '--config=configs/man-skiing.yaml']' returned non-zero exit status 1.
The text was updated successfully, but these errors were encountered:
Traceback (most recent call last):
File "/mnt/data/Tune-A-Video/train_tuneavideo.py", line 367, in <module>
main(**OmegaConf.load(args.config))
File "/mnt/data/Tune-A-Video/train_tuneavideo.py", line 123, in main
unet.enable_xformers_memory_efficient_attention()
File "/home/admin1/.local/lib/python3.10/site-packages/diffusers/modeling_utils.py", line 215, in enable_xformers_memory_efficient_attention
self.set_use_memory_efficient_attention_xformers(True)
File "/home/admin1/.local/lib/python3.10/site-packages/diffusers/modeling_utils.py", line 203, in set_use_memory_efficient_attention_xformers
fn_recursive_set_mem_eff(module)
File "/home/admin1/.local/lib/python3.10/site-packages/diffusers/modeling_utils.py", line 199, in fn_recursive_set_mem_eff
fn_recursive_set_mem_eff(child)
File "/home/admin1/.local/lib/python3.10/site-packages/diffusers/modeling_utils.py", line 199, in fn_recursive_set_mem_eff
fn_recursive_set_mem_eff(child)
File "/home/admin1/.local/lib/python3.10/site-packages/diffusers/modeling_utils.py", line 199, in fn_recursive_set_mem_eff
fn_recursive_set_mem_eff(child)
File "/home/admin1/.local/lib/python3.10/site-packages/diffusers/modeling_utils.py", line 196, in fn_recursive_set_mem_eff
module.set_use_memory_efficient_attention_xformers(valid)
File "/home/admin1/.local/lib/python3.10/site-packages/diffusers/modeling_utils.py", line 203, in set_use_memory_efficient_attention_xformers
fn_recursive_set_mem_eff(module)
File "/home/admin1/.local/lib/python3.10/site-packages/diffusers/modeling_utils.py", line 199, in fn_recursive_set_mem_eff
fn_recursive_set_mem_eff(child)
File "/home/admin1/.local/lib/python3.10/site-packages/diffusers/modeling_utils.py", line 196, in fn_recursive_set_mem_eff
module.set_use_memory_efficient_attention_xformers(valid)
File "/mnt/data/Tune-A-Video/tuneavideo/models/attention.py", line 226, in set_use_memory_efficient_attention_xformers
raise e
File "/mnt/data/Tune-A-Video/tuneavideo/models/attention.py", line 220, in set_use_memory_efficient_attention_xformers
_ = xformers.ops.memory_efficient_attention(
File "/usr/local/lib/python3.10/dist-packages/xformers/ops/fmha/__init__.py", line 223, in memory_efficient_attention
return _memory_efficient_attention(
File "/usr/local/lib/python3.10/dist-packages/xformers/ops/fmha/__init__.py", line 321, in _memory_efficient_attention
return _memory_efficient_attention_forward(
File "/usr/local/lib/python3.10/dist-packages/xformers/ops/fmha/__init__.py", line 337, in _memory_efficient_attention_forward
op = _dispatch_fw(inp, False)
File "/usr/local/lib/python3.10/dist-packages/xformers/ops/fmha/dispatch.py", line 120, in _dispatch_fw
return _run_priority_list(
File "/usr/local/lib/python3.10/dist-packages/xformers/ops/fmha/dispatch.py", line 63, in _run_priority_list
raise NotImplementedError(msg)
NotImplementedError: No operator found for
memory_efficient_attention_forward
with inputs:query : shape=(1, 2, 1, 40) (torch.float32)
key : shape=(1, 2, 1, 40) (torch.float32)
value : shape=(1, 2, 1, 40) (torch.float32)
attn_bias : <class 'NoneType'>
p : 0.0
decoderF
is not supported because:xFormers wasn't build with CUDA support
attn_bias type is <class 'NoneType'>
operator wasn't built - see
python -m xformers.info
for more info flshattF
is not supported because:xFormers wasn't build with CUDA support
dtype=torch.float32 (supported: {torch.float16, torch.bfloat16})
operator wasn't built - see
python -m xformers.info
for more infotritonflashattF
is not supported because:xFormers wasn't build with CUDA support
dtype=torch.float32 (supported: {torch.float16, torch.bfloat16})
operator wasn't built - see
python -m xformers.info
for more infotriton is not available
Only work on pre-MLIR triton for now
cutlassF
is not supported because:xFormers wasn't build with CUDA support
operator wasn't built - see
python -m xformers.info
for more infosmallkF
is not supported because:max(query.shape[-1] != value.shape[-1]) > 32
xFormers wasn't build with CUDA support
operator wasn't built - see
python -m xformers.info
for more infounsupported embed per head: 40
Traceback (most recent call last):
File "/home/admin1/.local/bin/accelerate", line 8, in <module>
sys.exit(main())
File "/home/admin1/.local/lib/python3.10/site-packages/accelerate/commands/accelerate_cli.py", line 47, in main
args.func(args)
File "/home/admin1/.local/lib/python3.10/site-packages/accelerate/commands/launch.py", line 1017, in launch_command
simple_launcher(args)
File "/home/admin1/.local/lib/python3.10/site-packages/accelerate/commands/launch.py", line 637, in simple_launcher
raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)
subprocess.CalledProcessError: Command '['/usr/bin/python3', 'train_tuneavideo.py', '--config=configs/man-skiing.yaml']' returned non-zero exit status 1.
The text was updated successfully, but these errors were encountered: