We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
2 parents ae612e3 + fcf001c — commit e0cbfab (Copy full SHA for e0cbfab)
1 file changed
FlagEmbedding/llm_embedder/src/lm/modeling_lm.py
@@ -38,7 +38,7 @@ def __init__(self, model_name_or_path=None, padding_side="left", dtype="bf16", c
38
except ValueError:
39
self.model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path, cache_dir=cache_dir, torch_dtype=dtype, trust_remote_code=True, device_map=device_map)
40
41
- # if device_map is specified, we don't need to move the model to any specifc gpu
+ # if device_map is specified, we don't need to move the model to any specific gpu
42
if device_map is None:
43
if accelerator is not None:
44
device = accelerator.device
0 commit comments