Skip to content

Commit 17729a6

Browse files
authored
Merge pull request #1189 from hanhainebula/master
delete useless parameters for embedder classes
2 parents b5e4597 + 2956b26 commit 17729a6

4 files changed

Lines changed: 1 addition & 28 deletions

File tree

FlagEmbedding/inference/embedder/decoder_only/base.py

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -47,8 +47,6 @@ class BaseLLMEmbedder(AbsEmbedder):
4747
batch_size (int, optional): Batch size for inference. Defaults to :data:`256`.
4848
query_max_length (int, optional): Maximum length for query. Defaults to :data:`512`.
4949
passage_max_length (int, optional): Maximum length for passage. Defaults to :data:`512`.
50-
instruction (Optional[str], optional): Instruction for embedding with :attr:`instruction_format`. Defaults to :data:`None`.
51-
instruction_format (str, optional): Instruction format when using :attr:`instruction`. Defaults to :data:`"{}{}"`.
5250
convert_to_numpy (bool, optional): If True, the output embedding will be a Numpy array. Otherwise, it will be a Torch Tensor.
5351
Defaults to :data:`True`.
5452
@@ -72,8 +70,6 @@ def __init__(
7270
batch_size: int = 256,
7371
query_max_length: int = 512,
7472
passage_max_length: int = 512,
75-
instruction: Optional[str] = None,
76-
instruction_format: str = "{}{}",
7773
convert_to_numpy: bool = True,
7874
**kwargs: Any,
7975
):
@@ -87,8 +83,6 @@ def __init__(
8783
batch_size=batch_size,
8884
query_max_length=query_max_length,
8985
passage_max_length=passage_max_length,
90-
instruction=instruction,
91-
instruction_format=instruction_format,
9286
convert_to_numpy=convert_to_numpy,
9387
**kwargs
9488
)

FlagEmbedding/inference/embedder/decoder_only/icl.py

Lines changed: 1 addition & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -54,8 +54,6 @@ class ICLLLMEmbedder(AbsEmbedder):
5454
batch_size (int, optional): Batch size for inference. Defaults to :data:`256`.
5555
query_max_length (int, optional): Maximum length for query. Defaults to :data:`512`.
5656
passage_max_length (int, optional): Maximum length for passage. Defaults to :data:`512`.
57-
instruction (Optional[str], optional): Instruction for embedding with :attr:`instruction_format`. Defaults to :data:`None`.
58-
instruction_format (str, optional): Instruction format when using :attr:`instruction`. Defaults to :data:`"{}{}"`.
5957
convert_to_numpy (bool, optional): If True, the output embedding will be a Numpy array. Otherwise, it will be a Torch Tensor.
6058
Defaults to :data:`True`.
6159
@@ -81,8 +79,6 @@ def __init__(
8179
batch_size: int = 256,
8280
query_max_length: int = 512,
8381
passage_max_length: int = 512,
84-
instruction: Optional[str] = None,
85-
instruction_format: str = "{}{}",
8682
convert_to_numpy: bool = True,
8783
**kwargs: Any,
8884
):
@@ -96,10 +92,8 @@ def __init__(
9692
batch_size=batch_size,
9793
query_max_length=query_max_length,
9894
passage_max_length=passage_max_length,
99-
instruction=instruction,
100-
instruction_format=instruction_format,
10195
convert_to_numpy=convert_to_numpy,
102-
kwargs=kwargs
96+
**kwargs
10397
)
10498

10599
self.tokenizer = AutoTokenizer.from_pretrained(

FlagEmbedding/inference/embedder/encoder_only/base.py

Lines changed: 0 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -28,8 +28,6 @@ class BaseEmbedder(AbsEmbedder):
2828
batch_size (int, optional): Batch size for inference. Defaults to :data:`256`.
2929
query_max_length (int, optional): Maximum length for query. Defaults to :data:`512`.
3030
passage_max_length (int, optional): Maximum length for passage. Defaults to :data:`512`.
31-
instruction (Optional[str], optional): Instruction for embedding with :attr:`instruction_format`. Defaults to :data:`None`.
32-
instruction_format (str, optional): Instruction format when using :attr:`instruction`. Defaults to :data:`"{}{}"`.
3331
convert_to_numpy (bool, optional): If True, the output embedding will be a Numpy array. Otherwise, it will be a Torch Tensor.
3432
Defaults to :data:`True`.
3533
@@ -55,8 +53,6 @@ def __init__(
5553
batch_size: int = 256,
5654
query_max_length: int = 512,
5755
passage_max_length: int = 512,
58-
instruction: Optional[str] = None,
59-
instruction_format: str = "{}{}",
6056
convert_to_numpy: bool = True,
6157
**kwargs: Any,
6258
):
@@ -70,8 +66,6 @@ def __init__(
7066
batch_size=batch_size,
7167
query_max_length=query_max_length,
7268
passage_max_length=passage_max_length,
73-
instruction=instruction,
74-
instruction_format=instruction_format,
7569
convert_to_numpy=convert_to_numpy,
7670
**kwargs
7771
)
@@ -201,9 +195,6 @@ def encode_single_device(
201195
if device == "cpu": self.use_fp16 = False
202196
if self.use_fp16: self.model.half()
203197

204-
if device == "cpu": self.use_fp16 = False
205-
if self.use_fp16: self.model.half()
206-
207198
self.model.to(device)
208199
self.model.eval()
209200

FlagEmbedding/inference/embedder/encoder_only/m3.py

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -38,8 +38,6 @@ class M3Embedder(AbsEmbedder):
3838
batch_size (int, optional): Batch size for inference. Defaults to :data:`256`.
3939
query_max_length (int, optional): Maximum length for query. Defaults to :data:`512`.
4040
passage_max_length (int, optional): Maximum length for passage. Defaults to :data:`512`.
41-
instruction (Optional[str], optional): Instruction for embedding with :attr:`instruction_format`. Defaults to :data:`None`.
42-
instruction_format (str, optional): Instruction format when using :attr:`instruction`. Defaults to :data:`"{}{}"`.
4341
return_dense (bool, optional): If true, will return the dense embedding. Defaults to :data:`True`.
4442
return_sparse (bool, optional): If true, will return the sparse embedding. Defaults to :data:`False`.
4543
return_colbert_vecs (bool, optional): If true, will return the colbert vectors. Defaults to :data:`False`.
@@ -66,8 +64,6 @@ def __init__(
6664
batch_size: int = 256,
6765
query_max_length: int = 512,
6866
passage_max_length: int = 512,
69-
instruction: Optional[str] = None,
70-
instruction_format: str = "{}{}",
7167
return_dense: bool = True,
7268
return_sparse: bool = False,
7369
return_colbert_vecs: bool = False,
@@ -83,8 +79,6 @@ def __init__(
8379
batch_size=batch_size,
8480
query_max_length=query_max_length,
8581
passage_max_length=passage_max_length,
86-
instruction=instruction,
87-
instruction_format=instruction_format,
8882
return_dense=return_dense,
8983
return_sparse=return_sparse,
9084
return_colbert_vecs=return_colbert_vecs,

0 commit comments

Comments
 (0)