
Commit 4fb915b

wtmlonwj and Mcat authored
Add qwen baichuan ci (#7324)
* add qwen alibi-baichuan ci
* attention mask fix
* add tiktoken requirement

Co-authored-by: 骑马小猫 <[email protected]>
1 parent 0407095 commit 4fb915b

8 files changed: +28 additions, −6 deletions

paddlenlp/transformers/qwen/modeling.py

Lines changed: 7 additions & 1 deletion
```diff
@@ -841,7 +841,13 @@ def update_model_kwargs_for_generation(outputs, model_kwargs, is_encoder_decoder
 
         # update attention_mask
         if not is_encoder_decoder and "attention_mask" in model_kwargs:
-            model_kwargs["attention_mask"] = None
+            attention_mask = model_kwargs["attention_mask"]
+            if attention_mask is not None and len(attention_mask.shape) == 2:
+                model_kwargs["attention_mask"] = paddle.concat(
+                    [attention_mask, paddle.ones([attention_mask.shape[0], 1], dtype=attention_mask.dtype)], axis=-1
+                )
+            else:
+                model_kwargs["attention_mask"] = None
 
         return model_kwargs
```
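To see the fix in isolation: instead of discarding the attention mask between generation steps, a 2-D mask is now extended by one column of ones so the freshly generated token stays visible. A minimal standalone sketch of that behavior (not part of the commit itself):

```python
import paddle

# After each decoding step, grow a 2-D attention mask by one column
# of ones so the new token is attended to in subsequent steps.
attention_mask = paddle.ones([2, 5], dtype="int64")  # batch=2, 5 tokens so far

if attention_mask is not None and len(attention_mask.shape) == 2:
    attention_mask = paddle.concat(
        [attention_mask, paddle.ones([attention_mask.shape[0], 1], dtype=attention_mask.dtype)],
        axis=-1,
    )

print(attention_mask.shape)  # [2, 6] -- one extra position after the step
```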

scripts/regression/requirements_ci.txt

Lines changed: 1 addition & 1 deletion
```diff
@@ -41,4 +41,4 @@ sacremoses
 soundfile
 librosa
 gradio
-tiktoken
+tiktoken
```
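The identical removed and re-added line most likely reflects a trailing-newline fix; the substantive point, per the commit message, is that tiktoken is required in CI because Qwen's tokenizer builds on it. A minimal sketch of tiktoken usage (the encoding name below is an illustrative assumption; Qwen loads its own BPE vocabulary file):

```python
import tiktoken

# "cl100k_base" is a stand-in encoding for illustration only;
# the Qwen tokenizer ships its own tiktoken BPE file.
enc = tiktoken.get_encoding("cl100k_base")
ids = enc.encode("Hello, Qwen!")
print(ids)              # token ids
print(enc.decode(ids))  # round-trips to the original string
```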

tests/fixtures/llm/finetune.yaml

Lines changed: 4 additions & 0 deletions
```diff
@@ -34,6 +34,10 @@ finetune:
       model_name_or_path: __internal_testing__/tiny-random-chatglm2
     bloom:
       model_name_or_path: __internal_testing__/tiny-fused-bloom
+    qwen:
+      model_name_or_path: __internal_testing__/tiny-fused-qwen
+    baichuan:
+      model_name_or_path: __internal_testing__/tiny-fused-baichuan
 
 inference-predict:
   default:
```
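The lora and prefix-tuning fixtures below get the same two entries. For reference, a hypothetical sketch of how a test could resolve the new tiny checkpoints from this fixture (the nesting level and the helper it stands in for are assumptions based on the diff context; requires PyYAML):

```python
import yaml

with open("tests/fixtures/llm/finetune.yaml") as f:
    config = yaml.safe_load(f)

# The nesting below mirrors the diff context; the real fixture may place
# the model entries under an intermediate key such as "default".
qwen_path = config["finetune"]["qwen"]["model_name_or_path"]
print(qwen_path)  # __internal_testing__/tiny-fused-qwen
```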

tests/fixtures/llm/lora.yaml

Lines changed: 4 additions & 0 deletions
```diff
@@ -36,6 +36,10 @@ lora:
       model_name_or_path: __internal_testing__/tiny-random-chatglm2
     bloom:
       model_name_or_path: __internal_testing__/tiny-fused-bloom
+    qwen:
+      model_name_or_path: __internal_testing__/tiny-fused-qwen
+    baichuan:
+      model_name_or_path: __internal_testing__/tiny-fused-baichuan
 
 inference-predict:
   default:
```

tests/fixtures/llm/prefix_tuning.yaml

Lines changed: 4 additions & 0 deletions
```diff
@@ -35,6 +35,10 @@ prefix_tuning:
       model_name_or_path: __internal_testing__/tiny-random-chatglm2
     bloom:
       model_name_or_path: __internal_testing__/tiny-fused-bloom
+    qwen:
+      model_name_or_path: __internal_testing__/tiny-fused-qwen
+    baichuan:
+      model_name_or_path: __internal_testing__/tiny-fused-baichuan
 
 inference-predict:
   default:
```

tests/llm/test_finetune.py

Lines changed: 2 additions & 2 deletions
```diff
@@ -25,7 +25,7 @@
 
 @parameterized_class(
     ["model_dir"],
-    [["llama"], ["chatglm"], ["bloom"], ["chatglm2"]],
+    [["llama"], ["chatglm"], ["bloom"], ["chatglm2"], ["qwen"], ["baichuan"]],
 )
 class FinetuneTest(LLMTest, unittest.TestCase):
     config_path: str = "./tests/fixtures/llm/finetune.yaml"
@@ -50,7 +50,7 @@ def test_finetune(self):
 
         main()
 
-        if self.model_dir != "opt" and self.model_dir != "chatglm2":
+        if self.model_dir not in ["opt", "chatglm2", "qwen", "baichuan"]:
             self.run_predictor({"inference_model": True})
 
         self.run_predictor({"inference_model": False})
```
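How these two extra rows multiply coverage: `parameterized_class` (from the `parameterized` package) generates one copy of the test class per parameter row, so appending `["qwen"]` and `["baichuan"]` yields two additional FinetuneTest variants. A minimal sketch of the mechanism (class and test names are illustrative, not from the repository):

```python
import unittest
from parameterized import parameterized_class

@parameterized_class(
    ["model_dir"],
    [["qwen"], ["baichuan"]],  # subset of the rows used in the real test
)
class ExampleTest(unittest.TestCase):
    def test_model_dir_is_set(self):
        # each generated class carries its own model_dir attribute
        self.assertIn(self.model_dir, ["qwen", "baichuan"])
```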

tests/llm/test_lora.py

Lines changed: 3 additions & 1 deletion
```diff
@@ -32,6 +32,8 @@
         ["chatglm"],
         ["chatglm2"],
         ["bloom"],
+        ["qwen"],
+        ["baichuan"],
     ],
 )
 class LoraTest(LLMTest, unittest.TestCase):
@@ -73,7 +75,7 @@ def test_lora(self):
 
         merge()
 
-        if self.model_dir not in ["chatglm2"]:
+        if self.model_dir not in ["chatglm2", "qwen", "baichuan"]:
             self.run_predictor({"inference_model": True})
 
         self.run_predictor({"inference_model": False})
```

tests/llm/test_prefix_tuning.py

Lines changed: 3 additions & 1 deletion
```diff
@@ -31,6 +31,8 @@
         ["bloom"],
         ["chatglm"],
         ["chatglm2"],
+        ["qwen"],
+        ["baichuan"],
     ],
 )
 class PrefixTuningTest(LLMTest, unittest.TestCase):
@@ -58,7 +60,7 @@ def test_prefix_tuning(self):
 
         main()
 
-        if self.model_dir not in ["chatglm2"]:
+        if self.model_dir not in ["chatglm2", "qwen", "baichuan"]:
             self.run_predictor(
                 {
                     "inference_model": True,
```
