
Commit

delete unused files
TranscenderNing committed Jul 28, 2024
1 parent 3e6d0da commit fdfb8c5
Showing 1 changed file with 10 additions and 11 deletions.
21 changes: 10 additions & 11 deletions llm/run_finetune.py
@@ -45,8 +45,14 @@
     load_dataset,
 )
 from paddlenlp.metrics import BLEU, Rouge1, Rouge2, RougeL
-
-from paddlenlp.peft import LoRAConfig, LoRAModel, PrefixConfig, PrefixModelForCausalLM
+from paddlenlp.peft import (
+    LoRAConfig,
+    LoRAModel,
+    PrefixConfig,
+    PrefixModelForCausalLM,
+    VeRAConfig,
+    VeRAModel,
+)
 from paddlenlp.peft.reft.pareft import (
     LoreftIntervention,
     ReftConfig,
@@ -56,15 +62,6 @@
     get_reft_model,
 )
 from paddlenlp.peft.reft.pareft.dataset import LoReftSupervisedDataset
-from paddlenlp.peft import (
-    LoRAConfig,
-    LoRAModel,
-    PrefixConfig,
-    PrefixModelForCausalLM,
-    VeRAConfig,
-    VeRAModel,
-)
-
 from paddlenlp.trainer import PdArgumentParser, get_last_checkpoint
 from paddlenlp.trainer.trainer_callback import TrainerState
 from paddlenlp.transformers import (
@@ -395,6 +392,8 @@ def neft_post_hook(module, input, output):
     if not model_args.reft:
         if training_args.pipeline_parallel_degree > 1:
             from utils.data import convert_example_common
+
+            trans_func = partial(convert_example_common, tokenizer=tokenizer, data_args=data_args)
         else:
             trans_func = partial(get_convert_example(model), tokenizer=tokenizer, data_args=data_args)
 
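Note: the consolidated import block now exposes the VeRA classes alongside the LoRA and prefix-tuning ones from a single import site. As a rough illustration of why they travel together, here is a minimal sketch of wrapping a base model with VeRA adapters, modeled on PaddleNLP's LoRAConfig/LoRAModel pattern; the checkpoint name, the keyword arguments (target_modules, r, vera_alpha), and the helper call are assumptions, not the confirmed VeRAConfig API.

# Sketch only: mirrors the LoRAConfig/LoRAModel usage pattern; the exact
# VeRAConfig fields below are assumptions, not a confirmed signature.
from paddlenlp.peft import VeRAConfig, VeRAModel
from paddlenlp.transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("facebook/llama-7b")  # placeholder checkpoint

vera_config = VeRAConfig(
    target_modules=[".*q_proj.*", ".*v_proj.*"],  # assumed: regexes selecting attention projections
    r=8,           # assumed field name: shared low-rank dimension
    vera_alpha=8,  # assumed field name: scaling factor
)
model = VeRAModel(model, vera_config)  # wrap the base model with VeRA adapters
model.print_trainable_parameters()     # assumed helper, mirroring LoRAModel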
