Machine translation with the Hugging Face transformers library


Preface

The main idea: since most data can be read with pandas, this walkthrough performs machine translation on data in DataFrame form. BLEU reaches about 31. It follows the official example, with prediction added.


I. Machine Translation

Reference code (official):

https://github.com/huggingface/notebooks/blob/master/examples/translation.ipynb

The code was run on Kaggle.

II. Usage Steps

1. Install and import packages

pip install datasets sacrebleu
import pandas as pd
from transformers import AutoTokenizer
from transformers import AutoModelForSeq2SeqLM, DataCollatorForSeq2Seq, Seq2SeqTrainingArguments, Seq2SeqTrainer
from datasets import load_metric
from sklearn.model_selection import train_test_split
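
Note: transformers is preinstalled on Kaggle. If you run this locally, you will most likely also need transformers itself plus sentencepiece (the Marian tokenizer used below depends on it), roughly:

pip install transformers sentencepiece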

Data preprocessing

# Read the parallel corpus (English/Chinese, tab-separated, first 40,000 rows)
data_news=pd.read_csv('../input/news-infomation/news-commentary-v14.en-zh.tsv',sep='\t',nrows=40000,header=None,names=['en','zh'])
# Drop rows where either side is missing
data_news.drop(data_news[data_news['en'].isnull()].index,axis=0,inplace=True)
data_news.drop(data_news[data_news['zh'].isnull()].index,axis=0,inplace=True)
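# Optional sanity check (not in the original, purely illustrative): inspect the data after dropping empty rows
print(data_news.shape)    # about 40,000 rows minus the dropped ones, 2 columns
print(data_news.head(3))  # columns: 'en' (English) and 'zh' (Chinese)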

metric = load_metric("sacrebleu")  # if running locally, the sacrebleu package also has to be installed
# Load the tokenizer and model
tokenizer = AutoTokenizer.from_pretrained('Helsinki-NLP/opus-mt-zh-en')
model = AutoModelForSeq2SeqLM.from_pretrained('Helsinki-NLP/opus-mt-zh-en')
max_input_length = 256
max_target_length = 256
# Train/test split
train_news,test_news=train_test_split(data_news,test_size=0.05)   
# Tokenize the sentences into token-ID sequences (as a dict) and add a 'labels' entry for the targets
def preprocess_function(examples):
    inputs = [ex for ex in examples['zh']]
    targets = [ex for ex in examples['en']]
    model_inputs = tokenizer(inputs, max_length=max_input_length, truncation=True)
    with tokenizer.as_target_tokenizer():
        labels = tokenizer(targets, max_length=max_target_length, truncation=True)
    model_inputs["labels"] = labels["input_ids"]
    return model_inputs
train_dataset_news=preprocess_function(train_news)
test_dataset_news=preprocess_function(test_news)
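
To see what preprocess_function actually produces, an optional check like the one below (illustrative, not from the original) shows that the result is a dict of unpadded token-ID lists plus a labels entry; padding is applied later, per batch, by DataCollatorForSeq2Seq.

# Optional sanity check on the tokenized output
print(list(train_dataset_news.keys()))          # ['input_ids', 'attention_mask', 'labels']
print(train_dataset_news['input_ids'][0][:10])  # first few source (Chinese) token IDs
print(tokenizer.decode(train_dataset_news['labels'][0], skip_special_tokens=True))  # decoded English target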

Defining the evaluation metric (BLEU)

import numpy as np

def postprocess_text(preds, labels):
    preds = [pred.strip() for pred in preds]
    labels = [[label.strip()] for label in labels]
    # print(preds, labels)  # uncomment to inspect the decoded predictions and references during evaluation
    return preds, labels

def compute_metrics(eval_preds):
    preds, labels = eval_preds
    if isinstance(preds, tuple):
        preds = preds[0]
    decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)

    # Replace -100 in the labels as we can't decode them.
    labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
    decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)

    # Some simple post-processing
    decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels)

    result = metric.compute(predictions=decoded_preds, references=decoded_labels)
    result = {"bleu": result["score"]}

    prediction_lens = [np.count_nonzero(pred != tokenizer.pad_token_id) for pred in preds]
    result["gen_len"] = np.mean(prediction_lens)
    result = {k: round(v, 4) for k, v in result.items()}
    return result
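
For reference, sacrebleu expects a list of prediction strings and a list of reference lists (each prediction can have several references), which is exactly what postprocess_text builds. A tiny made-up example:

# Toy illustration of the input format sacrebleu expects
toy_preds = ["the cat sat on the mat"]
toy_refs  = [["the cat sat on the mat"]]
print(metric.compute(predictions=toy_preds, references=toy_refs)["score"])  # 100.0 for an exact match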

Creating the data in Dataset format

import torch
# Wrap the tokenized encodings in a torch Dataset
class IMDbDataset(torch.utils.data.Dataset):
    def __init__(self, encodings):
        self.encodings = encodings
    def __getitem__(self, idx):
        item = {key: val[idx] for key, val in self.encodings.items()}
        return item
    def __len__(self):
        return len(self.encodings['input_ids'])
    
train_data_news=IMDbDataset(train_dataset_news)
test_data_news=IMDbDataset(test_dataset_news)
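
Each item of the wrapped dataset is a plain dict of unpadded token-ID lists; per-batch padding is again left to DataCollatorForSeq2Seq. A quick illustrative check (not in the original):

# Optional: inspect the wrapped dataset
print(len(train_data_news))       # number of training pairs
print(train_data_news[0].keys())  # dict_keys(['input_ids', 'attention_mask', 'labels'])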

Data processing is done; next, train the model

# Training configuration
batch_size = 16
# model_name = model_checkpoint.split("/")[-1]
args = Seq2SeqTrainingArguments(
    "test-translation",
    evaluation_strategy = "epoch",  # run evaluation on the validation set at the end of each epoch
    learning_rate=2e-5,              
    per_device_train_batch_size=batch_size,
    per_device_eval_batch_size=batch_size,
    weight_decay=0.01,
    save_total_limit=3,
    num_train_epochs=1,
    predict_with_generate=True,
    fp16=True,
#     push_to_hub=True,
#     push_to_hub_model_id=f"{model_name}-finetuned",
)
data_collator = DataCollatorForSeq2Seq(tokenizer, model=model)
trainer = Seq2SeqTrainer(
    model,
    args,
    train_dataset=train_data_news,
    eval_dataset=test_data_news,
    data_collator=data_collator,
    tokenizer=tokenizer,
    compute_metrics=compute_metrics
)
trainer.train() 
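
After training you can also run a standalone evaluation on the held-out split to get the same BLEU / gen_len numbers that are reported at the end of each epoch. Whether evaluate accepts max_length depends on your transformers version, so treat this as a sketch:

# Optional: evaluate the fine-tuned model on the test split
eval_results = trainer.evaluate(max_length=max_target_length)
print(eval_results)  # includes 'eval_bleu' and 'eval_gen_len' among other keys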

After training, save the model and run prediction (the predict method in the official notebook behaves quite differently from what you might expect, so a pipeline is used here instead).

from transformers import TranslationPipeline
# Save the model to a directory (this also writes the tokenizer/vocabulary files along with the model weights in .bin format)
trainer.save_model('./seq2seq')
# Reload the fine-tuned model and tokenizer
model_train = AutoModelForSeq2SeqLM.from_pretrained('./seq2seq/')
tokenizer = AutoTokenizer.from_pretrained('./seq2seq/')
# Build a translation pipeline and run a prediction
pip = TranslationPipeline(task='translation_zh_to_en', model=model_train, tokenizer=tokenizer)
pip('拉力赛发达国家哦')

Result: ‘La Li Xi is a developed country.’
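
Note that TranslationPipeline actually returns a list of dicts; the string above is the translation_text field of the single result. For illustration (the input sentences below are made up):

# The pipeline returns a list of dicts with a 'translation_text' field
outputs = pip(['今天天气很好。', '机器翻译越来越实用了。'])
for out in outputs:
    print(out['translation_text'])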


Reposted from blog.csdn.net/weixin_43896398/article/details/119382170