First, configure the model path:
model_path="/data1/model/chinese-macbert-base"
Step1 Import the required packages
import evaluate
from datasets import load_dataset
from transformers import AutoTokenizer, AutoModelForTokenClassification, TrainingArguments, Trainer, DataCollatorForTokenClassification
from seqeval.metrics import classification_report
Step2 Load the dataset
# If you have internet access, load the dataset directly with load_dataset
ner_datasets = load_dataset("peoples_daily_ner")
# If you cannot access the internet, load the dataset from disk instead
# from datasets import DatasetDict
# ner_datasets = DatasetDict.load_from_disk("ner_data")
print(ner_datasets["train"][0])
The first training example:
{'id': '0',
'tokens': ['海', '钓', '比', '赛', '地', '点', '在', '厦', '门', '与', '金', '门', '之', '间', '的', '海', '域', '。'],
'ner_tags': [0, 0, 0, 0, 0, 0, 0, 5, 6, 0, 5, 6, 0, 0, 0, 0, 0, 0]}
Get the full list of labels:
label_list = ner_datasets["train"].features["ner_tags"].feature.names
print(label_list)
# ['O', 'B-PER', 'I-PER', 'B-ORG', 'I-ORG', 'B-LOC', 'I-LOC']
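To make the integer tags above easier to read, here is a minimal sketch (using only the objects already defined) that maps the first example's tag ids back to their string names:

id2label = {i: name for i, name in enumerate(label_list)}
sample = ner_datasets["train"][0]
# 厦/门 and 金/门 map to B-LOC/I-LOC; every other character is O
print([id2label[t] for t in sample["ner_tags"]])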
Step3 Preprocess the dataset
tokenizer = AutoTokenizer.from_pretrained(model_path)
# Use word_ids to map word-level labels onto subword tokens
def process_function(examples):
    tokenized_examples = tokenizer(examples["tokens"], max_length=128, truncation=True, is_split_into_words=True)
    labels = []
    for i, label in enumerate(examples["ner_tags"]):
        word_ids = tokenized_examples.word_ids(batch_index=i)
        label_ids = []
        for word_id in word_ids:
            if word_id is None:
                # Special tokens ([CLS], [SEP]) get -100 so the loss ignores them
                label_ids.append(-100)
            else:
                label_ids.append(label[word_id])
        labels.append(label_ids)
    tokenized_examples["labels"] = labels
    return tokenized_examples
tokenized_datasets = ner_datasets.map(process_function, batched=True)
tokenized_datasets
Check the tokenized result; the train split now contains the tokenizer outputs plus the aligned labels:
Dataset({
features: ['id', 'tokens', 'ner_tags', 'input_ids', 'token_type_ids', 'attention_mask', 'labels'],
num_rows: 20865
})
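As a quick sanity check of the alignment (a minimal sketch, not part of the original code), you can print the tokens next to their labels; [CLS] and [SEP] have word_id None and therefore carry -100, which the loss function ignores:

sample = tokenized_datasets["train"][0]
# Tokens and aligned labels line up one-to-one
print(tokenizer.convert_ids_to_tokens(sample["input_ids"]))
print(sample["labels"])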
Step4 Create the model
# For any task that is not binary classification, remember to specify num_labels;
# otherwise the classification head defaults to 2 classes and label ids beyond that
# range raise an index error (which surfaces as a device-side assert on GPU)
model = AutoModelForTokenClassification.from_pretrained(model_path, num_labels=len(label_list))
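As an alternative (not required by the original flow), from_pretrained also accepts id2label and label2id, so the mappings that Step9 later patches onto model.config can be baked in at load time:

# Hypothetical variant: pass the label mappings when creating the model
id2label = {idx: label for idx, label in enumerate(label_list)}
label2id = {label: idx for idx, label in enumerate(label_list)}
model = AutoModelForTokenClassification.from_pretrained(
    model_path, num_labels=len(label_list), id2label=id2label, label2id=label2id
)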
Step5 Create the evaluation function
# Load the seqeval metric from a local copy of the metric script
# (with internet access you can use evaluate.load("seqeval") instead)
seqeval = evaluate.load("seqeval_metric.py")
import numpy as np
def eval_metric(pred):
    predictions, labels = pred
    predictions = np.argmax(predictions, axis=-1)
    # Convert ids back to the original string labels, dropping the -100 positions
    true_predictions = [
        [label_list[p] for p, l in zip(prediction, label) if l != -100]
        for prediction, label in zip(predictions, labels)
    ]
    true_labels = [
        [label_list[l] for p, l in zip(prediction, label) if l != -100]
        for prediction, label in zip(predictions, labels)
    ]
    result = seqeval.compute(predictions=true_predictions, references=true_labels, mode="strict", scheme="IOB2")
    return {
        "f1": result["overall_f1"],
        "recall": result["overall_recall"],
        "accuracy": result["overall_accuracy"]
    }
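To see what seqeval.compute returns, a tiny hand-made example (toy inputs, not from the dataset):

# A perfect prediction on one toy sentence should score 1.0 across the board
toy = seqeval.compute(
    predictions=[["B-LOC", "I-LOC", "O"]],
    references=[["B-LOC", "I-LOC", "O"]],
    mode="strict", scheme="IOB2",
)
print(toy["overall_f1"], toy["overall_recall"], toy["overall_accuracy"])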
Step6 Configure the training arguments
args = TrainingArguments(
    output_dir="models_for_ner",
    per_device_train_batch_size=64,
    per_device_eval_batch_size=128,
    evaluation_strategy="epoch",
    save_strategy="epoch",
    logging_steps=50,
    num_train_epochs=3
)
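If you would rather keep the checkpoint with the best F1 instead of the last one, a possible variant (the extra arguments are standard TrainingArguments options):

args = TrainingArguments(
    output_dir="models_for_ner",
    per_device_train_batch_size=64,
    per_device_eval_batch_size=128,
    evaluation_strategy="epoch",
    save_strategy="epoch",
    logging_steps=50,
    num_train_epochs=3,
    load_best_model_at_end=True,   # reload the best checkpoint when training ends
    metric_for_best_model="f1"     # "f1" matches the key returned by eval_metric
)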
Step7 Create the trainer
trainer = Trainer(
    model=model,
    args=args,
    train_dataset=tokenized_datasets["train"],
    eval_dataset=tokenized_datasets["validation"],
    compute_metrics=eval_metric,
    data_collator=DataCollatorForTokenClassification(tokenizer=tokenizer)
)
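DataCollatorForTokenClassification pads the labels with -100 alongside the input ids, which is why eval_metric can filter on l != -100. A small sketch to verify this (the remove_columns call drops the string columns the collator cannot pad):

collator = DataCollatorForTokenClassification(tokenizer=tokenizer)
features = tokenized_datasets["train"].remove_columns(["id", "tokens", "ner_tags"])
batch = collator([features[i] for i in range(2)])
print(batch["labels"])  # the shorter example is right-padded with -100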
Step8 Train the model
trainer.train()
Training results:
{'eval_loss': 0.023305043578147888,
'eval_f1': 0.9483832094798152,
'eval_recall': 0.950738255033557,
'eval_accuracy': 0.9936748981551398,
'eval_runtime': 9.956,
'eval_samples_per_second': 465.75,
'eval_steps_per_second': 3.716,
'epoch': 3.0}
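The metrics above come from the validation split. Since peoples_daily_ner also ships a test split, you can score the final model on it with the same trainer (assuming the split exists in your local copy as well):

trainer.evaluate(eval_dataset=tokenized_datasets["test"])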
Step9 Model prediction
from transformers import pipeline
# To run inference with pipeline, the model config needs id2label
model.config.id2label = {idx: label for idx, label in enumerate(label_list)}
model.config.label2id = {label: idx for idx, label in enumerate(label_list)}
# If the model was trained on a GPU, pass device so inference runs there too
# For NER, setting aggregation_strategy to "simple" merges tokens into whole entities instead of per-token results
# model = AutoModelForTokenClassification.from_pretrained("./models_for_ner/checkpoint-66")
ner_pipe = pipeline("ner", model=model,
tokenizer=tokenizer,
device=0,
aggregation_strategy='simple')
x = "2013年,姚明当选为第十二届全国政协委员"
res = ner_pipe(x)
ner_result = {}
# Group the recognized entities by type, slicing the input string by character offsets
for r in res:
    if r["entity_group"] not in ner_result:
        ner_result[r["entity_group"]] = []
    ner_result[r["entity_group"]].append(x[r["start"]: r["end"]])
print(ner_result)
The final result:
{'PER': ['姚明'], 'ORG': ['第十二届全国政协']}
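To reuse the pipeline later without retraining, one option is to save the model and tokenizer together (the directory name here is just an example):

save_dir = "models_for_ner/final"   # hypothetical path
trainer.save_model(save_dir)        # writes the model weights and config (incl. id2label)
tokenizer.save_pretrained(save_dir)
# Later: ner_pipe = pipeline("ner", model=save_dir, aggregation_strategy="simple")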
Source code
More content
- 小鱼吃猫博客: Transformers tutorials
- WeChat official account: codeCraft编程工艺