#!/usr/bin/env python
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# """
# Fine-tuning the library models for masked language modeling (BERT, ALBERT, RoBERTa...)
# on a text file or a dataset without using HuggingFace Trainer.
#
# Here is the full list of checkpoints on the hub that can be fine-tuned by this script:
# https://huggingface.co/models?filter=masked-lm
# """
# You can also adapt this script on your own mlm task. Pointers for this are left as comments.

import argparse
import logging
import math
import os
import random

import datasets
import numpy as np
import tokenizers
import torch
import transformers
from accelerate import Accelerator
from datasets import load_dataset
from torch.utils.data.dataloader import DataLoader
from tqdm.auto import tqdm
from transformers import (
    CONFIG_MAPPING,
    MODEL_MAPPING,
    AdamW,
    AutoConfig,
    AutoModelForMaskedLM,
    AutoTokenizer,
    BatchEncoding,
    BertConfig,
    BertForPreTraining,
    DataCollatorForLanguageModeling,
    SchedulerType,
    get_scheduler,
    set_seed,
)


logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


def parse_args():
    parser = argparse.ArgumentParser(
        description="Finetune a transformers model on a Masked Language Modeling task"
    )
    # parser.add_argument(
    #     "--dataset_name",
    #     type=str,
    #     default=None,
    #     help="The name of the dataset to use (via the datasets library).",
    # )
    # parser.add_argument(
    #     "--dataset_config_name",
    #     type=str,
    #     default=None,
    #     help="The configuration name of the dataset to use (via the datasets library).",
    # )
    # parser.add_argument(
    #     "--train_file",
    #     type=str,
    #     default=None,
    #     help="A csv or a json file containing the training data.",
    # )
    # parser.add_argument(
    #     "--validation_file",
    #     type=str,
    #     default=None,
    #     help="A csv or a json file containing the validation data.",
    # )
    # parser.add_argument(
    #     "--validation_split_percentage",
    #     default=5,
    #     help="The percentage of the train set used as validation set in case there's no validation split",
    # )
    # parser.add_argument(
    #     "--pad_to_max_length",
    #     action="store_true",
    #     help="If passed, pad all samples to `max_length`. Otherwise, dynamic padding is used.",
    # )
    # parser.add_argument(
    #     "--model_name_or_path",
    #     type=str,
    #     help="Path to pretrained model or model identifier from huggingface.co/models.",
    #     required=True,
    # )
    # parser.add_argument(
    #     "--config_name",
    #     type=str,
    #     default=None,
    #     help="Pretrained config name or path if not the same as model_name",
    # )
    # parser.add_argument(
    #     "--tokenizer_name",
    #     type=str,
    #     default=None,
    #     help="Pretrained tokenizer name or path if not the same as model_name",
    # )
    # parser.add_argument(
    #     "--use_slow_tokenizer",
    #     action="store_true",
    #     help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).",
    # )
    parser.add_argument(
        "--per_device_train_batch_size",
        type=int,
        default=8,
        help="Batch size (per device) for the training dataloader.",
    )
    parser.add_argument(
        "--per_device_eval_batch_size",
        type=int,
        default=8,
        help="Batch size (per device) for the evaluation dataloader.",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=5e-5,
        help="Initial learning rate (after the potential warmup period) to use.",
    )
    parser.add_argument(
        "--weight_decay", type=float, default=0.0, help="Weight decay to use."
    )
    parser.add_argument(
        "--num_train_epochs",
        type=int,
        default=40,
        help="Total number of training epochs to perform.",
    )
    # main() uses both of the following options unconditionally (for the scheduler
    # math and gradient accumulation), so they must stay defined here.
    parser.add_argument(
        "--max_train_steps",
        type=int,
        default=None,
        help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument(
        "--lr_scheduler_type",
        type=SchedulerType,
        default="linear",
        help="The scheduler type to use.",
        choices=[
            "linear",
            "cosine",
            "cosine_with_restarts",
            "polynomial",
            "constant",
            "constant_with_warmup",
        ],
    )
    parser.add_argument(
        "--num_warmup_steps",
        type=int,
        default=0,
        help="Number of steps for the warmup in the lr scheduler.",
    )
    parser.add_argument(
        "--output_dir", type=str, default=None, help="Where to store the final model."
    )
    parser.add_argument(
        "--seed", type=int, default=None, help="A seed for reproducible training."
    )
    # parser.add_argument(
    #     "--model_type",
    #     type=str,
    #     default=None,
    #     help="Model type to use if training from scratch.",
    #     choices=MODEL_TYPES,
    # )
    # parser.add_argument(
    #     "--max_seq_length",
    #     type=int,
    #     default=None,
    #     help="The maximum total input sequence length after tokenization. Sequences longer than this will be truncated.",
    # )
    # parser.add_argument(
    #     "--line_by_line",
    #     type=bool,
    #     default=False,
    #     help="Whether distinct lines of text in the dataset are to be handled as distinct sequences.",
    # )
    parser.add_argument(
        "--preprocessing_num_workers",
        type=int,
        default=None,
        help="The number of processes to use for the preprocessing.",
    )
    # parser.add_argument(
    #     "--overwrite_cache",
    #     type=bool,
    #     default=False,
    #     help="Overwrite the cached training and evaluation sets",
    # )
    parser.add_argument(
        "--mlm_probability",
        type=float,
        default=0.15,
        help="Ratio of tokens to mask for masked language modeling loss",
    )

    args = parser.parse_args()

    # Sanity checks
    # if (
    #     args.dataset_name is None
    #     and args.train_file is None
    #     and args.validation_file is None
    # ):
    #     raise ValueError("Need either a dataset name or a training/validation file.")
    # else:
    #     if args.train_file is not None:
    #         extension = args.train_file.split(".")[-1]
    #         assert extension in [
    #             "csv",
    #             "json",
    #             "txt",
    #         ], "`train_file` should be a csv, json or txt file."
    #     if args.validation_file is not None:
    #         extension = args.validation_file.split(".")[-1]
    #         assert extension in [
    #             "csv",
    #             "json",
    #             "txt",
    #         ], "`validation_file` should be a csv, json or txt file."

    if args.output_dir is not None:
        os.makedirs(args.output_dir, exist_ok=True)

    return args


def main():
    args = parse_args()

    # Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state)

    # Set up logging: we only want one process per machine to log things on the screen.
    # accelerator.is_local_main_process is only True for one process per machine.
    logger.setLevel(
        logging.INFO if accelerator.is_local_main_process else logging.ERROR
    )
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    # # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # # (the dataset will be downloaded automatically from the datasets Hub).
    # #
    # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # # 'text' is found. You can easily tweak this behavior (see below).
    # #
    # # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # # download the dataset.
    # if args.dataset_name is not None:
    #     # Downloading and loading a dataset from the hub.
    #     raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
    #     if "validation" not in raw_datasets.keys():
    #         raw_datasets["validation"] = load_dataset(
    #             args.dataset_name,
    #             args.dataset_config_name,
    #             split=f"train[:{args.validation_split_percentage}%]",
    #         )
    #         raw_datasets["train"] = load_dataset(
    #             args.dataset_name,
    #             args.dataset_config_name,
    #             split=f"train[{args.validation_split_percentage}%:]",
    #         )
    # else:
    #     data_files = {}
    #     if args.train_file is not None:
    #         data_files["train"] = args.train_file
    #     if args.validation_file is not None:
    #         data_files["validation"] = args.validation_file
    #     extension = args.train_file.split(".")[-1]
    #     if extension == "txt":
    #         extension = "text"
    #     raw_datasets = load_dataset(extension, data_files=data_files)

    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # We use a dataset in json format;
    # `raw_datasets` has two features: `text`: "sentA\tsentB" and `is_next`: 0 or 1.

    # We take control of loading the dataset ourselves;
    # there may be several json files for training.
    # raw_datasets = load_dataset("json", data_files=args.train_file, field="data")
    raw_datasets = load_dataset(
        "json",
        data_files="/home/ming/malware/inst2vec_bert/data/test_lm/inst.json",
        field="data",
    )
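
    # For reference, a minimal sketch of the expected file layout (an assumption
    # inferred from `field="data"` and the `text`/`is_next` features described above;
    # the real instruction corpus may store the sentence pair differently, e.g. as a
    # tab-joined string, so check the actual files before relying on this shape):
    # {
    #   "data": [
    #     {"text": ["push ebp", "mov ebp, esp"], "is_next": 1},
    #     {"text": ["xor eax, eax", "ret"], "is_next": 0}
    #   ]
    # }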

    # We use the tokenizer trained earlier on the positive dataset.
    # tokenizer = tokenizers.Tokenizer.from_file(args.tokenizer_file)
    tokenizer = tokenizers.Tokenizer.from_file(
        "/home/ming/malware/inst2vec_bert/bert/tokenizer-inst.json"
    )
    tokenizer.enable_padding(
        pad_id=tokenizer.token_to_id("[PAD]"), pad_token="[PAD]", length=64
    )
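
    # A rough sketch (an assumption, not part of this script) of how a compatible
    # `tokenizer-inst.json` could have been produced with the `tokenizers` library;
    # the corpus file name and model choice here are placeholders:
    #
    #   from tokenizers import Tokenizer, models, pre_tokenizers, processors, trainers
    #   tok = Tokenizer(models.WordLevel(unk_token="[UNK]"))
    #   tok.pre_tokenizer = pre_tokenizers.Whitespace()
    #   trainer = trainers.WordLevelTrainer(
    #       special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"]
    #   )
    #   tok.train(["inst_corpus.txt"], trainer)
    #   tok.post_processor = processors.TemplateProcessing(
    #       single="[CLS] $A [SEP]",
    #       pair="[CLS] $A [SEP] $B:1 [SEP]:1",
    #       special_tokens=[("[CLS]", tok.token_to_id("[CLS]")),
    #                       ("[SEP]", tok.token_to_id("[SEP]"))],
    #   )
    #   tok.save("tokenizer-inst.json")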

    # We use a much smaller BERT; its config is:
    config = BertConfig(
        vocab_size=tokenizer.get_vocab_size(),
        hidden_size=64,
        num_hidden_layers=8,
        num_attention_heads=8,
        intermediate_size=256,
        max_position_embeddings=64,
    )
    # Initialize the new BERT for pre-training.
    model = BertForPreTraining(config)
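
    # Quick size check: log the parameter count of the freshly initialized model
    # (with the small config above this is far below BERT-base size).
    logger.info(
        f"Initialized BertForPreTraining with "
        f"{sum(p.numel() for p in model.parameters()):,} parameters"
    )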

    # all_special_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"]
    # all_special_ids = [tokenizer.token_to_id(token) for token in all_special_tokens]

    # # Load pretrained model and tokenizer
    # #
    # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # # download model & vocab.
    # if args.config_name:
    #     config = AutoConfig.from_pretrained(args.config_name)
    # elif args.model_name_or_path:
    #     config = AutoConfig.from_pretrained(args.model_name_or_path)
    # else:
    #     config = CONFIG_MAPPING[args.model_type]()
    #     logger.warning("You are instantiating a new config instance from scratch.")

    # if args.tokenizer_name:
    #     tokenizer = AutoTokenizer.from_pretrained(
    #         args.tokenizer_name, use_fast=not args.use_slow_tokenizer
    #     )
    # elif args.model_name_or_path:
    #     tokenizer = AutoTokenizer.from_pretrained(
    #         args.model_name_or_path, use_fast=not args.use_slow_tokenizer
    #     )
    # else:
    #     raise ValueError(
    #         "You are instantiating a new tokenizer from scratch. This is not supported by this script."
    #         "You can do it from another script, save it, and load it from here, using --tokenizer_name."
    #     )

    # if args.model_name_or_path:
    #     model = AutoModelForMaskedLM.from_pretrained(
    #         args.model_name_or_path,
    #         from_tf=bool(".ckpt" in args.model_name_or_path),
    #         config=config,
    #     )
    # else:
    #     logger.info("Training new model from scratch")
    #     model = AutoModelForMaskedLM.from_config(config)

    # model.resize_token_embeddings(len(tokenizer))

    # Preprocessing the datasets.
    # First we tokenize all the texts.
    column_names = raw_datasets["train"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    # if args.max_seq_length is None:
    #     max_seq_length = tokenizer.model_max_length
    #     if max_seq_length > 1024:
    #         logger.warning(
    #             f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
    #             "Picking 1024 instead. You can change that default value by passing --max_seq_length xxx."
    #         )
    #         max_seq_length = 1024
    # else:
    #     if args.max_seq_length > tokenizer.model_max_length:
    #         logger.warning(
    #             f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
    #             f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
    #         )
    #     max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)

    # if args.line_by_line:
    #     # When using line_by_line, we just tokenize each nonempty line.
    #     padding = "max_length" if args.pad_to_max_length else False

    #     def tokenize_function(examples):
    #         # Remove empty lines
    #         examples["text"] = [
    #             line
    #             for line in examples["text"]
    #             if len(line) > 0 and not line.isspace()
    #         ]
    #         return tokenizer(
    #             examples["text"],
    #             padding=padding,
    #             truncation=True,
    #             max_length=max_seq_length,
    #             # We use this option because DataCollatorForLanguageModeling (see below) is more efficient when it
    #             # receives the `special_tokens_mask`.
    #             return_special_tokens_mask=True,
    #         )

    #     tokenized_datasets = raw_datasets.map(
    #         tokenize_function,
    #         batched=True,
    #         num_proc=args.preprocessing_num_workers,
    #         remove_columns=[text_column_name],
    #         load_from_cache_file=not args.overwrite_cache,
    #     )
    # else:
    #     # Otherwise, we tokenize every text, then concatenate them together before splitting them in smaller parts.
    #     # We use `return_special_tokens_mask=True` because DataCollatorForLanguageModeling (see below) is more
    #     # efficient when it receives the `special_tokens_mask`.
    #     def tokenize_function(examples):
    #         return tokenizer(
    #             examples[text_column_name], return_special_tokens_mask=True
    #         )

    #     tokenized_datasets = raw_datasets.map(
    #         tokenize_function,
    #         batched=True,
    #         num_proc=args.preprocessing_num_workers,
    #         remove_columns=column_names,
    #         load_from_cache_file=not args.overwrite_cache,
    #     )

    #     # Main data processing function that will concatenate all texts from our dataset and generate chunks of
    #     # max_seq_length.
    #     def group_texts(examples):
    #         # Concatenate all texts.
    #         concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
    #         total_length = len(concatenated_examples[list(examples.keys())[0]])
    #         # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
    #         # customize this part to your needs.
    #         total_length = (total_length // max_seq_length) * max_seq_length
    #         # Split by chunks of max_len.
    #         result = {
    #             k: [
    #                 t[i : i + max_seq_length]
    #                 for i in range(0, total_length, max_seq_length)
    #             ]
    #             for k, t in concatenated_examples.items()
    #         }
    #         return result

    #     # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a
    #     # remainder for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value
    #     # might be slower to preprocess.
    #     #
    #     # To speed up this part, we use multiprocessing. See the documentation of the map method for more information:
    #     # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map

    #     tokenized_datasets = tokenized_datasets.map(
    #         group_texts,
    #         batched=True,
    #         num_proc=args.preprocessing_num_workers,
    #         load_from_cache_file=not args.overwrite_cache,
    #     )
    def tokenize_function(examples):
        # Each "text" entry is expected to hold the two sentences of a pair, so that
        # `encode_batch` receives (sentA, sentB) tuples and assigns token type ids.
        text = [tuple(sent) for sent in examples["text"]]
        encoded_inputs = {}
        results = tokenizer.encode_batch(text)
        # input_ids, type_ids, special_token_masks = [], [], []
        # for i, result in enumerate(results):
        #     input_ids.append(result.ids)
        #     type_ids.append(result.type_ids)
        #     special_token_masks.append(result.special_tokens_mask)
        encoded_inputs["input_ids"] = [result.ids for result in results]
        encoded_inputs["token_type_ids"] = [result.type_ids for result in results]
        # special_tokens_mask = [1 if token in all_special_ids else 0 for token in ids]
        encoded_inputs["special_tokens_mask"] = [
            result.special_tokens_mask for result in results
        ]
        # BertForPreTraining expects `next_sentence_label` with 0 = sentence B really
        # is the next sentence and 1 = it is not, hence the flip of `is_next`.
        encoded_inputs["next_sentence_label"] = [
            1 - label for label in examples["is_next"]
        ]
        batch_outputs = BatchEncoding(
            encoded_inputs, tensor_type="np", prepend_batch_axis=False,
        )
        return batch_outputs

    tokenized_datasets = raw_datasets.map(
        tokenize_function,
        batched=True,
        num_proc=args.preprocessing_num_workers,
        remove_columns=column_names,
        # load_from_cache_file=not args.overwrite_cache,
    )
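
    # The single JSON file loaded above only yields a "train" split, so carve out a
    # small validation set here (an assumption about intent; drop or adjust this if
    # the data already ships with a validation split) so the "validation" lookup
    # below does not fail on a missing key.
    if "validation" not in tokenized_datasets:
        split_datasets = tokenized_datasets["train"].train_test_split(
            test_size=0.05, seed=args.seed
        )
        tokenized_datasets["train"] = split_datasets["train"]
        tokenized_datasets["validation"] = split_datasets["test"]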

    train_dataset = tokenized_datasets["train"]
    eval_dataset = tokenized_datasets["validation"]

    # Log a few random samples from the training set:
    for index in random.sample(range(len(train_dataset)), 3):
        logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
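
    # NOTE (assumption): the DataCollatorForLanguageModeling built just below is
    # written against a transformers tokenizer (it calls `tokenizer.pad` and reads
    # `tokenizer.mask_token`), while `tokenizer` here is a raw `tokenizers.Tokenizer`.
    # If collation fails, one sketch of a fix is to wrap the tokenizer first:
    #
    #   from transformers import PreTrainedTokenizerFast
    #   tokenizer = PreTrainedTokenizerFast(
    #       tokenizer_object=tokenizer,
    #       unk_token="[UNK]", cls_token="[CLS]", sep_token="[SEP]",
    #       pad_token="[PAD]", mask_token="[MASK]",
    #   )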

    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability
    )

    # DataLoaders creation:
    train_dataloader = DataLoader(
        train_dataset,
        shuffle=True,
        collate_fn=data_collator,
        batch_size=args.per_device_train_batch_size,
    )
    eval_dataloader = DataLoader(
        eval_dataset,
        collate_fn=data_collator,
        batch_size=args.per_device_eval_batch_size,
    )

    # Optimizer
    # Split weights in two groups, one with weight decay and the other not.
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [
                p
                for n, p in model.named_parameters()
                if not any(nd in n for nd in no_decay)
            ],
            "weight_decay": args.weight_decay,
        },
        {
            "params": [
                p
                for n, p in model.named_parameters()
                if any(nd in n for nd in no_decay)
            ],
            "weight_decay": 0.0,
        },
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate)

    # Prepare everything with our `accelerator`.
    model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader
    )

    # Note -> the training dataloader needs to be prepared before we grab its length below
    # (because its length will be shorter in a multi-process setup).

    # Scheduler and math around the number of training steps.
    num_update_steps_per_epoch = math.ceil(
        len(train_dataloader) / args.gradient_accumulation_steps
    )
    if args.max_train_steps is None:
        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
    else:
        args.num_train_epochs = math.ceil(
            args.max_train_steps / num_update_steps_per_epoch
        )

    lr_scheduler = get_scheduler(
        name=args.lr_scheduler_type,
        optimizer=optimizer,
        num_warmup_steps=args.num_warmup_steps,
        num_training_steps=args.max_train_steps,
    )

    # Train!
    total_batch_size = (
        args.per_device_train_batch_size
        * accelerator.num_processes
        * args.gradient_accumulation_steps
    )

    logger.info("***** Running training *****")
    logger.info(f"  Num examples = {len(train_dataset)}")
    logger.info(f"  Num Epochs = {args.num_train_epochs}")
    logger.info(
        f"  Instantaneous batch size per device = {args.per_device_train_batch_size}"
    )
    logger.info(
        f"  Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}"
    )
    logger.info(f"  Gradient Accumulation steps = {args.gradient_accumulation_steps}")
    logger.info(f"  Total optimization steps = {args.max_train_steps}")
    # Only show the progress bar once on each machine.
    progress_bar = tqdm(
        range(args.max_train_steps), disable=not accelerator.is_local_main_process
    )
    completed_steps = 0

    for epoch in range(args.num_train_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / args.gradient_accumulation_steps
            accelerator.backward(loss)
            if (
                step % args.gradient_accumulation_steps == 0
                or step == len(train_dataloader) - 1
            ):
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
                progress_bar.update(1)
                completed_steps += 1

            if completed_steps >= args.max_train_steps:
                break

        model.eval()
        losses = []
        for step, batch in enumerate(eval_dataloader):
            with torch.no_grad():
                outputs = model(**batch)

            loss = outputs.loss
            losses.append(
                accelerator.gather(loss.repeat(args.per_device_eval_batch_size))
            )

        losses = torch.cat(losses)
        losses = losses[: len(eval_dataset)]
        try:
            perplexity = math.exp(torch.mean(losses))
        except OverflowError:
            perplexity = float("inf")
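
        # This "perplexity" is exp(mean eval loss); BertForPreTraining's loss sums the
        # MLM and next-sentence terms, so treat it as a relative progress indicator
        # rather than a pure masked-LM perplexity.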

        logger.info(f"epoch {epoch}: perplexity: {perplexity}")

    if args.output_dir is not None:
        accelerator.wait_for_everyone()
        unwrapped_model = accelerator.unwrap_model(model)
        unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save)


if __name__ == "__main__":
    main()
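

# A sketch of how this script might be launched (assumptions: the hard-coded dataset
# and tokenizer paths above exist, `accelerate config` has been run, and `run_lm.py`
# is a placeholder for whatever this file is actually named):
#
#   accelerate launch run_lm.py \
#       --per_device_train_batch_size 32 \
#       --num_train_epochs 5 \
#       --output_dir ./bert_inst2vec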