
unitorch.cli.models.bert

BertProcessor

Tip

core/process/bert is the configuration section for BertProcessor.

Bases: BertProcessor

Processor for BERT models.

Initialize BertProcessor.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `vocab_path` | `str` | The path to the vocabulary file. | *required* |
| `max_seq_length` | `int` | The maximum sequence length. Defaults to 128. | `128` |
| `special_input_ids` | `Dict` | Special input IDs. Defaults to an empty dictionary. | `dict()` |
| `do_lower_case` | `bool` | Whether to lower case the input text. Defaults to True. | `True` |
| `do_basic_tokenize` | `bool` | Whether to perform basic tokenization. Defaults to True. | `True` |
| `do_whole_word_mask` | `bool` | Whether to use whole word masking. Defaults to True. | `True` |
| `masked_lm_prob` | `float` | The probability of masking a token for masked language modeling. Defaults to 0.15. | `0.15` |
| `max_predictions_per_seq` | `int` | The maximum number of masked LM predictions per sequence. Defaults to 20. | `20` |
Source code in src/unitorch/cli/models/bert/processing.py
def __init__(
    self,
    vocab_path: str,
    max_seq_length: Optional[int] = 128,
    special_input_ids: Optional[Dict] = dict(),
    do_lower_case: Optional[bool] = True,
    do_basic_tokenize: Optional[bool] = True,
    do_whole_word_mask: Optional[bool] = True,
    masked_lm_prob: Optional[float] = 0.15,
    max_predictions_per_seq: Optional[int] = 20,
):
    """
    Initialize BertProcessor.

    Args:
        vocab_path (str): The path to the vocabulary file.
        max_seq_length (int, optional): The maximum sequence length. Defaults to 128.
        special_input_ids (Dict, optional): Special input IDs. Defaults to an empty dictionary.
        do_lower_case (bool, optional): Whether to lower case the input text. Defaults to True.
        do_basic_tokenize (bool, optional): Whether to perform basic tokenization. Defaults to True.
        do_whole_word_mask (bool, optional): Whether to use whole word masking. Defaults to True.
        masked_lm_prob (float, optional): The probability of masking a token for masked language modeling. Defaults to 0.15.
        max_predictions_per_seq (int, optional): The maximum number of masked LM predictions per sequence. Defaults to 20.
    """
    super().__init__(
        vocab_path=vocab_path,
        max_seq_length=max_seq_length,
        special_input_ids=special_input_ids,
        do_lower_case=do_lower_case,
        do_basic_tokenize=do_basic_tokenize,
        do_whole_word_mask=do_whole_word_mask,
        masked_lm_prob=masked_lm_prob,
        max_predictions_per_seq=max_predictions_per_seq,
    )
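
A minimal usage sketch based on the `__init__` signature above; the vocabulary path is a placeholder, and the import path is assumed from this module's name.

```python
# Hedged sketch: "path/to/vocab.txt" is a placeholder; the import path is
# assumed from the documented module name unitorch.cli.models.bert.
from unitorch.cli.models.bert import BertProcessor

processor = BertProcessor(
    vocab_path="path/to/vocab.txt",
    max_seq_length=128,
    do_lower_case=True,
    masked_lm_prob=0.15,
    max_predictions_per_seq=20,
)
```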

from_core_configure classmethod

from_core_configure(config, **kwargs)

Create an instance of BertProcessor from a core configuration.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `config` | | The core configuration. | *required* |
| `**kwargs` | | Additional keyword arguments. | `{}` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `BertProcessor` | | An instance of BertProcessor. |

Source code in src/unitorch/cli/models/bert/processing.py
@classmethod
@add_default_section_for_init("core/process/bert")
def from_core_configure(cls, config, **kwargs):
    """
    Create an instance of BertProcessor from a core configuration.

    Args:
        config: The core configuration.
        **kwargs: Additional keyword arguments.

    Returns:
        BertProcessor: An instance of BertProcessor.
    """
    config.set_default_section("core/process/bert")
    pretrained_name = config.getoption("pretrained_name", "bert-base-uncased")
    vocab_path = config.getoption("vocab_path", None)
    vocab_path = pop_value(
        vocab_path,
        nested_dict_value(pretrained_bert_infos, pretrained_name, "vocab"),
    )
    vocab_path = cached_path(vocab_path)

    return {
        "vocab_path": vocab_path,
    }
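
For context, a sketch of driving this classmethod from a configuration object; the `CoreConfigureParser` name, the config file, and the INI-style keys are assumptions inferred from the `core/process/bert` section named in the Tip above, while the option names (`pretrained_name`, `vocab_path`) come from the method body.

```python
# Hedged sketch: CoreConfigureParser and "config.ini" are assumptions; any core
# configuration object exposing set_default_section/getoption should work.
#
# Assumed config file contents:
# [core/process/bert]
# pretrained_name = bert-base-uncased
from unitorch.cli import CoreConfigureParser
from unitorch.cli.models.bert import BertProcessor

config = CoreConfigureParser("config.ini")  # hypothetical config file
processor = BertProcessor.from_core_configure(config)
```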

BertForClassification

Tip

core/model/classification/bert is the configuration section for BertForClassification.

Bases: BertForClassification

BERT model for classification tasks.

Initialize BertForClassification.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `config_path` | `str` | The path to the model configuration file. | *required* |
| `num_classes` | `int` | The number of classes for classification. Defaults to 1. | `1` |
| `gradient_checkpointing` | `bool` | Whether to use gradient checkpointing. Defaults to False. | `False` |
Source code in src/unitorch/cli/models/bert/modeling.py
def __init__(
    self,
    config_path: str,
    num_classes: Optional[int] = 1,
    gradient_checkpointing: Optional[bool] = False,
):
    """
    Initialize BertForClassification.

    Args:
        config_path (str): The path to the model configuration file.
        num_classes (int, optional): The number of classes for classification. Defaults to 1.
        gradient_checkpointing (bool, optional): Whether to use gradient checkpointing. Defaults to False.
    """
    super().__init__(
        config_path=config_path,
        num_classes=num_classes,
        gradient_checkpointing=gradient_checkpointing,
    )
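
A minimal construction sketch based on the `__init__` signature above; the configuration path is a placeholder for a BERT model config file, and the import path is assumed from this module's name.

```python
# Hedged sketch: "path/to/config.json" is a placeholder BERT config file;
# the import path is assumed from the documented module name.
from unitorch.cli.models.bert import BertForClassification

model = BertForClassification(
    config_path="path/to/config.json",
    num_classes=2,
    gradient_checkpointing=False,
)
```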

forward

forward(
    input_ids: Tensor,
    attention_mask: Optional[Tensor] = None,
    token_type_ids: Optional[Tensor] = None,
    position_ids: Optional[Tensor] = None,
)

Forward pass of the BertForClassification model.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `input_ids` | `Tensor` | Input IDs. | *required* |
| `attention_mask` | `Tensor` | Attention mask. Defaults to None. | `None` |
| `token_type_ids` | `Tensor` | Token type IDs. Defaults to None. | `None` |
| `position_ids` | `Tensor` | Position IDs. Defaults to None. | `None` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `ClassificationOutputs` | | Model outputs for classification. |

Source code in src/unitorch/cli/models/bert/modeling.py
@autocast(device_type=("cuda" if torch.cuda.is_available() else "cpu"))
def forward(
    self,
    input_ids: torch.Tensor,
    attention_mask: Optional[torch.Tensor] = None,
    token_type_ids: Optional[torch.Tensor] = None,
    position_ids: Optional[torch.Tensor] = None,
):
    """
    Forward pass of the BertForClassification model.

    Args:
        input_ids (torch.Tensor): Input IDs.
        attention_mask (torch.Tensor, optional): Attention mask. Defaults to None.
        token_type_ids (torch.Tensor, optional): Token type IDs. Defaults to None.
        position_ids (torch.Tensor, optional): Position IDs. Defaults to None.

    Returns:
        ClassificationOutputs: Model outputs for classification.
    """
    outputs = super().forward(
        input_ids=input_ids,
        attention_mask=attention_mask,
        token_type_ids=token_type_ids,
        position_ids=position_ids,
    )
    return ClassificationOutputs(outputs=outputs)
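
To illustrate the expected tensor shapes, a hedged sketch of one forward call; the config path, batch and sequence sizes, and vocabulary size are placeholders, not values prescribed by this page.

```python
# Illustrative only: all sizes and paths below are placeholders.
import torch

from unitorch.cli.models.bert import BertForClassification

model = BertForClassification("path/to/config.json", num_classes=2)
model.eval()

input_ids = torch.randint(0, 30522, (2, 128))    # (batch, seq_len)
attention_mask = torch.ones_like(input_ids)
token_type_ids = torch.zeros_like(input_ids)

with torch.no_grad():
    outputs = model(
        input_ids=input_ids,
        attention_mask=attention_mask,
        token_type_ids=token_type_ids,
    )
# `outputs` is a ClassificationOutputs wrapping the classification logits.
```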

from_core_configure classmethod

from_core_configure(config, **kwargs)

Create an instance of BertForClassification from a core configuration.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `config` | | The core configuration. | *required* |
| `**kwargs` | | Additional keyword arguments. | `{}` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `BertForClassification` | | An instance of BertForClassification. |

Source code in src/unitorch/cli/models/bert/modeling.py
@classmethod
@add_default_section_for_init("core/model/classification/bert")
def from_core_configure(cls, config, **kwargs):
    """
    Create an instance of BertForClassification from a core configuration.

    Args:
        config: The core configuration.
        **kwargs: Additional keyword arguments.

    Returns:
        BertForClassification: An instance of BertForClassification.
    """
    config.set_default_section("core/model/classification/bert")
    pretrained_name = config.getoption("pretrained_name", "bert-base-uncased")
    config_path = config.getoption("config_path", None)
    num_classes = config.getoption("num_classes", 1)

    config_path = pop_value(
        config_path,
        nested_dict_value(pretrained_bert_infos, pretrained_name, "config"),
    )
    config_path = cached_path(config_path)
    gradient_checkpointing = config.getoption("gradient_checkpointing", False)

    inst = cls(config_path, num_classes, gradient_checkpointing)
    pretrained_weight_path = config.getoption("pretrained_weight_path", None)
    weight_path = pop_value(
        pretrained_weight_path,
        nested_dict_value(pretrained_bert_infos, pretrained_name, "weight"),
        check_none=False,
    )
    if weight_path is not None:
        inst.from_pretrained(weight_path)

    return inst
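
As with the processor, a hedged sketch of configuration-driven construction; the `CoreConfigureParser` name and the INI layout are assumptions, while the option names (`pretrained_name`, `config_path`, `num_classes`, `gradient_checkpointing`, `pretrained_weight_path`) come directly from the method body above.

```python
# Hedged sketch: CoreConfigureParser and the config file layout are assumptions.
#
# Assumed config file contents:
# [core/model/classification/bert]
# pretrained_name = bert-base-uncased
# num_classes = 2
from unitorch.cli import CoreConfigureParser
from unitorch.cli.models.bert import BertForClassification

config = CoreConfigureParser("config.ini")  # hypothetical config file
model = BertForClassification.from_core_configure(config)
```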