
unitorch.models.roberta

RobertaProcessor

Bases: HfTextClassificationProcessor

Initializes a RobertaProcessor for text classification tasks.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| vocab_path | str | The path to the vocabulary file. | required |
| merge_path | str | The path to the merge file. | required |
| max_seq_length | int | The maximum sequence length. Defaults to 128. | 128 |
| source_type_id | int | The ID for the source type. Defaults to 0. | 0 |
| target_type_id | int | The ID for the target type. Defaults to 0. | 0 |
Source code in src/unitorch/models/roberta/processing.py
def __init__(
    self,
    vocab_path: str,
    merge_path: str,
    max_seq_length: Optional[int] = 128,
    source_type_id: Optional[int] = 0,
    target_type_id: Optional[int] = 0,
):
    """
    Initializes a RobertaProcessor for text classification tasks.

    Args:
        vocab_path (str): The path to the vocabulary file.
        merge_path (str): The path to the merge file.
        max_seq_length (int, optional): The maximum sequence length. Defaults to 128.
        source_type_id (int, optional): The ID for the source type. Defaults to 0.
        target_type_id (int, optional): The ID for the target type. Defaults to 0.
    """
    tokenizer = get_roberta_tokenizer(
        vocab_path,
        merge_path,
    )
    super().__init__(
        tokenizer=tokenizer,
        max_seq_length=max_seq_length,
        source_type_id=source_type_id,
        target_type_id=target_type_id,
        position_start_id=tokenizer.pad_token_id + 1,
    )
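
A minimal instantiation sketch; the file paths below are placeholders, and any encoding helpers used downstream come from the HfTextClassificationProcessor base class rather than from this constructor:

```python
from unitorch.models.roberta import RobertaProcessor

# Placeholder paths to a RoBERTa BPE vocabulary (vocab.json) and merges file (merges.txt).
processor = RobertaProcessor(
    vocab_path="path/to/vocab.json",
    merge_path="path/to/merges.txt",
    max_seq_length=128,
)
# Text-to-tensor processing methods are inherited from HfTextClassificationProcessor.
```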

RobertaForClassification

Bases: GenericModel

Initializes a RobertaForClassification model.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| config_path | str | The path to the model configuration file. | required |
| num_classes | int | The number of classes for classification. Defaults to 1. | 1 |
| gradient_checkpointing | bool | Whether to use gradient checkpointing. Defaults to False. | False |
Source code in src/unitorch/models/roberta/modeling.py
def __init__(
    self,
    config_path: str,
    num_classes: Optional[int] = 1,
    gradient_checkpointing: Optional[bool] = False,
):
    """
    Initializes a RobertaForClassification model.

    Args:
        config_path (str): The path to the model configuration file.
        num_classes (int, optional): The number of classes for classification. Defaults to 1.
        gradient_checkpointing (bool, optional): Whether to use gradient checkpointing. Defaults to False.
    """
    super().__init__()
    self.config = RobertaConfig.from_json_file(config_path)
    self.config.gradient_checkpointing = gradient_checkpointing
    self.roberta = RobertaModel(self.config)
    self.dropout = nn.Dropout(self.config.hidden_dropout_prob)
    self.classifier = nn.Linear(self.config.hidden_size, num_classes)
    self.init_weights()

forward

forward(
    input_ids: Tensor,
    attention_mask: Optional[Tensor] = None,
    token_type_ids: Optional[Tensor] = None,
    position_ids: Optional[Tensor] = None,
)

Performs forward pass of the RobertaForClassification model.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| input_ids | Tensor | Tensor of input token IDs. | required |
| attention_mask | Tensor | Tensor of attention mask. Defaults to None. | None |
| token_type_ids | Tensor | Tensor of token type IDs. Defaults to None. | None |
| position_ids | Tensor | Tensor of position IDs. Defaults to None. | None |

Returns:

| Type | Description |
| --- | --- |
| Tensor | The model's logits. |

Source code in src/unitorch/models/roberta/modeling.py
def forward(
    self,
    input_ids: torch.Tensor,
    attention_mask: Optional[torch.Tensor] = None,
    token_type_ids: Optional[torch.Tensor] = None,
    position_ids: Optional[torch.Tensor] = None,
):
    """
    Performs forward pass of the RobertaForClassification model.

    Args:
        input_ids (torch.Tensor): Tensor of input token IDs.
        attention_mask (torch.Tensor, optional): Tensor of attention mask. Defaults to None.
        token_type_ids (torch.Tensor, optional): Tensor of token type IDs. Defaults to None.
        position_ids (torch.Tensor, optional): Tensor of position IDs. Defaults to None.

    Returns:
        (torch.Tensor): The model's logits.
    """
    outputs = self.roberta(
        input_ids,
        attention_mask=attention_mask,
        token_type_ids=token_type_ids,
        position_ids=position_ids,
    )
    pooled_output = outputs[1]

    pooled_output = self.dropout(pooled_output)
    logits = self.classifier(pooled_output)
    return logits
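
A minimal usage sketch for the classification model; the config path is a placeholder and the batch is random dummy data, so this only illustrates tensor shapes, not a trained model:

```python
import torch
from unitorch.models.roberta import RobertaForClassification

# Placeholder path to a RoBERTa config.json describing the architecture.
model = RobertaForClassification("path/to/config.json", num_classes=2)
model.eval()

# Dummy batch: 2 sequences of 16 random token IDs with a full attention mask.
input_ids = torch.randint(0, model.config.vocab_size, (2, 16))
attention_mask = torch.ones_like(input_ids)

with torch.no_grad():
    logits = model(input_ids, attention_mask=attention_mask)

print(logits.shape)  # torch.Size([2, 2]) -- (batch_size, num_classes)
```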

RobertaForMaskLM

Bases: GenericModel

Initializes a RobertaForMaskLM model.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| config_path | str | The path to the model configuration file. | required |
| gradient_checkpointing | bool | Whether to use gradient checkpointing. Defaults to False. | False |
Source code in src/unitorch/models/roberta/modeling.py
def __init__(
    self,
    config_path: str,
    gradient_checkpointing: Optional[bool] = False,
):
    """
    Initializes a RobertaForMaskLM model.

    Args:
        config_path (str): The path to the model configuration file.
        gradient_checkpointing (bool, optional): Whether to use gradient checkpointing. Defaults to False.
    """
    super().__init__()
    self.config = RobertaConfig.from_json_file(config_path)
    self.config.gradient_checkpointing = gradient_checkpointing
    self.roberta = RobertaModel(self.config, add_pooling_layer=False)
    self.lm_head = RobertaLMHead(self.config)
    self.lm_head.decoder.weight = self.roberta.embeddings.word_embeddings.weight
    self.init_weights()
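
Note that the language-modeling head shares its decoder weights with the input word embeddings (see the assignment to self.lm_head.decoder.weight above), so the output projection and the embedding matrix are tied.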

forward

forward(
    input_ids: Tensor,
    attention_mask: Optional[Tensor] = None,
    token_type_ids: Optional[Tensor] = None,
    position_ids: Optional[Tensor] = None,
)

Performs forward pass of the RobertaForMaskLM model.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| input_ids | Tensor | Tensor of input token IDs. | required |
| attention_mask | Tensor | Tensor of attention mask. Defaults to None. | None |
| token_type_ids | Tensor | Tensor of token type IDs. Defaults to None. | None |
| position_ids | Tensor | Tensor of position IDs. Defaults to None. | None |

Returns:

| Type | Description |
| --- | --- |
| Tensor | The model's logits. |

Source code in src/unitorch/models/roberta/modeling.py
def forward(
    self,
    input_ids: torch.Tensor,
    attention_mask: Optional[torch.Tensor] = None,
    token_type_ids: Optional[torch.Tensor] = None,
    position_ids: Optional[torch.Tensor] = None,
):
    """
    Performs forward pass of the RobertaForMaskLM model.

    Args:
        input_ids (torch.Tensor): Tensor of input token IDs.
        attention_mask (torch.Tensor, optional): Tensor of attention mask. Defaults to None.
        token_type_ids (torch.Tensor, optional): Tensor of token type IDs. Defaults to None.
        position_ids (torch.Tensor, optional): Tensor of position IDs. Defaults to None.

    Returns:
        (torch.Tensor): The model's logits.
    """
    outputs = self.roberta(
        input_ids,
        attention_mask=attention_mask,
        token_type_ids=token_type_ids,
        position_ids=position_ids,
    )
    sequence_output = outputs[0]
    logits = self.lm_head(sequence_output)
    return logits
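
A minimal usage sketch for the masked-LM model, mirroring the classification example above; the config path and inputs are placeholders:

```python
import torch
from unitorch.models.roberta import RobertaForMaskLM

# Placeholder path to a RoBERTa config.json describing the architecture.
model = RobertaForMaskLM("path/to/config.json")
model.eval()

# Dummy batch: 2 sequences of 16 random token IDs with a full attention mask.
input_ids = torch.randint(0, model.config.vocab_size, (2, 16))
attention_mask = torch.ones_like(input_ids)

with torch.no_grad():
    logits = model(input_ids, attention_mask=attention_mask)

# Per-token logits over the vocabulary (decoder weights tied to the word embeddings).
print(logits.shape)  # torch.Size([2, 16, vocab_size])
```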