model

Model implementation: transformer encoder architectures (attention, feed-forward, and embedding components) with task-specific heads for masked language modeling, sequence classification, similarity, and summarization.

Functions

scaled_dot_product_attention(query, key, value)

Classes

Attention(hidden_dimensions, head_dimensions)

Defaults()

FeedForward(hidden_dimensions, ...)

LanguageConnector(input_size, output_size, ...)

MaskedLMHead(hidden_dimensions, vocab_size, eps)

MultiHeadAttention(hidden_dimensions, heads)

PositionEmbedding(hidden_dimensions, ...)

TransformerEncoder(depth, hidden_dimensions, ...)

TransformerEncoderForMaskedLM(depth, ...)

TransformerEncoderForSequenceClassification(...)

TransformerEncoderForSequenceSimilarity(...)

TransformerEncoderForSequenceSummarizationGPT2(...)

TransformerEncoderLayer(hidden_dimensions, ...)

class undertale.models.item.model.TransformerEncoder(depth: int, hidden_dimensions: int, vocab_size: int, input_size: int, heads: int, intermediate_dimensions: int, dropout: float, eps: float)

Bases: Module

class undertale.models.item.model.TransformerEncoderForMaskedLM(depth: int, hidden_dimensions: int, vocab_size: int, input_size: int, heads: int, intermediate_dimensions: int, dropout: float, eps: float, lr: float, warmup: float)

Bases: LightningModule, Module

class undertale.models.item.model.TransformerEncoderForSequenceSimilarity(*args, **kwargs)

Bases: Module

class undertale.models.item.model.TransformerEncoderForSequenceClassification(classes: int, depth: int, hidden_dimensions: int, vocab_size: int, input_size: int, heads: int, intermediate_dimensions: int, dropout: float)

Bases: Module

class undertale.models.item.model.TransformerEncoderForSequenceSummarizationGPT2(*args, **kwargs)

Bases: Module