tokenizers
Tokenizers are used to prepare textual inputs for a model.
Example: Create an AutoTokenizer and use it to tokenize a sentence.
This will automatically detect the tokenizer type based on the tokenizer class defined in tokenizer.json.
import { AutoTokenizer } from '@xenova/transformers';
let tokenizer = await AutoTokenizer.from_pretrained('Xenova/bert-base-uncased');
let { input_ids } = await tokenizer('I love transformers!');
// Tensor {
// data: BigInt64Array(6) [101n, 1045n, 2293n, 19081n, 999n, 102n],
// dims: [1, 6],
// type: 'int64',
// size: 6,
// }
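Example: Convert the result back to text. Since the returned Tensor stores its data as a BigInt64Array (as shown above), the IDs can be converted to plain numbers before decoding; the output shown is illustrative.
// Convert the BigInt64Array data to ordinary numbers...
let ids = [...input_ids.data].map(Number);
// ...and decode them back into text.
let text = tokenizer.decode(ids, { skip_special_tokens: true });
// 'i love transformers!' (bert-base-uncased lowercases its input)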
- tokenizers
  - static
    - .TokenizerModel ⇒ Callable
      - new TokenizerModel(config)
      - instance
        - .vocab : Array.<string>
        - .tokens_to_ids : Map.<string, number>
        - .fuse_unk : boolean
        - ._call(tokens) ⇒ Array.<string>
        - .encode(tokens) ⇒ Array.<string>
        - .convert_tokens_to_ids(tokens) ⇒ Array.<number>
        - .convert_ids_to_tokens(ids) ⇒ Array.<string>
      - static
        - .fromConfig(config, ...args) ⇒ TokenizerModel
    - .PreTrainedTokenizer
      - new PreTrainedTokenizer(tokenizerJSON, tokenizerConfig)
      - instance
        - .remove_space : boolean
        - .getToken(...keys) ⇒ string | null
        - .prepare_model_inputs(inputs) ⇒ Object
        - ._call(text, options) ⇒ Object
        - ._encode_text(text) ⇒ Array<string> | null
        - .encode(text, text_pair) ⇒ Array.<number>
        - .batch_decode(batch, decode_args) ⇒ Array.<string>
        - .decode(token_ids, [decode_args]) ⇒ string
        - .decode_single(token_ids, decode_args) ⇒ string
      - static
        - .from_pretrained(pretrained_model_name_or_path, options) ⇒ Promise.<PreTrainedTokenizer>
    - .BertTokenizer ⇒ PreTrainedTokenizer
      - .prepare_model_inputs() : add_token_types
    - .AlbertTokenizer ⇒ PreTrainedTokenizer
      - .prepare_model_inputs() : add_token_types
    - .NllbTokenizer
    - .M2M100Tokenizer
    - .WhisperTokenizer ⇒ PreTrainedTokenizer
      - ._decode_asr(sequences, options) ⇒ *
      - .decode() : *
      - .get_decoder_prompt_ids(options) ⇒ Array.<Array<number>>
    - .MarianTokenizer
    - .AutoTokenizer
      - .from_pretrained(pretrained_model_name_or_path, options) ⇒ Promise.<PreTrainedTokenizer>
  - inner
    - ~WordPieceTokenizer ⇒ TokenizerModel
      - new WordPieceTokenizer(config)
      - .tokens_to_ids : Map.<string, number>
      - .unk_token_id : number
      - .unk_token : string
      - .vocab : Array.<string>
      - .encode(tokens) ⇒ Array.<string>
    - ~Unigram ⇒ TokenizerModel
      - new Unigram(config, moreConfig)
      - .populateNodes(lattice)
      - .tokenize(normalized) ⇒ Array.<string>
      - .encode(tokens) ⇒ Array
    - ~BPE ⇒ TokenizerModel
      - new BPE(config)
      - .tokens_to_ids : Map.<string, number>
      - .cache : Map.<string, Array<string>>
      - .bpe(token) ⇒ Array.<string>
      - .encode(tokens) ⇒ Array.<string>
    - ~LegacyTokenizerModel
      - new LegacyTokenizerModel(config, moreConfig)
      - .tokens_to_ids : Map.<string, number>
    - ~Normalizer
      - new Normalizer(config)
      - instance
        - .normalize(text) ⇒ string
        - ._call(text) ⇒ string
      - static
        - .fromConfig(config) ⇒ Normalizer
    - ~Replace ⇒ Normalizer
      - .normalize(text) ⇒ string
    - ~NFC ⇒ Normalizer
      - .normalize(text) ⇒ string
    - ~NFKD ⇒ Normalizer
      - .normalize(text) ⇒ string
    - ~StripNormalizer
      - .normalize(text) ⇒ string
    - ~StripAccents ⇒ Normalizer
      - .normalize(text) ⇒ string
    - ~Lowercase ⇒ Normalizer
      - .normalize(text) ⇒ string
    - ~Prepend ⇒ Normalizer
      - .normalize(text) ⇒ string
    - ~NormalizerSequence ⇒ Normalizer
      - new NormalizerSequence(config)
      - .normalize(text) ⇒ string
    - ~BertNormalizer ⇒ Normalizer
      - ._tokenize_chinese_chars(text) ⇒ string
      - ._is_chinese_char(cp) ⇒ boolean
      - .stripAccents(text) ⇒ string
      - .normalize(text) ⇒ string
    - ~PreTokenizer ⇒ Callable
      - instance
        - .pre_tokenize_text(text) ⇒ Array.<string>
        - .pre_tokenize(text) ⇒ Array.<string>
        - ._call(text) ⇒ Array.<string>
      - static
        - .fromConfig(config) ⇒ PreTokenizer
    - ~BertPreTokenizer ⇒ PreTokenizer
      - new BertPreTokenizer(config)
      - .pre_tokenize_text(text) ⇒ Array.<string>
    - ~ByteLevelPreTokenizer ⇒ PreTokenizer
      - new ByteLevelPreTokenizer(config)
      - .add_prefix_space : boolean
      - .trim_offsets : boolean
      - .use_regex : boolean
      - .pre_tokenize_text(text) ⇒ Array.<string>
    - ~SplitPreTokenizer ⇒ PreTokenizer
      - new SplitPreTokenizer(config)
      - .pre_tokenize_text(text) ⇒ Array.<string>
    - ~PunctuationPreTokenizer ⇒ PreTokenizer
      - new PunctuationPreTokenizer(config)
      - .pre_tokenize_text(text) ⇒ Array.<string>
    - ~DigitsPreTokenizer ⇒ PreTokenizer
      - new DigitsPreTokenizer(config)
      - .pre_tokenize_text(text) ⇒ Array.<string>
    - ~PostProcessor ⇒ Callable
      - new PostProcessor(config)
      - instance
        - .post_process(tokens, ...args) ⇒ Array
        - ._call(tokens, ...args) ⇒ Array
      - static
        - .fromConfig(config) ⇒ PostProcessor
    - ~BertProcessing
      - new BertProcessing(config)
      - .post_process(tokens, tokens_pair) ⇒ Array.<string>
    - ~TemplateProcessing ⇒ PostProcessor
    - ~ByteLevelPostProcessor ⇒ PostProcessor
      - .post_process(tokens) ⇒ Array.<string>
    - ~Decoder ⇒ Callable
      - new Decoder(config)
      - instance
        - ._call(tokens) ⇒ string
        - .decode(tokens) ⇒ string
        - .decode_chain(tokens) ⇒ Array.<string>
      - static
        - .fromConfig(config) ⇒ Decoder
    - ~FuseDecoder
      - .decode_chain() : *
    - ~WordPieceDecoder ⇒ Decoder
    - ~ByteLevelDecoder ⇒ Decoder
    - ~CTCDecoder
      - .convert_tokens_to_string(tokens) ⇒ string
      - .decode_chain() : *
    - ~DecoderSequence ⇒ Decoder
    - ~MetaspacePreTokenizer ⇒ PreTokenizer
      - new MetaspacePreTokenizer(config)
      - .pre_tokenize(normalizedTokens) ⇒ Array.<string>
    - ~MetaspaceDecoder ⇒ Decoder
    - ~Precompiled ⇒ Normalizer
      - new Precompiled(config)
      - .normalize(text) ⇒ string
    - ~PreTokenizerSequence ⇒ PreTokenizer
      - new PreTokenizerSequence(config)
      - .pre_tokenize_text(text) ⇒ Array.<string>
    - ~WhitespaceSplit ⇒ PreTokenizer
      - new WhitespaceSplit(config)
      - .pre_tokenize_text(text) ⇒ Array.<string>
    - ~ReplacePreTokenizer
      - new ReplacePreTokenizer(config)
      - .pre_tokenize_text(text) ⇒ Array.<string>
    - ~BYTES_TO_UNICODE ⇒ Object
    - ~loadTokenizer(pretrained_model_name_or_path, options) ⇒ Promise.<Array>
    - ~createPattern(pattern, invert) ⇒ RegExp | string | null
    - ~objectToMap(obj) ⇒ Map.<string, any>
    - ~clean_up_tokenization(text) ⇒ string
    - ~remove_accents(text) ⇒ string
    - ~lowercase_and_remove_accent(text) ⇒ string
    - ~fuse(arr, value)
    - ~whitespace_split(text) ⇒ Array.<string>
    - ~add_token_types(inputs) ⇒ Object
    - ~PretrainedOptions : *
    - ~BPENode : Object
    - ~SplitDelimiterBehavior : 'removed' | 'isolated' | 'mergedWithPrevious' | 'mergedWithNext' | 'contiguous'
tokenizers.TokenizerModel ⇒ Callable
Abstract base class for tokenizer models.
Kind: static class of tokenizers
Extends: Callable
- .TokenizerModel ⇒ Callable
  - new TokenizerModel(config)
  - instance
    - .vocab : Array.<string>
    - .tokens_to_ids : Map.<string, number>
    - .fuse_unk : boolean
    - ._call(tokens) ⇒ Array.<string>
    - .encode(tokens) ⇒ Array.<string>
    - .convert_tokens_to_ids(tokens) ⇒ Array.<number>
    - .convert_ids_to_tokens(ids) ⇒ Array.<string>
  - static
    - .fromConfig(config, ...args) ⇒ TokenizerModel
new TokenizerModel(config)
Creates a new instance of TokenizerModel.
Param | Type | Description |
---|---|---|
config | Object | The configuration object for the TokenizerModel. |
tokenizerModel.vocab : Array.<string>
Kind: instance property of TokenizerModel
tokenizerModel.tokens_to_ids : Map.<string, number>
A mapping of tokens to ids.
Kind: instance property of TokenizerModel
tokenizerModel.fuse_unk : boolean
Whether to fuse unknown tokens when encoding. Defaults to false.
Kind: instance property of TokenizerModel
tokenizerModel._call(tokens) ⇒ Array.<string>
Internal function to call the TokenizerModel instance.
Kind: instance method of TokenizerModel
Returns: Array.<string> - The encoded token IDs.
Param | Type | Description |
---|---|---|
tokens | Array.<string> | The tokens to encode. |
tokenizerModel.encode(tokens) ⇒ Array.<string>
Encodes a list of tokens into a list of token IDs.
Kind: instance method of TokenizerModel
Returns: Array.<string> - The encoded tokens.
Throws:
- Will throw an error if not implemented in a subclass.
Param | Type | Description |
---|---|---|
tokens | Array.<string> | The tokens to encode. |
tokenizerModel.convert_tokens_to_ids(tokens) ⇒ Array.<number>
Converts a list of tokens into a list of token IDs.
Kind: instance method of TokenizerModel
Returns: Array.<number> - The converted token IDs.
Param | Type | Description |
---|---|---|
tokens | Array.<string> | The tokens to convert. |
tokenizerModel.convert_ids_to_tokens(ids) ⇒ Array.<string>
Converts a list of token IDs into a list of tokens.
Kind: instance method of TokenizerModel
Returns: Array.<string> - The converted tokens.
Param | Type | Description |
---|---|---|
ids | Array.<number> | The token IDs to convert. |
TokenizerModel.fromConfig(config, ...args) ⇒ TokenizerModel
Instantiates a new TokenizerModel instance based on the configuration object provided.
Kind: static method of TokenizerModel
Returns: TokenizerModel - A new instance of a TokenizerModel.
Throws:
- Will throw an error if the TokenizerModel type in the config is not recognized.
Param | Type | Description |
---|---|---|
config | Object | The configuration object for the TokenizerModel. |
...args | * | Optional arguments to pass to the specific TokenizerModel constructor. |
tokenizers.PreTrainedTokenizer
Kind: static class of tokenizers
- .PreTrainedTokenizer
  - new PreTrainedTokenizer(tokenizerJSON, tokenizerConfig)
  - instance
    - .remove_space : boolean
    - .getToken(...keys) ⇒ string | null
    - .prepare_model_inputs(inputs) ⇒ Object
    - ._call(text, options) ⇒ Object
    - ._encode_text(text) ⇒ Array<string> | null
    - .encode(text, text_pair) ⇒ Array.<number>
    - .batch_decode(batch, decode_args) ⇒ Array.<string>
    - .decode(token_ids, [decode_args]) ⇒ string
    - .decode_single(token_ids, decode_args) ⇒ string
  - static
    - .from_pretrained(pretrained_model_name_or_path, options) ⇒ Promise.<PreTrainedTokenizer>
new PreTrainedTokenizer(tokenizerJSON, tokenizerConfig)
Create a new PreTrainedTokenizer instance.
Param | Type | Description |
---|---|---|
tokenizerJSON | Object | The JSON of the tokenizer. |
tokenizerConfig | Object | The config of the tokenizer. |
preTrainedTokenizer.remove_space : boolean
Whether or not to strip the text when tokenizing (removing excess spaces before and after the string).
Kind: instance property of PreTrainedTokenizer
preTrainedTokenizer.getToken(...keys) ⇒ string | null
Returns the value of the first matching key in the tokenizer config object.
Kind: instance method of PreTrainedTokenizer
Returns: string | null - The value associated with the first matching key, or null if no match is found.
Throws:
Error
If an object is found for a matching key and its __type property is not "AddedToken".
Param | Type | Description |
---|---|---|
...keys | string | One or more keys to search for in the tokenizer config object. |
preTrainedTokenizer.prepare_model_inputs(inputs) ⇒ Object
This function can be overridden by a subclass to apply additional preprocessing to a model's input data.
Kind: instance method of PreTrainedTokenizer
Returns: Object - The modified inputs object.
Param | Type | Description |
---|---|---|
inputs | Object | An object containing input data as properties. |
preTrainedTokenizer._call(text, options) ⇒ Object
Encode/tokenize the given text(s).
Kind: instance method of PreTrainedTokenizer
Returns: Object - Object to be passed to the model.
Param | Type | Default | Description |
---|---|---|---|
text | string | Array<string> | | The text to tokenize. |
options | Object | | An optional object containing the following properties: |
[options.text_pair] | string | Array<string> | null | Optional second sequence to be encoded. If set, must be the same type as text. |
[options.padding] | boolean | false | Whether to pad the input sequences. |
[options.truncation] | boolean | | Whether to truncate the input sequences. |
[options.max_length] | number | | Maximum length of the returned list and optionally padding length. |
[options.return_tensor] | boolean | true | Whether to return the results as Tensors or arrays. |
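Example (a sketch; the exact IDs and dimensions depend on the model's vocabulary): tokenize a batch with padding and truncation enabled.
let { input_ids, attention_mask } = await tokenizer(
    ['a short sentence', 'a somewhat longer sentence than the first one'],
    { padding: true, truncation: true, max_length: 16 }
);
// input_ids.dims is [2, n] with n <= 16; attention_mask marks real tokens with 1s and padding with 0s.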
preTrainedTokenizer._encode_text(text) ⇒ Array<string> | null
Encodes a single text using the preprocessor pipeline of the tokenizer.
Kind: instance method of PreTrainedTokenizer
Returns: Array<string> | null - The encoded tokens.
Param | Type | Description |
---|---|---|
text | string | null | The text to encode. |
preTrainedTokenizer.encode(text, text_pair) ⇒ Array.<number>
Encodes a single text or a pair of texts using the model's tokenizer.
Kind: instance method of PreTrainedTokenizer
Returns: Array.<number> - An array of token IDs representing the encoded text(s).
Param | Type | Default | Description |
---|---|---|---|
text | string | | The text to encode. |
text_pair | string | null | The optional second text to encode. |
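Example (illustrative): encoding a sentence pair with a BERT-style tokenizer produces a single flat list of IDs, with the special tokens inserted around and between the two texts.
let ids = tokenizer.encode('how are you?', 'I am fine, thanks.');
// For BERT-style models: [CLS] text [SEP] text_pair [SEP], as token IDs.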
preTrainedTokenizer.batch_decode(batch, decode_args) ⇒ Array.<string>
Decode a batch of tokenized sequences.
Kind: instance method of PreTrainedTokenizer
Returns: Array.<string> - List of decoded sequences.
Param | Type | Description |
---|---|---|
batch | Array.<Array<number>> | List of tokenized input sequences. |
decode_args | Object | (Optional) Object with decoding arguments. |
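Example (IDs and outputs are illustrative): decode two sequences at once, dropping special tokens.
let texts = tokenizer.batch_decode([[101, 7592, 102], [101, 2088, 102]], { skip_special_tokens: true });
// e.g. ['hello', 'world']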
preTrainedTokenizer.decode(token_ids, [decode_args]) ⇒ string
Decodes a sequence of token IDs back to a string.
Kind: instance method of PreTrainedTokenizer
Returns: string - The decoded string.
Throws:
Error
If `token_ids` is not a non-empty array of integers.
Param | Type | Default | Description |
---|---|---|---|
token_ids | Array.<number> | | List of token IDs to decode. |
[decode_args] | Object | {} | |
[decode_args.skip_special_tokens] | boolean | false | If true, special tokens are removed from the output string. |
[decode_args.clean_up_tokenization_spaces] | boolean | true | If true, spaces before punctuation and abbreviated forms are removed. |
preTrainedTokenizer.decode_single(token_ids, decode_args) ⇒ string
Decode a single list of token ids to a string.
Kind: instance method of PreTrainedTokenizer
Returns: string - The decoded string
Param | Type | Default | Description |
---|---|---|---|
token_ids | Array.<number> | | List of token ids to decode |
decode_args | Object | | Optional arguments for decoding |
[decode_args.skip_special_tokens] | boolean | false | Whether to skip special tokens during decoding |
[decode_args.clean_up_tokenization_spaces] | boolean | | Whether to clean up tokenization spaces during decoding. If null, the value is set to |
PreTrainedTokenizer.from_pretrained(pretrained_model_name_or_path, options) ⇒ Promise.<PreTrainedTokenizer>
Loads a pre-trained tokenizer from the given pretrained_model_name_or_path.
Kind: static method of PreTrainedTokenizer
Returns: Promise.<PreTrainedTokenizer> - A new instance of the PreTrainedTokenizer class.
Throws:
Error
Throws an error if the tokenizer.json or tokenizer_config.json files are not found in the `pretrained_model_name_or_path`.
Param | Type | Description |
---|---|---|
pretrained_model_name_or_path | string | The path to the pre-trained tokenizer. |
options | PretrainedOptions | Additional options for loading the tokenizer. |
tokenizers.BertTokenizer ⇒ PreTrainedTokenizer
BertTokenizer is a class used to tokenize text for BERT models.
Kind: static class of tokenizers
Extends: PreTrainedTokenizer
bertTokenizer.prepare_model_inputs() : add_token_types
Kind: instance method of BertTokenizer
tokenizers.AlbertTokenizer ⇒ PreTrainedTokenizer
Albert tokenizer
Kind: static class of tokenizers
Extends: PreTrainedTokenizer
albertTokenizer.prepare_model_inputs() : add_token_types
Kind: instance method of AlbertTokenizer
tokenizers.NllbTokenizer
The NllbTokenizer class is used to tokenize text for NLLB ('No Language Left Behind') models.
No Language Left Behind (NLLB) is a first-of-its-kind, AI breakthrough project that open-sources models capable of delivering high-quality translations directly between any pair of 200+ languages, including low-resource languages like Asturian, Luganda, Urdu and more. It aims to help people communicate with anyone, anywhere, regardless of their language preferences. For more information, check out their paper.
For a list of supported languages (along with their language codes), see the link below.
Kind: static class of tokenizers
See: https://github.com/facebookresearch/flores/blob/main/flores200/README.md#languages-in-flores-200
nllbTokenizer._build_translation_inputs(raw_inputs, tokenizer_options, generate_kwargs) ⇒ Object
Helper function to build translation inputs for an NllbTokenizer.
Kind: instance method of NllbTokenizer
Returns: Object - Object to be passed to the model.
Param | Type | Description |
---|---|---|
raw_inputs | string | Array<string> | The text to tokenize. |
tokenizer_options | Object | Options to be sent to the tokenizer |
generate_kwargs | Object | Generation options. |
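In practice, this method is invoked for you when running a translation pipeline. A sketch of end-to-end usage (the model name and FLORES-200 language codes below are examples; the output is illustrative):
import { pipeline } from '@xenova/transformers';
let translator = await pipeline('translation', 'Xenova/nllb-200-distilled-600M');
let output = await translator('Hello, world!', {
    src_lang: 'eng_Latn', // source language code
    tgt_lang: 'fra_Latn', // target language code
});
// [{ translation_text: 'Bonjour le monde!' }]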
tokenizers.M2M100Tokenizer
The M2M100Tokenizer class is used to tokenize text for M2M100 ('Many-to-Many') models.
M2M100 is a multilingual encoder-decoder (seq-to-seq) model trained for Many-to-Many multilingual translation. It was introduced in this paper and first released in this repository.
For a list of supported languages (along with their language codes), see the link below.
Kind: static class of tokenizers
See: https://huggingface.co/facebook/m2m100_418M#languages-covered
m2M100Tokenizer._build_translation_inputs(raw_inputs, tokenizer_options, generate_kwargs) ⇒ Object
Helper function to build translation inputs for an M2M100Tokenizer.
Kind: instance method of M2M100Tokenizer
Returns: Object - Object to be passed to the model.
Param | Type | Description |
---|---|---|
raw_inputs | string | Array<string> | The text to tokenize. |
tokenizer_options | Object | Options to be sent to the tokenizer |
generate_kwargs | Object | Generation options. |
tokenizers.WhisperTokenizer ⇒ PreTrainedTokenizer
WhisperTokenizer tokenizer
Kind: static class of tokenizers
Extends: PreTrainedTokenizer
- .WhisperTokenizer ⇒ PreTrainedTokenizer
  - ._decode_asr(sequences, options) ⇒ *
  - .decode() : *
  - .get_decoder_prompt_ids(options) ⇒ Array.<Array<number>>
whisperTokenizer._decode_asr(sequences, options) ⇒ *
Decodes automatic speech recognition (ASR) sequences.
Kind: instance method of WhisperTokenizer
Returns: * - The decoded sequences.
Param | Type | Description |
---|---|---|
sequences | * | The sequences to decode. |
options | Object | The options to use for decoding. |
whisperTokenizer.decode() : *
Kind: instance method of WhisperTokenizer
whisperTokenizer.get_decoder_prompt_ids(options) ⇒ Array.<Array<number>>
Helper function to build translation inputs for a WhisperTokenizer, depending on the language, task, and whether to predict timestamp tokens.
Used to override the prefix tokens appended to the start of the label sequence.
Example: Get ids for a language
// instantiate the tokenizer and set the prefix token to Spanish
let tokenizer = await WhisperTokenizer.from_pretrained('Xenova/whisper-tiny');
let forced_decoder_ids = tokenizer.get_decoder_prompt_ids({ language: 'spanish' });
// [(1, 50262), (2, 50363)]
Kind: instance method of WhisperTokenizer
Returns: Array.<Array<number>> - The decoder prompt ids.
Param | Type | Description |
---|---|---|
options | Object | Options to generate the decoder prompt. |
[options.language] | string | The language of the transcription text. The corresponding language id token is appended to the start of the sequence for multilingual speech recognition and speech translation tasks, e.g. for "Spanish" the token "<|es|>" is appended to the start of sequence. |
[options.task] | string | Task identifier to append at the start of sequence (if any). This should be used for multilingual fine-tuning, with "transcribe" for speech recognition and "translate" for speech translation. |
[options.no_timestamps] | boolean | Whether to add the <|notimestamps|> token at the start of the sequence. |
tokenizers.MarianTokenizer
Kind: static class of tokenizers
Todo
- This model is not yet supported by Hugging Face's "fast" tokenizers library (https://github.com/huggingface/tokenizers). Therefore, this implementation (which is based on fast tokenizers) may produce slightly inaccurate results.
new MarianTokenizer(tokenizerJSON, tokenizerConfig)
Create a new MarianTokenizer instance.
Param | Type | Description |
---|---|---|
tokenizerJSON | Object | The JSON of the tokenizer. |
tokenizerConfig | Object | The config of the tokenizer. |
marianTokenizer._encode_text(text) ⇒ Array
Encodes a single text. Overriding this method is necessary since the language codes must be removed before encoding with the sentencepiece model.
Kind: instance method of MarianTokenizer
Returns: Array - The encoded tokens.
See: https://github.com/huggingface/transformers/blob/12d51db243a00726a548a43cc333390ebae731e3/src/transformers/models/marian/tokenization_marian.py#L204-L213
Param | Type | Description |
---|---|---|
text | string | null | The text to encode. |
tokenizers.AutoTokenizer
Helper class which is used to instantiate pretrained tokenizers with the from_pretrained function.
The chosen tokenizer class is determined by the type specified in the tokenizer config.
Kind: static class of tokenizers
AutoTokenizer.from_pretrained(pretrained_model_name_or_path, options) ⇒ Promise.<PreTrainedTokenizer>
Instantiate one of the tokenizer classes of the library from a pretrained model.
The tokenizer class to instantiate is selected based on the tokenizer_class property of the config object (either passed as an argument or loaded from pretrained_model_name_or_path if possible).
Kind: static method of AutoTokenizer
Returns: Promise.<PreTrainedTokenizer> - A new instance of the PreTrainedTokenizer class.
Param | Type | Description |
---|---|---|
pretrained_model_name_or_path | string | The name or path of the pretrained model. Can be either a model id or a path to a directory containing tokenizer files. |
options | PretrainedOptions | Additional options for loading the tokenizer. |
tokenizers~WordPieceTokenizer ⇒ TokenizerModel
A subclass of TokenizerModel that uses WordPiece encoding to encode tokens.
Kind: inner class of tokenizers
Extends: TokenizerModel
- ~WordPieceTokenizer ⇒ TokenizerModel
  - new WordPieceTokenizer(config)
  - .tokens_to_ids : Map.<string, number>
  - .unk_token_id : number
  - .unk_token : string
  - .vocab : Array.<string>
  - .encode(tokens) ⇒ Array.<string>
new WordPieceTokenizer(config)
Param | Type | Description |
---|---|---|
config | Object | The configuration object. |
config.vocab | Object | A mapping of tokens to ids. |
config.unk_token | string | The unknown token string. |
config.continuing_subword_prefix | string | The prefix to use for continuing subwords. |
wordPieceTokenizer.tokens_to_ids : Map.<string, number>
A mapping of tokens to ids.
Kind: instance property of WordPieceTokenizer
wordPieceTokenizer.unk_token_id : number
The id of the unknown token.
Kind: instance property of WordPieceTokenizer
wordPieceTokenizer.unk_token : string
The unknown token string.
Kind: instance property of WordPieceTokenizer
wordPieceTokenizer.vocab : Array.<string>
An array of tokens.
Kind: instance property of WordPieceTokenizer
wordPieceTokenizer.encode(tokens) ⇒ Array.<string>
Encodes an array of tokens using WordPiece encoding.
Kind: instance method of WordPieceTokenizer
Returns: Array.<string> - An array of encoded tokens.
Param | Type | Description |
---|---|---|
tokens | Array.<string> | The tokens to encode. |
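To illustrate the greedy longest-match-first behaviour of WordPiece (assuming the conventional '##' continuing_subword_prefix; the split shown is hypothetical):
let tokens = wordPieceTokenizer.encode(['unaffable']);
// e.g. ['un', '##aff', '##able']; a token with no valid split becomes unk_token instead.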
tokenizers~Unigram ⇒ TokenizerModel
Class representing a Unigram tokenizer model.
Kind: inner class of tokenizers
Extends: TokenizerModel
- ~Unigram ⇒ TokenizerModel
  - new Unigram(config, moreConfig)
  - .populateNodes(lattice)
  - .tokenize(normalized) ⇒ Array.<string>
  - .encode(tokens) ⇒ Array
new Unigram(config, moreConfig)
Create a new Unigram tokenizer model.
Param | Type | Description |
---|---|---|
config | Object | The configuration object for the Unigram model. |
config.unk_id | number | The ID of the unknown token |
config.vocab | Array.<Array<any>> | A 2D array representing a mapping of tokens to scores. |
moreConfig | Object | Additional configuration object for the Unigram model. |
unigram.populateNodes(lattice)
Populates lattice nodes.
Kind: instance method of Unigram
Param | Type | Description |
---|---|---|
lattice | TokenLattice | The token lattice to populate with nodes. |
unigram.tokenize(normalized) ⇒ Array.<string>
Encodes a normalized string into an array of subtokens using the unigram model.
Kind: instance method of Unigram
Returns: Array.<string> - An array of subtokens obtained by encoding the input using the unigram model.
Param | Type | Description |
---|---|---|
normalized | string | The normalized string. |
unigram.encode(tokens) ⇒ Array
Encodes an array of tokens using Unigram encoding.
Kind: instance method of Unigram
Returns: Array - An array of encoded tokens.
Param | Type | Description |
---|---|---|
tokens | Array | The tokens to encode. |
tokenizers~BPE ⇒ TokenizerModel
BPE class for encoding text into Byte-Pair-Encoding (BPE) tokens.
Kind: inner class of tokenizers
Extends: TokenizerModel
- ~BPE ⇒ TokenizerModel
  - new BPE(config)
  - .tokens_to_ids : Map.<string, number>
  - .cache : Map.<string, Array<string>>
  - .bpe(token) ⇒ Array.<string>
  - .encode(tokens) ⇒ Array.<string>
new BPE(config)
Create a BPE instance.
Param | Type | Description |
---|---|---|
config | Object | The configuration object for BPE. |
config.vocab | Object | A mapping of tokens to ids. |
config.unk_token | string | The unknown token used for out of vocabulary words. |
config.end_of_word_suffix | string | The suffix to place at the end of each word. |
[config.continuing_subword_suffix] | string | The suffix to insert between words. |
config.merges | Array | An array of BPE merges as strings. |
bpE.tokens_to_ids : Map.<string, number>
Kind: instance property of BPE
bpE.cache : Map.<string, Array<string>>
Kind: instance property of BPE
bpE.bpe(token) ⇒ Array.<string>
Apply Byte-Pair-Encoding (BPE) to a given token. Efficient heap-based priority queue implementation adapted from https://github.com/belladoreai/llama-tokenizer-js.
Kind: instance method of BPE
Returns: Array.<string> - The BPE encoded tokens.
Param | Type | Description |
---|---|---|
token | string | The token to encode. |
bpE.encode(tokens) ⇒ Array.<string>
Encodes the input sequence of tokens using the BPE algorithm and returns the resulting subword tokens.
Kind: instance method of BPE
Returns: Array.<string> - The resulting subword tokens after applying the BPE algorithm to the input sequence of tokens.
Param | Type | Description |
---|---|---|
tokens | Array.<string> | The input sequence of tokens to encode. |
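The core idea behind bpe(token) can be sketched independently of this class: repeatedly merge the adjacent symbol pair with the best-ranked merge rule until none applies. This is a simplified illustration, not the heap-based implementation used here.
// Simplified BPE merge loop. `merges` maps 'a b' -> rank (lower = higher priority).
function simpleBpe(token, merges) {
    let symbols = [...token]; // start from individual characters
    while (symbols.length > 1) {
        // Find the adjacent pair with the lowest merge rank.
        let best = -1, bestRank = Infinity;
        for (let i = 0; i < symbols.length - 1; ++i) {
            let rank = merges.get(symbols[i] + ' ' + symbols[i + 1]);
            if (rank !== undefined && rank < bestRank) {
                bestRank = rank;
                best = i;
            }
        }
        if (best === -1) break; // no applicable merge rule remains
        symbols.splice(best, 2, symbols[best] + symbols[best + 1]); // merge the pair
    }
    return symbols;
}
// simpleBpe('hugging', new Map([['h u', 0], ['hu g', 1]])) -> ['hug', 'g', 'i', 'n', 'g']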
tokenizers~LegacyTokenizerModel
Legacy tokenizer class for tokenizers with only a vocabulary.
Kind: inner class of tokenizers
- ~LegacyTokenizerModel
  - new LegacyTokenizerModel(config, moreConfig)
  - .tokens_to_ids : Map.<string, number>
new LegacyTokenizerModel(config, moreConfig)
Create a LegacyTokenizerModel instance.
Param | Type | Description |
---|---|---|
config | Object | The configuration object for LegacyTokenizerModel. |
config.vocab | Object | A (possibly nested) mapping of tokens to ids. |
moreConfig | Object | Additional configuration object for the LegacyTokenizerModel model. |
legacyTokenizerModel.tokens_to_ids : Map.<string, number>
Kind: instance property of LegacyTokenizerModel
tokenizers~Normalizer
A base class for text normalization.
Kind: inner abstract class of tokenizers
- ~Normalizer
  - new Normalizer(config)
  - instance
    - .normalize(text) ⇒ string
    - ._call(text) ⇒ string
  - static
    - .fromConfig(config) ⇒ Normalizer
new Normalizer(config)
Param | Type | Description |
---|---|---|
config | Object | The configuration object for the normalizer. |
normalizer.normalize(text) ⇒ string
Normalize the input text.
Kind: instance abstract method of Normalizer
Returns: string - The normalized text.
Throws:
Error
If this method is not implemented in a subclass.
Param | Type | Description |
---|---|---|
text | string | The text to normalize. |
normalizer._call(text) ⇒ string
Alias for Normalizer#normalize.
Kind: instance method of Normalizer
Returns: string - The normalized text.
Param | Type | Description |
---|---|---|
text | string | The text to normalize. |
Normalizer.fromConfig(config) ⇒ Normalizer
Factory method for creating normalizers from config objects.
Kind: static method of Normalizer
Returns: Normalizer - A Normalizer object.
Throws:
Error
If an unknown Normalizer type is specified in the config.
Param | Type | Description |
---|---|---|
config | Object | The configuration object for the normalizer. |
tokenizers~Replace ⇒ Normalizer
Replace normalizer that replaces occurrences of a pattern (a string or regular expression) with a given content string.
Kind: inner class of tokenizers
Extends: Normalizer
replace.normalize(text) ⇒ string
Normalize the input text by replacing the pattern with the content.
Kind: instance method of Replace
Returns: string - The normalized text after replacing the pattern with the content.
Param | Type | Description |
---|---|---|
text | string | The input text to be normalized. |
tokenizers~NFC ⇒ Normalizer
A normalizer that applies Unicode normalization form C (NFC) to the input text.
Kind: inner class of tokenizers
Extends: Normalizer
nfC.normalize(text) ⇒ string
Normalize the input text by applying Unicode normalization form C (NFC).
Kind: instance method of NFC
Returns: string - The normalized text.
Param | Type | Description |
---|---|---|
text | string | The input text to be normalized. |
tokenizers~NFKD ⇒ Normalizer
NFKD Normalizer.
Kind: inner class of tokenizers
Extends: Normalizer
nfkD.normalize(text) ⇒ string
Normalize text using NFKD normalization.
Kind: instance method of NFKD
Returns: string - The normalized text.
Param | Type | Description |
---|---|---|
text | string | The text to be normalized. |
tokenizers~StripNormalizer
A normalizer that strips leading and/or trailing whitespace from the input text.
Kind: inner class of tokenizers
stripNormalizer.normalize(text) ⇒ string
Strip leading and/or trailing whitespace from the input text.
Kind: instance method of StripNormalizer
Returns: string - The normalized text.
Param | Type | Description |
---|---|---|
text | string | The input text. |
tokenizers~StripAccents ⇒ Normalizer
StripAccents normalizer removes all accents from the text.
Kind: inner class of tokenizers
Extends: Normalizer
stripAccents.normalize(text) ⇒ string
Remove all accents from the text.
Kind: instance method of StripAccents
Returns: string - The normalized text without accents.
Param | Type | Description |
---|---|---|
text | string | The input text. |
tokenizers~Lowercase ⇒ Normalizer
A Normalizer that lowercases the input string.
Kind: inner class of tokenizers
Extends: Normalizer
lowercase.normalize(text) ⇒ string
Lowercases the input string.
Kind: instance method of Lowercase
Returns: string - The normalized text.
Param | Type | Description |
---|---|---|
text | string | The text to normalize. |
tokenizers~Prepend ⇒ Normalizer
A Normalizer that prepends a string to the input string.
Kind: inner class of tokenizers
Extends: Normalizer
prepend.normalize(text) ⇒ string
Prepends the configured string to the input string.
Kind: instance method of Prepend
Returns: string - The normalized text.
Param | Type | Description |
---|---|---|
text | string | The text to normalize. |
tokenizers~NormalizerSequence ⇒ Normalizer
A Normalizer that applies a sequence of Normalizers.
Kind: inner class of tokenizers
Extends: Normalizer
- ~NormalizerSequence ⇒ Normalizer
  - new NormalizerSequence(config)
  - .normalize(text) ⇒ string
new NormalizerSequence(config)
Create a new instance of NormalizerSequence.
Param | Type | Description |
---|---|---|
config | Object | The configuration object. |
config.normalizers | Array.<Object> | An array of Normalizer configuration objects. |
normalizerSequence.normalize(text) ⇒ string
Apply a sequence of Normalizers to the input text.
Kind: instance method of NormalizerSequence
Returns: string - The normalized text.
Param | Type | Description |
---|---|---|
text | string | The text to normalize. |
tokenizers~BertNormalizer ⇒ Normalizer
A class representing a normalizer used in BERT tokenization.
Kind: inner class of tokenizers
Extends: Normalizer
- ~BertNormalizer ⇒ Normalizer
  - ._tokenize_chinese_chars(text) ⇒ string
  - ._is_chinese_char(cp) ⇒ boolean
  - .stripAccents(text) ⇒ string
  - .normalize(text) ⇒ string
bertNormalizer._tokenize_chinese_chars(text) ⇒ string
Adds whitespace around any CJK (Chinese, Japanese, or Korean) character in the input text.
Kind: instance method of BertNormalizer
Returns: string - The tokenized text with whitespace added around CJK characters.
Param | Type | Description |
---|---|---|
text | string | The input text to tokenize. |
bertNormalizer._is_chinese_char(cp) ⇒ boolean
Checks whether the given Unicode codepoint represents a CJK (Chinese, Japanese, or Korean) character.
A "chinese character" is defined as anything in the CJK Unicode block: https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
Note that the CJK Unicode block is NOT all Japanese and Korean characters, despite its name. The modern Korean Hangul alphabet is a different block, as is Japanese Hiragana and Katakana. Those alphabets are used to write space-separated words, so they are not treated specially and are handled like all other languages.
Kind: instance method of BertNormalizer
Returns: boolean - True if the codepoint represents a CJK character, false otherwise.
Param | Type | Description |
---|---|---|
cp | number | The Unicode codepoint to check. |
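A sketch of the range check, using the CJK ideograph blocks from the original BERT reference implementation (assumed to match the ranges used by this method):
// Returns true if the codepoint falls in one of the CJK ideograph blocks.
function isChineseChar(cp) {
    return (
        (cp >= 0x4E00 && cp <= 0x9FFF) ||   // CJK Unified Ideographs
        (cp >= 0x3400 && cp <= 0x4DBF) ||   // Extension A
        (cp >= 0x20000 && cp <= 0x2A6DF) || // Extension B
        (cp >= 0x2A700 && cp <= 0x2B73F) || // Extension C
        (cp >= 0x2B740 && cp <= 0x2B81F) || // Extension D
        (cp >= 0x2B820 && cp <= 0x2CEAF) || // Extension E
        (cp >= 0xF900 && cp <= 0xFAFF) ||   // Compatibility Ideographs
        (cp >= 0x2F800 && cp <= 0x2FA1F)    // Compatibility Supplement
    );
}
// isChineseChar('中'.codePointAt(0)) === true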
bertNormalizer.stripAccents(text) ⇒ string
Strips accents from the given text.
Kind: instance method of BertNormalizer
Returns: string - The text with accents removed.
Param | Type | Description |
---|---|---|
text | string | The text to strip accents from. |
bertNormalizer.normalize(text) ⇒ string
Normalizes the given text based on the configuration.
Kind: instance method of BertNormalizer
Returns: string - The normalized text.
Param | Type | Description |
---|---|---|
text | string | The text to normalize. |
tokenizers~PreTokenizer ⇒ Callable
A callable class representing a pre-tokenizer used in tokenization. Subclasses should implement the pre_tokenize_text method to define the specific pre-tokenization logic.
Kind: inner class of tokenizers
Extends: Callable
- ~PreTokenizer ⇒ Callable
  - instance
    - .pre_tokenize_text(text) ⇒ Array.<string>
    - .pre_tokenize(text) ⇒ Array.<string>
    - ._call(text) ⇒ Array.<string>
  - static
    - .fromConfig(config) ⇒ PreTokenizer
preTokenizer.pre_tokenize_text(text) ⇒ Array.<string>
Method that should be implemented by subclasses to define the specific pre-tokenization logic.
Kind: instance abstract method of PreTokenizer
Returns: Array.<string> - The pre-tokenized text.
Throws:
Error
If the method is not implemented in the subclass.
Param | Type | Description |
---|---|---|
text | string | The text to pre-tokenize. |
preTokenizer.pre_tokenize(text) ⇒ Array.<string>
Tokenizes the given text into pre-tokens.
Kind: instance method of PreTokenizer
Returns: Array.<string> - An array of pre-tokens.
Param | Type | Description |
---|---|---|
text | string | Array<string> | The text or array of texts to pre-tokenize. |
preTokenizer._call(text) ⇒ Array.<string>
Alias for PreTokenizer#pre_tokenize.
Kind: instance method of PreTokenizer
Returns: Array.<string> - An array of pre-tokens.
Param | Type | Description |
---|---|---|
text | string | Array<string> | The text or array of texts to pre-tokenize. |
PreTokenizer.fromConfig(config) ⇒ PreTokenizer
Factory method that returns an instance of a subclass of PreTokenizer based on the provided configuration.
Kind: static method of PreTokenizer
Returns: PreTokenizer - An instance of a subclass of PreTokenizer.
Throws:
Error
If the provided configuration object does not correspond to any known pre-tokenizer.
Param | Type | Description |
---|---|---|
config | Object | A configuration object for the pre-tokenizer. |
tokenizers~BertPreTokenizer ⇒ PreTokenizer
Kind: inner class of tokenizers
Extends: PreTokenizer
- ~BertPreTokenizer ⇒ PreTokenizer
  - new BertPreTokenizer(config)
  - .pre_tokenize_text(text) ⇒ Array.<string>
new BertPreTokenizer(config)
A PreTokenizer that splits text into wordpieces using a basic tokenization scheme similar to that used in the original implementation of BERT.
Param | Type | Description |
---|---|---|
config | Object | The configuration object. |
bertPreTokenizer.pre_tokenize_text(text) ⇒ Array.<string>
Tokenizes a single text using the BERT pre-tokenization scheme.
Kind: instance method of BertPreTokenizer
Returns: Array.<string> - An array of tokens.
Param | Type | Description |
---|---|---|
text | string | The text to tokenize. |
tokenizers~ByteLevelPreTokenizer ⇒ PreTokenizer
A pre-tokenizer that splits text into Byte-Pair-Encoding (BPE) subwords.
Kind: inner class of tokenizers
Extends: PreTokenizer
- ~ByteLevelPreTokenizer ⇒ PreTokenizer
  - new ByteLevelPreTokenizer(config)
  - .add_prefix_space : boolean
  - .trim_offsets : boolean
  - .use_regex : boolean
  - .pre_tokenize_text(text) ⇒ Array.<string>
new ByteLevelPreTokenizer(config)
Creates a new instance of the ByteLevelPreTokenizer class.
Param | Type | Description |
---|---|---|
config | Object | The configuration object. |
byteLevelPreTokenizer.add_prefix_space : boolean
Whether to add a leading space to the first word. This allows the leading word to be treated just like any other word.
Kind: instance property of ByteLevelPreTokenizer
byteLevelPreTokenizer.trim_offsets : boolean
Whether the post-processing step should trim offsets to avoid including whitespace.
Kind: instance property of ByteLevelPreTokenizer
Todo
- Use this in the pretokenization step.
byteLevelPreTokenizer.use_regex : boolean
Whether to use the standard GPT2 regex for whitespace splitting. Set it to false if you want to use your own splitting. Defaults to true.
Kind: instance property of ByteLevelPreTokenizer
byteLevelPreTokenizer.pre_tokenize_text(text) ⇒ Array.<string>
Tokenizes a single piece of text using byte-level tokenization.
Kind: instance method of ByteLevelPreTokenizer
Returns: Array.<string> - An array of tokens.
Param | Type | Description |
---|---|---|
text | string | The text to tokenize. |
tokenizers~SplitPreTokenizer ⇒ PreTokenizer
Splits text using a given pattern.
Kind: inner class of tokenizers
Extends: PreTokenizer
- ~SplitPreTokenizer ⇒ PreTokenizer
  - new SplitPreTokenizer(config)
  - .pre_tokenize_text(text) ⇒ Array.<string>
new SplitPreTokenizer(config)
Param | Type | Description |
---|---|---|
config | Object | The configuration options for the pre-tokenizer. |
config.pattern | Object | The pattern used to split the text. Can be a string or a regex object. |
config.pattern.String | string | undefined | The string to use for splitting. Only defined if the pattern is a string. |
config.pattern.Regex | string | undefined | The regex to use for splitting. Only defined if the pattern is a regex. |
config.behavior | SplitDelimiterBehavior | The behavior to use when splitting. |
config.invert | boolean | Whether to split (invert=false) or match (invert=true) the pattern. |
splitPreTokenizer.pre_tokenize_text(text) ⇒ Array.<string>
Tokenizes text by splitting it using the given pattern.
Kind: instance method of SplitPreTokenizer
Returns: Array.<string> - An array of tokens.
Param | Type | Description |
---|---|---|
text | string | The text to tokenize. |
tokenizers~PunctuationPreTokenizer ⇒ PreTokenizer
Splits text based on punctuation.
Kind: inner class of tokenizers
Extends: PreTokenizer
- ~PunctuationPreTokenizer ⇒ PreTokenizer
  - new PunctuationPreTokenizer(config)
  - .pre_tokenize_text(text) ⇒ Array.<string>
new PunctuationPreTokenizer(config)
Param | Type | Description |
---|---|---|
config | Object | The configuration options for the pre-tokenizer. |
config.behavior | SplitDelimiterBehavior | The behavior to use when splitting. |
punctuationPreTokenizer.pre_tokenize_text(text) ⇒ Array.<string>
Tokenizes text by splitting it using the given pattern.
Kind: instance method of PunctuationPreTokenizer
Returns: Array.<string> - An array of tokens.
Param | Type | Description |
---|---|---|
text | string | The text to tokenize. |
tokenizers~DigitsPreTokenizer ⇒ PreTokenizer
Splits text based on digits.
Kind: inner class of tokenizers
Extends: PreTokenizer
- ~DigitsPreTokenizer ⇒ PreTokenizer
  - new DigitsPreTokenizer(config)
  - .pre_tokenize_text(text) ⇒ Array.<string>
new DigitsPreTokenizer(config)
Param | Type | Description |
---|---|---|
config | Object | The configuration options for the pre-tokenizer. |
config.individual_digits | boolean | Whether to split on individual digits. |
digitsPreTokenizer.pre_tokenize_text(text) ⇒ Array.<string>
Tokenizes text by splitting it using the given pattern.
Kind: instance method of DigitsPreTokenizer
Returns: Array.<string> - An array of tokens.
Param | Type | Description |
---|---|---|
text | string | The text to tokenize. |
tokenizers~PostProcessor ⇒ Callable
Kind: inner class of tokenizers
Extends: Callable
- ~PostProcessor ⇒ Callable
  - new PostProcessor(config)
  - instance
    - .post_process(tokens, ...args) ⇒ Array
    - ._call(tokens, ...args) ⇒ Array
  - static
    - .fromConfig(config) ⇒ PostProcessor
new PostProcessor(config)
Param | Type | Description |
---|---|---|
config | Object | The configuration for the post-processor. |
postProcessor.post_process(tokens, ...args) ⇒ Array
Method to be implemented in subclass to apply post-processing on the given tokens.
Kind: instance method of PostProcessor
Returns: Array - The post-processed tokens.
Throws:
Error
If the method is not implemented in subclass.
Param | Type | Description |
---|---|---|
tokens | Array | The input tokens to be post-processed. |
...args | * | Additional arguments required by the post-processing logic. |
postProcessor._call(tokens, ...args) ⇒ Array
Alias for PostProcessor#post_process.
Kind: instance method of PostProcessor
Returns: Array - An array of post-processed tokens.
Param | Type | Description |
---|---|---|
tokens | Array | The text or array of texts to post-process. |
...args | * | Additional arguments required by the post-processing logic. |
PostProcessor.fromConfig(config) ⇒ PostProcessor
Factory method to create a PostProcessor object from a configuration object.
Kind: static method of PostProcessor
Returns: PostProcessor - A PostProcessor object created from the given configuration.
Throws:
Error
If an unknown PostProcessor type is encountered.
Param | Type | Description |
---|---|---|
config | Object | Configuration object representing a PostProcessor. |
tokenizers~BertProcessing
A post-processor that adds special tokens to the beginning and end of the input.
Kind: inner class of tokenizers
- ~BertProcessing
  - new BertProcessing(config)
  - .post_process(tokens, tokens_pair) ⇒ Array.<string>
new BertProcessing(config)
Param | Type | Description |
---|---|---|
config | Object | The configuration for the post-processor. |
config.cls | Array.<string> | The special tokens to add to the beginning of the input. |
config.sep | Array.<string> | The special tokens to add to the end of the input. |
bertProcessing.post_process(tokens, tokens_pair) ⇒ Array.<string>
Adds the special tokens to the beginning and end of the input.
Kind: instance method of BertProcessing
Returns: Array.<string> - The input tokens with the special tokens added to the beginning and end.
Param | Type | Default | Description |
---|---|---|---|
tokens | Array.<string> | | The input tokens. |
tokens_pair | Array<string> | null | An optional second set of input tokens. |
tokenizers~TemplateProcessing ⇒ PostProcessor
Post processor that replaces special tokens in a template with actual tokens.
Kind: inner class of tokenizers
Extends: PostProcessor
- ~TemplateProcessing ⇒ PostProcessor
  - new TemplateProcessing(config)
new TemplateProcessing(config)
Creates a new instance of TemplateProcessing.
Param | Type | Description |
---|---|---|
config | Object | The configuration options for the post processor. |
config.single | Array | The template for a single sequence of tokens. |
config.pair | Array | The template for a pair of sequences of tokens. |
templateProcessing.post_process(tokens, [tokens_pair]) ⇒ Array
Replaces special tokens in the template with actual tokens.
Kind: instance method of TemplateProcessing
Returns: Array - The list of tokens with the special tokens replaced with actual tokens.
Param | Type | Default | Description |
---|---|---|---|
tokens | Array | | The list of tokens for the first sequence. |
[tokens_pair] | Array | | The list of tokens for the second sequence (optional). |
tokenizers~ByteLevelPostProcessor ⇒ PostProcessor
A PostProcessor that returns the given tokens as is.
Kind: inner class of tokenizers
Extends: PostProcessor
byteLevelPostProcessor.post_process(tokens) ⇒ Array.<string>
Post process the given tokens.
Kind: instance method of ByteLevelPostProcessor
Returns: Array.<string> - The post processed tokens.
Param | Type | Description |
---|---|---|
tokens | Array.<string> | The tokens to be post processed. |
tokenizers~Decoder ⇒ Callable
The base class for token decoders.
Kind: inner class of tokenizers
Extends: Callable
- ~Decoder ⇒ Callable
  - new Decoder(config)
  - instance
    - ._call(tokens) ⇒ string
    - .decode(tokens) ⇒ string
    - .decode_chain(tokens) ⇒ Array.<string>
  - static
    - .fromConfig(config) ⇒ Decoder
new Decoder(config)
Creates an instance of Decoder.
Param | Type | Description |
---|---|---|
config | Object | The configuration object. |
decoder._call(tokens) ⇒ string
Calls the decode method.
Kind: instance method of Decoder
Returns: string - The decoded string.
Param | Type | Description |
---|---|---|
tokens | Array.<string> | The list of tokens. |
decoder.decode(tokens) ⇒ string
Decodes a list of tokens.
Kind: instance method of Decoder
Returns: string - The decoded string.
Param | Type | Description |
---|---|---|
tokens | Array.<string> | The list of tokens. |
decoder.decode_chain(tokens) ⇒ Array.<string>
Apply the decoder to a list of tokens.
Kind: instance method of Decoder
Returns: Array.<string> - The decoded list of tokens.
Throws:
Error
If the `decode_chain` method is not implemented in the subclass.
Param | Type | Description |
---|---|---|
tokens | Array.<string> | The list of tokens. |
Decoder.fromConfig(config) ⇒ Decoder
Creates a decoder instance based on the provided configuration.
Kind: static method of Decoder
Returns: Decoder - A decoder instance.
Throws:
Error
If an unknown decoder type is provided.
Param | Type | Description |
---|---|---|
config | Object | The configuration object. |
tokenizers~FuseDecoder
Fuse simply fuses all tokens into one big string. It's usually the last decoding step anyway, but this decoder exists in case some decoders need to happen after that step.
Kind: inner class of tokenizers
fuseDecoder.decode_chain() : *
Kind: instance method of FuseDecoder
tokenizers~WordPieceDecoder ⇒ Decoder
A decoder that decodes a list of WordPiece tokens into a single string.
Kind: inner class of tokenizers
Extends: Decoder
- ~WordPieceDecoder ⇒ Decoder
  - new WordPieceDecoder(config)
new WordPieceDecoder(config)
Creates a new instance of WordPieceDecoder.
Param | Type | Description |
---|---|---|
config | Object | The configuration object. |
config.prefix | string | The prefix used for WordPiece encoding. |
config.cleanup | boolean | Whether to cleanup the decoded string. |
wordPieceDecoder.decode_chain() : *
Kind: instance method of WordPieceDecoder
tokenizers~ByteLevelDecoder ⇒ Decoder
Byte-level decoder for tokenization output. Inherits from the Decoder class.
Kind: inner class of tokenizers
Extends: Decoder
- ~ByteLevelDecoder ⇒ Decoder
  - new ByteLevelDecoder(config)
new ByteLevelDecoder(config)
Create a ByteLevelDecoder object.
Param | Type | Description |
---|---|---|
config | Object | Configuration object. |
byteLevelDecoder.convert_tokens_to_string(tokens) ⇒ string
Convert an array of tokens to string by decoding each byte.
Kind: instance method of ByteLevelDecoder
Returns: string - The decoded string.
Param | Type | Description |
---|---|---|
tokens | Array.<string> | Array of tokens to be decoded. |
byteLevelDecoder.decode_chain() : *
Kind: instance method of ByteLevelDecoder
tokenizers~CTCDecoder
The CTC (Connectionist Temporal Classification) decoder. See https://github.com/huggingface/tokenizers/blob/bb38f390a61883fc2f29d659af696f428d1cda6b/tokenizers/src/decoders/ctc.rs
Kind: inner class of tokenizers
- ~CTCDecoder
  - .convert_tokens_to_string(tokens) ⇒ string
  - .decode_chain() : *
ctcDecoder.convert_tokens_to_string(tokens) ⇒ string
Converts connectionist temporal classification (CTC) output tokens into a single string.
Kind: instance method of CTCDecoder
Returns: string - The decoded string.
Param | Type | Description |
---|---|---|
tokens | Array.<string> | Array of tokens to be decoded. |
ctcDecoder.decode_chain() : *
Kind: instance method of CTCDecoder
tokenizers~DecoderSequence ⇒ Decoder
Apply a sequence of decoders.
Kind: inner class of tokenizers
Extends: Decoder
- ~DecoderSequence ⇒ Decoder
  - new DecoderSequence(config)
new DecoderSequence(config)
Creates a new instance of DecoderSequence.
Param | Type | Description |
---|---|---|
config | Object | The configuration object. |
config.decoders | Array.<Decoder> | The list of decoders to apply. |
decoderSequence.decode_chain() : *
Kind: instance method of DecoderSequence
tokenizers~MetaspacePreTokenizer ⇒ PreTokenizer
This PreTokenizer replaces spaces with the given replacement character, adds a prefix space if requested, and returns a list of tokens.
Kind: inner class of tokenizers
Extends: PreTokenizer
- ~MetaspacePreTokenizer ⇒ PreTokenizer
  - new MetaspacePreTokenizer(config)
  - .pre_tokenize(normalizedTokens) ⇒ Array.<string>
new MetaspacePreTokenizer(config)
Param | Type | Default | Description |
---|---|---|---|
config | Object | | The configuration object for the MetaspacePreTokenizer. |
config.add_prefix_space | boolean | | Whether to add a prefix space to the first token. |
config.replacement | string | | The character to replace spaces with. |
[config.str_rep] | string | "config.replacement" | An optional string representation of the replacement character. |
metaspacePreTokenizer.pre_tokenize(normalizedTokens) ⇒ Array.<string>
This method takes a list of normalized tokens, replaces spaces with the replacement character, adds a prefix space if requested, and returns a new list of tokens.
Kind: instance method of MetaspacePreTokenizer
Returns: Array.<string> - A new list of pre-tokenized tokens.
Param | Type | Description |
---|---|---|
normalizedTokens | Array<string> | string | The list of normalized tokens to pre-tokenize. |
tokenizers~MetaspaceDecoder ⇒ Decoder
MetaspaceDecoder class extends the Decoder class and decodes Metaspace tokenization.
Kind: inner class of tokenizers
Extends: Decoder
- ~MetaspaceDecoder ⇒ Decoder
  - new MetaspaceDecoder(config)
new MetaspaceDecoder(config)
Constructs a new MetaspaceDecoder object.
Param | Type | Description |
---|---|---|
config | Object | The configuration object for the MetaspaceDecoder. |
config.add_prefix_space | boolean | Whether to add a prefix space to the decoded string. |
config.replacement | string | The string to replace spaces with. |
metaspaceDecoder.decode_chain() : *
Kind: instance method of MetaspaceDecoder
tokenizers~Precompiled ⇒ Normalizer
A normalizer that applies a precompiled charsmap. This is useful for applying complex normalizations in C++ and exposing them to JavaScript.
Kind: inner class of tokenizers
Extends: Normalizer
- ~Precompiled ⇒ Normalizer
  - new Precompiled(config)
  - .normalize(text) ⇒ string
new Precompiled(config)
Create a new instance of Precompiled normalizer.
Param | Type | Description |
---|---|---|
config | Object | The configuration object for the Precompiled normalizer. |
config.precompiled_charsmap | Object | The precompiled charsmap object. |
precompiled.normalize(text) ⇒ string
Normalizes the given text by applying the precompiled charsmap.
Kind: instance method of Precompiled
Returns: string - The normalized text.
Param | Type | Description |
---|---|---|
text | string | The text to normalize. |
tokenizers~PreTokenizerSequence ⇒ PreTokenizer
A pre-tokenizer that applies a sequence of pre-tokenizers to the input text.
Kind: inner class of tokenizers
Extends: PreTokenizer
- ~PreTokenizerSequence ⇒ PreTokenizer
  - new PreTokenizerSequence(config)
  - .pre_tokenize_text(text) ⇒ Array.<string>
new PreTokenizerSequence(config)
Creates an instance of PreTokenizerSequence.
Param | Type | Description |
---|---|---|
config | Object | The configuration object for the pre-tokenizer sequence. |
config.pretokenizers | Array.<Object> | An array of pre-tokenizer configurations. |
preTokenizerSequence.pre_tokenize_text(text) ⇒ Array.<string>
Applies each pre-tokenizer in the sequence to the input text in turn.
Kind: instance method of PreTokenizerSequence
Returns: Array.<string> - The pre-tokenized text.
Param | Type | Description |
---|---|---|
text | string | Array<string> | The text(s) to pre-tokenize. |
tokenizers~WhitespaceSplit ⇒ PreTokenizer
Splits a string of text by whitespace characters into individual tokens.
Kind: inner class of tokenizers
Extends: PreTokenizer
- ~WhitespaceSplit ⇒ PreTokenizer
  - new WhitespaceSplit(config)
  - .pre_tokenize_text(text) ⇒ Array.<string>
new WhitespaceSplit(config)
Creates an instance of WhitespaceSplit.
Param | Type | Description |
---|---|---|
config | Object | The configuration object for the pre-tokenizer sequence. |
whitespaceSplit.pre_tokenize_text(text) ⇒ Array.<string>
Pre-tokenizes the input text by splitting it on whitespace characters.
Kind: instance method of WhitespaceSplit
Returns: Array.<string> - An array of tokens produced by splitting the input text on whitespace.
Param | Type | Description |
---|---|---|
text | string | The text to be pre-tokenized. |
tokenizers~ReplacePreTokenizer
Kind: inner class of tokenizers
- ~ReplacePreTokenizer
  - new ReplacePreTokenizer(config)
  - .pre_tokenize_text(text) ⇒ Array.<string>
new ReplacePreTokenizer(config)
Param | Type | Description |
---|---|---|
config | Object | The configuration options for the pre-tokenizer. |
config.pattern | Object | The pattern used to split the text. Can be a string or a regex object. |
config.content | string | What to replace the pattern with. |
replacePreTokenizer.pre_tokenize_text(text) ⇒ Array.<string>
Pre-tokenizes the input text by replacing certain characters.
Kind: instance method of ReplacePreTokenizer
Returns: Array.<string> - An array of tokens produced by replacing certain characters.
Param | Type | Description |
---|---|---|
text | string | The text to be pre-tokenized. |
tokenizers~BYTES_TO_UNICODE ⇒ Object
Returns a list of UTF-8 bytes and a mapping to Unicode strings. Specifically avoids mapping to whitespace/control characters that the BPE code barfs on.
Kind: inner constant of tokenizers
Returns: Object - Object with UTF-8 byte keys and Unicode string values.
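The standard GPT-2 construction of this table can be sketched as follows (assumed to match the constant defined here): bytes that already render as visible characters map to themselves, and the remaining bytes are shifted to codepoints starting at 256 so that every byte has a printable stand-in.
// Build the GPT-2 style byte -> unicode-string table.
function bytesToUnicode() {
    const bs = [];
    for (let b = '!'.charCodeAt(0); b <= '~'.charCodeAt(0); ++b) bs.push(b);
    for (let b = 0xA1; b <= 0xAC; ++b) bs.push(b);
    for (let b = 0xAE; b <= 0xFF; ++b) bs.push(b);
    const cs = bs.slice();
    let n = 0;
    for (let b = 0; b < 256; ++b) {
        if (!bs.includes(b)) {
            bs.push(b);        // invisible/control byte...
            cs.push(256 + n);  // ...gets a printable codepoint above 255
            ++n;
        }
    }
    return Object.fromEntries(bs.map((b, i) => [b, String.fromCharCode(cs[i])]));
}
// e.g. bytesToUnicode()[32] === 'Ġ' (the visible stand-in for the space byte)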
tokenizers~loadTokenizer(pretrained_model_name_or_path, options) ⇒ Promise.<Array>
Loads a tokenizer from the specified path.
Kind: inner method of tokenizers
Returns: Promise.<Array> - A promise that resolves with information about the loaded tokenizer.
Param | Type | Description |
---|---|---|
pretrained_model_name_or_path | string | The path to the tokenizer directory. |
options | PretrainedOptions | Additional options for loading the tokenizer. |
tokenizers~createPattern(pattern, invert) ⇒ RegExp | string | null
Helper method to construct a pattern from a config object.
Kind: inner method of tokenizers
Returns: RegExp | string | null - The compiled pattern.
Param | Type | Default | Description |
---|---|---|---|
pattern | Object | | The pattern object. |
invert | boolean | true | Whether to invert the pattern (only applicable for Regex patterns). |
tokenizers~objectToMap(obj) ⇒ Map.<string, any>
Helper function to convert an Object to a Map.
Kind: inner method of tokenizers
Returns: Map.<string, any> - The map.
Param | Type | Description |
---|---|---|
obj | Object | The object to convert. |
tokenizers~clean_up_tokenization(text) ⇒ string
Clean up a list of simple English tokenization artifacts like spaces before punctuation and abbreviated forms.
Kind: inner method of tokenizers
Returns: string - The cleaned up text.
Param | Type | Description |
---|---|---|
text | string | The text to clean up. |
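The kind of substitutions involved can be shown with a before/after pair (the replacement set is assumed to mirror the one used by the Python transformers library):
// clean_up_tokenization("do n't stop , please !")
// -> "don't stop, please!"
// i.e. ' .', ' ,', ' !', ' ?' lose their leading space, and contractions
// such as " n't", " 's", " 'm", " 've", " 're" are re-attached to the previous word.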
tokenizers~remove_accents(text) ⇒ string
Helper function to remove accents from a string.
Kind: inner method of tokenizers
Returns: string - The text with accents removed.
Param | Type | Description |
---|---|---|
text | string | The text to remove accents from. |
tokenizers~lowercase_and_remove_accent(text) ⇒ string
Helper function to lowercase a string and remove accents.
Kind: inner method of tokenizers
Returns: string - The lowercased text with accents removed.
Param | Type | Description |
---|---|---|
text | string | The text to lowercase and remove accents from. |
tokenizers~fuse(arr, value)
Helper function to fuse consecutive values in an array equal to the specified value.
Kind: inner method of tokenizers
Param | Type | Description |
---|---|---|
arr | Array | The input array |
value | any | The value to fuse on. |
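A sketch of the described behaviour (an illustration, not the exported implementation): runs of the target value collapse into a single occurrence.
// fuse([1, 0, 0, 0, 2, 0, 3], 0) -> [1, 0, 2, 0, 3]
function fuse(arr, value) {
    const fused = [];
    for (const x of arr) {
        // Skip x when it repeats `value` immediately after another `value`.
        if (x === value && fused[fused.length - 1] === value) continue;
        fused.push(x);
    }
    return fused;
}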
tokenizers~whitespace_split(text) ⇒ Array.<string>
Split a string on whitespace.
Kind: inner method of tokenizers
Returns: Array.<string> - The split string.
Param | Type | Description |
---|---|---|
text | string | The text to split. |
tokenizers~add_token_types(inputs) ⇒ Object
Helper method for adding token_type_ids to model inputs.
Kind: inner method of tokenizers
Returns: Object - The prepared inputs object.
Param | Type | Description |
---|---|---|
inputs | Object | An object containing the input ids and attention mask. |
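Conceptually, this helper attaches a token_type_ids field shaped like input_ids and filled with zeros, marking every position as belonging to the first segment. A sketch with plain arrays (the actual implementation operates on tensor data):
// Hypothetical plain-array version of the helper.
function addTokenTypes(inputs) {
    return {
        ...inputs,
        token_type_ids: inputs.input_ids.map(row => row.map(() => 0)),
    };
}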
tokenizers~PretrainedOptions : *
Kind: inner typedef of tokenizers
tokenizers~BPENode : Object
Kind: inner typedef of tokenizers
Properties
Name | Type | Description |
---|---|---|
token | string | The token associated with the node |
bias | number | A positional bias for the node. |
[score] | number | The score of the node. |
[prev] | BPENode | The previous node in the linked list. |
[next] | BPENode | The next node in the linked list. |
tokenizers~SplitDelimiterBehavior : 'removed' | 'isolated' | 'mergedWithPrevious' | 'mergedWithNext' | 'contiguous'
Kind: inner typedef of tokenizers
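How each behaviour treats the delimiter can be illustrated by splitting 'the-final--countdown' on '-' (following the semantics documented for the Hugging Face tokenizers library):
// removed:            ['the', 'final', 'countdown']
// isolated:           ['the', '-', 'final', '-', '-', 'countdown']
// mergedWithPrevious: ['the-', 'final-', '-', 'countdown']
// mergedWithNext:     ['the', '-final', '-', '-countdown']
// contiguous:         ['the', '-', 'final', '--', 'countdown']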