fix: update added tokens to be more agnostic #107

Merged: 1 commit, Oct 23, 2024
model2vec/distill/tokenizer.py (13 changes: 7 additions & 6 deletions)
@@ -2,6 +2,7 @@
 
 import json
 import logging
+from typing import Any
 
 from tokenizers import Tokenizer
 
@@ -36,11 +37,11 @@ def remove_tokens(tokenizer: Tokenizer, tokens_to_remove: list[str]) -> Tokenizer:
         logger.info("No tokens to remove.")
         return Tokenizer.from_str(tokenizer.to_str())
 
-    tokenizer_data = json.loads(tokenizer.to_str())
+    tokenizer_data: dict[str, Any] = json.loads(tokenizer.to_str())
 
     # Find all added tokens
-    added_tokens = tokenizer_data["added_tokens"]
-    added_tokens_str = {token["content"] for token in added_tokens}
+    added_tokens: list[dict[str, Any]] = tokenizer_data.get("added_tokens", [])
+    added_tokens_str: set[str] = {token["content"] for token in added_tokens}
 
     # Remove all added tokens from the list of tokens to remove.
     # Things will go bad if we keep them.
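The switch from tokenizer_data["added_tokens"] to tokenizer_data.get("added_tokens", []) means the function no longer assumes the serialized tokenizer carries an added_tokens section at all. A minimal sketch of what that section looks like for a typical tokenizer; the checkpoint name and the printed set are illustrative, not taken from this PR:

    import json
    from typing import Any

    from tokenizers import Tokenizer

    # Any pretrained tokenizer works here; "bert-base-uncased" is just an example checkpoint.
    tokenizer = Tokenizer.from_pretrained("bert-base-uncased")
    tokenizer_data: dict[str, Any] = json.loads(tokenizer.to_str())

    # Each entry is a dict such as {"id": 101, "content": "[CLS]", "special": True, ...}.
    # Using .get with a default keeps this working for tokenizers that have no added tokens.
    added_tokens: list[dict[str, Any]] = tokenizer_data.get("added_tokens", [])
    added_tokens_str: set[str] = {token["content"] for token in added_tokens}
    print(added_tokens_str)  # e.g. {'[PAD]', '[UNK]', '[CLS]', '[SEP]', '[MASK]'}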
@@ -76,9 +77,9 @@ def remove_tokens(tokenizer: Tokenizer, tokens_to_remove: list[str]) -> Tokenizer:
         raise ValueError(f"Unknown model type {model_type}")
 
     # Reindex the special tokens (i.e., CLS and SEP for BertTokenizers.)
-    special_tokens_post_processor: dict[str, dict] = tokenizer_data["post_processor"]["special_tokens"]
-    for token, token_data in special_tokens_post_processor.items():
-        token_data["ids"] = [reindexed[token] for token in token_data["tokens"]]
+    added_tokens = tokenizer_data.get("added_tokens", [])
+    for token_data in added_tokens:
+        token_data["id"] = reindexed[token_data["content"]]
 
     # Reinitialize the tokenizer from the json.
     tokenizer = Tokenizer.from_str(json.dumps(tokenizer_data))
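This replaced block is what makes the change model-agnostic: instead of walking post_processor["special_tokens"], which only exists for BERT-style post processors, every entry in added_tokens is remapped through the reindexed mapping by its content. A standalone sketch of that remapping, with made-up ids and a hypothetical reindexed dict standing in for the one built earlier in remove_tokens:

    from typing import Any

    # Hypothetical mapping from token string to its new id after removal;
    # in remove_tokens this is built while reindexing the remaining vocabulary.
    reindexed: dict[str, int] = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2}

    # Illustrative added_tokens entries as they appear in the serialized tokenizer JSON.
    added_tokens: list[dict[str, Any]] = [
        {"id": 0, "content": "[PAD]", "special": True},
        {"id": 101, "content": "[CLS]", "special": True},
        {"id": 102, "content": "[SEP]", "special": True},
    ]

    # Remap every added token by its content, regardless of the underlying model type.
    for token_data in added_tokens:
        token_data["id"] = reindexed[token_data["content"]]

    assert [t["id"] for t in added_tokens] == [0, 1, 2]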