From b47eda8ea202dc3fee96d22c3df19c23d67a3e16 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A1rton=20Kardos=C3=98?= Date: Wed, 1 Apr 2026 15:28:15 +0200 Subject: [PATCH 01/19] Started working on contextual encoding and ragged array manipulation --- turftopic/encoders/contextual.py | 206 +++++++++++++++++++++++++++++++ turftopic/ragged.py | 156 +++++++++++++++++++++++ 2 files changed, 362 insertions(+) create mode 100644 turftopic/encoders/contextual.py create mode 100644 turftopic/ragged.py diff --git a/turftopic/encoders/contextual.py b/turftopic/encoders/contextual.py new file mode 100644 index 0000000..bd538d0 --- /dev/null +++ b/turftopic/encoders/contextual.py @@ -0,0 +1,206 @@ +import itertools +import warnings +from typing import Iterable, Union + +import numpy as np +import torch +from sentence_transformers import SentenceTransformer +from sklearn.preprocessing import normalize +from tokenizers import Tokenizer +from tqdm import trange + + +def is_contextual(encoder): + return hasattr(encoder, "encode_tokens") + + +Offsets = list[tuple[int, int]] +Lengths = list[int] + + +def flatten_embeddings( + embeddings: list[np.ndarray], +) -> tuple[np.ndarray, Lengths]: + """Flattens ragged array to normal array. + + Parameters + ---------- + embeddings: list[ndarray] + Ragged embedding array. + + Returns + ------- + flat_embeddings: ndarray + Flattened embedding array. + lengths: list[int] + Length of each document in the corpus. + """ + lengths = [emb.shape[0] for emb in embeddings] + return np.concatenate(embeddings, axis=0), lengths + + +def unflatten_embeddings( + flat_embeddings: np.ndarray, lengths: Lengths +) -> list[np.ndarray]: + """Unflattens flat array to ragged array. + + Parameters + ---------- + flat_embeddings: ndarray + Flattened embedding array. + lengths: list[int] + Length of each document in the corpus. + + Returns + ------- + embeddings: list[ndarray] + Ragged embedding array. 
+ + """ + embeddings = [] + start_index = 0 + for length in lengths: + embeddings.append(flat_embeddings[start_index:length]) + start_index += length + return embeddings + + +class ContextTransformer(SentenceTransformer): + def encode( + self, sentences: Union[str, list[str], np.ndarray], *args, **kwargs + ): + warnings.warn( + "Encoder is contextual but topic model is not using contextual embeddings. Perhaps you wanted to use another topic model." + ) + return super().encode(sentences, *args, **kwargs) + + def _encode_tokens( + self, + texts, + batch_size=32, + show_progress_bar=True, + ) -> tuple[list[np.ndarray], list[Offsets]]: + """ + Returns + ------- + token_embeddings: list[np.ndarray] + Embedding matrix of tokens for each document. + offsets: list[list[tuple[int, int]]] + Start and end character of each token in each document. + """ + token_embeddings = [] + offsets = [] + tokenizer = Tokenizer.from_pretrained(self.model_card_data.base_model) + for start_index in trange( + 0, + len(texts), + batch_size, + disable=not show_progress_bar, + desc="Encoding tokens...", + ): + batch = texts[start_index : start_index + batch_size] + features = self.tokenize(batch) + with torch.no_grad(): + output_features = self.forward(features) + n_tokens = output_features["attention_mask"].sum(axis=1) + # Find first nonzero elements in each document + # The document could be padded from the left, so we have to watch out for this. 
+ start_token = torch.argmax( + (output_features["attention_mask"] > 0).to(torch.long), axis=1 + ) + end_token = start_token + n_tokens + for i_doc in range(len(batch)): + _token_embeddings = output_features["token_embeddings"][ + i_doc, start_token[i_doc] : end_token[i_doc], : + ].numpy(force=True) + _offsets = tokenizer.encode(batch[i_doc]).offsets + token_embeddings.append(_token_embeddings) + offsets.append(_offsets) + return token_embeddings, offsets + + def encode_tokens( + self, + sentences: list[str], + batch_size: int = 32, + show_progress_bar: bool = True, + ): + """Produces contextual token embeddings over all documents. + + Parameters + ---------- + sentences: list[str] + Documents to encode contextually. + batch_size: int, default 32 + Size of the batch of document to encode at once. + show_progress_bar: bool, default True + Indicates whether a progress bar should be displayed when encoding. + + Returns + ------- + token_embeddings: list[np.ndarray] + Embedding matrix of tokens for each document. + offsets: list[list[tuple[int, int]]] + Start and end character of each token in each document. + """ + # This is needed because the above implementation does not normalize embeddings, + # which normally happens to document embeddings. + token_embeddings, offsets = self._encode_tokens( + sentences, + batch_size=batch_size, + show_progress_bar=show_progress_bar, + ) + token_embeddings = [normalize(emb) for emb in token_embeddings] + return token_embeddings, offsets + + def encode_windows( + self, + sentences: list[str], + batch_size: int = 32, + window_size: int = 50, + step_size: int = 40, + show_progress_bar: bool = True, + ): + """Produces contextual embeddings for a sliding window of tokens similar to C-Top2Vec. + + Parameters + ---------- + sentences: list[str] + Documents to encode contextually. + batch_size: int, default 32 + Size of the batch of document to encode at once. + window_size: int, default 50 + Size of the sliding window. 
+ step_size: int, default 40 + Step size of the window. + If step_size < window_size, windows will overlap. + If step_size == window_size, then windows are separate. + If step_size > window_size, there will be gaps between the windows. + In this case, we throw a warning, as this is probably unintended behaviour. + show_progress_bar: bool, default True + Indicates whether a progress bar should be displayed when encoding. + + Returns + ------- + window_embeddings: list[np.ndarray] + Embedding matrix of windows for each document. + offsets: list[list[tuple[int, int]]] + Start and end character of each token in each document. + """ + token_embeddings, token_offsets = self._encode_tokens( + sentences, + batch_size=batch_size, + show_progress_bar=show_progress_bar, + ) + window_embeddings = [] + window_offsets = [] + for emb, offs in zip(token_embeddings, token_offsets): + _offsets = [] + _embeddings = [] + for start_index in trange(0, len(emb), step_size): + end_index = start_index + window_size + window_emb = np.mean(emb[start_index:end_index], axis=0) + _embeddings.append(window_emb) + _offsets.append((offs[start_index][0], offs[end_index][1])) + window_embeddings.append(normalize(np.stack(_embeddings))) + window_offsets.append(_offsets) + return window_embeddings, window_offsets diff --git a/turftopic/ragged.py b/turftopic/ragged.py new file mode 100644 index 0000000..5654b23 --- /dev/null +++ b/turftopic/ragged.py @@ -0,0 +1,156 @@ +from typing import Callable, Optional + +import numpy as np +from sklearn.base import TransformerMixin + +Lengths = list[int] + + +def flatten_repr( + repr: list[np.ndarray], +) -> tuple[np.ndarray, Lengths]: + """Flattens ragged array to normal array. + + Parameters + ---------- + repr: list[ndarray] + Ragged representation array. + + Returns + ------- + flat_repr: ndarray + Flattened representation array. + lengths: list[int] + Length of each document in the corpus. 
+ """ + lengths = [r.shape[0] for r in repr] + return np.concatenate(repr, axis=0), lengths + + +def unflatten_repr( + flat_repr: np.ndarray, lengths: Lengths +) -> list[np.ndarray]: + """Unflattens flat array to ragged array. + + Parameters + ---------- + flat_repr: ndarray + Flattened representation array. + lengths: list[int] + Length of each document in the corpus. + + Returns + ------- + repr: list[ndarray] + Ragged representation array. + + """ + repr = [] + start_index = 0 + for length in lengths: + repr.append(flat_repr[start_index:length]) + start_index += length + return repr + + +def pool_flat(flat_repr: np.ndarray, lengths: Lengths, agg=np.mean): + pooled = [] + start_index = 0 + for length in lengths: + pooled.append(agg(flat_repr[start_index:length], axis=0)) + start_index += length + return np.stack(pooled) + + +class TokenLevel(TransformerMixin): + def __init__( + self, + model: TransformerMixin, + batch_size: int = 32, + pooling: Optional[Callable] = None, + ): + self.model = model + self.batch_size = batch_size + self.pooling = pooling + + def transform( + self, raw_documents: list[str], embeddings: list[np.ndarray] = None + ): + if embeddings is None: + embeddings = self.model.encoder.encode_tokens( + raw_documents, batch_size=self.batch_size + ) + flat_embeddings, lengths = flatten_repr(embeddings) + out_array = self.model.transform( + raw_documents, embeddings=flat_embeddings + ) + if self.pooling is None: + return unflatten_repr(out_array, lengths) + else: + return pool_flat(out_array, lengths) + + def fit_transform( + self, + raw_documents: list[str], + y=None, + embeddings: list[np.ndarray] = None, + ): + if embeddings is None: + embeddings = self.model.encoder.encode_tokens( + raw_documents, batch_size=self.batch_size + ) + flat_embeddings, lengths = flatten_repr(embeddings) + out_array = self.model.fit_transform( + raw_documents, y, embeddings=flat_embeddings + ) + if self.pooling is None: + return unflatten_repr(out_array, lengths) + 
else: + return pool_flat(out_array, lengths) + + +class Windowed(TransformerMixin): + def __init__( + self, + model: TransformerMixin, + batch_size: int = 32, + pooling: Optional[Callable] = None, + ): + self.model = model + self.batch_size = batch_size + self.pooling = pooling + + def transform( + self, raw_documents: list[str], embeddings: list[np.ndarray] = None + ): + if embeddings is None: + embeddings = self.model.encoder.encode_tokens( + raw_documents, batch_size=self.batch_size + ) + flat_embeddings, lengths = flatten_repr(embeddings) + out_array = self.model.transform( + raw_documents, embeddings=flat_embeddings + ) + if self.pooling is None: + return unflatten_repr(out_array, lengths) + else: + return pool_flat(out_array, lengths) + + def fit_transform( + self, + raw_documents: list[str], + y=None, + embeddings: list[np.ndarray] = None, + ): + if embeddings is None: + embeddings = self.model.encoder.encode_tokens( + raw_documents, batch_size=self.batch_size + ) + flat_embeddings, lengths = flatten_repr(embeddings) + out_array = self.model.fit_transform( + raw_documents, y, embeddings=flat_embeddings + ) + if self.pooling is None: + return unflatten_repr(out_array, lengths) + else: + return pool_flat(out_array, lengths) From 6db859299431f0ae3e6a0d54a4c825982031c087 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A1rton=20Kardos=C3=98?= Date: Thu, 2 Apr 2026 10:37:27 +0200 Subject: [PATCH 02/19] Renamed to late --- turftopic/encoders/contextual.py | 49 +------------------------------- turftopic/{ragged.py => late.py} | 0 2 files changed, 1 insertion(+), 48 deletions(-) rename turftopic/{ragged.py => late.py} (100%) diff --git a/turftopic/encoders/contextual.py b/turftopic/encoders/contextual.py index bd538d0..1a84c4b 100644 --- a/turftopic/encoders/contextual.py +++ b/turftopic/encoders/contextual.py @@ -18,54 +18,7 @@ def is_contextual(encoder): Lengths = list[int] -def flatten_embeddings( - embeddings: list[np.ndarray], -) -> tuple[np.ndarray, Lengths]: - 
"""Flattens ragged array to normal array. - - Parameters - ---------- - embeddings: list[ndarray] - Ragged embedding array. - - Returns - ------- - flat_embeddings: ndarray - Flattened embedding array. - lengths: list[int] - Length of each document in the corpus. - """ - lengths = [emb.shape[0] for emb in embeddings] - return np.concatenate(embeddings, axis=0), lengths - - -def unflatten_embeddings( - flat_embeddings: np.ndarray, lengths: Lengths -) -> list[np.ndarray]: - """Unflattens flat array to ragged array. - - Parameters - ---------- - flat_embeddings: ndarray - Flattened embedding array. - lengths: list[int] - Length of each document in the corpus. - - Returns - ------- - embeddings: list[ndarray] - Ragged embedding array. - - """ - embeddings = [] - start_index = 0 - for length in lengths: - embeddings.append(flat_embeddings[start_index:length]) - start_index += length - return embeddings - - -class ContextTransformer(SentenceTransformer): +class LateTransformer(SentenceTransformer): def encode( self, sentences: Union[str, list[str], np.ndarray], *args, **kwargs ): diff --git a/turftopic/ragged.py b/turftopic/late.py similarity index 100% rename from turftopic/ragged.py rename to turftopic/late.py From 3ececccbd6e68101e7a0add75e24d7c758a61e1d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A1rton=20Kardos=C3=98?= Date: Thu, 2 Apr 2026 10:55:48 +0200 Subject: [PATCH 03/19] Renamed to LateSentenceTransformer --- .../{contextual.py => late_interaction.py} | 2 +- turftopic/late.py | 77 ++++++------------- 2 files changed, 24 insertions(+), 55 deletions(-) rename turftopic/encoders/{contextual.py => late_interaction.py} (99%) diff --git a/turftopic/encoders/contextual.py b/turftopic/encoders/late_interaction.py similarity index 99% rename from turftopic/encoders/contextual.py rename to turftopic/encoders/late_interaction.py index 1a84c4b..ee53106 100644 --- a/turftopic/encoders/contextual.py +++ b/turftopic/encoders/late_interaction.py @@ -18,7 +18,7 @@ def 
is_contextual(encoder): Lengths = list[int] -class LateTransformer(SentenceTransformer): +class LateSentenceTransformer(SentenceTransformer): def encode( self, sentences: Union[str, list[str], np.ndarray], *args, **kwargs ): diff --git a/turftopic/late.py b/turftopic/late.py index 5654b23..d103a62 100644 --- a/turftopic/late.py +++ b/turftopic/late.py @@ -3,6 +3,8 @@ import numpy as np from sklearn.base import TransformerMixin +from turftopic.encoders.contextual import Offsets + Lengths = list[int] @@ -62,7 +64,17 @@ def pool_flat(flat_repr: np.ndarray, lengths: Lengths, agg=np.mean): return np.stack(pooled) -class TokenLevel(TransformerMixin): +def get_document_chunks( + raw_documents: list[str], offsets: list[Offsets] +) -> list[str]: + chunks = [] + for doc, _offs in zip(raw_documents, offsets): + for start_char, end_char in _offs: + chunks.append(raw_documents[start_char, end_char]) + return chunks + + +class LateModel(TransformerMixin): def __init__( self, model: TransformerMixin, @@ -74,63 +86,18 @@ def __init__( self.pooling = pooling def transform( - self, raw_documents: list[str], embeddings: list[np.ndarray] = None - ): - if embeddings is None: - embeddings = self.model.encoder.encode_tokens( - raw_documents, batch_size=self.batch_size - ) - flat_embeddings, lengths = flatten_repr(embeddings) - out_array = self.model.transform( - raw_documents, embeddings=flat_embeddings - ) - if self.pooling is None: - return unflatten_repr(out_array, lengths) - else: - return pool_flat(out_array, lengths) - - def fit_transform( self, raw_documents: list[str], - y=None, embeddings: list[np.ndarray] = None, + offsets: list[Offsets] = None, ): - if embeddings is None: - embeddings = self.model.encoder.encode_tokens( + if (embeddings is None) or (offsets is None): + embeddings, offsets = self.model.encoder.encode_tokens( raw_documents, batch_size=self.batch_size ) flat_embeddings, lengths = flatten_repr(embeddings) - out_array = self.model.fit_transform( - raw_documents, y, 
embeddings=flat_embeddings - ) - if self.pooling is None: - return unflatten_repr(out_array, lengths) - else: - return pool_flat(out_array, lengths) - - -class Windowed(TransformerMixin): - def __init__( - self, - model: TransformerMixin, - batch_size: int = 32, - pooling: Optional[Callable] = None, - ): - self.model = model - self.batch_size = batch_size - self.pooling = pooling - - def transform( - self, raw_documents: list[str], embeddings: list[np.ndarray] = None - ): - if embeddings is None: - embeddings = self.model.encoder.encode_tokens( - raw_documents, batch_size=self.batch_size - ) - flat_embeddings, lengths = flatten_repr(embeddings) - out_array = self.model.transform( - raw_documents, embeddings=flat_embeddings - ) + chunks = get_document_chunks(raw_documents, offsets) + out_array = self.model.transform(chunks, embeddings=flat_embeddings) if self.pooling is None: return unflatten_repr(out_array, lengths) else: @@ -141,14 +108,16 @@ def fit_transform( raw_documents: list[str], y=None, embeddings: list[np.ndarray] = None, + offsets: list[Offsets] = None, ): - if embeddings is None: - embeddings = self.model.encoder.encode_tokens( + if (embeddings is None) or (offsets is None): + embeddings, offsets = self.model.encoder.encode_tokens( raw_documents, batch_size=self.batch_size ) flat_embeddings, lengths = flatten_repr(embeddings) + chunks = get_document_chunks(raw_documents, offsets) out_array = self.model.fit_transform( - raw_documents, y, embeddings=flat_embeddings + chunks, embeddings=flat_embeddings ) if self.pooling is None: return unflatten_repr(out_array, lengths) From f50b983ab0ede31120ee467718be612a31ef2805 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A1rton=20Kardos=C3=98?= Date: Thu, 2 Apr 2026 11:07:54 +0200 Subject: [PATCH 04/19] Moved late interaction code to one file --- turftopic/encoders/late_interaction.py | 159 --------------------- turftopic/late.py | 188 +++++++++++++++++++++++-- 2 files changed, 178 insertions(+), 169 deletions(-) 
delete mode 100644 turftopic/encoders/late_interaction.py diff --git a/turftopic/encoders/late_interaction.py b/turftopic/encoders/late_interaction.py deleted file mode 100644 index ee53106..0000000 --- a/turftopic/encoders/late_interaction.py +++ /dev/null @@ -1,159 +0,0 @@ -import itertools -import warnings -from typing import Iterable, Union - -import numpy as np -import torch -from sentence_transformers import SentenceTransformer -from sklearn.preprocessing import normalize -from tokenizers import Tokenizer -from tqdm import trange - - -def is_contextual(encoder): - return hasattr(encoder, "encode_tokens") - - -Offsets = list[tuple[int, int]] -Lengths = list[int] - - -class LateSentenceTransformer(SentenceTransformer): - def encode( - self, sentences: Union[str, list[str], np.ndarray], *args, **kwargs - ): - warnings.warn( - "Encoder is contextual but topic model is not using contextual embeddings. Perhaps you wanted to use another topic model." - ) - return super().encode(sentences, *args, **kwargs) - - def _encode_tokens( - self, - texts, - batch_size=32, - show_progress_bar=True, - ) -> tuple[list[np.ndarray], list[Offsets]]: - """ - Returns - ------- - token_embeddings: list[np.ndarray] - Embedding matrix of tokens for each document. - offsets: list[list[tuple[int, int]]] - Start and end character of each token in each document. - """ - token_embeddings = [] - offsets = [] - tokenizer = Tokenizer.from_pretrained(self.model_card_data.base_model) - for start_index in trange( - 0, - len(texts), - batch_size, - disable=not show_progress_bar, - desc="Encoding tokens...", - ): - batch = texts[start_index : start_index + batch_size] - features = self.tokenize(batch) - with torch.no_grad(): - output_features = self.forward(features) - n_tokens = output_features["attention_mask"].sum(axis=1) - # Find first nonzero elements in each document - # The document could be padded from the left, so we have to watch out for this. 
- start_token = torch.argmax( - (output_features["attention_mask"] > 0).to(torch.long), axis=1 - ) - end_token = start_token + n_tokens - for i_doc in range(len(batch)): - _token_embeddings = output_features["token_embeddings"][ - i_doc, start_token[i_doc] : end_token[i_doc], : - ].numpy(force=True) - _offsets = tokenizer.encode(batch[i_doc]).offsets - token_embeddings.append(_token_embeddings) - offsets.append(_offsets) - return token_embeddings, offsets - - def encode_tokens( - self, - sentences: list[str], - batch_size: int = 32, - show_progress_bar: bool = True, - ): - """Produces contextual token embeddings over all documents. - - Parameters - ---------- - sentences: list[str] - Documents to encode contextually. - batch_size: int, default 32 - Size of the batch of document to encode at once. - show_progress_bar: bool, default True - Indicates whether a progress bar should be displayed when encoding. - - Returns - ------- - token_embeddings: list[np.ndarray] - Embedding matrix of tokens for each document. - offsets: list[list[tuple[int, int]]] - Start and end character of each token in each document. - """ - # This is needed because the above implementation does not normalize embeddings, - # which normally happens to document embeddings. - token_embeddings, offsets = self._encode_tokens( - sentences, - batch_size=batch_size, - show_progress_bar=show_progress_bar, - ) - token_embeddings = [normalize(emb) for emb in token_embeddings] - return token_embeddings, offsets - - def encode_windows( - self, - sentences: list[str], - batch_size: int = 32, - window_size: int = 50, - step_size: int = 40, - show_progress_bar: bool = True, - ): - """Produces contextual embeddings for a sliding window of tokens similar to C-Top2Vec. - - Parameters - ---------- - sentences: list[str] - Documents to encode contextually. - batch_size: int, default 32 - Size of the batch of document to encode at once. - window_size: int, default 50 - Size of the sliding window. 
- step_size: int, default 40 - Step size of the window. - If step_size < window_size, windows will overlap. - If step_size == window_size, then windows are separate. - If step_size > window_size, there will be gaps between the windows. - In this case, we throw a warning, as this is probably unintended behaviour. - show_progress_bar: bool, default True - Indicates whether a progress bar should be displayed when encoding. - - Returns - ------- - window_embeddings: list[np.ndarray] - Embedding matrix of windows for each document. - offsets: list[list[tuple[int, int]]] - Start and end character of each token in each document. - """ - token_embeddings, token_offsets = self._encode_tokens( - sentences, - batch_size=batch_size, - show_progress_bar=show_progress_bar, - ) - window_embeddings = [] - window_offsets = [] - for emb, offs in zip(token_embeddings, token_offsets): - _offsets = [] - _embeddings = [] - for start_index in trange(0, len(emb), step_size): - end_index = start_index + window_size - window_emb = np.mean(emb[start_index:end_index], axis=0) - _embeddings.append(window_emb) - _offsets.append((offs[start_index][0], offs[end_index][1])) - window_embeddings.append(normalize(np.stack(_embeddings))) - window_offsets.append(_offsets) - return window_embeddings, window_offsets diff --git a/turftopic/late.py b/turftopic/late.py index d103a62..dace679 100644 --- a/turftopic/late.py +++ b/turftopic/late.py @@ -1,13 +1,160 @@ -from typing import Callable, Optional +import itertools +import warnings +from typing import Callable, Iterable, Optional, Union import numpy as np +import torch +from sentence_transformers import SentenceTransformer from sklearn.base import TransformerMixin +from sklearn.preprocessing import normalize +from tokenizers import Tokenizer +from tqdm import trange -from turftopic.encoders.contextual import Offsets - +Offsets = list[tuple[int, int]] Lengths = list[int] +class LateSentenceTransformer(SentenceTransformer): + def encode( + self, 
sentences: Union[str, list[str], np.ndarray], *args, **kwargs + ): + warnings.warn( + "Encoder is contextual but topic model is not using contextual embeddings. Perhaps you wanted to use another topic model." + ) + return super().encode(sentences, *args, **kwargs) + + def _encode_tokens( + self, + texts, + batch_size=32, + show_progress_bar=True, + ) -> tuple[list[np.ndarray], list[Offsets]]: + """ + Returns + ------- + token_embeddings: list[np.ndarray] + Embedding matrix of tokens for each document. + offsets: list[list[tuple[int, int]]] + Start and end character of each token in each document. + """ + token_embeddings = [] + offsets = [] + tokenizer = Tokenizer.from_pretrained(self.model_card_data.base_model) + for start_index in trange( + 0, + len(texts), + batch_size, + disable=not show_progress_bar, + desc="Encoding tokens...", + ): + batch = texts[start_index : start_index + batch_size] + features = self.tokenize(batch) + with torch.no_grad(): + output_features = self.forward(features) + n_tokens = output_features["attention_mask"].sum(axis=1) + # Find first nonzero elements in each document + # The document could be padded from the left, so we have to watch out for this. + start_token = torch.argmax( + (output_features["attention_mask"] > 0).to(torch.long), axis=1 + ) + end_token = start_token + n_tokens + for i_doc in range(len(batch)): + _token_embeddings = output_features["token_embeddings"][ + i_doc, start_token[i_doc] : end_token[i_doc], : + ].numpy(force=True) + _offsets = tokenizer.encode(batch[i_doc]).offsets + token_embeddings.append(_token_embeddings) + offsets.append(_offsets) + return token_embeddings, offsets + + def encode_tokens( + self, + sentences: list[str], + batch_size: int = 32, + show_progress_bar: bool = True, + ): + """Produces contextual token embeddings over all documents. + + Parameters + ---------- + sentences: list[str] + Documents to encode contextually. 
+ batch_size: int, default 32 + Size of the batch of document to encode at once. + show_progress_bar: bool, default True + Indicates whether a progress bar should be displayed when encoding. + + Returns + ------- + token_embeddings: list[np.ndarray] + Embedding matrix of tokens for each document. + offsets: list[list[tuple[int, int]]] + Start and end character of each token in each document. + """ + # This is needed because the above implementation does not normalize embeddings, + # which normally happens to document embeddings. + token_embeddings, offsets = self._encode_tokens( + sentences, + batch_size=batch_size, + show_progress_bar=show_progress_bar, + ) + token_embeddings = [normalize(emb) for emb in token_embeddings] + return token_embeddings, offsets + + def encode_windows( + self, + sentences: list[str], + batch_size: int = 32, + window_size: int = 50, + step_size: int = 40, + show_progress_bar: bool = True, + ): + """Produces contextual embeddings for a sliding window of tokens similar to C-Top2Vec. + + Parameters + ---------- + sentences: list[str] + Documents to encode contextually. + batch_size: int, default 32 + Size of the batch of document to encode at once. + window_size: int, default 50 + Size of the sliding window. + step_size: int, default 40 + Step size of the window. + If step_size < window_size, windows will overlap. + If step_size == window_size, then windows are separate. + If step_size > window_size, there will be gaps between the windows. + In this case, we throw a warning, as this is probably unintended behaviour. + show_progress_bar: bool, default True + Indicates whether a progress bar should be displayed when encoding. + + Returns + ------- + window_embeddings: list[np.ndarray] + Embedding matrix of windows for each document. + offsets: list[list[tuple[int, int]]] + Start and end character of each token in each document. 
+ """ + token_embeddings, token_offsets = self._encode_tokens( + sentences, + batch_size=batch_size, + show_progress_bar=show_progress_bar, + ) + window_embeddings = [] + window_offsets = [] + for emb, offs in zip(token_embeddings, token_offsets): + _offsets = [] + _embeddings = [] + for start_index in trange(0, len(emb), step_size): + end_index = start_index + window_size + window_emb = np.mean(emb[start_index:end_index], axis=0) + _embeddings.append(window_emb) + _offsets.append((offs[start_index][0], offs[end_index][1])) + window_embeddings.append(normalize(np.stack(_embeddings))) + window_offsets.append(_offsets) + return window_embeddings, window_offsets + + def flatten_repr( repr: list[np.ndarray], ) -> tuple[np.ndarray, Lengths]: @@ -78,12 +225,37 @@ class LateModel(TransformerMixin): def __init__( self, model: TransformerMixin, - batch_size: int = 32, + batch_size: Optional[int] = 32, + window_size: Optional[int] = None, + step_size: Optional[int] = None, pooling: Optional[Callable] = None, ): self.model = model self.batch_size = batch_size self.pooling = pooling + self.window_size = window_size + self.step_size = step_size + + def encode_documents( + self, raw_documents: list[str] + ) -> tuple[np.ndarray, list[Offsets]]: + if self.window_size is None: + embeddings, offsets = self.model.encoder.encode_tokens( + raw_documents, batch_size=self.batch_size + ) + return embeddings, offsets + # If the window_size is specified, but not step_size, we set the step size to the window size + # Thereby getting non-overlapping windows + step_size = ( + self.window_size if self.step_size is None else self.step_size + ) + embeddings, offsets = self.model.encoder.encode_windows( + raw_documents, + batch_size=self.batch_size, + window_size=self.window_size, + step_size=step_size, + ) + return embeddings, offsets def transform( self, @@ -92,9 +264,7 @@ def transform( offsets: list[Offsets] = None, ): if (embeddings is None) or (offsets is None): - embeddings, offsets = 
self.model.encoder.encode_tokens( - raw_documents, batch_size=self.batch_size - ) + embeddings, offsets = self.encode_documents(raw_documents) flat_embeddings, lengths = flatten_repr(embeddings) chunks = get_document_chunks(raw_documents, offsets) out_array = self.model.transform(chunks, embeddings=flat_embeddings) @@ -111,9 +281,7 @@ def fit_transform( offsets: list[Offsets] = None, ): if (embeddings is None) or (offsets is None): - embeddings, offsets = self.model.encoder.encode_tokens( - raw_documents, batch_size=self.batch_size - ) + embeddings, offsets = self.encode_documents(raw_documents) flat_embeddings, lengths = flatten_repr(embeddings) chunks = get_document_chunks(raw_documents, offsets) out_array = self.model.fit_transform( From aaa0e17167b23580490087ae28bcaabfbd28bbcd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A1rton=20Kardos=C3=98?= Date: Thu, 2 Apr 2026 12:37:48 +0200 Subject: [PATCH 05/19] Added properties and printing to LateWrapper --- turftopic/late.py | 60 ++++++++++++++++++++++++++++++++++------------- 1 file changed, 44 insertions(+), 16 deletions(-) diff --git a/turftopic/late.py b/turftopic/late.py index dace679..61a75f4 100644 --- a/turftopic/late.py +++ b/turftopic/late.py @@ -7,9 +7,10 @@ from sentence_transformers import SentenceTransformer from sklearn.base import TransformerMixin from sklearn.preprocessing import normalize -from tokenizers import Tokenizer from tqdm import trange +from turftopic.base import ContextualModel + Offsets = list[tuple[int, int]] Lengths = list[int] @@ -39,13 +40,11 @@ def _encode_tokens( """ token_embeddings = [] offsets = [] - tokenizer = Tokenizer.from_pretrained(self.model_card_data.base_model) for start_index in trange( 0, len(texts), batch_size, - disable=not show_progress_bar, - desc="Encoding tokens...", + desc="Encoding batches...", ): batch = texts[start_index : start_index + batch_size] features = self.tokenize(batch) @@ -59,10 +58,18 @@ def _encode_tokens( ) end_token = start_token + n_tokens 
for i_doc in range(len(batch)): - _token_embeddings = output_features["token_embeddings"][ - i_doc, start_token[i_doc] : end_token[i_doc], : - ].numpy(force=True) - _offsets = tokenizer.encode(batch[i_doc]).offsets + _token_embeddings = ( + output_features["token_embeddings"][ + i_doc, start_token[i_doc] : end_token[i_doc], : + ] + .float() + .numpy(force=True) + ) + _n = _token_embeddings.shape[0] + # We extract the character offsets and prune it at the maximum context length + _offsets = self.tokenizer( + batch[i_doc], return_offsets_mapping=True, verbose=False + )["offset_mapping"][:_n] token_embeddings.append(_token_embeddings) offsets.append(_offsets) return token_embeddings, offsets @@ -145,11 +152,12 @@ def encode_windows( for emb, offs in zip(token_embeddings, token_offsets): _offsets = [] _embeddings = [] - for start_index in trange(0, len(emb), step_size): + for start_index in range(0, len(emb), step_size): end_index = start_index + window_size window_emb = np.mean(emb[start_index:end_index], axis=0) + off = offs[start_index:end_index] _embeddings.append(window_emb) - _offsets.append((offs[start_index][0], offs[end_index][1])) + _offsets.append((off[0][0], off[-1][1])) window_embeddings.append(normalize(np.stack(_embeddings))) window_offsets.append(_offsets) return window_embeddings, window_offsets @@ -197,7 +205,7 @@ def unflatten_repr( repr = [] start_index = 0 for length in lengths: - repr.append(flat_repr[start_index:length]) + repr.append(flat_repr[start_index : start_index + length]) start_index += length return repr @@ -217,11 +225,11 @@ def get_document_chunks( chunks = [] for doc, _offs in zip(raw_documents, offsets): for start_char, end_char in _offs: - chunks.append(raw_documents[start_char, end_char]) + chunks.append(doc[start_char:end_char]) return chunks -class LateModel(TransformerMixin): +class LateWrapper(ContextualModel, TransformerMixin): def __init__( self, model: TransformerMixin, @@ -236,7 +244,7 @@ def __init__( self.window_size = 
window_size self.step_size = step_size - def encode_documents( + def encode_late( self, raw_documents: list[str] ) -> tuple[np.ndarray, list[Offsets]]: if self.window_size is None: @@ -264,7 +272,7 @@ def transform( offsets: list[Offsets] = None, ): if (embeddings is None) or (offsets is None): - embeddings, offsets = self.encode_documents(raw_documents) + embeddings, offsets = self.encode_late(raw_documents) flat_embeddings, lengths = flatten_repr(embeddings) chunks = get_document_chunks(raw_documents, offsets) out_array = self.model.transform(chunks, embeddings=flat_embeddings) @@ -281,7 +289,7 @@ def fit_transform( offsets: list[Offsets] = None, ): if (embeddings is None) or (offsets is None): - embeddings, offsets = self.encode_documents(raw_documents) + embeddings, offsets = self.encode_late(raw_documents) flat_embeddings, lengths = flatten_repr(embeddings) chunks = get_document_chunks(raw_documents, offsets) out_array = self.model.fit_transform( @@ -291,3 +299,23 @@ def fit_transform( return unflatten_repr(out_array, lengths) else: return pool_flat(out_array, lengths) + + @property + def components_(self): + return self.model.components_ + + @property + def hierarchy(self): + return self.model.hierarchy + + @property + def topic_names(self): + return self.model.topic_names + + @property + def classes_(self): + return self.model.classes_ + + @property + def vectorizer(self): + return self.model.vectorizer From 2146f68f76faa3c026b1f14e7bc957cb7aef098b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A1rton=20Kardos=C3=98?= Date: Thu, 2 Apr 2026 12:38:42 +0200 Subject: [PATCH 06/19] Added C-Top2Vec --- turftopic/__init__.py | 8 ++- turftopic/models/cluster.py | 97 ++++++++++++++++++++++++++++++++++++- 2 files changed, 103 insertions(+), 2 deletions(-) diff --git a/turftopic/__init__.py b/turftopic/__init__.py index a763b26..7137c7d 100644 --- a/turftopic/__init__.py +++ b/turftopic/__init__.py @@ -2,7 +2,12 @@ from turftopic._datamapplot import build_datamapplot 
from turftopic.base import ContextualModel from turftopic.error import NotInstalled -from turftopic.models.cluster import BERTopic, ClusteringTopicModel, Top2Vec +from turftopic.models.cluster import ( + BERTopic, + ClusteringTopicModel, + CTop2Vec, + Top2Vec, +) from turftopic.models.cvp import ConceptVectorProjection from turftopic.models.decomp import S3, SemanticSignalSeparation from turftopic.models.fastopic import FASTopic @@ -29,6 +34,7 @@ "ContextualModel", "FASTopic", "Top2Vec", + "CTop2Vec", "BERTopic", "load_model", "build_datamapplot", diff --git a/turftopic/models/cluster.py b/turftopic/models/cluster.py index 4b54ccc..f28eb09 100644 --- a/turftopic/models/cluster.py +++ b/turftopic/models/cluster.py @@ -5,7 +5,7 @@ import webbrowser from datetime import datetime from pathlib import Path -from typing import Any, Iterable, Literal, Optional, Sequence, Union +from typing import Any, Callable, Iterable, Literal, Optional, Sequence, Union import numpy as np from rich.console import Console @@ -30,6 +30,7 @@ npmi, soft_ctf_idf, ) +from turftopic.late import LateSentenceTransformer, LateWrapper from turftopic.models._hierarchical_clusters import ( VALID_LINKAGE_METHODS, ClusterNode, @@ -43,6 +44,7 @@ ) from turftopic.types import VALID_DISTANCE_METRICS, DistanceMetric from turftopic.utils import safe_binarize +from turftopic.vectorizers import PhraseVectorizer from turftopic.vectorizers.default import default_vectorizer integer_message = """ @@ -865,3 +867,96 @@ def __init__( reduction_distance_metric=reduction_distance_metric, reduction_topic_representation=reduction_topic_representation, ) + + +class CTop2Vec(LateWrapper): + """Convenience function to construct a CTop2Vec model in Turftopic. + The model is essentially the same as ClusteringTopicModel in a Late Wrapper + with defaults that resemble CTop2Vec. This includes: + + 1. A late interaction embedding model, with windowed aggregation + 2. UMAP reduction + 3. HDBSCAN clustering + 4. 
Centroid term importance + 5. Phrase vectorizer + + ```bash + pip install turftopic[umap-learn] + ``` + + ```python + from turftopic import CTop2Vec + + corpus: list[str] = ["some text", "more text", ...] + + model = CTop2Vec().fit(corpus) + model.print_topics() + ``` + """ + + def __init__( + self, + encoder: Union[ + Encoder, str, MultimodalEncoder + ] = "sentence-transformers/all-MiniLM-L6-v2", + vectorizer: Optional[CountVectorizer] = None, + dimensionality_reduction: Optional[TransformerMixin] = None, + clustering: Optional[ClusterMixin] = None, + feature_importance: WordImportance = "centroid", + n_reduce_to: Optional[int] = None, + reduction_method: LinkageMethod = "smallest", + reduction_distance_metric: DistanceMetric = "cosine", + reduction_topic_representation: TopicRepresentation = "centroid", + window_size: Optional[int] = 50, + step_size: Optional[int] = 40, + pooling: Optional[Callable] = np.mean, + random_state: Optional[int] = None, + ): + if dimensionality_reduction is None: + try: + from umap import UMAP + except ModuleNotFoundError as e: + raise ModuleNotFoundError( + "UMAP is not installed in your environment, but Top2Vec requires it." 
+ ) from e + dimensionality_reduction = UMAP( + n_neighbors=15, + n_components=5, + min_dist=0.0, + metric="cosine", + random_state=random_state, + ) + if clustering is None: + clustering = HDBSCAN( + min_cluster_size=15, + metric="euclidean", + cluster_selection_method="eom", + ) + self.encoder = encoder + self.vectorizer = vectorizer + self.dimensionality_reduction = dimensionality_reduction + self.clustering = clustering + self.feature_importance = feature_importance + self.n_reduce_to = n_reduce_to + self.reduction_method = reduction_method + self.reduction_distance_metric = reduction_distance_metric + self.reduction_topic_representation = reduction_topic_representation + self.random_state = random_state + self.model = ClusteringTopicModel( + encoder=encoder, + vectorizer=vectorizer, + dimensionality_reduction=dimensionality_reduction, + clustering=clustering, + n_reduce_to=n_reduce_to, + random_state=random_state, + feature_importance=feature_importance, + reduction_method=reduction_method, + reduction_distance_metric=reduction_distance_metric, + reduction_topic_representation=reduction_topic_representation, + ) + super().__init__( + self.model, + window_size=self.window_size, + step_size=self.step_size, + pooling=self.pooling, + ) From badeeec255fa7da68f8b4229a078aecd9daba76e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A1rton=20Kardos=C3=98?= Date: Thu, 2 Apr 2026 13:26:38 +0200 Subject: [PATCH 07/19] Fixed pooling --- turftopic/late.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/turftopic/late.py b/turftopic/late.py index 61a75f4..80f4250 100644 --- a/turftopic/late.py +++ b/turftopic/late.py @@ -210,11 +210,13 @@ def unflatten_repr( return repr -def pool_flat(flat_repr: np.ndarray, lengths: Lengths, agg=np.mean): +def pool_flat(flat_repr: np.ndarray, lengths: Lengths, agg=np.nanmean): pooled = [] start_index = 0 for length in lengths: - pooled.append(agg(flat_repr[start_index:length], axis=0)) + pooled.append( + 
agg(flat_repr[start_index : start_index + length], axis=0) + ) start_index += length return np.stack(pooled) From ba45dc1da7bf422d2df17bac65dadd952bfed5e9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A1rton=20Kardos=C3=98?= Date: Thu, 2 Apr 2026 13:33:34 +0200 Subject: [PATCH 08/19] Made tests faster and added late interaction testing --- tests/test_integration.py | 46 ++++++++++++++++++++------------------- 1 file changed, 24 insertions(+), 22 deletions(-) diff --git a/tests/test_integration.py b/tests/test_integration.py index 181e591..599608b 100644 --- a/tests/test_integration.py +++ b/tests/test_integration.py @@ -6,7 +6,6 @@ import numpy as np import pandas as pd import pytest -from sentence_transformers import SentenceTransformer from sklearn.cluster import KMeans from sklearn.datasets import fetch_20newsgroups from sklearn.decomposition import PCA @@ -15,6 +14,7 @@ GMM, AutoEncodingTopicModel, ClusteringTopicModel, + CTop2Vec, FASTopic, KeyNMF, SemanticSignalSeparation, @@ -22,6 +22,9 @@ Topeax, load_model, ) +from turftopic.late import LateSentenceTransformer + +ENCODER = "sentence-transformers/static-retrieval-mrl-en-v1" def batched(iterable, n: int): @@ -56,22 +59,13 @@ def generate_dates( remove=("headers", "footers", "quotes"), ) texts = newsgroups.data -trf = SentenceTransformer("paraphrase-MiniLM-L3-v2") +trf = LateSentenceTransformer(ENCODER) embeddings = np.asarray(trf.encode(texts)) timestamps = generate_dates(n_dates=len(texts)) models = [ - GMM(3, encoder=trf), SemanticSignalSeparation(3, encoder=trf), - KeyNMF(3, encoder=trf), KeyNMF(3, encoder=trf, cross_lingual=True), - ClusteringTopicModel( - dimensionality_reduction=PCA(10), - clustering=KMeans(3), - feature_importance="c-tf-idf", - encoder=trf, - reduction_method="average", - ), ClusteringTopicModel( dimensionality_reduction=PCA(10), clustering=KMeans(3), @@ -79,21 +73,14 @@ def generate_dates( encoder=trf, reduction_method="smallest", ), - AutoEncodingTopicModel(3, combined=True), - 
FASTopic(3, batch_size=None), - SensTopic(), - Topeax(), + AutoEncodingTopicModel(3, combined=False, encoder=trf), + FASTopic(3, batch_size=None, encoder=trf), + SensTopic(encoder=trf), + Topeax(encoder=trf), ] dynamic_models = [ GMM(3, encoder=trf), - ClusteringTopicModel( - dimensionality_reduction=PCA(10), - clustering=KMeans(3), - feature_importance="centroid", - encoder=trf, - reduction_method="smallest", - ), ClusteringTopicModel( dimensionality_reduction=PCA(10), clustering=KMeans(3), @@ -106,6 +93,8 @@ def generate_dates( online_models = [KeyNMF(3, encoder=trf)] +late_models = [CTop2Vec(encoder=trf)] + @pytest.mark.parametrize("model", dynamic_models) def test_fit_dynamic(model): @@ -122,6 +111,19 @@ def test_fit_dynamic(model): df = pd.read_csv(out_path) +@pytest.mark.parametrize("model", late_models) +def test_late(model): + doc_topic_matrix = model.fit_transform( + texts, + ) + table = model.export_topics(format="csv") + with tempfile.TemporaryDirectory() as tmpdirname: + out_path = Path(tmpdirname).joinpath("topics.csv") + with out_path.open("w") as out_file: + out_file.write(table) + df = pd.read_csv(out_path) + + @pytest.mark.parametrize("model", online_models) def test_fit_online(model): for epoch in range(5): From a98d5e1a99de2cb750cf16342d65ec5b79553bf4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A1rton=20Kardos=C3=98?= Date: Thu, 2 Apr 2026 13:34:54 +0200 Subject: [PATCH 09/19] Made warning smarter --- turftopic/late.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/turftopic/late.py b/turftopic/late.py index 80f4250..ec8f2c6 100644 --- a/turftopic/late.py +++ b/turftopic/late.py @@ -16,12 +16,15 @@ class LateSentenceTransformer(SentenceTransformer): + has_used_token_level = False + def encode( self, sentences: Union[str, list[str], np.ndarray], *args, **kwargs ): - warnings.warn( - "Encoder is contextual but topic model is not using contextual embeddings. Perhaps you wanted to use another topic model." 
- ) + if not self.has_used_token_level: + warnings.warn( + "Encoder is contextual but topic model is not using contextual embeddings. Perhaps you wanted to use another topic model." + ) return super().encode(sentences, *args, **kwargs) def _encode_tokens( @@ -38,6 +41,7 @@ def _encode_tokens( offsets: list[list[tuple[int, int]]] Start and end character of each token in each document. """ + self.has_used_token_level = True token_embeddings = [] offsets = [] for start_index in trange( From 55dee2db1a6d12f503abeeffa6d227965eb6201d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A1rton=20Kardos=C3=98?= Date: Thu, 2 Apr 2026 13:53:17 +0200 Subject: [PATCH 10/19] Fixed import --- turftopic/models/cluster.py | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/turftopic/models/cluster.py b/turftopic/models/cluster.py index f28eb09..feb5998 100644 --- a/turftopic/models/cluster.py +++ b/turftopic/models/cluster.py @@ -44,8 +44,8 @@ ) from turftopic.types import VALID_DISTANCE_METRICS, DistanceMetric from turftopic.utils import safe_binarize -from turftopic.vectorizers import PhraseVectorizer from turftopic.vectorizers.default import default_vectorizer +from turftopic.vectorizers.phrases import PhraseVectorizer integer_message = """ You tried to pass an integer to ClusteringTopicModel as its first argument. 
@@ -719,12 +719,12 @@ def transform( X = self.vectorizer.transform(raw_documents) X = normalize(X, axis=1, norm="l1", copy=False) X = X * idf_diag - doc_topic_matrix = np.exp(cosine_similarity(X, self.components_)) + doc_topic_matrix = cosine_similarity(X, self.components_) elif self.feature_importance == "centroid": if embeddings is None: embeddings = self.encode_documents(raw_documents) - doc_topic_matrix = np.exp( - cosine_similarity(embeddings, self._calculate_topic_vectors()) + doc_topic_matrix = cosine_similarity( + embeddings, self._calculate_topic_vectors() ) else: doc_topic_matrix = safe_binarize( @@ -909,7 +909,7 @@ def __init__( reduction_topic_representation: TopicRepresentation = "centroid", window_size: Optional[int] = 50, step_size: Optional[int] = 40, - pooling: Optional[Callable] = np.mean, + pooling: Optional[Callable] = np.nanmean, random_state: Optional[int] = None, ): if dimensionality_reduction is None: @@ -933,7 +933,10 @@ def __init__( cluster_selection_method="eom", ) self.encoder = encoder - self.vectorizer = vectorizer + if isinstance(encoder, str): + encoder = LateSentenceTransformer(encoder) + if vectorizer is None: + vectorizer = PhraseVectorizer() self.dimensionality_reduction = dimensionality_reduction self.clustering = clustering self.feature_importance = feature_importance @@ -942,7 +945,7 @@ def __init__( self.reduction_distance_metric = reduction_distance_metric self.reduction_topic_representation = reduction_topic_representation self.random_state = random_state - self.model = ClusteringTopicModel( + model = ClusteringTopicModel( encoder=encoder, vectorizer=vectorizer, dimensionality_reduction=dimensionality_reduction, @@ -955,8 +958,8 @@ def __init__( reduction_topic_representation=reduction_topic_representation, ) super().__init__( - self.model, - window_size=self.window_size, - step_size=self.step_size, - pooling=self.pooling, + model, + window_size=window_size, + step_size=step_size, + pooling=pooling, ) From 
b2221dcd823b65e94427c1e5aa7d6990150f422b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A1rton=20Kardos=C3=98?= Date: Thu, 2 Apr 2026 14:19:42 +0200 Subject: [PATCH 11/19] Added docstrings to late.py --- turftopic/late.py | 68 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 68 insertions(+) diff --git a/turftopic/late.py b/turftopic/late.py index ec8f2c6..d52a5d0 100644 --- a/turftopic/late.py +++ b/turftopic/late.py @@ -16,6 +16,17 @@ class LateSentenceTransformer(SentenceTransformer): + """SentenceTransformer model that can produce token and window-level embeddings. + Its output can be used by topic models that can use multi-vector document representations. + + !!! warning + This is not checked yet in the library, + but we recommend that you use SentenceTransformers that are + a) **Mean pooled** + b) **L2 Normalized** + This will guarrantee that the token/window embeddings are in the same embedding space as the documents. + """ + has_used_token_level = False def encode( @@ -215,6 +226,20 @@ def unflatten_repr( def pool_flat(flat_repr: np.ndarray, lengths: Lengths, agg=np.nanmean): + """Pools vectors within documents using the agg function. + + Parameters + ---------- + flat_repr: ndarray of shape (n_total_tokens, n_dims) + Flattened document representations. + lengths: Lengths + Number of tokens in each document. + + Returns + ------- + ndarray of shape (n_documents, n_dims) + Pooled representation for each document. + """ pooled = [] start_index = 0 for length in lengths: @@ -228,6 +253,20 @@ def pool_flat(flat_repr: np.ndarray, lengths: Lengths, agg=np.nanmean): def get_document_chunks( raw_documents: list[str], offsets: list[Offsets] ) -> list[str]: + """Extracts text chunks from documents based on token/window offsets. + + Parameters + ---------- + raw_documents: list[str] + Text documents. + offsets: list[Offsets] + Offsets returned when encoding. + + Returns + ------- + list[str] + Text chunks of tokens/windows in the documents. 
+ """ chunks = [] for doc, _offs in zip(raw_documents, offsets): for start_char, end_char in _offs: @@ -236,6 +275,35 @@ def get_document_chunks( class LateWrapper(ContextualModel, TransformerMixin): + """Wraps existing Turftopic model so that they can accept and create + multi-vector document representations. + + !!! warning + The model HAS TO HAVE a late interaction encoder model + (e.g. `LateSentenceTransformer`) + + Parameters + ---------- + model + Turftopic model to turn into late-interaction model. + batch_size: int, default 32 + Batch size of the transformer. + window_size: int, default None + Size of the sliding window to average tokens over. + If None, documents will be represented at a token level. + step_size: int, default None + Step size of the window. + If (step_size == None) or (step_size == window_size), then windows are separate. + If step_size < window_size, windows will overlap. + If step_size > window_size, there will be gaps between the windows. + In this case, we throw a warning, as this is probably unintended behaviour. + pooling: Callable, default None + Indicates whether and how to pool document-topic matrices. + If None, multi-vector topic proportions are returned in a ragged array. + If Callable, multiple vectors are averaged with the callable in each document. + You could for example take the mean by specifying `pooling=np.nanmean`. 
+ """ + def __init__( self, model: TransformerMixin, From 45bcc78c2c346d383717faa139f8b478ed8676e8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A1rton=20Kardos=C3=98?= Date: Thu, 2 Apr 2026 14:19:57 +0200 Subject: [PATCH 12/19] Started working on docs --- docs/late_interaction.md | 31 +++++++++++++++++++++++++++++++ docs/stylesheets/extra.css | 11 +++++++++++ mkdocs.yml | 14 +++++++++----- 3 files changed, 51 insertions(+), 5 deletions(-) create mode 100644 docs/late_interaction.md create mode 100644 docs/stylesheets/extra.css diff --git a/docs/late_interaction.md b/docs/late_interaction.md new file mode 100644 index 0000000..489cb73 --- /dev/null +++ b/docs/late_interaction.md @@ -0,0 +1,31 @@ + +| Topic ID | Highest Ranking | +| - | - | +| -1 | nasa, nasa gov, space exploration, space science, spaceflight, astronomy space, spacecraft, national space, space program, sci space | +| 225 | astronomical, astronomers, astronomy, astronomy space, sci astro, interplanetary, galactic, celestial bbs, gamma ray, astro | +| 240 | satellites, nasa, telescope, astronomy space, satellite, observatory, spacecraft, nasa gov, astronomical, astronomy | +| 242 | shuttle program, shuttle mission, space shuttle, shuttle launch, shuttle elements, space program, spaceflight, shuttle, shuttle elements documentation, space exploration | +| 243 | nasa gov, nasa, space exploration, national space, commercial space, spaceflight, space station, space science, sci space, space news | +| 244 | astronomy space, nasa, space science, sci astro, sci space, nasa gov, spacecraft, astronomical, space news, space program | + + +## API Reference + +### Encoder + +::: turftopic.late.LateSentenceTransformer + +### Wrapper + +::: turftopic.late.LateWrapper + +### Utility functions + +::: turftopic.late.flatten_repr + +::: turftopic.late.unflatten_repr + +::: turftopic.late.pool_flat + +::: turftopic.late.get_document_chunks + diff --git a/docs/stylesheets/extra.css b/docs/stylesheets/extra.css new file 
mode 100644 index 0000000..c27d485 --- /dev/null +++ b/docs/stylesheets/extra.css @@ -0,0 +1,11 @@ +.md-tabs__item { + height: 70px; + align-items: center; + display: flex; +} +.md-tabs__link { + text-align: center; +} +.subtext { + font-size: small; +} diff --git a/mkdocs.yml b/mkdocs.yml index 87f298a..61b77df 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -11,8 +11,9 @@ nav: - Online Topic Modeling: online.md - Hierarchical Topic Modeling: hierarchical.md - Cross-Lingual Topic Modeling: cross_lingual.md - - Multimodal Modeling (BETA): multimodal.md - - Concept Induction (BETA): concept_induction.md + - Late Interaction Models (Experimental): late_interaction.md + - Multimodal Modeling (Experimental): multimodal.md + - Concept Induction: concept_induction.md - Modifying and Finetuning Models: finetuning.md - Saving and Loading: persistence.md - Using TopicData: topic_data.md @@ -23,7 +24,7 @@ nav: - Discourse Analysis on Morality and Religion: tutorials/religious.md - Discovering a Data-driven Political Compass: tutorials/ideologies.md - Customer Dissatisfaction Analysis: tutorials/reviews.md - - Topic Models (Overview and Performance): + - Topic Models
(Overview and Performance): - Model Overview: model_overview.md - Model Leaderboard: benchmark.md - Semantic Signal Separation (S³): s3.md @@ -34,10 +35,10 @@ nav: - Clustering Models (BERTopic & Top2Vec): clustering.md - Autoencoding Models (ZeroShotTM & CombinedTM): ctm.md - FASTopic: FASTopic.md - - Other Models (e.g. Sentiment Analysis): + - Other Models
(e.g. Sentiment Analysis): - Concept Vector Projection (Continuous Sentiment Scoring): cvp.md - Embedding Models: encoders.md - - Vectorizers (Term extraction): vectorizers.md + - Vectorizers
(Term extraction): vectorizers.md - Topic Analysis and Naming with LLMs: analyzers.md theme: name: material @@ -79,6 +80,9 @@ theme: - toc.follow - content.code.copy +extra_css: + - stylesheets/extra.css + plugins: - search - mkdocstrings: From 64bd30af12f83d61ffcd2118844b4984c6d3440f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A1rton=20Kardos=C3=98?= Date: Thu, 2 Apr 2026 14:22:35 +0200 Subject: [PATCH 13/19] Fixed tests --- tests/test_integration.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test_integration.py b/tests/test_integration.py index 599608b..8d1309b 100644 --- a/tests/test_integration.py +++ b/tests/test_integration.py @@ -24,7 +24,7 @@ ) from turftopic.late import LateSentenceTransformer -ENCODER = "sentence-transformers/static-retrieval-mrl-en-v1" +ENCODER = "sentence-transformers/paraphrase-MiniLM-L3-v2" def batched(iterable, n: int): @@ -58,7 +58,7 @@ def generate_dates( ], remove=("headers", "footers", "quotes"), ) -texts = newsgroups.data +texts = newsgroups.data[:400] trf = LateSentenceTransformer(ENCODER) embeddings = np.asarray(trf.encode(texts)) timestamps = generate_dates(n_dates=len(texts)) From 5f972d5e96a73d61b1f223cd71b96f0606ba0852 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A1rton=20Kardos=C3=98?= Date: Sat, 4 Apr 2026 13:06:41 +0200 Subject: [PATCH 14/19] Changed titles in mkdocs --- mkdocs.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mkdocs.yml b/mkdocs.yml index 61b77df..4435027 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -37,7 +37,7 @@ nav: - FASTopic: FASTopic.md - Other Models
(e.g. Sentiment Analysis): - Concept Vector Projection (Continuous Sentiment Scoring): cvp.md - - Embedding Models: encoders.md + - Embeddings and Encoders
(Transformer Models): encoders.md - Vectorizers
(Term extraction): vectorizers.md - Topic Analysis and Naming with LLMs: analyzers.md theme: From 2eac0617fef2ae9361624a81df5b6c2b812b2988 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A1rton=20Kardos?= Date: Sat, 4 Apr 2026 14:37:28 +0200 Subject: [PATCH 15/19] changed encoder parameter in test --- tests/test_integration.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test_integration.py b/tests/test_integration.py index 8d1309b..8888f21 100644 --- a/tests/test_integration.py +++ b/tests/test_integration.py @@ -220,14 +220,14 @@ def test_topic_joining(): def test_refitting(): - model = SemanticSignalSeparation(10) + model = SemanticSignalSeparation(10, encoder=trf) model.fit(texts, embeddings=embeddings) model.refit(texts, embeddings=embeddings, n_components=20) assert model.components_.shape[0] == 20 def test_serialization(): - model = SemanticSignalSeparation(10) + model = SemanticSignalSeparation(10, encoder=trf) model.fit(texts, embeddings=embeddings) with tempfile.TemporaryDirectory() as tmp_dir: model.to_disk(tmp_dir) From fa446e954a6cbd81b00c60c94feed0299c86d861 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A1rton=20Kardos?= Date: Sat, 4 Apr 2026 16:41:14 +0200 Subject: [PATCH 16/19] late interaction models now also return offsets --- turftopic/late.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/turftopic/late.py b/turftopic/late.py index d52a5d0..cb140d6 100644 --- a/turftopic/late.py +++ b/turftopic/late.py @@ -351,7 +351,7 @@ def transform( chunks = get_document_chunks(raw_documents, offsets) out_array = self.model.transform(chunks, embeddings=flat_embeddings) if self.pooling is None: - return unflatten_repr(out_array, lengths) + return unflatten_repr(out_array, lengths), offsets else: return pool_flat(out_array, lengths) @@ -370,7 +370,7 @@ def fit_transform( chunks, embeddings=flat_embeddings ) if self.pooling is None: - return unflatten_repr(out_array, lengths) + return unflatten_repr(out_array, 
lengths), offsets else: return pool_flat(out_array, lengths) From b5f85d778dd2fcea5bc71e7a5017097db65ffa43 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A1rton=20Kardos?= Date: Sat, 4 Apr 2026 16:41:33 +0200 Subject: [PATCH 17/19] Updated documentation with late-interaction models --- docs/c_top2vec.md | 43 + docs/cvp.md | 39 + docs/images/sentiment_arcs.html | 3888 +++++++++++++++++++++++++++++++ docs/late_interaction.md | 205 +- mkdocs.yml | 3 +- 5 files changed, 4171 insertions(+), 7 deletions(-) create mode 100644 docs/c_top2vec.md create mode 100644 docs/images/sentiment_arcs.html diff --git a/docs/c_top2vec.md b/docs/c_top2vec.md new file mode 100644 index 0000000..49e571b --- /dev/null +++ b/docs/c_top2vec.md @@ -0,0 +1,43 @@ +# C-Top2Vec + +Contextual Top2Vec [(Angelov and Inkpen, 2024)](https://aclanthology.org/2024.findings-emnlp.790/) is a [late-interaction topic model](late_interaction.md), that uses windowed representations. + +!!! info + This part of the documentation is still in the works. + More information, visualizations and benchmark results are on their way. + +The model is essentially the same as wrapping a regular Top2vec model in `LateWrapper`, but we provide a convenience class in Turftopic, so that it's easy for you to initialize this model. +It comes pre-loaded with the following features: + + - Same hyperparameters as in Angelov and Inkpen (2024) + - Phrase-vectorizer that finds regular phrases based on PMI + - `LateSentenceTransformer` by default, you can specify any model. + +Our implementation is much more flexible than the original top2vec package, and you might be able to use much more powerful or novel embedding models. + +!!! tip + For more info about multi-vector/late-interaction models, read our [User Guide](late-interaction.md). 
+ +## Example Usage + +You should install Turftopic with UMAP in order to be able to use C-Top2Vec: + +```bash +pip install turftopic[umap-learn] +``` + +Then use the topic model as you would use any other model in Turftopic: + +```python +from turftopic import CTop2Vec + +model = CTop2Vec(n_reduce_to=5) +doc_topic_matrix = model.fit_transform(corpus) + +model.print_topics() +``` + +## API Reference + +::: turftopic.models.cluster.CTop2Vec + diff --git a/docs/cvp.md b/docs/cvp.md index 289b3e6..40d53f9 100644 --- a/docs/cvp.md +++ b/docs/cvp.md @@ -71,6 +71,45 @@ print(concept_df) 1 0.269454 0.009495 ``` +## Sentiment Arcs + +Sometimes you might want to get a more granular understanding of how concepts evolve in a document. +`ConceptVectorProjection` can be used with [late-interaction/multi-vector functionality](late_interaction.md) in Turftopic, and thus you can easily generate sentiment arcs within documents that either span individual tokens or contextualized rolling windows. + +!!! tip + To get a more in-depth understanding of late-interaction/multi-vector models, read our [User Guide](late_interaction.md). 
+ +```python +from turftopic.late import LateWrapper, LateSentenceTransformer +# For plotting: +import plotly.express as px +import plotly.graph_objects as go + +seeds = [("cuteness", cuteness_seeds), ("bullish", bullish_seeds)] + +cvp = LateWrapper( + ConceptVectorProjection(seeds=seeds, encoder=LateSentenceTransformer("all-MiniLM-L6-v2")) +) +test_documents = ["What an awesome investment", "Tiny beautiful kitty-cat"] +doc_concept_matrix, offsets = cvp.transform(test_documents) + +# We will plot document 0's' sentiment arcs +fig = go.Figure() +# We extract the tokens +tokens = [test_documents[0][start:end] for start, end in offsets[0]] +# First token is [CLS] +tokens[0] = "[CLS]" +fig = fig.add_scatter(x=tokens, y=doc_concept_matrix[0][:, 0], name="Cuteness") +fig = fig.add_scatter(x=tokens, y=doc_concept_matrix[0][:, 1], name="Bullish") +fig.show() + +``` + +
+ +
Figure 2: Concepts evolving over tokens in the first document.
+
+ ## API Reference diff --git a/docs/images/sentiment_arcs.html b/docs/images/sentiment_arcs.html new file mode 100644 index 0000000..b669359 --- /dev/null +++ b/docs/images/sentiment_arcs.html @@ -0,0 +1,3888 @@ + + + +
+
+ + \ No newline at end of file diff --git a/docs/late_interaction.md b/docs/late_interaction.md index 489cb73..f054f18 100644 --- a/docs/late_interaction.md +++ b/docs/late_interaction.md @@ -1,13 +1,206 @@ +# Late Interaction Topic Models + +Late interaction, or multi-vector models use token representations from a Sentence Transformer before pooling them all together into a single document embedding. +This can be particularly useful for clustering models, as they, by default assign one topic to a single document, but when accessing token representations, can assign topics on a per-token basis. + +!!! info + There are currently no native late-interaction models in Turftopic, meaning models that explicitly model token representations in the context of a document. + We are currently working on implementing such models, but for the time being, wrappers are included, that can force regular models to use embeddings of higher granularity. + **Visualization utilities** are also on the way. + +## Encoding Tokens, and Ragged Array Manipulation + +Turftopic provides a convenience class for encoding documents on a token-level using Sentence Transformers instead of pooling them together into document embeddings. +In order to initialize an encoder, load `LateSentenceTransformer`, and specify which model you would like to use: + +!!! tip + While you could use any encoder model with `LateSentenceTransformer`, we recommend that you stick to ones that have mean pooling, and normalize embeddings. + This is because in these models, you can be sure that the pooled document embeddings and the token embeddings will be in the same semantic space. 
+ +### Token Embeddings + +```python +from turftopic.late import LateSentenceTransformer + +documents = ["This is a text", "This is another but slightly longer text"] + +encoder = LateSentenceTransformer("all-MiniLM-L6-v2") +token_embeddings, offsets = encoder.encode_tokens(documents) +print(token_embeddings) +print(offsets) +``` + +```python +[ + array([[-0.01135089, 0.04170538, 0.00379963, ..., 0.01383126, + -0.00274855, -0.05360783], + ... + [ 0.05069249, 0.03840942, -0.03545087, ..., 0.03142243, + 0.01929936, -0.09216172]], + shape=(6, 384), dtype=float32), + array([[-0.00047079, 0.03402771, 0.00037086, ..., 0.0228903 , + -0.01734272, -0.04073172], + ..., + [-0.02586325, 0.03737643, 0.02260585, ..., 0.05613737, + -0.01032581, -0.03799873]], shape=(9, 384), dtype=float32) +] +[[(0, 0), (0, 4), (5, 7), (8, 9), (10, 14), (0, 0)], [(0, 0), (0, 4), (5, 7), (8, 15), (16, 19), (20, 28), (29, 35), (36, 40), (0, 0)]] +``` + +As you can see, `encode_tokens` returns two arrays, one of them being the token embeddings. This is a ragged array, where longer document can have more embeddings. +`offsets` contains a list of tuples for each document, where the first element of the tuple is the start character of the given token, and the second element is the end character. + +### Rolling Window Embeddings + +You can also pool these embeddings over a rolling window of tokens. 
+This way, you still represent your document with multiple vectors, but don't need to model each token individually:
+
+```python
+window_embeddings, window_offsets = encoder.encode_windows(documents, window_size=5, step_size=4)
+for doc_emb, doc_off in zip(window_embeddings, window_offsets):
+    print(doc_emb.shape, doc_off)
+```
+
+```python
+(2, 384) [(0, 14), (10, 0)]
+(3, 384) [(0, 19), (16, 0), (0, 0)]
+```
+
+### Ragged array manipulation
+
+These ragged data structures are hard to deal with, especially when using array operations, so we include convenience functions for manipulating them:
+**`flatten_repr`** flattens the ragged array into a single large array, and returns the length of each sub-array:
+
+```python
+from turftopic.late import flatten_repr, unflatten_repr
+
+flat_token_embeddings, lengths = flatten_repr(token_embeddings)
+print(flat_token_embeddings.shape)
+# (15, 384)
+```
+
+**`unflatten_repr`** will turn a flattened representation array into a ragged array:
+```python
+token_embeddings = unflatten_repr(flat_token_embeddings, lengths)
+```
+
+**`pool_flat`** will pool document representations in a flattened array using a given aggregation function:
+```python
+import numpy as np
+from turftopic.late import pool_flat
+
+pooled = pool_flat(flat_token_embeddings, lengths, agg=np.nanmean)
+print(pooled.shape)
+# (2, 384)
+```
+
+## Turning Regular Models into Multi-Vector Models
+
+The `LateWrapper` class can turn your regular topic models into ones that can utilize windowed or token-level embeddings.
+Here's how `LateWrapper` works:
+
+ 1. It encodes documents at a token or window-level based on its parameters.
+ 2. It flattens the embedding array, and feeds this into the topic model, along with the token/window text.
+ 3. It unflattens the output of the topic model (`doc_topic_matrix`) into a ragged array, where you get topic importance for each token.
+ 4. *\[OPTIONAL\]* It pools token-level topic content on the document level, so that you get one document-topic vector for each document instead of each token.
+
+Let's see how this works in practice, and create a [Topeax](Topeax.md) model that uses windowed embeddings instead of document-level embeddings:
+
+```python
+from sklearn.datasets import fetch_20newsgroups
+from turftopic import Topeax
+from turftopic.late import LateWrapper, LateSentenceTransformer
+
+corpus = fetch_20newsgroups(subset="all", categories=["alt.atheism"]).data
+
+model = LateWrapper(
+    Topeax(encoder=LateSentenceTransformer("all-MiniLM-L6-v2")),
+    window_size=50, # If we don't specify window size, it will use token-level embeddings
+    step_size=40, # Since the step size is smaller than the window, we will get overlapping windows
+)
+doc_topic_matrix, offsets = model.fit_transform(corpus)
+model.print_topics()
+```
+
+| Topic ID | Highest Ranking |
+| - | - |
+| 0 | morality, moral, morals, immoral, objective, behavior, instinctive, species, inherent, animals |
+| 1 | matthew, luke, bible, text, passages, mormon, texts, translations, copy, john |
+| 2 | atheism, agnostics, atheist, beliefs, belief, faith, contradictory, believers, contradictions, theists |
+| 3 | punishment, cruel, abortion, penalty, death, constitution, homosexuality, painless, capital, punish |
+| 4 | war, arms, invaded, gulf, hussein, civilians, military, kuwait, peace, sell |
+| 5 | islam, islamic, muslim, qur, muslims, imams, rushdie, quran, koran, khomeini |
+
+The document-topic matrix we created is now a ragged array and contains document-topic proportions for each window in a document.
+Let's see what this means in practice for the first document in our corpus: +```python +import pandas as pd + +# We select document 0, then collect all information into a dataframe: +window_topic_matrix = doc_topic_matrix[0] +window_offs = offsets[0] +document = corpus[0] +# We extract the text for each window based on the offsets +window_text = [document[window_start: window_end] for window_start, window_end in window_offs] +df = pd.DataFrame(window_topic_matrix, index=window_text, columns=model.topic_names) +print(df) +``` + +```python + 0_morality_moral_morals_immoral 1_matthew_luke_bible_text ... 4_war_arms_invaded_gulf 5_islam_islamic_muslim_qur +From: acooper@mac.cc.macalstr.edu (Turin Turamb... 0.334267 1.287207e-13 ... 2.626869e-26 1.459101e-04 +alester College\nLines: 55\n\nIn article I guess I'm delving in... 0.847002 5.002921e-22 ... 4.852574e-41 3.141366e-07 +this you just have a spiral. What\nwould then ... 0.848413 5.819050e-22 ... 8.139559e-41 3.286224e-07 +, even though this would hardly seem moral. Fo... 0.863685 1.272204e-21 ... 2.823941e-41 2.815930e-07 +whatever helps this goal is\n"moral", whatever ... 0.864913 1.584558e-21 ... 5.780971e-41 3.003952e-07 +a "hyper-morality" to apply to just the methods... 0.865558 1.919885e-21 ... 1.251694e-40 3.231265e-07 +not doing something because it is\n> a personal... 0.868360 2.951441e-21 ... 3.085662e-40 3.494368e-07 +we only consider something moral or immoral if ... 0.872827 5.444738e-21 ... 4.708349e-40 3.580695e-07 +here we have a way to discriminate\nmorals. I ... 0.876951 1.021014e-20 ... 3.486096e-40 3.411401e-07 +enough and\nlistened to the arguments, I could ... 0.878680 2.302363e-20 ... 5.866410e-40 3.565728e-07 +. Or, as you brought out,\n> if whatever is ri... 0.878953 3.004052e-20 ... 5.977738e-40 3.566668e-07 +> ******************************* 0.647793 5.664651e-17 ... 
1.805073e-19 4.612731e-04
+```
+
+## C-Top2Vec
+
+Contextual Top2Vec [(Angelov and Inkpen, 2024)](https://aclanthology.org/2024.findings-emnlp.790/) is a late-interaction topic model that uses windowed representations.
+The model is essentially the same as wrapping a regular Top2Vec model in `LateWrapper`, but we provide a convenience class in Turftopic, so that it's easy for you to initialize this model.
+It comes pre-loaded with the following features:
+
+ - Same hyperparameters as in Angelov and Inkpen (2024)
+ - Phrase-vectorizer that finds regular phrases based on PMI
+ - `LateSentenceTransformer` by default; you can specify any model.
+
+Our implementation is much more flexible than the original top2vec package, and you might be able to use much more powerful or novel embedding models.
+
+```python
+from turftopic import CTop2Vec
+
+model = CTop2Vec(n_reduce_to=5)
+doc_topic_matrix = model.fit_transform(corpus)
+
+model.print_topics()
+```
+
 | Topic ID | Highest Ranking |
 | - | - |
-| -1 | nasa, nasa gov, space exploration, space science, spaceflight, astronomy space, spacecraft, national space, space program, sci space |
-| 225 | astronomical, astronomers, astronomy, astronomy space, sci astro, interplanetary, galactic, celestial bbs, gamma ray, astro |
-| 240 | satellites, nasa, telescope, astronomy space, satellite, observatory, spacecraft, nasa gov, astronomical, astronomy |
-| 242 | shuttle program, shuttle mission, space shuttle, shuttle launch, shuttle elements, space program, spaceflight, shuttle, shuttle elements documentation, space exploration |
-| 243 | nasa gov, nasa, space exploration, national space, commercial space, spaceflight, space station, space science, sci space, space news |
-| 244 | astronomy space, nasa, space science, sci astro, sci space, nasa gov, spacecraft, astronomical, space news, space program |
+| -1 | caused atheism organization, genocide caused atheism, atheism organization, atheism, subject political atheists, alt atheism, caused atheism, political atheists organization, subject amusing atheists, amusing atheists |
+| 166 | atheists organization, political atheists organization, christian morality organization, caused atheism organization, morality organization, atheism organization, atheists organization california, subject amusing atheists, cwru edu article, alt atheism |
+| 172 | biblical, read bible, caused atheism, agnostics, caused atheism organization, atheists agnostics, christianity, alt atheism, atheism, christian morality organization |
+| 173 | objective morality, morality, subject christian morality, christian morality, natural morality, say christian morality, morality organization, christian morality organization, behavior moral, moral |
+| 175 | atheism, atheism organization, caused atheism organization, atheists agnostics, caused atheism, subject political atheists, alt atheism, genocide caused atheism, subject amusing atheists, amusing atheists |
+| 176 | rushdie islamic law, subject rushdie islamic, islamic genocide, islamic law, genocide caused atheism, subject islamic, islamic law organization, islamic genocide organization, rushdie islamic, islamic authority |
+
+You might also observe that the output of this model is a regular document-topic matrix, and isn't ragged.
+```python
+print(doc_topic_matrix.shape)
+# (1024, 6)
+```
+This is because this way the model has the same API as other Turftopic models, and works the same way as the top2vec package, making migration easier.
## API Reference diff --git a/mkdocs.yml b/mkdocs.yml index 4435027..c8ac07b 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -11,7 +11,7 @@ nav: - Online Topic Modeling: online.md - Hierarchical Topic Modeling: hierarchical.md - Cross-Lingual Topic Modeling: cross_lingual.md - - Late Interaction Models (Experimental): late_interaction.md + - Late Interaction (Multi-vector) Topic Models: late_interaction.md - Multimodal Modeling (Experimental): multimodal.md - Concept Induction: concept_induction.md - Modifying and Finetuning Models: finetuning.md @@ -33,6 +33,7 @@ nav: - Topeax: Topeax.md - GMM: GMM.md - Clustering Models (BERTopic & Top2Vec): clustering.md + - C-Top2Vec: c_top2vec.md - Autoencoding Models (ZeroShotTM & CombinedTM): ctm.md - FASTopic: FASTopic.md - Other Models
(e.g. Sentiment Analysis): From b52dff890ddd3ff351c7136080ff36f748390f2a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A1rton=20Kardos?= Date: Sat, 4 Apr 2026 18:57:25 +0200 Subject: [PATCH 18/19] Version bump --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index e3a9ff1..3a0d1fc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -9,7 +9,7 @@ profile = "black" [project] name = "turftopic" -version = "0.23.3" +version = "0.25.0" description = "Topic modeling with contextual representations from sentence transformers." authors = [ { name = "Márton Kardos ", email = "martonkardos@cas.au.dk" } From 2347d503704d5ee125ce59bfa8894b51f496c612 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A1rton=20Kardos?= Date: Sat, 4 Apr 2026 19:01:30 +0200 Subject: [PATCH 19/19] Added citations to C-Top2Vec --- docs/c_top2vec.md | 41 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/docs/c_top2vec.md b/docs/c_top2vec.md index 49e571b..f8f6ab3 100644 --- a/docs/c_top2vec.md +++ b/docs/c_top2vec.md @@ -37,6 +37,47 @@ doc_topic_matrix = model.fit_transform(corpus) model.print_topics() ``` +## Citation + +Please cite Angelov and Inkpen (2024) and Turftopic when using C-Top2Vec in publications: + +```bibtex +@article{ + Kardos2025, + title = {Turftopic: Topic Modelling with Contextual Representations from Sentence Transformers}, + doi = {10.21105/joss.08183}, + url = {https://doi.org/10.21105/joss.08183}, + year = {2025}, + publisher = {The Open Journal}, + volume = {10}, + number = {111}, + pages = {8183}, + author = {Kardos, Márton and Enevoldsen, Kenneth C. 
and Kostkan, Jan and Kristensen-McLachlan, Ross Deans and Rocca, Roberta}, + journal = {Journal of Open Source Software} +} + +@inproceedings{angelov-inkpen-2024-topic, + title = "Topic Modeling: Contextual Token Embeddings Are All You Need", + author = "Angelov, Dimo and + Inkpen, Diana", + editor = "Al-Onaizan, Yaser and + Bansal, Mohit and + Chen, Yun-Nung", + booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2024", + month = nov, + year = "2024", + address = "Miami, Florida, USA", + publisher = "Association for Computational Linguistics", + url = "https://aclanthology.org/2024.findings-emnlp.790/", + doi = "10.18653/v1/2024.findings-emnlp.790", + pages = "13528--13539", + abstract = "The goal of topic modeling is to find meaningful topics that capture the information present in a collection of documents. The main challenges of topic modeling are finding the optimal number of topics, labeling the topics, segmenting documents by topic, and evaluating topic model performance. Current neural approaches have tackled some of these problems but none have been able to solve all of them. We introduce a novel topic modeling approach, Contextual-Top2Vec, which uses document contextual token embeddings, it creates hierarchical topics, finds topic spans within documents and labels topics with phrases rather than just words. We propose the use of BERTScore to evaluate topic coherence and to evaluate how informative topics are of the underlying documents. Our model outperforms the current state-of-the-art models on a comprehensive set of topic model evaluation metrics." +} + +``` + + + ## API Reference ::: turftopic.models.cluster.CTop2Vec