Skip to content

Visualizer

Visualizer bound to a trainer. Exposes single-model plots. User can subclass to customize plotting behavior.

Source code in gradiend/visualizer/visualizer.py
def __init__(self, trainer: Any):
    """Bind this visualizer to *trainer*.

    The trainer is stored privately and exposed read-only through the
    ``trainer`` property; all plotting methods delegate to it.
    """
    self._trainer = trainer

_trainer instance-attribute

_trainer = trainer

trainer property

trainer

compute_topk_sets staticmethod

compute_topk_sets(models, topk=100, part='decoder-weight')

Compute top-k weight sets for multiple models (intersection/union).

Source code in gradiend/visualizer/visualizer.py
@staticmethod
def compute_topk_sets(models: Dict[str, Any], topk: int = 100, part: str = "decoder-weight"):
    """Return top-k weight sets (intersection/union) for several models.

    Thin delegating wrapper: the bare name inside this body resolves to the
    module-level ``compute_topk_sets`` helper, not this staticmethod, so no
    recursion occurs.
    """
    topk_sets = compute_topk_sets(models, topk=topk, part=part)
    return topk_sets

plot_encoder_distributions

plot_encoder_distributions(encoder_df=None, **kwargs)

Plot encoder distributions (grouped split violins). Pass encoder_df for self-managed data.

Source code in gradiend/visualizer/visualizer.py
def plot_encoder_distributions(self, encoder_df: Optional[Any] = None, **kwargs: Any) -> str:
    """Render encoder value distributions as grouped split violins.

    Supply *encoder_df* to plot self-managed data; otherwise the underlying
    helper derives the data from the bound trainer. Extra keyword arguments
    are forwarded unchanged.
    """
    bound_trainer = self._trainer
    return _plot_encoder_distributions(bound_trainer, encoder_df=encoder_df, **kwargs)

plot_encoder_scatter

plot_encoder_scatter(encoder_df=None, **kwargs)

Interactive 1D encoder scatter (jitter x, encoded y), colored by label, with hover. For Jupyter.

Source code in gradiend/visualizer/visualizer.py
def plot_encoder_scatter(
    self,
    encoder_df: Optional[Any] = None,
    **kwargs: Any,
) -> Any:
    """Interactive 1D scatter of encoded values (jittered x, encoded y).

    Points are colored by label with hover tooltips; intended for Jupyter.
    Extra keyword arguments are forwarded to the plotting helper.
    """
    bound_trainer = self._trainer
    return _plot_encoder_scatter(trainer=bound_trainer, encoder_df=encoder_df, **kwargs)

plot_probability_shifts

plot_probability_shifts(decoder_results=None, class_ids=None, target_class=None, increase_target_probabilities=True, use_cache=None, **kwargs)

Plot decoder probability shifts vs learning rate.

Automatically calls analyze_decoder_for_plotting if needed to extend decoder results with probabilities for all classes on all datasets.

Source code in gradiend/visualizer/visualizer.py
def plot_probability_shifts(
    self,
    decoder_results: Optional[Dict[str, Any]] = None,
    class_ids: Optional[List[str]] = None,
    target_class: Optional[str] = None,
    increase_target_probabilities: bool = True,
    use_cache: Optional[bool] = None,
    **kwargs: Any,
) -> str:
    """Plot decoder probability shifts as a function of learning rate.

    When *decoder_results* is not supplied, the trainer's decoder is
    evaluated first. The results are then extended via
    ``analyze_decoder_for_plotting`` with probabilities for all classes on
    all datasets before being handed to the plotting helper.
    """
    bound_trainer = self.trainer

    # Evaluate the decoder only when the caller did not bring results.
    if decoder_results is None:
        decoder_results = bound_trainer.evaluate_decoder(use_cache=use_cache)

    # Always extend the results with per-class probabilities for plotting.
    prepared = bound_trainer.analyze_decoder_for_plotting(
        decoder_results=decoder_results,
        class_ids=class_ids,
        use_cache=use_cache,
    )

    return _plot_probability_shifts(
        trainer=bound_trainer,
        decoder_results=decoder_results,
        plotting_data=prepared,
        class_ids=class_ids,
        target_class=target_class,
        increase_target_probabilities=increase_target_probabilities,
        **kwargs,
    )

plot_topk_neuron_intersection

plot_topk_neuron_intersection(models=None, topk=100, part='decoder-weight', **kwargs)

Plot top-k neuron intersection. If models is None, uses trainer.get_model().

Source code in gradiend/visualizer/visualizer.py
def plot_topk_neuron_intersection(
    self,
    models: Optional[Dict[str, Any]] = None,
    topk: int = 100,
    part: str = "decoder-weight",
    **kwargs: Any,
) -> Any:
    """Plot the top-k neuron intersection (Venn overlap) across models.

    If *models* is None, a single-entry mapping is built from
    ``trainer.get_model()`` — keyed by the model's ``name_or_path`` when
    present, otherwise ``"model"``; an empty mapping is used when the
    trainer has no model.
    """
    if models is None:
        current = self._trainer.get_model()
        if current is not None:
            models = {getattr(current, "name_or_path", "model"): current}
        else:
            models = {}
    return plot_topk_overlap_venn(models, topk=topk, part=part, **kwargs)

plot_training_convergence

plot_training_convergence(**kwargs)

Plot training convergence (means by class/feature_class and correlation). Uses trainer for stats.

Source code in gradiend/visualizer/visualizer.py
def plot_training_convergence(self, **kwargs: Any) -> str:
    """Plot training convergence curves.

    Shows means by class/feature_class and the correlation, with statistics
    taken from the bound trainer. Keyword arguments are forwarded unchanged.
    """
    bound_trainer = self._trainer
    return _plot_training_convergence(trainer=bound_trainer, **kwargs)