Models

Model resource

Access models through client.models in the Vi SDK.

📋 Prerequisites

Get started with Vi SDK →


Methods

list()

List models for a run.

# Basic listing
models = client.models.list("run_abc123")

for model in models.items:
    print(f"Model: {model.model_id}")
    print(f"Epoch: {model.spec.epoch}")

# With custom pagination
from vi.api.types import PaginationParams

models = client.models.list(
    run_id_or_link="run_abc123",
    pagination=PaginationParams(page_size=50)
)

for model in models.items:
    print(f"{model.model_id}")

# Using dict for pagination
models = client.models.list(
    run_id_or_link="run_abc123",
    pagination={"page_size": 50}
)

# List with metrics
models = client.models.list("run_abc123")

print("Available Models:")
for model in models.items:
    epoch = model.spec.epoch or "N/A"
    metrics_str = "No metrics"

    if model.spec.evaluation_metrics:
        metrics = model.spec.evaluation_metrics
        metrics_str = ", ".join(f"{k}: {v:.4f}" for k, v in metrics.items())

    print(f"  Epoch {epoch}: {metrics_str}")

# Find best model by metric
from vi.api.resources.models.responses import Model

def find_best_model(run_id: str, metric_name: str = "accuracy") -> tuple[Model | None, float]:
    """Find model with best metric value."""
    models = client.models.list(run_id)

    best_model: Model | None = None
    best_value = float('-inf')

    for model in models.items:
        if not model.spec.evaluation_metrics:
            continue

        value = model.spec.evaluation_metrics.get(metric_name)
        if value is None:
            # Skip models that do not report this metric
            continue

        if value > best_value:
            best_value = value
            best_model = model

    return best_model, best_value

best, value = find_best_model("run_abc123", "accuracy")
if best:
    print(f"Best model: {best.model_id}")
    print(f"Accuracy: {value}")

Parameters:

| Parameter | Type | Description | Default |
| --- | --- | --- | --- |
| run_id_or_link | str | Run identifier or link | Required |
| pagination | PaginationParams \| dict | Pagination settings | PaginationParams() |

Returns: PaginatedResponse[Model]


get()

Get a specific model checkpoint.

# Get latest model
model = client.models.get(run_id_or_link="run_abc123")

print(f"Model ID: {model.model_id}")
print(f"Epoch: {model.spec.epoch}")

# Get specific checkpoint
model = client.models.get(
    run_id_or_link="run_abc123",
    ckpt="epoch_10"
)

print(f"Model ID: {model.model_id}")
print(f"Epoch: {model.spec.epoch}")

# Access model metrics
model = client.models.get(run_id_or_link="run_abc123")

if model.spec.evaluation_metrics:
    for metric, value in model.spec.evaluation_metrics.items():
        print(f"{metric}: {value}")

# Display detailed information
model = client.models.get(run_id_or_link="run_abc123")
model.info()  # Prints formatted model summary

Parameters:

| Parameter | Type | Description | Default |
| --- | --- | --- | --- |
| run_id_or_link | str | Run identifier | Required |
| ckpt | str | Checkpoint name | None (latest) |

Returns: Model


download()

Download a model.

# Basic download
downloaded = client.models.download(
    run_id_or_link="run_abc123",
    save_dir="./models"
)

print(f"Model path: {downloaded.model_path}")

# Download specific checkpoint
downloaded = client.models.download(
    run_id_or_link="run_abc123",
    ckpt="epoch_10",
    save_dir="./models"
)

print(f"Model: {downloaded.model_path}")
print(f"Config: {downloaded.run_config_path}")

# Using the convenience method on client
downloaded = client.get_model(
    run_id="run_abc123",
    save_path="./models"
)

print(f"✓ Model downloaded!")
print(f"Model: {downloaded.model_path}")
print(f"Config: {downloaded.run_config_path}")

# Cache downloaded models
from pathlib import Path

def get_or_download_model(run_id: str, save_dir: str = "./models") -> dict[str, str]:
    """Download model if not already cached."""
    model_dir = Path(save_dir) / run_id

    # Check cache
    if model_dir.exists() and (model_dir / "model_full").exists():
        print(f"Using cached model at {model_dir}")
        return {
            "model_path": str(model_dir / "model_full"),
            "run_config_path": str(model_dir / "run_config.json")
        }

    # Download, then normalize to the same dict shape as the cached branch
    downloaded = client.get_model(run_id=run_id, save_path=save_dir)
    return {
        "model_path": str(downloaded.model_path),
        "run_config_path": str(downloaded.run_config_path)
    }

result = get_or_download_model("run_abc123")

# Load downloaded model for inference
from vi.inference import ViModel

# Download first
downloaded = client.get_model(
    run_id="run_abc123",
    save_path="./models"
)

# Load for inference
model = ViModel(
    secret_key="your-secret-key",
    organization_id="your-organization-id",
    run_id="run_abc123"
)

# Run inference
result, error = model(
    source="test.jpg",
    user_prompt="Describe this image"
)

if error is None:
    print(result.caption)

Parameters:

| Parameter | Type | Description | Default |
| --- | --- | --- | --- |
| run_id_or_link | str | Run identifier | Required |
| ckpt | str | Checkpoint name | None |
| save_dir | str \| Path | Save directory | Required |

Returns: ModelDownloadResult


Response types

Model

Main model response object.

from vi.api.resources.models.responses import Model

| Property | Type | Description |
| --- | --- | --- |
| kind | str | Resource kind |
| run_id | str | Parent run ID |
| organization_id | str | Organization ID |
| model_id | str | Unique model identifier |
| spec | ModelSpec | Model specification |
| status | ModelStatus | Model status |
| metadata | ResourceMetadata | Metadata |
| self_link | str | API link |
| etag | str | Entity tag |

Methods:

| Method | Returns | Description |
| --- | --- | --- |
| info() | None | Display formatted model information |
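
The identity fields above can be read directly from any Model returned by get() or list(). A short sketch, using only field names from the table above:

# Inspect a model's identity fields
model = client.models.get(run_id_or_link="run_abc123")

print(f"Run: {model.run_id}")
print(f"Organization: {model.organization_id}")
print(f"Model: {model.model_id}")
print(f"API link: {model.self_link}")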

ModelSpec

from vi.api.resources.models.responses import ModelSpec

| Property | Type | Description |
| --- | --- | --- |
| kind | str | Model kind identifier |
| epoch | int \| None | Training epoch number |
| evaluation_metrics | dict \| None | Evaluation metrics (accuracy, loss, etc.) |

ModelStatus

from vi.api.resources.models.responses import ModelStatus

| Property | Type | Description |
| --- | --- | --- |
| observed_generation | int | Observed generation number |
| conditions | list[dict] | Status conditions |
| storage_object | str | Storage location |
| contents | ModelContents \| None | Download info when available |

ModelContents

from vi.api.resources.models.responses import ModelContents

| Property | Type | Description |
| --- | --- | --- |
| download_url | ModelDownloadUrl | Download URL info |

ModelDownloadUrl

from vi.api.resources.models.responses import ModelDownloadUrl

| Property | Type | Description |
| --- | --- | --- |
| url | str | Pre-signed download URL |
| expires_at | int | Expiration timestamp |
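
When a checkpoint has been packaged for download, the pre-signed URL is exposed through the model's status. A minimal sketch, assuming contents is populated for the model in question:

# Read the pre-signed download URL from a model's status
model = client.models.get(run_id_or_link="run_abc123")

contents = model.status.contents
if contents is not None:
    print(f"Download URL: {contents.download_url.url}")
    print(f"Expires at: {contents.download_url.expires_at}")  # expiration timestamp (integer)
else:
    print("Download contents not yet available")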

ModelDownloadResult

Result from downloading a model.

| Property | Type | Description |
| --- | --- | --- |
| model_path | Path | Path to model weights |
| adapter_path | Path \| None | Path to adapter weights (if available) |
| run_config_path | Path | Path to run configuration |
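
adapter_path is only set when adapter weights were produced for the run; a short sketch of handling both cases:

# Check for adapter weights after a download
downloaded = client.models.download(
    run_id_or_link="run_abc123",
    save_dir="./models"
)

if downloaded.adapter_path is not None:
    print(f"Adapter weights: {downloaded.adapter_path}")
else:
    print("No adapter weights for this run")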

Downloaded model structure

models/
└── run_abc123/
    ├── model_full/          # Full model weights
    │   ├── config.json
    │   ├── model.safetensors
    │   └── ...
    ├── adapter/             # Adapter weights (if available)
    │   ├── adapter_config.json
    │   ├── adapter_model.safetensors
    │   └── ...
    └── run_config.json      # Run configuration
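
The layout above can be checked programmatically after a download; a minimal sketch using only the standard library, with paths following the tree shown above:

# Verify the downloaded model directory
from pathlib import Path

model_dir = Path("./models") / "run_abc123"

assert (model_dir / "model_full").is_dir()        # full model weights
assert (model_dir / "run_config.json").is_file()  # run configuration

# Adapter weights are only present for some runs
if (model_dir / "adapter").is_dir():
    print("Adapter weights available")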

Related resources