Models
Access models through client.models in the Datature Vi SDK.
Models are the checkpoint artifacts produced by completed training runs. Each run can produce multiple checkpoints (one per epoch or at configured intervals). You can list all checkpoints for a run, retrieve a specific epoch, and download the weights to a local directory for inference.
Before You Start
- Vi SDK installed with authentication configured
- A secret key for API authentication
- A completed training run with saved model checkpoints
Methods
list()
List model checkpoints for a training run.
models = client.models.list("run_abc123")
for model in models.items:
print(f"Model: {model.model_id}")
print(f"Epoch: {model.spec.epoch}")

from vi.api.types import PaginationParams
models = client.models.list(
run_id_or_link="run_abc123",
pagination=PaginationParams(page_size=50)
)
for model in models.items:
print(f"{model.model_id}")

models = client.models.list("run_abc123")
print("Available models:")
for model in models.items:
epoch = model.spec.epoch or "N/A"
metrics_str = "No metrics"
if model.spec.evaluation_metrics:
metrics = model.spec.evaluation_metrics
metrics_str = ", ".join(f"{k}: {v:.4f}" for k, v in metrics.items())
print(f" Epoch {epoch}: {metrics_str}")

from vi.api.resources.models.responses import Model
def find_best_model(run_id: str, metric_name: str = "accuracy") -> tuple[Model | None, float]:
    """Find the checkpoint with the highest value for *metric_name*.

    Args:
        run_id: Training run ID (or link) whose checkpoints are searched.
        metric_name: Key looked up in each checkpoint's evaluation metrics.

    Returns:
        A ``(best_model, best_value)`` pair. ``best_model`` is ``None`` and
        ``best_value`` is ``-inf`` when no checkpoint reports the metric.
    """
    models = client.models.list(run_id)
    best_model: Model | None = None
    best_value = float("-inf")
    for model in models.items:
        metrics = model.spec.evaluation_metrics
        # Skip checkpoints that don't report this metric. (Previously a
        # missing metric defaulted to 0, which could beat checkpoints with
        # legitimate negative values and select a model with no metric at all.)
        if not metrics or metric_name not in metrics:
            continue
        value = metrics[metric_name]
        if value > best_value:
            best_value = value
            best_model = model
    return best_model, best_value
best, value = find_best_model("run_abc123", "accuracy")
if best:
print(f"Best model: {best.model_id}")
print(f"Accuracy: {value}")

Returns: PaginatedResponse[Model]
get()
Get a specific model checkpoint.
model = client.models.get(run_id_or_link="run_abc123")
print(f"Model ID: {model.model_id}")
print(f"Epoch: {model.spec.epoch}")

model = client.models.get(
run_id_or_link="run_abc123",
ckpt="epoch_10"
)
print(f"Model ID: {model.model_id}")
print(f"Epoch: {model.spec.epoch}")

model = client.models.get(run_id_or_link="run_abc123")
if model.spec.evaluation_metrics:
for metric, value in model.spec.evaluation_metrics.items():
print(f"{metric}: {value}")

model = client.models.get(run_id_or_link="run_abc123")
model.info()  # Prints formatted model summary

Returns: Model
download()
Download a model checkpoint to a local directory.
downloaded = client.models.download(
run_id_or_link="run_abc123",
save_dir="./models"
)
print(f"Model path: {downloaded.model_path}")

downloaded = client.models.download(
run_id_or_link="run_abc123",
ckpt="epoch_10",
save_dir="./models"
)
print(f"Model: {downloaded.model_path}")
print(f"Config: {downloaded.run_config_path}")

downloaded = client.get_model(
run_id="run_abc123",
save_path="./models"
)
print(f"Model: {downloaded.model_path}")
print(f"Config: {downloaded.run_config_path}")

from pathlib import Path
def get_or_download_model(run_id: str, save_dir: str = "./models") -> dict[str, str]:
    """Return local paths for a run's model, downloading only when absent.

    Reuses a previously downloaded ``model_full`` directory under
    ``save_dir/run_id`` when present; otherwise fetches it via the client.
    """
    model_dir = Path(save_dir) / run_id
    full_weights = model_dir / "model_full"
    is_cached = model_dir.exists() and full_weights.exists()
    if not is_cached:
        return client.get_model(run_id=run_id, save_path=save_dir)
    print(f"Using cached model at {model_dir}")
    return {
        "model_path": str(full_weights),
        "run_config_path": str(model_dir / "run_config.json"),
    }
result = get_or_download_model("run_abc123")

from vi.inference import ViModel
downloaded = client.get_model(
run_id="run_abc123",
save_path="./models"
)
model = ViModel(
secret_key="your-secret-key",
organization_id="your-organization-id",
run_id="run_abc123"
)
result, error = model(
source="test.jpg",
user_prompt="Describe this image"
)
if error is None:
print(result.caption)

Returns: ModelDownloadResult
Downloaded model structure
After downloading, the model files are organized under the run ID:
models/
└── run_abc123/
├── model_full/ # Full model weights
│ ├── config.json
│ ├── model.safetensors
│ └── ...
├── adapter/ # Adapter weights (if available)
│ ├── adapter_config.json
│ ├── adapter_model.safetensors
│ └── ...
└── run_config.json # Run configuration

Response types
Model

from vi.api.resources.models.responses import Model

Methods: info() → prints a formatted model summary.

ModelSpec

from vi.api.resources.models.responses import ModelSpec

ModelStatus

from vi.api.resources.models.responses import ModelStatus

ModelContents

from vi.api.resources.models.responses import ModelContents

ModelDownloadUrl

from vi.api.resources.models.responses import ModelDownloadUrl

ModelDownloadResult
Related resources
Updated 30 days ago
