Overview
The Bread SDK uses Pydantic models for responses and TypedDicts for request parameters in Python, and interfaces and type aliases in TypeScript, providing full type safety.
Response Types
RepoResponse
class RepoResponse:
    repo_name: str   # Repository name
    base_model: str  # Base model identifier
PromptResponse
class PromptResponse:
    prompt_name: str                                  # Prompt identifier
    messages: List[Message]                           # List of messages
    tools: Optional[List[Dict[str, object]]] = None   # List of available tools/functions (OpenAI format)
Message
class Message:
    role: str                                            # "system", "user", or "assistant"
    content: Union[str, List[Dict[str, object]], None]  # Message content (can be null for assistant with tool_calls)
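Because Message.content can be a plain string, a list of content blocks, or None (for assistant turns that only carry tool_calls), reading it safely requires a small branch. A minimal sketch, assuming prompt is a PromptResponse you have already fetched:
def render_message(msg) -> str:
    """Flatten a Message's content into plain text for display."""
    if msg.content is None:
        return ""  # e.g. an assistant turn that only issued tool calls
    if isinstance(msg.content, str):
        return msg.content
    # List of content blocks: keep whatever text parts are present
    return " ".join(str(block.get("text", "")) for block in msg.content if isinstance(block, dict))

for msg in prompt.messages:
    print(f"{msg.role}: {render_message(msg)}")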
TargetResponse
class TargetResponse:
    target_name: str  # Target name
    config: Config    # Target configuration

class Config:
    generators: Optional[List[Generator]]
    model_name: Optional[str]
    teacher_prompt: Optional[str]  # Teacher prompt name (unconditioned stimulus)
    student_prompt: Optional[str]  # Student prompt name (conditioned stimulus)
    max_concurrency: Optional[int]
    max_tokens: Optional[int]
    num_traj_per_stimulus: Optional[int]
    temperature: Optional[float]
    extra_kwargs: Optional[Dict[str, Any]]
BakeResponse
class BakeResponse:
    status: str                          # Job status: "not_started", "running", "complete", "failed"
    config: Optional[Dict[str, object]]  # Job config parameters
    error: Optional[str]                 # Error message if job failed
    job_id: Optional[int]                # Coordinator job ID (if job is queued/running)
    lines: Optional[int]                 # Number of output lines (not applicable for bakes)
    progress_percent: Optional[float]    # Job progress percentage (0-100) if job is running
    loss: Optional[Dict[str, float]]     # Loss values from training (latest_loss, final_loss, min_loss, max_loss)
    model_name: Optional[List[str]]      # List of model names in format 'user/repo/bake_name/checkpoint' (only when status is 'complete')
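Several BakeResponse fields are only meaningful in certain states: progress_percent while the job is running, model_name only once it is complete, and error only on failure, so it is worth branching on status before reading them. A minimal sketch, assuming bake is a BakeResponse you have already retrieved:
def summarize_bake(bake) -> str:
    if bake.status == "complete":
        return "complete: " + ", ".join(bake.model_name or [])
    if bake.status == "failed":
        return f"failed: {bake.error}"
    if bake.status == "running":
        latest = (bake.loss or {}).get("latest_loss")
        return f"running: {bake.progress_percent or 0.0:.1f}% (latest loss: {latest})"
    return bake.status  # "not_started" or any other state

print(summarize_bake(bake))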
HealthCheckResponse
class HealthCheckResponse:
    status: str   # "healthy" or "degraded"
    api: str      # API name
    version: str  # API version
BakeDownloadResponse
class BakeDownloadResponse:
    bake_name: str   # Bake name
    checkpoint: int  # Checkpoint number
    expires_in: int  # URL expiry time in seconds
    url: str         # Presigned URL for downloading weights
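The url field is an ordinary presigned HTTPS link, so the weights can be fetched with any HTTP client before expires_in seconds elapse. A minimal standard-library sketch, assuming download is a BakeDownloadResponse (the local filename is just a placeholder):
import urllib.request

filename = f"{download.bake_name}_ckpt{download.checkpoint}.bin"  # placeholder local filename
print(f"URL expires in {download.expires_in} seconds")
urllib.request.urlretrieve(download.url, filename)
print(f"Saved weights to {filename}")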
BakeGetMetricsResponse
# List of metrics entries
BakeGetMetricsResponse = List[Dict[str, Any]]
RepoGetTreeResponse
class RepoGetTreeResponse:
    bakes: Dict[str, BakeTreeNode]  # Dictionary of all bakes in the repository
    base_model: str                 # Base model name (e.g., 'Qwen/Qwen3-32B')
    edges: List[List[str]]          # List of parent-child edges: [source_type, source_name, target_type, target_name]

class BakeTreeNode:
    config: Dict[str, object]         # Bake configuration (bake.yml)
    status: str                       # Bake status: complete, failed, running, pending, unknown
    checkpoints: Optional[List[int]]  # List of checkpoint numbers
    model_name: Optional[List[str]]   # List of full model paths with checkpoints
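Each entry in edges encodes a parent-child link as [source_type, source_name, target_type, target_name], so the lineage of any bake can be read straight off the response. A minimal sketch, assuming tree is a RepoGetTreeResponse and "my_bake" is a placeholder bake name:
print(f"Base model: {tree.base_model}")
for name, node in tree.bakes.items():
    print(f"{name}: {node.status} ({len(node.checkpoints or [])} checkpoints)")

# Find the direct parents of one bake by scanning the edge list
for source_type, source_name, target_type, target_name in tree.edges:
    if target_name == "my_bake":
        print(f"my_bake depends on {source_type} {source_name}")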
RecipeGetDependencyGraphResponse
class RecipeGetDependencyGraphResponse:
    bakes: Dict[str, Dict[str, object]]    # Dictionary of bake configs (collation + bake)
    base_model: str                        # Base model name
    edges: List[List[object]]              # List of dependency edges (source_type, source_name, target_type, target_name)
    prompts: Dict[str, Optional[str]]      # Dictionary of prompt names to file paths
    targets: Dict[str, Dict[str, object]]  # Dictionary of target configs (stim + rollout)
RecipeGetRecreationPlanResponse
class RecipeGetRecreationPlanResponse:
    base_model: str       # Base model name (e.g., 'Qwen/Qwen3-32B')
    resources: Resources  # Summary of all resources
    steps: List[Step]     # Ordered list of steps to recreate the bake
    total_steps: int      # Total number of steps

class Resources:
    bakes: List[ResourcesBake]  # List of bake resources with model names
    prompts: List[str]          # List of prompt names
    targets: List[str]          # List of target names

class ResourcesBake:
    bake_name: str
    model_names: Optional[List[str]]

class Step:
    action: str             # Action to perform: create_prompt, create_target, run_stim, run_rollout, create_bake, run_bake
    config: Dict[str, Any]  # Configuration for this resource (cleaned, no internal paths)
    dependencies: List[str] # List of dependencies in format 'resource_type:resource_name'
    resource_name: str
    resource_type: str
    step: int               # Step number in execution order
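Because steps is already ordered and each Step lists its dependencies, inspecting or replaying a recreation plan is a simple walk over the list. A minimal sketch that just prints the plan, assuming plan is a RecipeGetRecreationPlanResponse:
print(f"Recreating from {plan.base_model} in {plan.total_steps} steps")
for step in plan.steps:
    deps = ", ".join(step.dependencies) or "none"
    print(f"[{step.step}] {step.action} {step.resource_type}:{step.resource_name} (depends on: {deps})")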
Request Parameter Types
MessageParam
from aibread.types import MessageParam

class MessageParam(TypedDict, total=False):
    role: str  # Required: "system", "user", "assistant"
    content: Optional[Union[str, List[Dict[str, object]]]]  # Message content (can be null for assistant with tool_calls)

# Usage
messages: List[MessageParam] = [
    {"role": "system", "content": "You are helpful"},
    {"role": "user", "content": "Hello"}
]
TargetConfigBaseParam
from aibread.types import TargetConfigBaseParam

class TargetConfigBaseParam(TypedDict, total=False):
    generators: Optional[Iterable[GeneratorParam]]
    model_name: Optional[str]
    teacher_prompt: Optional[str]  # Teacher prompt name (unconditioned stimulus)
    student_prompt: Optional[str]  # Student prompt name (conditioned stimulus)
    u: Optional[str]  # [DEPRECATED] Use 'teacher_prompt' instead
    v: Optional[str]  # [DEPRECATED] Use 'student_prompt' instead
    max_concurrency: Optional[int]
    max_tokens: Optional[int]
    num_traj_per_stimulus: Optional[int]
    temperature: Optional[float]
    extra_kwargs: Optional[Dict[str, Any]]
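Since TargetConfigBaseParam is declared with total=False, a target config is an ordinary dict containing only the keys you need. A minimal sketch; the prompt names are placeholders for prompts you have already created:
from aibread.types import TargetConfigBaseParam

target_config: TargetConfigBaseParam = {
    "teacher_prompt": "teacher_v1",  # placeholder prompt names
    "student_prompt": "student_v1",
    "model_name": "Qwen/Qwen3-32B",
    "temperature": 0.7,
    "max_tokens": 2048,
    "num_traj_per_stimulus": 4,
    "max_concurrency": 8,
}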
BakeConfigBaseParam
from aibread.types import BakeConfigBaseParam

class BakeConfigBaseParam(TypedDict, total=False):
    datasets: Optional[Iterable[DatasetItemParam]]
    epochs: Optional[int]
    micro_batch_size: Optional[int]
    gradient_accumulation_steps: Optional[int]
    total_trajectories: Optional[int]
    seed: Optional[int]
    model: Optional[ModelConfigParam]
    optimizer: Optional[OptimizerConfigParam]
    scheduler: Optional[SchedulerConfigParam]
    deepspeed: Optional[DeepspeedConfigParam]
    checkpoint: Optional[Iterable[CheckpointConfigParam]]
    data: Optional[DataConfigParam]
    eval_interval: Optional[int]
    train_log_iter_interval: Optional[int]
    type: Optional[str]
    model_name: Optional[str]
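The same pattern applies to bake configs: build a plain dict with only the fields you want to override. A minimal sketch with illustrative hyperparameter values and a placeholder target name in datasets:
from aibread.types import BakeConfigBaseParam

bake_config: BakeConfigBaseParam = {
    "epochs": 1,
    "micro_batch_size": 2,
    "gradient_accumulation_steps": 8,
    "seed": 42,
    "datasets": [
        {"target": "my_target", "weight": 1.0},  # placeholder target name
    ],
    "optimizer": {"learning_rate": 1e-5, "weight_decay": 0.01},  # illustrative values
}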
Working with Types
Type Imports
from aibread import Bread
from aibread.types import (
    RepoResponse,
    PromptResponse,
    TargetResponse,
    BakeResponse,
    Message,
    MessageParam,
    HealthCheckResponse,
)
Type Hints
from aibread import Bread
from aibread.types import RepoResponse, PromptResponse, MessageParam
from typing import List

def get_repository(name: str) -> RepoResponse:
    client = Bread()
    return client.repo.get(name)

def create_prompt(
    name: str,
    repo: str,
    messages: List[MessageParam]
) -> PromptResponse:
    client = Bread()
    return client.prompts.set(
        prompt_name=name,
        repo_name=repo,
        messages=messages
    )
Response Serialization
repo = client.repo.get("my_repo")
# To JSON
json_str = repo.to_json() # Pretty-printed
compact = repo.to_json(indent=None) # Compact
# To dictionary
data = repo.to_dict()
data_all = repo.to_dict(exclude_unset=False)
Check Fields Set
repo = client.repo.get("my_repo")
if 'base_model' not in repo.model_fields_set:
    print('Field was not in API response')
Access Extra Fields
response = client.repo.get("my_repo")
# Access unknown property
extra = response.unknown_prop
# Get all extra fields
all_extras = response.model_extra
Generator Types
# Oneshot QS
{
    "type": "oneshot_qs",
    "model": str,
    "numq": int,
    "temperature": float,
    "max_tokens": int | None,
    "rollout_with_conditioned": bool | None,
    "template_path": str | None,
    "template_content": str | None
}

# Hardcoded
{
    "type": "hardcoded",
    "numq": int,
    "questions": List[str],
    "max_tokens": int | None,
    "rollout_with_conditioned": bool | None
}

# From Dataset
{
    "type": "from_dataset",
    "dataset": str,
    "numq": int,
    "seed": int,
    "max_tokens": int | None,
    "rollout_with_conditioned": bool | None
}

# Persona
{
    "type": "persona",
    "numq": int,
    "seed": int,
    "temperature": float,
    "max_tokens": int | None,
    "rollout_with_conditioned": bool | None
}
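In practice these generator dicts are what you pass in the generators list of a target config; the fields annotated with | None appear to be optional. A minimal sketch of one hardcoded and one from_dataset generator, with placeholder values:
generators = [
    {
        "type": "hardcoded",
        "numq": 2,
        "questions": [
            "Explain the difference between a list and a tuple.",
            "Write a function that reverses a string.",
        ],
    },
    {
        "type": "from_dataset",
        "dataset": "my_dataset",  # placeholder dataset name
        "numq": 50,
        "seed": 42,
    },
]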
Type Safety Example
from typing import List
from aibread import Bread
from aibread.types import MessageParam, PromptResponse

def create_coding_prompts(repo_name: str) -> List[PromptResponse]:
    """Type-safe prompt creation"""
    client = Bread()
    prompts: List[MessageParam] = [
        {"role": "system", "content": "You are a coder"},
        {"role": "user", "content": "Task: {task}"}
    ]
    response: PromptResponse = client.prompts.set(
        prompt_name="coding_prompt",
        repo_name=repo_name,
        messages=prompts
    )
    return [response]
Configuration Sub-types
These types are used within BakeConfigBaseParam and TargetConfigBaseParam.
ModelConfig
Copy
class ModelConfigParam(TypedDict, total=False):
type: Optional[str]
parent_model_name: Optional[str]
baked_adapter_config: Optional[BakedAdapterConfig]
dtype: Optional[str]
attn_implementation: Optional[str]
disable_activation_checkpoint: Optional[bool]
peft_config: Optional[Dict[str, object]]
class BakedAdapterConfig(TypedDict, total=False):
r: Optional[int]
lora_alpha: Optional[int]
lora_dropout: Optional[float]
bias: Optional[str]
target_modules: Optional[str]
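A model config with a LoRA-style baked adapter is just a nested dict. This sketch assumes ModelConfigParam is importable from aibread.types like the other parameter types, and the hyperparameter values are purely illustrative:
from aibread.types import ModelConfigParam

model_config: ModelConfigParam = {
    "dtype": "bfloat16",  # illustrative value
    "baked_adapter_config": {
        "r": 16,
        "lora_alpha": 32,
        "lora_dropout": 0.05,
        "target_modules": "all-linear",  # illustrative value
    },
}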
DataConfig
Copy
class DataConfigParam(TypedDict, total=False):
type: Optional[str]
sources: Optional[Iterable[DataSourceParam]]
eval_sources: Optional[Iterable[DataSourceParam]]
max_length: Optional[int]
train_eval_split: Optional[Iterable[float]]
dl_num_workers: Optional[int]
num_proc: Optional[int]
seed: Optional[int]
beta: Optional[float]
temperature: Optional[float]
class DataSourceParam(TypedDict, total=False):
type: Optional[str]
name_or_path: Optional[str]
split: Optional[str]
max_samples: Optional[int]
process: Optional[bool]
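A data config mostly points at one or more sources. A minimal sketch, assuming DataConfigParam is exported from aibread.types like the other parameter types; the dataset path is a placeholder:
from aibread.types import DataConfigParam

data_config: DataConfigParam = {
    "max_length": 4096,
    "train_eval_split": [0.95, 0.05],
    "sources": [
        {"name_or_path": "my_org/my_dataset", "split": "train"},  # placeholder dataset path
    ],
}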
Optimizer & Scheduler
Copy
class OptimizerConfigParam(TypedDict, total=False):
type: Optional[str]
learning_rate: Optional[float]
weight_decay: Optional[float]
betas: Optional[Iterable[float]]
class SchedulerConfigParam(TypedDict, total=False):
type: Optional[str]
class DatasetItemParam(TypedDict, total=False):
target: Required[str]
weight: Optional[float]
Best Practices
Use Type Hints
Add type hints to your functions for better IDE support and type checking.
Import Types Explicitly
Import types from aibread.types for better autocomplete.
Validate TypedDicts
Use TypedDicts for request parameters to catch errors at development time.
Check Model Fields
Use model_fields_set to check which fields were actually returned by the API.