SDK Advanced Usage
Advanced patterns and best practices for using OptixLog SDK in production.
Project Management
Organizing Projects
Structure your projects logically:
# Research project
with optixlog.run("experiment_1", project="Research2025") as client:
pass
# Production project
with optixlog.run("deployment_test", project="Production") as client:
pass
# Personal experiments
with optixlog.run("quick_test", project="Personal") as client:
pass
Auto-Create Projects
Automatically create projects if they don’t exist:
# create_project_if_not_exists defaults to True in optixlog.run()
with optixlog.run(
"new_experiment",
project="NewProject",
create_project_if_not_exists=True # Default: True
) as client:
pass
List Projects Programmatically
import optixlog
projects = optixlog.list_projects(
api_url="https://backend.optixlog.com",
api_key="your_key"
)
for project in projects:
print(f"{project.name}: {project.run_count} runs")
Run Organization Strategies
Naming Conventions
Use consistent naming patterns:
# Date-based
run_name = f"2025-11-24_experiment_1"
# Parameter-based
run_name = f"wavelength_{wavelength}_resolution_{resolution}"
# Version-based
run_name = f"v1.2_experiment"
# Descriptive
run_name = "waveguide_sweep_chi3_variation"
Config Management
Store all parameters in config:
with optixlog.run(
"parameter_sweep",
config={
"wavelength": 1.55,
"resolution": 30,
"material": "silicon",
"pml_thickness": 1.0,
"source_type": "Gaussian",
"simulation_time": 100
}
) as client:
pass
Task-Based Workflows
Tasks allow you to group multiple runs together for comparison.
Creating Tasks
Tasks are typically created via the web UI, but you can link runs to existing tasks:
task_id = "task_12345" # From web UI
# Link multiple runs to the same task
for param in [0.1, 0.2, 0.3]:
with optixlog.run(
f"experiment_{param}",
task_id=task_id,
config={"param": param}
) as client:
result = run_experiment(param)
client.log(step=0, result=result)
Manual Task Linking
Link an existing run to a task:
client.add_run_to_task("run_abc123", "task_67890")
Error Handling Patterns
Try-Except for Critical Logging
with optixlog.run("critical_experiment") as client:
for step in range(100):
try:
result = client.log(step=step, value=calculate_value())
if not result or not result.success:
print(f"Warning: Logging failed at step {step}")
except Exception as e:
print(f"Error logging step {step}: {e}")
# Continue execution
Validate Before Logging
import numpy as np
with optixlog.run("validation_demo") as client:
value = calculate_value()
# Check for invalid values
if np.isfinite(value):  # isfinite is False for NaN and ±inf, so no separate NaN check is needed
client.log(step=0, value=value)
else:
print(f"Warning: Invalid value {value}, skipping log")
Retry Logic
def log_with_retry(client, step, max_retries=3, **kwargs):
for attempt in range(max_retries):
result = client.log(step=step, **kwargs)
if result and result.success:
return result
time.sleep(0.1 * (attempt + 1)) # Linear backoff
return None
with optixlog.run("retry_demo") as client:
log_with_retry(client, step=0, loss=0.5)
Performance Optimization
Use Batch Operations
Slow (sequential):
for step in range(1000):
client.log(step=step, loss=losses[step])
Fast (parallel batch):
metrics_list = [
{"step": step, "loss": losses[step]}
for step in range(1000)
]
client.log_batch(metrics_list, max_workers=4)
Minimize Logging Frequency
# Log every 10 steps instead of every step
for step in range(1000):
if step % 10 == 0:
client.log(step=step, loss=losses[step])
Use Helper Methods
Helper methods are optimized and reduce boilerplate:
# Fast: One call
client.log_matplotlib("plot", fig)
# Slow: Manual conversion
# (15+ lines of PIL conversion code)
Integration with Simulation Frameworks
Meep Integration
import optixlog
import meep as mp
with optixlog.run(
"meep_simulation",
config={
"resolution": 30,
"cell_size": [10, 10, 0],
"wavelength": 1.55
}
) as client:
sim = mp.Simulation(
cell_size=mp.Vector3(10, 10, 0),
resolution=30,
sources=[...],
boundary_layers=[mp.PML(1.0)]
)
for step in range(100):
sim.run(until=1)
if step % 10 == 0:
# Log field snapshot
field = sim.get_array(...)
client.log_array_as_image(f"field_{step}", field, cmap='RdBu')
# Log metrics
power = calculate_power(field)
client.log(step=step, power=power)
Custom Simulation Framework
class MySimulator:
def __init__(self, config):
self.config = config
# Initialize simulator
def run_step(self):
# Run one step
pass
def get_results(self):
# Get results
pass
with optixlog.run("custom_sim", config=sim_config) as client:
sim = MySimulator(sim_config)
for step in range(100):
sim.run_step()
results = sim.get_results()
client.log(step=step, **results)
Custom Logging Patterns
Decorator Pattern
def log_step(func):
def wrapper(client, step, *args, **kwargs):
result = func(*args, **kwargs)
client.log(step=step, result=result)
return result
return wrapper
with optixlog.run("decorator_demo") as client:
@log_step
def compute_value():
return 42
compute_value(client, step=0)
Class-Based Pattern
class Experiment:
def __init__(self, run_name):
self.client = optixlog.init(run_name=run_name)
def run(self):
for step in range(100):
result = self.compute(step)
self.client.log(step=step, result=result)
def compute(self, step):
return step * 2
experiment = Experiment("class_based")
experiment.run()
Batch Processing
Process Multiple Experiments
experiments = [
{"wavelength": 1.3, "resolution": 20},
{"wavelength": 1.4, "resolution": 25},
{"wavelength": 1.5, "resolution": 30},
]
for exp_config in experiments:
with optixlog.run(
f"experiment_{exp_config['wavelength']}",
config=exp_config
) as client:
result = run_experiment(exp_config)
client.log(step=0, result=result)
Parallel Batch Processing
from concurrent.futures import ThreadPoolExecutor
def run_experiment_with_logging(config):
with optixlog.run(
f"exp_{config['id']}",
config=config
) as client:
result = run_experiment(config)
client.log(step=0, result=result)
configs = [{"id": i, "param": i*0.1} for i in range(10)]
with ThreadPoolExecutor(max_workers=4) as executor:
executor.map(run_experiment_with_logging, configs)
Query and Analysis
Compare Multiple Runs
import optixlog
# Get runs
runs = optixlog.list_runs(api_url, api_key, project="MyProject", limit=5)
# Compare them
run_ids = [run.run_id for run in runs]
comparison = optixlog.compare_runs(api_url, api_key, run_ids)
print(f"Common metrics: {comparison.common_metrics}")
for metric in comparison.common_metrics:
print(f"\n{metric}:")
for run_id, values in comparison.metrics_data[metric].items():
print(f" {run_id}: {len(values)} values")
Download Artifacts
# Get artifacts for a run
artifacts = optixlog.get_artifacts(api_url, api_key, "run_abc123")
# Download specific artifact
for artifact in artifacts:
if artifact.kind == "image":
optixlog.download_artifact(
api_url, api_key,
artifact.media_id,
f"downloads/{artifact.key}.png"
)
Analyze Metrics
# Get all metrics for a run
metrics = optixlog.get_metrics(api_url, api_key, "run_abc123")
# Analyze
for metric_name, values in metrics.items():
steps, vals = zip(*values)
print(f"{metric_name}:")
print(f" Min: {min(vals):.4f}")
print(f" Max: {max(vals):.4f}")
print(f" Mean: {sum(vals)/len(vals):.4f}")
Environment Configuration
Using Environment Variables
# .env file or shell
export OPTIX_API_KEY="proj_your_key"
export OPTIX_API_URL="https://backend.optixlog.com"
export OPTIX_PROJECT="MyProject"
Then in Python:
# Uses environment variables automatically
with optixlog.run("my_experiment") as client:
pass
Configuration Files
import json
import optixlog
# Load config
with open("config.json") as f:
config = json.load(f)
with optixlog.run(
config["run_name"],
project=config["project"],
config=config["simulation_params"]
) as client:
pass
Best Practices Summary
- Use context managers - Automatic cleanup
- Use helper methods - Less boilerplate
- Use batch operations - Faster uploads
- Store config in run - Track all parameters
- Use consistent naming - Easy to find runs
- Check return values - Handle errors gracefully
- Use tasks for sweeps - Group related runs
- Query programmatically - Analyze results in code
- Validate inputs - Catch errors early
- Optimize logging frequency - Balance detail vs performance