SDK
Advanced Usage
Best practices and advanced patterns for production use.
Project Organization
Naming Conventions
from optixlog import Optixlog
client = Optixlog(api_key="your_api_key")
project = client.project(name="MySimulations")
# Date-based
run = project.run(name="2025-01-15_experiment_v1")
# Parameter-based
run = project.run(name=f"wavelength_{wavelength}_res_{resolution}")
# Descriptive
run = project.run(name="waveguide_chi3_sweep")

Config Management
Store all parameters for reproducibility:
from optixlog import Optixlog
client = Optixlog(api_key="your_api_key")
project = client.project(name="Simulations")
run = project.run(
name="simulation",
config={
"wavelength": 1.55,
"resolution": 30,
"pml_thickness": 1.0,
"source_type": "Gaussian",
"material": "silicon",
"sim_time": 100
}
)
# Run your simulation
for step in range(100):
result = simulate_step()
run.log(step=step, **result)

Task-Based Workflows
Group related runs for parameter sweeps:
from optixlog import Optixlog
client = Optixlog(api_key="your_api_key")
project = client.project(name="ParameterSweeps")
task_id = "task_12345" # From web UI
for param in [0.1, 0.2, 0.3]:
run = project.run(
name=f"sweep_{param}",
task_id=task_id,
config={"param": param}
)
result = simulate(param)
run.log(step=0, result=result)

Error Handling
Safe Logging
from optixlog import Optixlog
client = Optixlog(api_key="your_api_key")
project = client.project(name="SafeLogging")
run = project.run(name="experiment")
for step in range(100):
try:
result = run.log(step=step, value=calculate_value())
if result and not result.success:
print(f"Warning: Log failed at step {step}")
except Exception as e:
print(f"Error: {e}")
# Continue execution

Validate Before Logging
import numpy as np
from optixlog import Optixlog
client = Optixlog(api_key="your_api_key")
project = client.project(name="Validation")
run = project.run(name="validated_run")
value = calculate_value()
if np.isfinite(value):
run.log(step=0, value=value)
else:
print(f"Skipping invalid value: {value}")

Retry Logic
import time
def log_with_retry(run, step, max_retries=3, **kwargs):
for attempt in range(max_retries):
result = run.log(step=step, **kwargs)
if result and result.success:
return result
time.sleep(0.1 * (attempt + 1))
return None
# Usage
from optixlog import Optixlog
client = Optixlog(api_key="your_api_key")
project = client.project(name="RetryDemo")
run = project.run(name="with_retry")
log_with_retry(run, step=0, loss=0.5)

Performance Optimization
Reduce Logging Frequency
from optixlog import Optixlog
client = Optixlog(api_key="your_api_key")
project = client.project(name="Optimized")
run = project.run(name="efficient_logging")
for step in range(10000):
loss = train_step()
if step % 100 == 0: # Log every 100 steps
run.log(step=step, loss=loss)

Use Helper Methods
import matplotlib.pyplot as plt
from optixlog import Optixlog
client = Optixlog(api_key="your_api_key")
project = client.project(name="Visualizations")
run = project.run(name="with_plots")
# Fast: One call, auto-cleanup
fig, ax = plt.subplots()
ax.plot(data)
run.log_matplotlib("plot", fig)
plt.close(fig)
# Also efficient: array to image
run.log_array_as_image("field", field_data, cmap='RdBu')

Simulation Framework Integration
Meep
from optixlog import Optixlog
import meep as mp
client = Optixlog(api_key="your_api_key")
project = client.project(name="MeepSimulations")
run = project.run(
name="meep_simulation",
config={"resolution": 30, "wavelength": 1.55}
)
sim = mp.Simulation(
cell_size=mp.Vector3(10, 5),
resolution=30,
boundary_layers=[mp.PML(1.0)]
)
for step in range(100):
sim.run(until=1)
if step % 10 == 0:
field = sim.get_array(
center=mp.Vector3(),
size=mp.Vector3(10, 5),
component=mp.Ez
)
run.log_array_as_image(f"field_{step}", field, cmap='RdBu')
power = calculate_power(field)
run.log(step=step, power=power)

Custom Framework
from optixlog import Optixlog
class Simulator:
def __init__(self, config):
self.config = config
def step(self):
# Run one step
return {"loss": 0.5, "power": 1.0}
client = Optixlog(api_key="your_api_key")
project = client.project(name="CustomSimulations")
sim_config = {"param1": 1.0, "param2": 2.0}
run = project.run(name="custom_sim", config=sim_config)
sim = Simulator(sim_config)
for step in range(100):
results = sim.step()
run.log(step=step, **results)

Source Code Tracking
Automatic Detection
The SDK automatically detects and logs your source code:
- Jupyter notebooks: Detects .ipynb files
- Python scripts: Detects .py files
- Google Colab: Requires explicit source parameter
Explicit Source Path
from optixlog import Optixlog
client = Optixlog(api_key="your_api_key")
project = client.project(name="SourceTracking")
# For Colab or custom setups
run = project.run(
name="experiment",
source="/path/to/script.py"
)

Skip Source Detection
from optixlog import Optixlog
client = Optixlog(api_key="your_api_key")
project = client.project(name="NoSource")
run = project.run(
name="experiment",
skip_file=True # No source code logged
)

Parallel Processing
Thread Pool
from concurrent.futures import ThreadPoolExecutor
from optixlog import Optixlog
client = Optixlog(api_key="your_api_key")
project = client.project(name="ParallelExperiments")
def run_experiment(config):
run = project.run(
name=f"exp_{config['id']}",
config=config
)
result = simulate(config)
run.log(step=0, result=result)
configs = [{"id": i, "param": i * 0.1} for i in range(10)]
with ThreadPoolExecutor(max_workers=4) as executor:
executor.map(run_experiment, configs)

Parameter Sweeps
from optixlog import Optixlog
client = Optixlog(api_key="your_api_key")
project = client.project(name="Sweeps")
experiments = [
{"wavelength": 1.3, "resolution": 20},
{"wavelength": 1.4, "resolution": 25},
{"wavelength": 1.5, "resolution": 30},
]
for config in experiments:
run = project.run(
name=f"sweep_{config['wavelength']}",
config=config
)
result = simulate(config)
run.log(step=0, **result)

Query and Analysis
List Runs
from optixlog import list_runs
runs = list_runs(
api_url="https://optixlog.com",
api_key="your_api_key",
project="MyProject",
limit=5
)
for r in runs:
print(f"Run: {r.name}, ID: {r.run_id}")

Compare Runs
from optixlog import list_runs, compare_runs
runs = list_runs(
api_url="https://optixlog.com",
api_key="your_api_key",
project="MyProject",
limit=5
)
run_ids = [r.run_id for r in runs]
comparison = compare_runs(
api_url="https://optixlog.com",
api_key="your_api_key",
run_ids=run_ids
)
print(f"Common metrics: {comparison.common_metrics}")

Download Artifacts
from optixlog import get_artifacts, download_artifact
artifacts = get_artifacts(
api_url="https://optixlog.com",
api_key="your_api_key",
run_id="run_12345"
)
for artifact in artifacts:
if artifact.kind == "image":
download_artifact(
api_url="https://optixlog.com",
api_key="your_api_key",
media_id=artifact.media_id,
path=f"downloads/{artifact.key}.png"
)

Analyze Metrics
from optixlog import get_metrics
metrics = get_metrics(
api_url="https://optixlog.com",
api_key="your_api_key",
run_id="run_12345"
)
for name, values in metrics.items():
steps, vals = zip(*values)
print(f"{name}: min={min(vals):.4f}, max={max(vals):.4f}")

Configuration Files
Load from JSON
import json
from optixlog import Optixlog
with open("config.json") as f:
config = json.load(f)
client = Optixlog(api_key=config["api_key"])
project = client.project(name=config["project"])
run = project.run(
name=config["run_name"],
config=config["params"]
)

Environment-Based Config
# .env or shell
export OPTIX_API_KEY="your_api_key"

import os
from optixlog import Optixlog
client = Optixlog(api_key=os.getenv("OPTIX_API_KEY"))
project = client.project(name="Production")
run = project.run(name="deployment_test")

Context Manager Pattern
Use the context manager for automatic status reporting:
from optixlog import Optixlog
client = Optixlog(api_key="your_api_key")
project = client.project(name="ContextDemo")
run = project.run(name="experiment")
# Use as context manager for status reporting
with run:
run.log(step=0, loss=0.5)
run.log(step=1, loss=0.4)
# Prints "✓ Run completed successfully"

Best Practices Summary
- Use the fluent API — client.project().run() for clean code
- Use helper methods — Less code, fewer errors
- Store config — Track all parameters for reproducibility
- Consistent naming — Easy to find and compare runs
- Check return values — Handle failures gracefully
- Use tasks — Group parameter sweeps
- Validate inputs — Catch NaN/Inf early
- Optimize frequency — Balance detail vs performance
- Query programmatically — Analyze results in code
- Use context managers — Clean up resources automatically
Next Steps
- API Reference — Complete method documentation
- MPI Support — Parallel simulation support
- Troubleshooting — Common issues and solutions