
Organize, automate and standardize experiment tracking in a flexible tool
that your growing team will use

Version data and experiments for easier reproducibility.
Search, visualize, debug, and compare experiments and datasets.
Share and collaborate on experiment results across the organization.

Log ML metadata

Log any model metadata from anywhere in your ML pipeline. Get started in 5 minutes.

Add a snippet to any step of your ML pipeline once. Decide what you want to log and how. Run it a million times.

import neptune.new as neptune

# Connect to Neptune and create a Run
run = neptune.init(project="", api_token="") # credentials

# Log hyperparameters 
run["parameters"] = {"batch_size": 64, 
                     "dropout":0.5,
                     "optimizer": {"type":"SGD", 
                                   "learning_rate": 0.001}}
# Log dataset versions
run["data/train_version"].track_files("train/images")

# Log the training process
for epoch in range(100):
    run["train/accuracy"].log(accuracy)

# Log test metrics and charts
run["test/f1_score"] = test_score
run["test/confusion_matrix"].upload(fig)

# Log model weights and versions
run["model/weights"].upload("my_model.pkl")

# Stop logging to your Run
run.stop()

PyTorch Lightning

from pytorch_lightning import Trainer
from pytorch_lightning.loggers import NeptuneLogger

neptune_logger = NeptuneLogger(
    api_key="", project="", # your credentials
)

trainer = Trainer(max_epochs=10, 
                  logger=neptune_logger)
trainer.fit(my_model, my_dataloader)

TensorFlow / Keras

import neptune.new as neptune
from neptune.new.integrations.tensorflow_keras import NeptuneCallback

run = neptune.init(project="", api_token="") # credentials
neptune_cbk = NeptuneCallback(run=run, base_namespace='metrics')

model.fit(x_train, y_train,
          epochs=5,
          batch_size=64,
          callbacks=[neptune_cbk])

PyTorch

import torch
import neptune.new as neptune

run = neptune.init(project="", api_token="") # credentials

data_dir = "data/CIFAR10"
params = {
    "lr": 1e-2,
    "bs": 128,
    "input_sz": 32 * 32 * 3,
    "n_classes": 10,
    "model_filename": "basemodel",
}
run["config/data_dir"] = data_dir
run["config/params"] = params

for i, (x, y) in enumerate(trainloader, 0):

    optimizer.zero_grad()
    outputs = model.forward(x)
    _, preds = torch.max(outputs, 1)
    loss = criterion(outputs, y)
    acc = (torch.sum(preds == y.data)) / len(x)

    run["logs/training/batch/loss"].log(loss)
    run["logs/training/batch/acc"].log(acc)

    loss.backward()
    optimizer.step()

scikit-learn

import neptune.new as neptune
import neptune.new.integrations.sklearn as npt_utils
from sklearn.ensemble import GradientBoostingClassifier

run = neptune.init(project="", api_token="") # credentials

parameters = {
    "n_estimators": 120,
    "learning_rate": 0.12,
    "min_samples_split": 3,
    "min_samples_leaf": 2,
}

gbc = GradientBoostingClassifier(**parameters)
gbc.fit(X_train, y_train)

run["cls_summary"] = npt_utils.create_classifier_summary(
    gbc, X_train, X_test, y_train, y_test
)

LightGBM

import lightgbm as lgb
import neptune.new as neptune
from neptune.new.integrations.lightgbm import NeptuneCallback, create_booster_summary

run = neptune.init(project="", api_token="") # credentials
neptune_callback = NeptuneCallback(run=run)

params = {
    "boosting_type": "gbdt",
    "objective": "multiclass",
    "num_class": 10,
    "metric": ["multi_logloss", "multi_error"],
    "num_leaves": 21,
    "learning_rate": 0.05,
    "max_depth": 12,
}

# Train the model
gbm = lgb.train(
    params,
    lgb_train,
    num_boost_round=200,
    valid_sets=[lgb_train, lgb_eval],
    valid_names=["training", "validation"],
    callbacks=[neptune_callback],
)

run["lgbm_summary"] = create_booster_summary(
    booster=gbm,
    log_trees=True,
    list_trees=[0, 1, 2, 3, 4],
    log_confusion_matrix=True,
    y_pred=y_pred,
    y_true=y_test,
)

XGBoost

import xgboost as xgb
import neptune.new as neptune
from neptune.new.integrations.xgboost import NeptuneCallback

run = neptune.init(project="", api_token="") # credentials
neptune_callback = NeptuneCallback(run=run, log_tree=[0, 1, 2, 3])

params = {
    "eta": 0.7,
    "gamma": 0.001,
    "max_depth": 9,
    "objective": "reg:squarederror",
    "eval_metric": ["mae", "rmse"],
}

xgb.train(
    params=params,
    dtrain=dtrain,
    num_boost_round=num_round,
    evals=evals,
    callbacks=[neptune_callback],
)

Optuna

import optuna
import neptune.new as neptune
import neptune.new.integrations.optuna as optuna_utils

run = neptune.init(project="", api_token="") # credentials
neptune_callback = optuna_utils.NeptuneCallback(run)

study = optuna.create_study(direction="maximize")
study.optimize(objective, n_trials=20, 
               callbacks=[neptune_callback])

Kedro

console

kedro neptune init

nodes.py

def report_accuracy(predictions: np.ndarray, test_y: pd.DataFrame,
                    neptune_run: neptune.run.Handler) -> None:
    # ...
    neptune_run['nodes/report/accuracy'] = accuracy

    fig, ax = plt.subplots()
    plot_confusion_matrix(target, predictions, ax=ax)
    neptune_run['nodes/report/confusion_matrix'].upload(fig)

pipeline.py

def create_pipeline(**kwargs):
    return Pipeline(
        [# ...
            node(
                report_accuracy,
                ["example_predictions", "example_test_y","neptune_run"],
                None,
                name="report",
            ),
        ]
    )

Log from many pipeline nodes to the same run

export NEPTUNE_CUSTOM_RUN_ID="SOME RANDOM ID"

Log from multiple machines to the same run

export NEPTUNE_CUSTOM_RUN_ID="SOME RANDOM ID"
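
Both patterns rely on the same mechanism: every process started with the same NEPTUNE_CUSTOM_RUN_ID attaches to the same run instead of creating a new one. A minimal sketch of what a single worker could look like (the field names below are illustrative placeholders):

import os
import neptune.new as neptune

# Assumes the variable was exported before launch, e.g.
#   export NEPTUNE_CUSTOM_RUN_ID="SOME RANDOM ID"
# Neptune reads it from the environment, so this process joins the
# shared run rather than starting a fresh one.
assert "NEPTUNE_CUSTOM_RUN_ID" in os.environ

run = neptune.init(project="", api_token="") # credentials

# One worker can log preprocessing metadata...
run["preprocessing/duration_seconds"] = 42.1
# ...while another worker with the same ID logs training metrics.
run["train/loss"].log(0.35)

run.stop()
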
# Open finished Run "SUN-123"
run = neptune.init(project="", api_token="", # credentials
                   run="SUN-123")

# Download model
run["train/model_weights"].download()

# Continue logging
run["test/accuracy"].log(0.68)

script

run = neptune.init(project="", api_token="", #credentials 
                   mode="offline")

console

neptune sync
Organize experiments

Organize and display experiments and model metadata however you want.

Organize logs in a fully customizable nested structure. Display model metadata in user-defined dashboard templates.
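
The structure is defined by the field paths you log to: anything under a common prefix becomes a folder in the app. A minimal sketch (the namespaces and file names below are illustrative, not required by the API):

import neptune.new as neptune

run = neptune.init(project="", api_token="") # credentials

# Metrics, images, and config files can sit side by side in one structure
run["training/metrics/accuracy"].log(0.92)
run["training/visualizations/confusion_matrix"].upload("confusion_matrix.png")
run["environment/requirements"].upload("requirements.txt")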

Combine different metadata types in one view.
Define it for one run. Use it anywhere.

Look at GPU usage, memory consumption, and load times to debug training speed.
See learning curves, image predictions, and confusion matrices to debug model quality.

See live example

Create different views of the experiment table and save them for later.

You can have separate table views for debugging, for comparing parameter sets, or for showcasing the best experiments.

See live example
Compare results

Search, debug, and compare experiments, datasets, and models.

Visualize training live in a cloud interface. See how different parameters and configs affect the results. Optimize models quicker.

Search and sort experiments by any field you logged.
Use our query language to filter runs based on parameter values, metrics, execution times, or anything else.

See live example
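
The same runs table can be pulled into code as well; a sketch using the project-level API of the neptune.new client, assuming a placeholder project name and that the fields from the snippets above were logged:

import neptune.new as neptune

# Placeholder workspace/project name and credentials
project = neptune.get_project(name="my_workspace/my_project", api_token="")

# Fetch the runs table and narrow it down client-side with pandas
runs_df = project.fetch_runs_table().to_pandas()
best = runs_df.sort_values("test/f1_score", ascending=False).head(5)
print(best[["sys/id", "parameters/batch_size", "test/f1_score"]])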

Supports rendering Altair, Plotly, Bokeh, video, audio, or any fully contained HTML.

Reproduce experiments

Version datasets and experiments for easier reproducibility.

Save dataset versions, environment configs, parameters, code, metrics, and model binaries for every experiment you run.

You can version datasets from your local filesystem or any S3-compatible storage.
The version, location, size, folder structure, and contents are saved automatically.

See the docs
# Local file
run["train_dataset"].track_files("./datasets/train.csv")

# Local directory
run["train/images"].track_files("./datasets/images")

# S3 storage
run["train/eval_images"].track_files("s3://datasets/eval_images")

Log parameters one by one or as a nested dictionary. They will be displayed in a folder structure in the Neptune app.

See the docs
run["parameters/epoch_nr"] = 5
run["parameters/batch_size"] = 32

PARAMS = {
    "optimizer": "sgd",
    "model": {
        "dense": 512,
        "dropout": 0.5,
        "activation": "relu",
    },
}

run["parameters"] = PARAMS

Save .git information, environment configs, code, scripts, and notebook snapshots.

See the docs
run = neptune.init(
    project='common/showroom',
    source_files=[
        "model.py",
        "preparation.py",
        "exploration.ipynb",
        "**/*.sh",
        "config.yaml",
        "requirements.txt",
    ],
)

Log metrics, losses, learning curves, diagnostic charts, and any other metadata you need to reproduce the experiments.

See the docs
# log score
run["score"] = 0.97
run["test/acc"] = 0.97

# log learning curves
for epoch in range(100):
    ...
    run["train/accuracy"].log(acc)
    run["train/loss"].log(loss)
    run["metric"].log(metric)

# log diagnostic charts
run["test/confusion_matrix"].upload(fig_cm)
run["test/precision_recall_curve"].upload(fig_prc)

Save model binaries to the Neptune metadata store. Version them for easier comparison and debugging.

See the docs
run["model/binary"].upload("my_model.pkl")
run["model/version"].track_files("my_model.pkl")
Share results

Share and collaborate on experiment results and models across the organization.

Have a single place where your team can see the results and access all models and experiments.

Access all model metadata via the Neptune API.
Whatever you logged, you can query it in a similar way.

See the docs
run = neptune.init(
    project="", api_token="",
    run="DET-135"
)

batch_size = run['parameters/batch_size'].fetch()
losses = run['train/loss'].fetch_values()
md5 = run['dataset/train'].fetch_hash()
run["trained_model"].download("models/")

Create different projects, add users to them, and grant different permission levels.

See the pricing

Neptune is priced by usage, NOT by users.
You can invite your entire organization, including product managers and subject matter experts, and pay only for what you log via the Neptune API.

See the pricing
See all case studies

Get started

1. Create a free account
Sign up
2. Install Neptune client library
pip install neptune-client
3. Add experiment tracking snippet to your code
import neptune.new as neptune

run = neptune.init_run("Me/MyProject")
run["parameters"] = {"lr":0.1, 
                    "dropout":0.4}
run["test_accuracy"] = 0.84
Try live notebook
Get started with Neptune

Resources

Code examples, videos, projects gallery, and other resources.