The github.com/cloudstic/cli package exposes a high-level Go API that mirrors the CLI commands. Use it to embed backup, restore, and repository management directly into your Go programs.
Installation
go get github.com/cloudstic/cli
Import the root package:
import cloudstic "github.com/cloudstic/cli"
Storage backends and keychain helpers live in sub-packages:
import (
"github.com/cloudstic/cli/pkg/store"
"github.com/cloudstic/cli/pkg/keychain"
"github.com/cloudstic/cli/pkg/source"
)
Quick Start
package main
import (
"bytes"
"context"
"fmt"
"log"
cloudstic "github.com/cloudstic/cli"
"github.com/cloudstic/cli/pkg/keychain"
"github.com/cloudstic/cli/pkg/source"
"github.com/cloudstic/cli/pkg/store"
)
func main() {
ctx := context.Background()
// 1. Open a storage backend
rawStore, err := store.NewLocalStore("./my-repo")
if err != nil {
log.Fatal(err)
}
// 2. Build a keychain with your credentials
kc := keychain.Chain{keychain.WithPassword("my-passphrase")}
// 3. Initialize the repository (first time only)
_, err = cloudstic.InitRepo(ctx, rawStore,
cloudstic.WithInitCredentials(kc),
)
if err != nil {
log.Fatal(err)
}
// 4. Create a client: auto-resolves encryption via the keychain
client, err := cloudstic.NewClient(ctx, rawStore, cloudstic.WithKeychain(kc))
if err != nil {
log.Fatal(err)
}
// 5. Back up a local directory
src := source.NewLocalSource("./documents")
result, err := client.Backup(ctx, src)
if err != nil {
log.Fatal(err)
}
fmt.Printf("Snapshot: %s (%d new files)\n", result.SnapshotHash, result.FilesNew)
// 6. Restore to a ZIP archive
var buf bytes.Buffer
_, err = client.Restore(ctx, &buf, "latest")
if err != nil {
log.Fatal(err)
}
fmt.Printf("Restored %d bytes\n", buf.Len())
}
Storage Backends
All backends implement store.ObjectStore. Pass the raw store to InitRepo and NewClient.
Local
s, err := store.NewLocalStore("/path/to/repo")
Amazon S3 (and S3-compatible)
s, err := store.NewS3Store(ctx, "my-bucket",
store.WithS3Region("us-east-1"),
// For MinIO, Cloudflare R2, Wasabi, etc.:
store.WithS3Endpoint("https://minio.example.com"),
// Explicit credentials (or use the AWS SDK default credential chain):
store.WithS3Credentials(accessKeyID, secretAccessKey),
store.WithS3Prefix("backups/"),
)
Backblaze B2
s, err := store.NewB2Store("my-bucket",
store.WithCredentials(keyID, appKey),
store.WithPrefix("backups/"),
)
SFTP
s, err := store.NewSFTPStore("backup.example.com",
store.WithSFTPBasePath("/home/backup/repo"),
store.WithSFTPUser("backupuser"),
store.WithSFTPKey("~/.ssh/id_ed25519"),
store.WithSFTPPort("22"),
)
Keychain
The keychain resolves credentials to a master key. Build a keychain.Chain before calling InitRepo or NewClient.
// Password-based
kc := keychain.Chain{keychain.WithPassword("my-passphrase")}
// Platform key (raw 32-byte key)
platformKey, _ := hex.DecodeString("64-hex-chars...")
kc := keychain.Chain{keychain.WithPlatformKey(platformKey)}
// Recovery key (24-word BIP39 mnemonic)
kc := keychain.Chain{keychain.WithRecoveryKey("word1 word2 ... word24")}
// Multiple credentials (tried in order until one succeeds)
kc := keychain.Chain{
keychain.WithPassword("my-passphrase"),
keychain.WithRecoveryKey("word1 word2 ... word24"),
}
// AWS KMS
kmsClient, _ := crypto.NewAWSKMSClient(ctx, "arn:aws:kms:us-east-1:123456789:key/abc-123")
kc := keychain.Chain{keychain.WithKMSClient(kmsClient)}
Initializing a Repository
InitRepo is a package-level function that runs on the raw (unencrypted) store. It only needs to be called once per repository.
func InitRepo(ctx context.Context, rawStore store.ObjectStore, opts ...InitOption) (*InitResult, error)
Init Options
| Option | Description |
|---|---|
| WithInitCredentials(kc keychain.Chain) | Key slots to create (password, platform key, KMS) |
| WithInitRecovery() | Also generate a 24-word recovery key slot |
| WithInitNoEncryption() | Create an unencrypted repository |
| WithInitAdoptSlots() | Adopt existing slots if already initialized (prevents error on re-init) |
InitResult
type InitResult struct {
Encrypted bool // whether the repository uses encryption
RecoveryKey string // 24-word mnemonic (empty if WithInitRecovery() was not requested)
AdoptedSlots bool // true if existing slots were adopted
}
Example
result, err := cloudstic.InitRepo(ctx, rawStore,
cloudstic.WithInitCredentials(kc),
cloudstic.WithInitRecovery(),
)
if err != nil {
log.Fatal(err)
}
if result.RecoveryKey != "" {
fmt.Println("Recovery key:", result.RecoveryKey)
// Store this securely: it is displayed only once!
}
Creating a Client
func NewClient(ctx context.Context, base store.ObjectStore, opts ...ClientOption) (*Client, error)
NewClient reads the repository config, resolves the master key via the keychain, and builds the encryption/compression/packfile decorator chain internally.
Client Options
| Option | Description |
|---|---|
| WithKeychain(kc keychain.Chain) | Keychain for automatic master key resolution |
| WithEncryptionKey(key []byte) | Direct 32-byte AES key. Bypasses keychain and config detection |
| WithReporter(r Reporter) | Progress reporter for UI feedback |
| WithPackfile(enable bool) | Bundle small objects into 8MB packs (default: true) |
client, err := cloudstic.NewClient(ctx, rawStore,
cloudstic.WithKeychain(kc),
cloudstic.WithReporter(myReporter),
cloudstic.WithPackfile(true),
)
WithEncryptionKey is intended for SaaS scenarios where the master key is resolved externally. For typical use, prefer WithKeychain.
Backup
func (c *Client) Backup(ctx context.Context, src source.Source, opts ...BackupOption) (*BackupResult, error)
Creates a new backup snapshot from the given source.
Backup Options
| Option | Description |
|---|---|
| WithVerbose() | Log per-file operations |
| WithBackupDryRun() | Scan without writing to the repository |
| WithTags(tags ...string) | Apply tags to the snapshot |
| WithExcludeHash(hash string) | Record the exclude pattern fingerprint in the snapshot |
BackupResult
type BackupResult struct {
SnapshotRef string // "snapshot/<hash>"
SnapshotHash string // bare content hash
Root string // HAMT root ref
FilesNew int
FilesChanged int
FilesUnmodified int
FilesRemoved int
DirsNew int
DirsChanged int
DirsUnmodified int
DirsRemoved int
BytesAddedRaw int64 // uncompressed bytes written
BytesAddedStored int64 // bytes written to the store (post-compression and encryption)
Duration time.Duration
DryRun bool
}
Example
src := source.NewLocalSource("./documents",
source.WithLocalExcludePatterns([]string{"*.log", "node_modules/"}),
)
result, err := client.Backup(ctx, src,
cloudstic.WithTags("production", "weekly"),
)
if err != nil {
log.Fatal(err)
}
fmt.Printf("Snapshot %s: %d new, %d changed files\n",
result.SnapshotHash, result.FilesNew, result.FilesChanged)
Sources
Sources implement source.Source. Available implementations:
// Local filesystem
src := source.NewLocalSource("/path/to/dir",
source.WithLocalExcludePatterns([]string{"*.tmp"}),
)
// SFTP remote directory
src, err := source.NewSFTPSource("backup.example.com",
source.WithSFTPSourceBasePath("/remote/path"),
source.WithSFTPSourceUser("user"),
source.WithSFTPSourceKey("~/.ssh/id_rsa"),
)
// Google Drive (full scan)
src, err := source.NewGDriveSource(ctx,
source.WithTokenPath("/path/to/google_token.json"),
source.WithDriveID("sharedDriveID"), // omit for My Drive
source.WithRootFolderID("folderID"), // omit for entire drive
)
// Google Drive (incremental via the Changes API; recommended)
src, err := source.NewGDriveChangeSource(ctx,
source.WithTokenPath("/path/to/google_token.json"),
)
// OneDrive (full scan)
src, err := source.NewOneDriveSource(ctx,
source.WithOneDriveTokenPath("/path/to/onedrive_token.json"),
)
// OneDrive (incremental via the Delta API; recommended)
src, err := source.NewOneDriveChangeSource(ctx,
source.WithOneDriveTokenPath("/path/to/onedrive_token.json"),
)
Restore
func (c *Client) Restore(ctx context.Context, w io.Writer, snapshotRef string, opts ...RestoreOption) (*RestoreResult, error)
Writes the snapshot’s file tree as a ZIP archive to w. Pass io.Discard for a dry run.
snapshotRef accepts "", "latest", a bare hash, or "snapshot/<hash>".
Restore Options
| Option | Description |
|---|---|
| WithRestoreDryRun() | Count files/bytes without writing ZIP data |
| WithRestoreVerbose() | Log per-file operations |
| WithRestorePath(path string) | Restore only the given file or subtree (e.g. "Documents/") |
RestoreResult
type RestoreResult struct {
SnapshotRef string
Root string
FilesWritten int
DirsWritten int
BytesWritten int64
Errors int // number of non-fatal errors
DryRun bool
}
Example
f, err := os.Create("restore.zip")
if err != nil {
log.Fatal(err)
}
defer f.Close()
result, err := client.Restore(ctx, f, "latest",
cloudstic.WithRestorePath("Documents/"),
)
if err != nil {
os.Remove("restore.zip")
log.Fatal(err)
}
fmt.Printf("Restored %d files (%d bytes)\n", result.FilesWritten, result.BytesWritten)
List
func (c *Client) List(ctx context.Context, opts ...ListOption) (*ListResult, error)
Lists all snapshots in the repository. Returns them sorted oldest-first.
ListResult
type ListResult struct {
Snapshots []SnapshotEntry
}
type SnapshotEntry struct {
Ref string // "snapshot/<hash>"
Seq int // incremental sequence number
Created time.Time
Source string // source type (local, gdrive, onedrive, sftp)
Account string // cloud account identifier (email for Drive/OneDrive)
Path string // source path for local/SFTP
Tags []string
}
Example
result, err := client.List(ctx)
if err != nil {
log.Fatal(err)
}
for _, snap := range result.Snapshots {
fmt.Printf("[%d] %s %-10s %s\n",
snap.Seq, snap.Created.Format(time.DateTime), snap.Source, snap.Ref)
}
LsSnapshot
func (c *Client) LsSnapshot(ctx context.Context, snapshotID string, opts ...LsSnapshotOption) (*LsSnapshotResult, error)
Loads all file metadata from a snapshot and returns the full directory tree. Accepts "latest", a bare hash, or "snapshot/<hash>".
LsSnapshotResult
type LsSnapshotResult struct {
Ref string
Snapshot core.Snapshot
RootRefs []string // top-level entry refs
RefToMeta map[string]core.FileMeta // ref → file metadata (name, size, type)
ChildRefs map[string][]string // parent ref → ordered child refs
}
Diff
func (c *Client) Diff(ctx context.Context, snap1, snap2 string, opts ...DiffOption) (*DiffResult, error)
Compares two snapshots and returns the set of added, modified, and removed files. Use "latest" as either argument.
DiffResult
type DiffResult struct {
Ref1 string
Ref2 string
Changes []Change
}
// Change.Type is one of: "added", "modified", "removed", "unchanged"
type Change struct {
Type string
Path string
}
Example
result, err := client.Diff(ctx, "abc123", "latest")
if err != nil {
log.Fatal(err)
}
for _, c := range result.Changes {
if c.Type != "unchanged" {
fmt.Printf("%s %s\n", c.Type, c.Path)
}
}
Prune
func (c *Client) Prune(ctx context.Context, opts ...PruneOption) (*PruneResult, error)
Removes unreachable objects (mark-and-sweep garbage collection). Run after Forget to reclaim storage.
Prune Options
| Option | Description |
|---|---|
| WithPruneDryRun() | Count deletions without removing anything |
| WithPruneVerbose() | Log each deleted object |
PruneResult
type PruneResult struct {
ObjectsScanned int
ObjectsDeleted int
BytesReclaimed int64
DryRun bool
}
Forget
Remove a specific snapshot
func (c *Client) Forget(ctx context.Context, snapshotID string, opts ...ForgetOption) (*ForgetResult, error)
Apply a retention policy
func (c *Client) ForgetPolicy(ctx context.Context, opts ...ForgetOption) (*PolicyResult, error)
Forget Options
| Option | Description |
|---|---|
| WithPrune() | Run prune after forgetting |
| WithDryRun() | Show what would be removed without deleting |
| WithForgetVerbose() | Verbose logging |
| WithKeepLast(n int) | Keep the N most recent snapshots |
| WithKeepHourly(n int) | Keep one snapshot per hour for the last N hours |
| WithKeepDaily(n int) | Keep one snapshot per day for the last N days |
| WithKeepWeekly(n int) | Keep one snapshot per week for the last N weeks |
| WithKeepMonthly(n int) | Keep one snapshot per month for the last N months |
| WithKeepYearly(n int) | Keep one snapshot per year for the last N years |
| WithFilterTag(tag string) | Only consider snapshots with this tag (repeatable) |
| WithFilterSource(src string) | Only consider snapshots from this source type |
| WithFilterAccount(acct string) | Only consider snapshots from this account |
| WithFilterPath(path string) | Only consider snapshots from this path |
| WithGroupBy(fields string) | Comma-separated grouping fields (default: "source,account,path") |
Example
// Retention policy: keep last 10, 7 daily, 4 weekly, 12 monthly; then prune
result, err := client.ForgetPolicy(ctx,
cloudstic.WithKeepLast(10),
cloudstic.WithKeepDaily(7),
cloudstic.WithKeepWeekly(4),
cloudstic.WithKeepMonthly(12),
cloudstic.WithPrune(),
)
if err != nil {
log.Fatal(err)
}
for _, group := range result.Groups {
fmt.Printf("Group %s: keep %d, remove %d\n",
group.Key, len(group.Keep), len(group.Remove))
}
Check
func (c *Client) Check(ctx context.Context, opts ...CheckOption) (*CheckResult, error)
Verifies repository integrity by walking the full reference chain (snapshots → HAMT nodes → filemeta → content → chunks).
Check Options
| Option | Description |
|---|
WithReadData() | Re-hash all chunk data for byte-level verification |
WithCheckVerbose() | Log each object as it is verified |
WithSnapshotRef(ref string) | Check only the specified snapshot (default: all) |
CheckResult
type CheckResult struct {
SnapshotsChecked int
ObjectsVerified int
Errors []CheckError
}
type CheckError struct {
Type string // "missing", "corrupt", "unreadable"
Key string // object key
Message string
}
Example
result, err := client.Check(ctx, cloudstic.WithReadData())
if err != nil {
log.Fatal(err)
}
if len(result.Errors) > 0 {
for _, e := range result.Errors {
fmt.Printf("[%s] %s: %s\n", e.Type, e.Key, e.Message)
}
}
BreakLock
func (c *Client) BreakLock(ctx context.Context) ([]*RepoLock, error)
Removes stale repository lock files. Returns the list of removed locks (empty slice if none found).
removed, err := client.BreakLock(ctx)
if err != nil {
log.Fatal(err)
}
for _, lock := range removed {
fmt.Printf("Removed %s lock (held by %s)\n", lock.Operation, lock.Holder)
}
Cat
func (c *Client) Cat(ctx context.Context, keys ...string) ([]*CatResult, error)
Fetches the raw (decrypted, decompressed) data for one or more object keys. Useful for debugging and inspection.
CatResult
type CatResult struct {
Key string // the object key requested
Data []byte // raw object data (typically JSON)
}
Object Key Namespaces
| Key | Description |
|---|---|
| config | Repository configuration marker |
| index/latest | Latest snapshot pointer |
| index/snapshots | Snapshot catalog |
| snapshot/&lt;hash&gt; | Snapshot manifest |
| node/&lt;hash&gt; | HAMT tree node |
| filemeta/&lt;hash&gt; | File metadata object |
| content/&lt;hash&gt; | Content manifest (list of chunk refs) |
| chunk/&lt;hash&gt; | Raw data chunk |
| keys/&lt;slot&gt; | Encryption key slot |
| lock/&lt;id&gt; | Repository lock file |
Example
results, err := client.Cat(ctx, "config", "index/latest")
if err != nil {
log.Fatal(err)
}
for _, r := range results {
fmt.Printf("=== %s ===\n%s\n\n", r.Key, r.Data)
}
Key Management
These package-level functions operate on the raw (unencrypted) store.
List key slots
func ListKeySlots(ctx context.Context, rawStore store.ObjectStore) ([]KeySlot, error)
Returns metadata for all key slots. Does not require authentication. Slot metadata is stored unencrypted.
slots, err := cloudstic.ListKeySlots(ctx, rawStore)
for _, s := range slots {
fmt.Printf("Type: %-12s Label: %s\n", s.SlotType, s.Label)
}
Change password
func ChangePassword(ctx context.Context, rawStore store.ObjectStore, kc keychain.Chain, pwd PasswordProvider) error
Replaces the password key slot. kc must unlock the current master key. pwd supplies the new password.
Use PasswordString for a known value at call time:
err := cloudstic.ChangePassword(ctx, rawStore, kc,
cloudstic.PasswordString("new-passphrase"),
)
Use PasswordProviderFunc when the new password must be obtained lazily (e.g. interactive prompt):
err := cloudstic.ChangePassword(ctx, rawStore, kc,
cloudstic.PasswordProviderFunc(func(ctx context.Context) (string, error) {
return promptUser("New password: ")
}),
)
Add recovery key
func AddRecoveryKey(ctx context.Context, rawStore store.ObjectStore, kc keychain.Chain) (string, error)
Generates a BIP39 recovery key slot and returns the 24-word mnemonic.
mnemonic, err := cloudstic.AddRecoveryKey(ctx, rawStore, kc)
if err != nil {
log.Fatal(err)
}
fmt.Println("Recovery key:", mnemonic)
// Store this securely: it is returned only once!
Progress Reporting
cloudstic.Reporter receives progress events during long-running operations:
type Reporter interface {
StartPhase(name string, total int64) Phase
}
type Phase interface {
Increment(n int64)
Done()
}
Pass your reporter to NewClient:
client, err := cloudstic.NewClient(ctx, rawStore,
cloudstic.WithKeychain(kc),
cloudstic.WithReporter(myReporter),
)
If WithReporter is omitted, all progress output is suppressed (no-op default).
Complete Example: Automated Backup
package main
import (
"context"
"encoding/hex"
"fmt"
"log"
"os"
cloudstic "github.com/cloudstic/cli"
"github.com/cloudstic/cli/pkg/keychain"
"github.com/cloudstic/cli/pkg/source"
"github.com/cloudstic/cli/pkg/store"
)
func main() {
ctx := context.Background()
// Load platform key from environment
encKeyHex := os.Getenv("CLOUDSTIC_ENCRYPTION_KEY")
encKey, err := hex.DecodeString(encKeyHex)
if err != nil {
log.Fatalf("invalid CLOUDSTIC_ENCRYPTION_KEY: %v", err)
}
// Open S3 backend
rawStore, err := store.NewS3Store(ctx, "my-backup-bucket",
store.WithS3Region("us-east-1"),
)
if err != nil {
log.Fatal(err)
}
// Build keychain
kc := keychain.Chain{keychain.WithPlatformKey(encKey)}
// Initialize if not already done (adopt-slots makes it idempotent)
if _, err := cloudstic.InitRepo(ctx, rawStore,
cloudstic.WithInitCredentials(kc),
cloudstic.WithInitAdoptSlots(),
); err != nil {
log.Fatal(err)
}
// Create client
client, err := cloudstic.NewClient(ctx, rawStore, cloudstic.WithKeychain(kc))
if err != nil {
log.Fatal(err)
}
// Backup
src := source.NewLocalSource("/data",
source.WithLocalExcludePatterns([]string{"*.tmp", ".cache/"}),
)
result, err := client.Backup(ctx, src, cloudstic.WithTags("automated"))
if err != nil {
log.Fatalf("backup failed: %v", err)
}
fmt.Printf("Backup complete: %s (%d new, %d changed files)\n",
result.SnapshotHash, result.FilesNew, result.FilesChanged)
// Apply retention policy and prune
_, err = client.ForgetPolicy(ctx,
cloudstic.WithKeepLast(7),
cloudstic.WithKeepWeekly(4),
cloudstic.WithKeepMonthly(12),
cloudstic.WithPrune(),
)
if err != nil {
log.Fatalf("retention policy failed: %v", err)
}
}
See Also