Zeish Developer Reference
Zeish is the Zig WASM App Framework. One package covers two runtime modes:
- Native - standalone HTTP servers via zeish.App (shell apps, BFF proxies, dev servers).
- WASM - services that load into Planck via zeish.WasmApp, with ZSX template integration and the host bridge for response delivery and SSE publishing.
Routing, request/response, middleware, sessions, SSE, schema validation, and configuration APIs are identical across both modes, only the entry point and I/O substrate differ. Concrete provider implementations (Google OAuth, Stripe payments, SendGrid notifications) live under zeish.auth, zeish.pay, and zeish.notify and stay out of your binary unless you import them.
Import: const zeish = @import("zeish");
Quick Start
Native app
const std = @import("std");
const zeish = @import("zeish");
pub fn main() !void {
const allocator = std.heap.smp_allocator;
var threaded: std.Io.Threaded = .init(allocator, .{ .async_limit = .unlimited });
defer threaded.deinit();
const io = threaded.io();
var app = try zeish.App.init(allocator, .{
.port = 3000,
.static_dir = "public",
});
defer app.deinit();
var handler = MyHandler{};
try app.route(MyHandler, null, null, .get, "/hello", &handler, null);
try app.run(io);
}
const MyHandler = struct {
pub fn handle(self: *MyHandler, allocator: std.mem.Allocator, request: *anyopaque) ![]const u8 {
_ = self;
_ = request;
return try allocator.dupe(u8, "<h1>Hello World</h1>");
}
};

WASM service
const std = @import("std");
const zeish = @import("zeish");
const planck = @import("planck");
extern fn host_respond(ptr: [*]const u8, len: u32) void;
var app: zeish.WasmApp = undefined;
var client: planck.Client = undefined;
export fn init() i32 {
const allocator = std.heap.wasm_allocator;
client = planck.Client.init(allocator, 8 * 1024) catch return -1;
app = zeish.WasmApp.init(allocator, .{}) catch return -1;
var handler = ListHandler{ .client = &client };
app.route(ListHandler, null, null, .get, "/items", &handler, null) catch return -1;
app.onResponse(struct {
fn hook(_: *const zeish.WasmRequest, res: *zeish.WasmResponse, buf: []u8) void {
const bytes = res.toBytes(buf) catch return;
host_respond(bytes.ptr, @intCast(bytes.len));
}
}.hook);
return 0;
}
export fn process(p: [*]const u8, l: u32) i32 {
app.process(p, l) catch return -1;
return 0;
}

The WASM service exports init (called once when Planck loads the module) and process (called per request). Routing and handler signatures match the native API; only the entry shape and the response delivery (host_respond instead of a socket) differ.
Dependency Wiring
Before you can @import("zeish") anywhere in your code, Zig needs to know which *std.Build.Module to give you. The naive way is b.dependency("zeish", .{}).module("zeish"), that asks the dep's own build.zig for its module, and on deep transitive graphs the same logical package ends up as multiple module instances (you'll see the symptom as bson vs bson0, utils vs utils0, and types from those modules stop matching across boundaries).
The durable pattern: use b.dependency("name", .{}).path("src/root.zig") to get a builder-resolved path into the dep's tree, then call b.createModule(...) yourself. One module instance per logical dep, wired explicitly. A small Deps struct + wireDeps() helper at the bottom of build.zig keeps every artifact (exe, tests, release builds) sharing the same instances.
Two files are involved:
- build.zig.zon - declares dependencies; Zig fetches and caches them.
- build.zig - defines Deps, calls wireDeps() once, attaches deps.<name> to each artifact.
You never reference Zig's internal package cache directly - b.dependency().path(...) resolves to whatever location Zig picked.
build.zig.zon
Standard Zig zon. URL+hash entries for published packages; .path for monorepo siblings.
.{
.name = .myapp,
.version = "0.1.0",
.fingerprint = 0x...,
.minimum_zig_version = "0.16.0",
.dependencies = .{
.zeish = .{
.url = "https://github.com/planckapps/zeish/-/archive/v0.1.0/zeish-v0.1.0.tar.gz",
.hash = "zeish-0.1.0-...",
},
.bson = .{
.url = "https://github.com/planckapps/bson/-/archive/v0.1.0/bson-v0.1.0.tar.gz",
.hash = "bson-0.1.0-...",
},
.utils = .{ .url = "...", .hash = "utils-0.1.0-..." },
.tls = .{ .url = "...", .hash = "tls-0.1.0-..." },
.proto = .{ .url = "...", .hash = "proto-0.1.0-..." },
.planck_zig_client = .{ .url = "...", .hash = "planck_zig_client-0.3.0-..." },
},
.paths = .{ "build.zig", "build.zig.zon", "src" },
}

Zig walks transitive deps automatically: if zeish's own zon pulls bson, bson is fetched too, no matter how many layers deep it lives.
build.zig
Three responsibilities, all in one place: declare the Deps struct, construct one module per dep via b.createModule(.{ .root_source_file = dep.path("src/root.zig") }) with addImport(...) wiring, then attach the resulting *Module pointers to every artifact.
const std = @import("std");
pub fn build(b: *std.Build) void {
const target = b.standardTargetOptions(.{});
const optimize = b.standardOptimizeOption(.{});
const deps = wireDeps(b, target, optimize);
// ── Native app ──
const exe_mod = b.createModule(.{
.root_source_file = b.path("src/main.zig"),
.target = target,
.optimize = optimize,
});
exe_mod.addImport("zeish", deps.zeish);
exe_mod.addImport("bson", deps.bson);
const exe = b.addExecutable(.{ .name = "app", .root_module = exe_mod });
b.installArtifact(exe);
// Tests - share the SAME deps struct
const test_mod = b.createModule(.{
.root_source_file = b.path("src/root.zig"),
.target = target,
.optimize = optimize,
});
test_mod.addImport("zeish", deps.zeish);
test_mod.addImport("bson", deps.bson);
const tests = b.addTest(.{ .root_module = test_mod });
b.step("test", "Run tests").dependOn(&b.addRunArtifact(tests).step);
}
// Shared single-instance dep graph
// One `*Module` per logical dep, wired manually. Every artifact gets the
// same instances, so type identity holds across module boundaries.
const Deps = struct {
bson: *std.Build.Module,
utils: *std.Build.Module,
tls: *std.Build.Module,
proto: *std.Build.Module,
planck_zig_client: *std.Build.Module,
zeish: *std.Build.Module,
};
fn wireDeps(b: *std.Build, target: anytype, optimize: anytype) Deps {
// Leaves: no deps
const bson_dep = b.dependency("bson", .{});
const bson = b.createModule(.{
.root_source_file = bson_dep.path("src/root.zig"),
.target = target,
.optimize = optimize,
});
const utils_dep = b.dependency("utils", .{});
const utils = b.createModule(.{
.root_source_file = utils_dep.path("src/root.zig"),
.target = target,
.optimize = optimize,
});
const tls_dep = b.dependency("tls", .{});
const tls = b.createModule(.{
.root_source_file = tls_dep.path("src/root.zig"),
.target = target,
.optimize = optimize,
});
// proto imports utils
const proto_dep = b.dependency("proto", .{});
const proto = b.createModule(.{
.root_source_file = proto_dep.path("src/root.zig"),
.target = target,
.optimize = optimize,
});
proto.addImport("utils", utils);
// planck_zig_client imports tls, bson, utils, proto
const planck_dep = b.dependency("planck_zig_client", .{});
const planck_zig_client = b.createModule(.{
.root_source_file = planck_dep.path("src/root.zig"),
.target = target,
.optimize = optimize,
});
planck_zig_client.addImport("tls", tls);
planck_zig_client.addImport("bson", bson);
planck_zig_client.addImport("utils", utils);
planck_zig_client.addImport("proto", proto);
// zeish pulls everything below it
const zeish_dep = b.dependency("zeish", .{});
const zeish = b.createModule(.{
.root_source_file = zeish_dep.path("src/root.zig"),
.target = target,
.optimize = optimize,
});
zeish.addImport("bson", bson);
zeish.addImport("utils", utils);
zeish.addImport("tls", tls);
zeish.addImport("proto", proto);
zeish.addImport("planck_zig_client", planck_zig_client);
return .{
.bson = bson,
.utils = utils,
.tls = tls,
.proto = proto,
.planck_zig_client = planck_zig_client,
.zeish = zeish,
};
}

Any artifact that needs a dep grabs it via deps.<name>. The tests share the same deps.zeish that the exe uses — single module instance, type identity preserved.
Rules:
- Use dep.path("src/root.zig"), not dep.module(...). Calling .module() returns a module the dep's own build.zig constructed; that's where duplication enters. Calling .path() just gives you a builder-resolved file path; you create the module yourself, exactly once.
- Topological order. Declare each module AFTER every module it imports. Zig won't complain if you get this wrong, but you'll get a compile error later when addImport("foo", foo) references a foo that isn't in scope yet.
- One createModule per logical dep. Never create the same dep twice (e.g., one call for the app, one for tests); you'll reintroduce the duplication you're trying to avoid. Share the binding across every consumer.
- addImport(name, mod) - name is the string consumers use in @import(name). Conventionally it matches the variable name, but it doesn't have to. If a package is published under planck_zig_client but downstream code wants to @import("planck"), call addImport("planck", planck_zig_client).
- Path deps (.path = "../foo") work the same way. b.dependency("foo", .{}).path("src/root.zig") resolves correctly whether foo is a URL+hash dep or a path dep; Zig handles both.
- Call wireDeps(b, target, optimize) once per build configuration. Store the result in a local const deps = ... and pass it around. For multi-target builds (e.g. cross-compile release loop), call it once per target; each target needs its own module graph, but every artifact within one target should share one Deps.
Adding a new dependency
1. zig fetch --save=<name> <url> - appends to build.zig.zon with the correct hash.
2. Edit build.zig:
   - Add a <name>: *std.Build.Module, field to Deps.
   - Add a block in wireDeps: b.dependency("<name>", .{}) → b.createModule(.{ .root_source_file = <name>_dep.path("src/root.zig"), ... }).
   - Add addImport calls for each name the dep's own build.zig.zon declares (look at that dep's zon to see what it needs).
   - Add the field to the return .{...} struct.
3. Attach deps.<name> to whichever artifacts need it inside build().
Updating a dependency
1. Update the url + hash in build.zig.zon (or zig fetch --save=<name> <new-url>).
2. zig build - Zig re-fetches and rebuilds. No hash strings in build.zig to chase.
Removing a dependency
1. Drop the entry from build.zig.zon.
2. Drop the b.dependency + createModule block, the Deps struct field, and the return entry from wireDeps.
3. Remove addImport(..., deps.<name>) calls from build().
Troubleshooting
error: dependency '<name>' not found The name in b.dependency("<name>", .{}) must match a key in build.zig.zon .dependencies exactly. Check spelling.
@import("foo") fails to resolve inside a dep's source The dep's own transitive addImport is missing. Look at that dep's build.zig.zon, every name in its .dependencies needs a matching addImport call in wireDeps.
Duplicated module disambiguation (bson vs bson0, utils0, etc.) Somewhere b.dependency("bson", .{}).module("bson") is being called, search for any .module( calls in build.zig and replace them with the dep.path(...) + b.createModule(...) pattern. Mixing the two models is what reintroduces the drift.
Hash mismatch on first run The hash in build.zig.zon doesn't match the tarball contents. Re-run zig fetch --save=<name> <url> to get the correct hash and replace.
A single dependency is referenced from multiple places (tests + exe + dev server) Call wireDeps(b, target, optimize) once at the top of build() for each target and pass the resulting deps struct to every artifact. Every consumer must get the same *Module pointer.
Server Configuration
Server settings should come from your app's config file, not hardcoded in source. See Configuration for the full setup.
const config = try zeish.Config.load(AppConfig, allocator, io, .{
.paths = &.{"config.yaml"},
});
var app = try zeish.App.init(allocator, config.server);

Routing
Mediator-Dispatched Routes
The framework deserializes request params/body and dispatches to your handler struct:
const ItemParams = struct {
id: ?[]const u8 = null,
category: ?[]const u8 = null,
};
const CreateItemBody = struct {
name: []const u8,
price: f64,
};
const ListHandler = struct {
pub fn handle(self: *ListHandler, allocator: Allocator, request: *anyopaque) ![]const u8 {
const params: *ItemParams = @ptrCast(@alignCast(request));
// params.id, params.category populated from query string / path
return try allocator.dupe(u8, "<h1>Items</h1>");
}
};
const CreateHandler = struct {
pub fn handle(self: *CreateHandler, allocator: Allocator, request: *anyopaque) ![]const u8 {
const body: *CreateItemBody = @ptrCast(@alignCast(request));
// body.name, body.price populated from JSON or form body
return try std.fmt.allocPrint(allocator, "Created: {s}", .{body.name});
}
};
// Register routes
// Handler Params Body method path handler event
try app.route(ListHandler, ItemParams, null, .get, "/items", &list_h, null);
try app.route(CreateHandler, null, CreateItemBody, .post, "/items", &create_h, "items-updated");
try app.route(GetHandler, ItemParams, null, .get, "/items/:id", &get_h, null);

Path parameters (:id) and query strings (?category=food) are bound to matching struct fields by name.
Route Options
try app.routeOpts(Handler, Params, null, .get, "/stream", &handler, .{
.content_type = "text/event-stream",
.event = "data-updated", // Auto-publish SSE event after handler
.topic = "my-topic", // SSE topic (default: "default")
});

Raw Routes
Full access to Request/Response, for webhooks, multipart uploads, custom headers:
fn webhookHandler(ctx: ?*anyopaque, allocator: Allocator, req: *const zeish.Request, res: *zeish.Response) !void {
const signature = req.getHeader("X-Signature") orelse {
res.status = .bad_request;
try res.write("Missing signature");
return;
};
res.status = .ok;
try res.setHeader("Content-Type", "application/json");
try res.write("{\"received\":true}");
}
try app.routeRaw(.post, "/webhook", webhookHandler, &my_context);

Proxy Routes
Reverse proxy to upstream services:
try app.proxy(io, .get, "/api/products", "http://127.0.0.1:3003/products");
try app.proxy(io, .get, "/api/products/:id", "http://127.0.0.1:3003/products");
try app.proxy(io, .get, "/api/orders", "http://127.0.0.1:3008/api/orders");

Path rewriting: the static prefix of the pattern is stripped and the remainder (including :id substitutions) is appended to the target URL.
GET /api/products/5 → GET http://127.0.0.1:3003/products/5
GET /api/orders → GET http://127.0.0.1:3008/api/orders

Query strings are forwarded automatically.
Request
pub const Request = struct {
method: Method,
path: []const u8,
query_string: ?[]const u8,
body: []const u8,
headers: ArrayList(Header),
io: ?std.Io, // Fiber-local I/O handle
request_id: []const u8, // Set by RequestIdMiddleware
keep_alive: bool,
};

Methods:
| Method | Signature | Purpose |
|---|---|---|
| getHeader | (name: []const u8) -> ?[]const u8 | Case-insensitive header lookup |
| getParams | (T: type) -> T | Deserialize query string into struct |
| getBody | (allocator, T: type) -> T | Parse body (JSON or form-encoded) |
| contentLength | () -> ?usize | Parse Content-Length header |
Response
pub const Response = struct {
status: Status,
headers: ArrayList(Header),
body: ArrayList(u8),
};

Methods:
| Method | Purpose |
|---|---|
| setHeader(name, value) | Set header (replaces existing) |
| write(data) | Append to body |
| html(data) | Set Content-Type: text/html + write |
| json(data) | Set Content-Type: application/json + write |
Middleware
Middleware runs before (and optionally after) every request.
Writing Middleware
const MyMiddleware = struct {
pub fn execute(self: *MyMiddleware, allocator: Allocator, req: *const Request, res: *Response) !Middleware.Action {
// Pre-routing logic
if (req.getHeader("X-Block") != null) {
res.status = .forbidden;
try res.write("Blocked");
return .stop; // Short-circuit - don't call handler
}
return .next; // Continue to next middleware / handler
}
// Optional: runs after handler completes
pub fn after(self: *MyMiddleware, allocator: Allocator, req: *const Request, res: *Response, matched: ?*const MatchedRoute) !void {
// Post-routing logic (logging, metrics, etc.)
}
pub fn middleware(self: *MyMiddleware) zeish.Middleware {
return zeish.Middleware.from(MyMiddleware, self);
}
};
var mw = MyMiddleware{};
try app.use(mw.middleware());

Built-in Middleware
// CORS
var cors = zeish.CorsMiddleware.init(.{
.allow_origin = "*",
.allow_methods = "GET, POST, PUT, DELETE, OPTIONS",
.allow_headers = "Content-Type, Authorization",
});
try app.use(cors.middleware());
// Rate Limiting
var limiter = zeish.RateLimiter.init(allocator, io, .{
.requests_per_minute = 100,
});
var rate_mw = zeish.RateLimitMiddleware.init(io, &limiter);
try app.use(rate_mw.middleware());
// Request ID
var req_id = zeish.RequestIdMiddleware.init(io);
try app.use(req_id.middleware());
// CSRF
var csrf = zeish.CsrfMiddleware.init(io);
try app.use(csrf.middleware());

Sessions
Generic session store parameterized on your app's payload type.
Define Session Data
const MySession = struct {
user_id: u64,
role: []const u8,
email: []const u8,
// Required: deep-copy string fields into the store's allocator
pub fn dupe(self: MySession, allocator: Allocator) !MySession {
return .{
.user_id = self.user_id,
.role = try allocator.dupe(u8, self.role),
.email = try allocator.dupe(u8, self.email),
};
}
// Required: free allocator-owned strings
pub fn deinit(self: *MySession, allocator: Allocator) void {
allocator.free(self.role);
allocator.free(self.email);
}
};

Use Session Store
const MySessionStore = zeish.SessionStore(MySession);
var store = MySessionStore.init(allocator, io, .{
.max_entries = 100_000,
.default_ttl_ms = 7 * 24 * 3600 * 1000, // 7 days
});
store.start(); // Start background prune fiber
defer store.deinit();
// Create
const token = try store.create(io, .{
.user_id = 42,
.role = "admin",
.email = "alice@example.com",
});
// Read
if (store.get(io, token)) |session| {
// session.user_id, session.role, session.email
}
// Delete
store.destroy(io, token);
// Rotate (new token, same data)
const new_token = try store.rotate(io, old_token);

Read Session Cookie
const token = zeish.readSessionCookie(req, "my_session") orelse {
res.status = .unauthorized;
return;
};

Server-Sent Events (SSE)
Setup
var bus = zeish.EventBus.init(allocator, io, .{
.heartbeat_interval_ms = 15_000,
.max_subscribers_total = 4000,
.retry_ms = 3000,
.subscriber_queue_size = 8192, // per-subscriber event buffer
});
defer bus.deinit();
try bus.registerTopic("orders", .{
.max_subscribers = 1000,
.replay_buffer_size = 100,
});
app.setEventBus(&bus);
try app.sse("/orders/events", "orders");
try bus.start(); // Spawns heartbeat fiber

Sizing subscriber_queue_size
Each SSE subscriber gets a bounded queue between the publisher fan-out path and the writer fiber that drains to its socket. When the queue fills (publisher faster than socket writer), events are dropped per the topic's drop_strategy.
The framework's internal stress harness lives at zeish/examples/sse_stress.zig + examples/run_stress_local.sh — run it to verify sizing against your workload.
Publish Events
const event_id = try bus.publish("orders", .{
.event = "order-created",
.data = "{\"order_id\":1}",
});

Or auto-publish from route options:
try app.route(Handler, null, Body, .post, "/orders", &h, "order-created");
// Handler response is published as SSE data after the handler returns

Cross-Process Bridge
Subscribe to another service's SSE endpoint and republish locally:
var bridge = zeish.sse.SseClientBridge.init(allocator, .{
.upstream_url = "http://127.0.0.1:3008/events",
.local_topic = "orders",
});
try bus.attachBridge(bridge.interface());

Browser Client
const es = new EventSource("/orders/events");
es.addEventListener("order-created", function(ev) {
const data = JSON.parse(ev.data);
console.log("New order:", data.order_id);
});

Reconnection: the browser sends Last-Event-ID on reconnect; the EventBus replays missed events from the ring buffer.
HTTP Client
Outbound HTTP/HTTPS requests for service-to-service calls.
// Simple request
var resp = try zeish.Client.request(allocator, io, .{
.method = "POST",
.url = "http://127.0.0.1:3008/api/orders",
.headers = &.{.{ "Content-Type", "application/json" }},
.body = "{\"item\":\"pizza\"}",
});
defer resp.deinit();
if (resp.status == 200) {
// resp.body contains the response
}
// With timeout
var resp = try zeish.Client.requestTimed(allocator, io, .{
.url = "https://api.stripe.com/v1/payment_intents",
.timeout_ms = 5000,
});
// With retry + exponential backoff
var resp = try zeish.Client.requestWithRetry(allocator, io,
.{ .url = "http://flaky-service/api" },
.{ .max_attempts = 3, .retry_on_5xx = true },
);

Fiber-Local I/O
In mediator-dispatched handlers, use zeish.currentIo() instead of a captured main-thread io:
pub fn handle(self: *Handler, allocator: Allocator, request: *anyopaque) ![]const u8 {
const io = zeish.currentIo() orelse self.fallback_io;
var resp = try zeish.Client.request(allocator, io, .{ .url = "..." });
// ...
}

In raw handlers, use req.io:
fn myRawHandler(ctx: ?*anyopaque, allocator: Allocator, req: *const Request, res: *Response) !void {
const io = req.io orelse fallback_io;
var resp = try zeish.Client.request(allocator, io, .{ .url = "..." });
}

Schema Validation
Comptime schema definitions with runtime validation:
const zeish = @import("zeish");
const UserSchema = zeish.Schema(&.{
.{ "name", .{ .field_type = .string, .required = true, .min_length = 1, .max_length = 100 } },
.{ "email", .{ .field_type = .string, .required = true } },
.{ "age", .{ .field_type = .int32, .min = 0, .max = 150 } },
.{ "role", .{ .field_type = .string, .enum_values = &.{ "admin", "user", "guest" } } },
});

Field types: string, int, int32, double, float, boolean, date, uuid, timestamp, object_id, array, object, binary, decimal128
Validation rules: required, min, max, min_length, max_length, enum_values
CSRF State Store
For OAuth callback CSRF protection:
var state_store = zeish.StateStore.init(allocator);
defer state_store.deinit();
// Generate token (store it, return to browser via redirect URL)
const state = state_store.generate(io);
// Validate on callback (single-use, auto-consumed)
if (state_store.validateAndConsume(io, state_param)) {
// Valid - proceed with OAuth code exchange
} else {
// Invalid or expired - reject
}

Default TTL: 5 minutes.
Static Files
var app = try zeish.App.init(allocator, .{
.static_dir = "public",
});

Files are pre-loaded into memory at startup. Requests to paths that don't match any route fall back to static file serving. MIME types are auto-detected from file extension.
If the directory doesn't exist, static serving is silently disabled (warning logged).
Health Probes
try app.healthz("/healthz"); // Returns {"status":"ok"}
try app.readyz("/readyz");   // Same (customize for readiness checks)

Testing
const testing = std.testing;
test "GET /items returns 200" {
var app = try zeish.App.init(testing.allocator, .{ .port = 0 });
defer app.deinit();
var handler = ListHandler{};
try app.route(ListHandler, null, null, .get, "/items", &handler, null);
var client = zeish.TestClient.init(testing.allocator, &app);
var resp = try client.get("/items");
defer resp.deinit();
try testing.expectEqual(@as(u16, 200), resp.status);
}

Metrics
Pluggable metrics interface - wire to Prometheus, StatsD, or any backend:
const MyMetrics = struct {
pub fn counter(self: *MyMetrics, name: []const u8, n: u64, labels: []const zeish.MetricsLabel) void {
// Send to Prometheus / StatsD / etc.
}
pub fn gauge(self: *MyMetrics, name: []const u8, value: i64, labels: []const zeish.MetricsLabel) void { ... }
pub fn histogram(self: *MyMetrics, name: []const u8, value: f64, labels: []const zeish.MetricsLabel) void { ... }
};
var my_metrics = MyMetrics{};
const metrics = zeish.Metrics.from(MyMetrics, &my_metrics);
// Pass to subsystems
var bus = zeish.EventBus.init(allocator, io, .{ .metrics = metrics });
var limiter = zeish.RateLimiter.init(allocator, io, .{ .metrics = metrics });

For tests, use RecordingMetricsSink to capture and assert on emitted metrics.
URL Utilities
// Decode
const decoded = zeish.Url.decode("hello%20world"); // "hello world" (in-place)
const decoded = try zeish.Url.decodeAlloc(allocator, "a%2Fb"); // "a/b" (new buffer)
// Encode
const encoded = try zeish.Url.encode(allocator, "hello world", .form); // "hello+world"
const encoded = try zeish.Url.encode(allocator, "hello world", .path); // "hello%20world"

Multipart Form Parsing
const content_type = req.getHeader("Content-Type") orelse return error.BadRequest;
var parser = zeish.Multipart.init(content_type, req.body) orelse return error.BadRequest;
var it = parser.iterator();
while (it.next()) |part| {
if (std.mem.eql(u8, part.name, "file")) {
// part.filename, part.content_type, part.data
}
}

Configuration
zeish uses a schema-driven YAML config loader. Your Zig struct IS the schema - unknown sections or keys produce hard errors with line numbers. Environment variables override any file-loaded value.
The server: Section (Mandatory)
Every zeish app must include a server: section in its config. Use zeish.ServerConfig as the type - it maps directly to the HTTP server's runtime settings:
const AppConfig = struct {
server: zeish.ServerConfig = .{}, // mandatory - framework-owned
};

# config.yaml
server:
host: "0.0.0.0" # Bind address (default: 0.0.0.0)
port: 8080 # TCP port (default: 3000)
static_dir: "public" # Static file directory (default: null)
max_connections: 50000 # Concurrent limit (default: 10000)
max_header_size: 8192 # Max header block bytes (default: 8192)
max_body_size: 4194304 # Max request body bytes (default: 1048576)
idle_timeout_ms: 60000 # Connection idle timeout (default: 30000)
max_requests_per_connection: 10000 # Keep-alive limit (default: 10000)
drain_timeout_ms: 10000 # Graceful shutdown wait (default: 5000)
response_buffer_size: 65536          # Response buffer bytes (default: 65536)

All fields have sensible defaults. A minimal config just needs port and static_dir:
server:
port: 8080
static_dir: "public"

Custom Sections (App-Specific)
Add any sections your app needs after server:. Optional sections use ?struct = null - they're silently skipped if absent from the YAML:
const AppConfig = struct {
// Mandatory - every zeish app has this
server: zeish.ServerConfig = .{},
// Optional - app-specific, null if section absent from YAML
stripe: ?struct {
secret_key: []const u8 = "",
publishable_key: []const u8 = "",
webhook_secret: []const u8 = "",
} = null,
google: ?struct {
client_id: []const u8 = "",
client_secret: []const u8 = "",
redirect_uri: []const u8 = "",
} = null,
features: ?struct {
enable_beta: bool = false,
max_upload_mb: u32 = 10,
} = null,
};

# config.yaml
server:
port: 8080
static_dir: "public"
stripe:
secret_key: "sk_test_..."
publishable_key: "pk_test_..."
# google: section omitted - config.google will be null
features:
enable_beta: true
max_upload_mb: 50

Loading and Using Config
// Load from YAML + env var overlay
const config = try zeish.Config.load(AppConfig, allocator, io, .{
.paths = &.{
"./config.yaml", // Try current directory first
"~/.myapp/config.yaml", // Fall back to home directory
},
});
// Pass server config directly to App.init - no hardcoded values
var app = try zeish.App.init(allocator, config.server);
// Access custom sections safely
if (config.stripe) |stripe| {
var provider = StripeProvider.init(allocator, .{
.secret_key = stripe.secret_key,
});
}
if (config.google) |google| {
// Google OAuth configured
}
const max_upload = if (config.features) |f| f.max_upload_mb else 10;

Environment Variable Overrides
Every field can be overridden via env var. The var name is derived from the field path in UPPER_SNAKE form:
| YAML Path | Env Var |
|---|---|
| server.port | SERVER_PORT |
| server.static_dir | SERVER_STATIC_DIR |
| stripe.secret_key | STRIPE_SECRET_KEY |
| google.client_id | GOOGLE_CLIENT_ID |
| features.enable_beta | FEATURES_ENABLE_BETA |
# Override port from env (takes precedence over YAML)
SERVER_PORT=9000 ./myapp
# Override secrets in CI without a config file
STRIPE_SECRET_KEY=sk_live_... GOOGLE_CLIENT_SECRET=... ./myapp

Resolution Order
For each field, the loader picks the first non-empty source from this list:
1. Environment variable - e.g. SERVER_PORT=9000
2. YAML file value - e.g. port: 8080
3. Struct default - e.g. port: u16 = 3000
So an env var always wins over the YAML, and the YAML always wins over the struct default. Fields not provided by any source fall back to the field's Zig default (or null for optional fields).
File Resolution
Multiple paths can be specified. The loader tries each in order and uses the first file that exists. If no file is found, it proceeds with defaults + env vars only (not an error):
.paths = &.{
"./config.yaml", // Deployed: config next to binary
"~/.zxc/myapp-config.yaml", // Dev: home directory
},

Error Handling
# Unknown section → hard error
config line 5: unknown section 'bogus'
# Unknown key in known section → hard error
config line 8: unknown key 'typo_field' in section 'server'

This prevents config drift - you can't accidentally add a section that nothing reads.
Supported Field Types
| Zig Type | YAML Example | Notes |
|---|---|---|
| []const u8 | key: "value" | Quotes optional |
| ?[]const u8 | key: "value" | null if absent |
| u16, u32, u64 | port: 8080 | Parsed as decimal |
| i32, i64 | offset: -100 | Signed integers |
| bool | enabled: true | true/1 = true, else false |
| f64 | ratio: 0.75 | Floating point |
Complete Example
const std = @import("std");
const zeish = @import("zeish");
const AppConfig = struct {
server: zeish.ServerConfig = .{}, // mandatory
database: ?struct { // optional
url: []const u8 = "127.0.0.1:24000",
pool_size: u32 = 4,
} = null,
};
pub fn main() !void {
const allocator = std.heap.smp_allocator;
var threaded: std.Io.Threaded = .init(allocator, .{ .async_limit = .unlimited });
defer threaded.deinit();
const io = threaded.io();
// Load config from YAML + env vars
const config = try zeish.Config.load(AppConfig, allocator, io, .{
.paths = &.{"config.yaml"},
});
// Server config flows directly from YAML
var app = try zeish.App.init(allocator, config.server);
defer app.deinit();
// Custom sections available as typed structs
if (config.database) |db| {
std.debug.print("DB: {s} (pool={d})\n", .{ db.url, db.pool_size });
}
try app.run(io);
}

# config.yaml
server:
port: 8080
static_dir: "public"
max_connections: 20000
database:
url: "127.0.0.1:24000"
pool_size: 8

# Override in production
SERVER_PORT=443 DATABASE_URL=prod-db:24000 ./myapp

WASM Services
A WASM service is a .wasm module that Planck loads into its own process. The service exposes HTTP routes the same way a native zeish.App does - only the entry point and the response delivery change.
WasmApp
var app = zeish.WasmApp.init(allocator, .{
.render_buffer_size = 32 * 1024 * 1024, // HTML render buffer (32 MB)
.response_buffer_size = 32 * 1024 * 1024, // HTTP response buffer (32 MB)
}) catch return -1;
// Same route registration as native App
app.route(Handler, Params, Body, .get, "/items", &handler, "items-updated") catch return -1;
// Middleware
var cors = zeish.CorsMiddleware.init(.{});
app.use(cors.middleware()) catch return -1;
// Response hook - WASM hands bytes to the host via host_respond
app.onResponse(struct {
fn hook(_: *const zeish.WasmRequest, res: *zeish.WasmResponse, buf: []u8) void {
const b = res.toBytes(buf) catch return;
host_respond(b.ptr, @intCast(b.len));
}
}.hook);

Key differences from native zeish.App:
| | zeish.App (native) | zeish.WasmApp |
|---|---|---|
| Entry point | app.run(io) | host calls process(ptr, len) |
| Response delivery | sockets | host_respond extern |
| SSE publish | bus.publish(...) | host_sse_publish (auto via route event) |
| I/O backend | std.Io.Threaded | host-provided syscalls |
SSE auto-publish. Routes with an event name (last argument to app.route) automatically publish the handler's response as an SSE event via host_sse_publish. Subscribers see the event on whichever EventBus topic the host has wired the service to.
Auth middleware. zeish.TokenAuthMiddleware is the WASM-side bearer/token gate. CSRF, request-ID, and rate-limiting middleware live in zeish core too but are usually applied at the shell layer, not inside WASM.
ZSX templates
ZSX is the template engine bundled with zeish. Templates compile (via planctl) to Zig code that appends HTML to an ArrayList(u8) - no runtime parsing, no allocation per tag.
// In a handler:
const MyTemplate = @import("../ui/my_template.zig").MyTemplate;
pub fn handle(self: *Handler, allocator: Allocator, request: *anyopaque) ![]const u8 {
const items = try self.queryItems(allocator);
var out: std.ArrayList(u8) = .empty;
try MyTemplate.render(.{ .items = items }, &out, allocator);
return out.items;
}

The handler returns the rendered bytes; WasmApp ships them through the response hook. For the full template syntax - control flow, partials, slot expressions, escaping rules - see the planctl User Manual.
WASM service configuration
WASM services don't take a top-level server: block - their HTTP server settings live inside the wasm: section of the Planck service config.yaml:
# Planck service config.yaml
name: "kitchen"
address: "0.0.0.0"
port: 24010 # Planck TCP port
service_type: command
# ... database settings (buffers, durability, etc.) ...
wasm:
enabled: true
min_instances: 2
max_instances: 8
autoscale: true
http: # zeish.ServerConfig - nested under wasm
host: "0.0.0.0"
port: 3010 # HTTP port for the WASM service
max_connections: 10000
max_header_size: 8192
max_body_size: 1048576
response_buffer_size: 65536
idle_timeout_ms: 30000
max_requests_per_connection: 10000
  drain_timeout_ms: 5000

The Planck host binary reads config.wasm.http and passes it to zeish.Server.init():
// db/src/main.zig
var srv = try zeish.Server.init(allocator, config.wasm.http);

| | Shell App (zeish.App) | WASM Service (zeish.WasmApp) |
|---|---|---|
| Config location | Top-level server: | Nested wasm.http: |
| Who owns it | The app's config.yaml | Planck's service config.yaml |
| Port assignment | Manual or from config | Auto-assigned by workbench on deploy |
| Zig init | App.init(allocator, config.server) | WasmApp.init(allocator, .{...}) |
When deploying via zxc deploy --service, the workbench auto-assigns wasm.http.port from the 3000+ range. You don't need to set it manually.
Providers
zeish ships concrete provider implementations of its vtable interfaces under three namespaces. They're vendor-bound (Google OAuth, Stripe, SendGrid); only the providers you import end up in your binary, so unused ones cost nothing at runtime.
// Auth — Google OAuth provider satisfies zeish.AuthProvider
var google = zeish.auth.GoogleAuthProvider.init(allocator, .{
.client_id = cfg.google.client_id,
.client_secret = cfg.google.client_secret,
.redirect_uri = cfg.google.redirect_uri,
});
const auth_provider = google.authProvider();
// Payments - Stripe provider satisfies zeish.PaymentProvider
var stripe = zeish.pay.StripeProvider.init(allocator, .{
.secret_key = cfg.stripe.secret_key,
});
const payment = stripe.paymentProvider();
// Notifications - SendGrid provider satisfies zeish.NotificationProvider
var sg = zeish.notify.SendGridProvider.init(allocator, .{
.api_key = cfg.sendgrid.api_key,
});
const notifier = sg.notificationProvider();

The vtable interfaces (AuthProvider, PaymentProvider, NotificationProvider) are defined in zeish core; the namespaced submodules add the concrete bindings. Roll your own provider by implementing the vtable directly - the *Provider factory pattern in each submodule shows the contract.
Host Externs (WASM only)
Provided by Planck at WASM link time. You declare them via extern fn in your service entry point and zeish wires them automatically.
| Symbol | Signature | Purpose |
|---|---|---|
| host_respond | (ptr: [*]const u8, len: u32) void | Hand response bytes back to the host |
| host_sse_publish | (topic_ptr, topic_len, event_ptr, event_len, data_ptr, data_len) void | Publish an SSE event on a host bus topic |
Key Types Reference
| Type | Import | Purpose |
|---|---|---|
| App | zeish.App | Native HTTP application |
| ServerConfig | zeish.ServerConfig | Server config struct (use in YAML config) |
| Server | zeish.Server | Low-level HTTP server |
| Request | zeish.Request | Parsed HTTP request |
| Response | zeish.Response | HTTP response builder |
| Router | zeish.Router | Path-based request router |
| Middleware | zeish.Middleware | Middleware vtable interface |
| CorsMiddleware | zeish.CorsMiddleware | CORS headers |
| RateLimitMiddleware | zeish.RateLimitMiddleware | Rate limiting |
| RequestIdMiddleware | zeish.RequestIdMiddleware | Request ID generation |
| CsrfMiddleware | zeish.CsrfMiddleware | CSRF protection |
| SessionStore(T) | zeish.SessionStore | Generic session management |
| StateStore | zeish.StateStore | CSRF state tokens |
| EventBus | zeish.EventBus | SSE pub/sub hub |
| SseClientBridge | zeish.sse.SseClientBridge | Cross-process SSE bridge |
| Client | zeish.Client | Outbound HTTP client |
| Config | zeish.Config | YAML config loader |
| Schema | zeish.Schema | Field validation |
| Metrics | zeish.Metrics | Metrics vtable interface |
| TestClient | zeish.TestClient | In-memory test client |
| Method | zeish.Method | HTTP method enum |
| Status | zeish.Status | HTTP status code enum |
| Url | zeish.Url | URL encode/decode |
| Multipart | zeish.Multipart | Multipart form parser |
| Mediator | zeish.Mediator | MediatR-style dispatch |
| WasmApp | zeish.WasmApp | WASM application — same route registration as App |
| WasmRequest | zeish.WasmRequest | Request decoded from host frame |
| WasmResponse | zeish.WasmResponse | Response builder; toBytes serializes for host_respond |
| TokenAuthMiddleware | zeish.TokenAuthMiddleware | Bearer / token auth gate (WASM side) |
| auth.GoogleAuthProvider | zeish.auth.GoogleAuthProvider | Google OAuth implementation of AuthProvider |
| pay.StripeProvider | zeish.pay.StripeProvider | Stripe implementation of PaymentProvider |
| notify.SendGridProvider | zeish.notify.SendGridProvider | SendGrid implementation of NotificationProvider |