Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
17 commits
Select commit Hold shift + click to select a range
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
168 changes: 140 additions & 28 deletions docs/glossary.md

Large diffs are not rendered by default.

Original file line number Diff line number Diff line change
Expand Up @@ -262,15 +262,14 @@ describe.sequential('Peer wallet integration', () => {
};

// Transaction signing has no peer fallback — kernel2 has no local
// keys so this should return an error, not forward to kernel1.
const result = await kernel2.queueMessage(
coordinatorKref2,
'signTransaction',
[tx],
);
await waitUntilQuiescent();
expect(result.body).toContain('#error');
expect(result.body).toContain('No authority to sign this transaction');
// keys so this should reject, not forward to kernel1.
await expect(
kernel2.queueMessage(coordinatorKref2, 'signTransaction', [tx]),
).rejects.toMatchObject({
body: expect.stringContaining(
'No authority to sign this transaction',
),
});
},
NETWORK_TIMEOUT,
);
Expand All @@ -281,16 +280,13 @@ describe.sequential('Peer wallet integration', () => {
'returns error when no local keys and no peer wallet',
async () => {
// Kernel2 has no keys and no peer wallet connected
// queueMessage resolves with error CapData (not rejects)
const result = await kernel2.queueMessage(
coordinatorKref2,
'signMessage',
['should fail'],
);
await waitUntilQuiescent();
// Error CapData body contains #error marker
expect(result.body).toContain('#error');
expect(result.body).toContain('No authority to sign message');
await expect(
kernel2.queueMessage(coordinatorKref2, 'signMessage', [
'should fail',
]),
).rejects.toMatchObject({
body: expect.stringContaining('No authority to sign message'),
});
},
NETWORK_TIMEOUT,
);
Expand Down
2 changes: 1 addition & 1 deletion packages/extension/test/e2e/object-registry.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -124,6 +124,6 @@ test.describe('Object Registry', () => {

// After revoking, the previously successful message should fail
response = await sendMessage(popupPage, target, method, params);
await expect(response).toContainText(/[Rr]evoked object/u);
await expect(response).toContainText('has been revoked');
});
});
Original file line number Diff line number Diff line change
@@ -0,0 +1,71 @@
import { makeSQLKernelDatabase } from '@metamask/kernel-store/sqlite/nodejs';
import { kunser } from '@metamask/ocap-kernel';
import type { ClusterConfig } from '@metamask/ocap-kernel';
import { delay } from '@ocap/repo-tools/test-utils';
import { mkdtemp, rm } from 'node:fs/promises';
import { tmpdir } from 'node:os';
import { join } from 'node:path';
import { describe, it, expect } from 'vitest';

import { makeTestKernel } from '../helpers/kernel.ts';

// Bundle URLs served by the local test bundle server on port 3000.
const PROVIDER_BUNDLE =
  'http://localhost:3000/orphaned-ephemeral-provider-vat.bundle';
const CONSUMER_BUNDLE =
  'http://localhost:3000/orphaned-ephemeral-consumer-vat.bundle';

// Two-vat cluster: the consumer is the bootstrap vat and obtains the
// provider's ephemeral exo during bootstrap.
const clusterConfig: ClusterConfig = {
  bootstrap: 'consumer',
  vats: {
    provider: {
      bundleSpec: PROVIDER_BUNDLE,
      parameters: {},
    },
    consumer: {
      bundleSpec: CONSUMER_BUNDLE,
      parameters: {},
    },
  },
};

describe('Orphaned ephemeral exo', { timeout: 30_000 }, () => {
  it('rejects when provider vat restarts', async () => {
    // Back the kernel with an on-disk SQLite db in a throwaway directory.
    const workDir = await mkdtemp(join(tmpdir(), 'ocap-ephemeral-'));
    try {
      const database = await makeSQLKernelDatabase({
        dbFilename: join(workDir, 'kernel.db'),
      });
      const kernel = await makeTestKernel(database);
      try {
        const { rootKref, subclusterId } =
          await kernel.launchSubcluster(clusterConfig);
        await delay();

        // Sanity check: the ephemeral exo is reachable before the restart.
        const beforeRestart = await kernel.queueMessage(
          rootKref,
          'useEphemeral',
          [],
        );
        expect(kunser(beforeRestart)).toBe(999);

        // Restart only the provider — the consumer still holds the
        // ephemeral ref, but the exo behind it no longer exists.
        const subcluster = kernel.getSubcluster(subclusterId);
        expect(subcluster).toBeDefined();
        await kernel.restartVat(subcluster!.vats.provider);
        await delay();

        // The consumer's E(ephemeral).increment() targets an orphaned vref.
        // Liveslots in the provider throws "I don't remember allocating",
        // which terminates the provider and rejects the caller's promise.
        // This is surfaced to the caller as "target object has no owner".
        await expect(
          kernel.queueMessage(rootKref, 'useEphemeral', []),
        ).rejects.toMatchObject({
          body: expect.stringContaining('target object has no owner'),
        });
      } finally {
        await kernel.stop();
      }
    } finally {
      // Always clean up the temp directory, even on failure.
      await rm(workDir, { recursive: true, force: true });
    }
  });
});
93 changes: 43 additions & 50 deletions packages/kernel-node-runtime/test/e2e/remote-comms.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -582,21 +582,17 @@ describe.sequential('Remote Communications E2E', () => {
const results = await Promise.allSettled(messagePromises);
expect(results).toHaveLength(201);

// Verify that messages within queue capacity were delivered
const successfulResults = results.filter(
(result) => result.status === 'fulfilled',
);
// At least 200 messages should succeed (the queue limit)
expect(successfulResults.length).toBeGreaterThanOrEqual(200);

// Messages beyond queue capacity should be rejected with queue full error
const rejectedResults = results.filter(
// Messages beyond queue capacity should be rejected with queue full error.
// Messages within capacity may fulfill or reject (e.g., if the remote vat
// was restarted and references are stale), but they should NOT contain
// "queue at capacity".
const queueFullResults = results.filter(
(result): result is PromiseRejectedResult =>
result.status === 'rejected',
result.status === 'rejected' &&
String(result.reason).includes('queue at capacity'),
);
for (const result of rejectedResults) {
expect(String(result.reason)).toContain('queue at capacity');
}
// At most 1 message (the 201st) should be rejected due to queue capacity
expect(queueFullResults.length).toBeLessThanOrEqual(1);

const newMessageResult = await kernel1.queueMessage(
aliceRef,
Expand Down Expand Up @@ -761,12 +757,11 @@ describe.sequential('Remote Communications E2E', () => {
kernel2 = restartResult.kernel;

// The message should not have been delivered because we didn't reconnect
const result = await messageAfterClose;
const response = kunser(result);
expect(response).toBeInstanceOf(Error);
expect((response as Error).message).toContain(
'Message delivery failed after intentional close',
);
await expect(messageAfterClose).rejects.toMatchObject({
body: expect.stringContaining(
'Message delivery failed after intentional close',
),
});
},
NETWORK_TIMEOUT * 2,
);
Expand Down Expand Up @@ -844,18 +839,17 @@ describe.sequential('Remote Communications E2E', () => {
await delay(100);

// Try to send a message after closing - should fail
const messageAfterClose = kernel1.queueMessage(
aliceRef,
'sendRemoteMessage',
[bobURL, 'hello', ['Alice']],
);

const result = await messageAfterClose;
const response = kunser(result);
expect(response).toBeInstanceOf(Error);
expect((response as Error).message).toContain(
'Message delivery failed after intentional close',
);
await expect(
kernel1.queueMessage(aliceRef, 'sendRemoteMessage', [
bobURL,
'hello',
['Alice'],
]),
).rejects.toMatchObject({
body: expect.stringContaining(
'Message delivery failed after intentional close',
),
});

// Manually reconnect
await kernel1.reconnectPeer(peerId2);
Expand Down Expand Up @@ -920,18 +914,18 @@ describe.sequential('Remote Communications E2E', () => {
// and trigger promise rejection for pending work.
// The await will naturally wait for the promise to settle - either
// succeeding (unexpected) or failing due to incarnation change detection.
const result = await kernel1.queueMessage(
aliceRef,
'sendRemoteMessage',
[bobURL, 'hello', ['Alice']],
);
const response = kunser(result);

// The message should fail because incarnation changed.
// The handshake detects the new incarnation and triggers onIncarnationChange,
// which resets RemoteHandle state and rejects pending work.
expect(response).toBeInstanceOf(Error);
expect((response as Error).message).toMatch(/Remote connection lost/u);
await expect(
kernel1.queueMessage(aliceRef, 'sendRemoteMessage', [
bobURL,
'hello',
['Alice'],
]),
).rejects.toMatchObject({
body: expect.stringMatching(/Remote connection lost/u),
});
},
NETWORK_TIMEOUT * 3,
);
Expand Down Expand Up @@ -970,16 +964,15 @@ describe.sequential('Remote Communications E2E', () => {
// The message will create a promise with the remote as decider (from URL redemption)
// When we give up on the remote, that promise should be rejected
// The vat should then propagate that rejection to the promise returned here
const messagePromise = kernel1.queueMessage(
aliceRef,
'sendRemoteMessage',
[bobURL, 'hello', ['Alice']],
);

const result = await messagePromise;
const response = kunser(result);
expect(response).toBeInstanceOf(Error);
expect((response as Error).message).toContain('Remote connection lost');
await expect(
kernel1.queueMessage(aliceRef, 'sendRemoteMessage', [
bobURL,
'hello',
['Alice'],
]),
).rejects.toMatchObject({
body: expect.stringContaining('Remote connection lost'),
});
},
NETWORK_TIMEOUT * 2,
);
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
import { E } from '@endo/eventual-send';
import { makeDefaultExo } from '@metamask/kernel-utils/exo';

/**
 * Consumer vat: captures a reference to the provider's ephemeral exo at
 * bootstrap time and invokes it whenever `useEphemeral` is called.
 *
 * @returns The root object.
 */
export function buildRootObject() {
  // Reference handed out by the provider during bootstrap; `unknown`
  // because the consumer treats it as an opaque remote presence.
  let cachedEphemeral: unknown;

  return makeDefaultExo('root', {
    async bootstrap(vats: { provider: unknown }) {
      cachedEphemeral = await E(vats.provider).getEphemeral();
    },

    async useEphemeral() {
      return E(cachedEphemeral).increment();
    },
  });
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
import { makeDefaultExo } from '@metamask/kernel-utils/exo';

/**
 * Provider vat: exposes a single non-durable ("ephemeral") exo. Because the
 * exo is not durable, restarting this vat orphans any outstanding references
 * to it held by other vats.
 *
 * @returns The root object.
 */
export function buildRootObject() {
  const counter = makeDefaultExo('EphemeralCounter', {
    increment: () => 999,
  });

  return makeDefaultExo('root', {
    getEphemeral: () => counter,
  });
}
6 changes: 5 additions & 1 deletion packages/kernel-test/src/endowments.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,11 @@ describe('endowments', () => {

await waitUntilQuiescent();

await kernel.queueMessage(v1Root, 'hello', [`https://${badHost}`]);
await expect(
kernel.queueMessage(v1Root, 'hello', [`https://${badHost}`]),
).rejects.toMatchObject({
body: expect.stringContaining(`Invalid host: ${badHost}`),
});

await waitUntilQuiescent();

Expand Down
50 changes: 50 additions & 0 deletions packages/kernel-test/src/orphaned-ephemeral-exo.test.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,50 @@
import { makeSQLKernelDatabase } from '@metamask/kernel-store/sqlite/nodejs';
import { waitUntilQuiescent } from '@metamask/kernel-utils';
import { kunser } from '@metamask/ocap-kernel';
import { describe, expect, it } from 'vitest';

import { getBundleSpec, makeKernel, makeTestLogger } from './utils.ts';

describe('orphaned ephemeral exo', () => {
  it('rejects when provider vat restarts', async () => {
    const { logger } = makeTestLogger();
    const database = await makeSQLKernelDatabase({});
    const kernel = await makeKernel(database, true, logger);

    // Two-vat cluster: the consumer bootstraps and captures a reference
    // to the provider's ephemeral exo.
    const { rootKref, subclusterId } = await kernel.launchSubcluster({
      bootstrap: 'consumer',
      vats: {
        provider: {
          bundleSpec: getBundleSpec('orphaned-ephemeral-provider'),
          parameters: {},
        },
        consumer: {
          bundleSpec: getBundleSpec('orphaned-ephemeral-consumer'),
          parameters: {},
        },
      },
    });
    await waitUntilQuiescent();

    // Sanity check: the ephemeral exo responds before the restart.
    const beforeRestart = await kernel.queueMessage(
      rootKref,
      'useEphemeral',
      [],
    );
    expect(kunser(beforeRestart)).toBe(999);

    // Restart only the provider — the consumer still holds the
    // ephemeral ref, but the exo behind it no longer exists.
    const subcluster = kernel.getSubcluster(subclusterId);
    expect(subcluster).toBeDefined();
    await kernel.restartVat(subcluster!.vats.provider);
    await waitUntilQuiescent();

    // The consumer's E(ephemeral).increment() targets an orphaned vref.
    // Liveslots in the provider throws "I don't remember allocating",
    // which terminates the provider vat. The message is retried in a new
    // crank, but the endpoint is gone — so it splats and rejects.
    await expect(
      kernel.queueMessage(rootKref, 'useEphemeral', []),
    ).rejects.toMatchObject({
      body: expect.stringContaining('has no owner'),
    });
  });
});
Loading
Loading