Merged

59 commits
f59d824
Add AI transport as a product within docs
GregHolmes Dec 2, 2025
a6d9352
chore: Add AI Transport examples filter
matt423 Dec 15, 2025
a88aba6
chore: Add AI Transport product tile to the homepage
matt423 Dec 15, 2025
d1536b3
ait/token-streaming: add message per token page
mschristensen Dec 9, 2025
f770ce7
ait/message-per-token: add intro
mschristensen Dec 9, 2025
149a322
ait/message-per-token: add token publishing
mschristensen Dec 9, 2025
e0c010e
ait/message-per-token: token streaming patterns
mschristensen Dec 9, 2025
e4d1b1a
ait/message-per-token: client hydration patterns
mschristensen Dec 10, 2025
da59a92
ai-transport: add message per response doc
zknill Dec 11, 2025
0e6811c
fix nav and typos
zknill Dec 11, 2025
ee684fa
ai-transport/token-streaming: unify nav
mschristensen Dec 16, 2025
ffb0064
ai-transport/token-streaming: refine intro
mschristensen Dec 16, 2025
e0c5cd8
ai-transport: refine Publishing section
mschristensen Dec 16, 2025
f324e1e
ai-transport: refine Subscribing section
mschristensen Dec 16, 2025
60fb7fb
ai-transport: refine rewind section
mschristensen Dec 16, 2025
917ec8e
ai-transport/token-streaming: refine history
mschristensen Dec 16, 2025
0b3d40b
ai-transport/token-streaming: in-progress rewind
mschristensen Dec 16, 2025
5abb8aa
ai-transport/token-streaming: in progress history
mschristensen Dec 16, 2025
0ec70fc
ai-transport/token-streaming: remove metadata
mschristensen Dec 16, 2025
718d880
ai-transport/token-streaming: add resume callout
mschristensen Dec 16, 2025
b6cd482
ai-transport/token-streaming: headers
mschristensen Dec 16, 2025
01521db
ait: add sessions & identity docs
mschristensen Dec 10, 2025
f5ce091
chore: update message annotations terminology to include appends
matt423 Jan 5, 2026
b64b75e
ai-transport: misc message per response fixes
mschristensen Jan 5, 2026
9d14080
ait/guides: openai message per token
lawrence-forooghian Dec 10, 2025
0d1878e
ait/guides: openai message per token fixes
mschristensen Jan 7, 2026
1634e19
ait/features: fix messages per response anchor tag
mschristensen Jan 7, 2026
5652841
ait/guides: add open ai message per response guide
mschristensen Jan 7, 2026
3f80212
ait/features: chain of thought
owenpearson Jan 5, 2026
9a55948
ait/features: token streaming overview
GregHolmes Jan 12, 2026
cef6abf
docs: add human-in-the-loop page for AI Transport
GregHolmes Jan 6, 2026
7f4e84c
docs: add user input page for AI Transport
GregHolmes Jan 5, 2026
1b26048
feat: add Anthropic SDK message-per-token guide
matt423 Jan 8, 2026
acfac8d
chore: update message annotations terminology to include appends
matt423 Jan 5, 2026
3665522
Add citations feature documentation for AI Transport
mittulmadaan Jan 6, 2026
187ea19
AIT-133 - Fixed review comments
mittulmadaan Jan 13, 2026
79a8bed
AIT-133 - fix lint issue + resolve conflicts
mittulmadaan Jan 13, 2026
d2a11b2
AIT-133 - nit fix
mittulmadaan Jan 13, 2026
5e68af6
Fixed review comments.
mittulmadaan Jan 13, 2026
a4af341
Update src/pages/docs/ai-transport/features/advanced/citations.mdx
mittulmadaan Jan 14, 2026
b43f674
feat: Add AI Transport message per token example for Javascript
matt423 Dec 22, 2025
f9449a6
feat: Add AI Transport message per token example for React
matt423 Dec 22, 2025
5274a72
feat: Add guide for Anthropic SDK message-per-response streaming
matt423 Jan 14, 2026
7868abe
chore: Add async/await for processEvent in OpenAI guide
matt423 Jan 14, 2026
2d3dab0
ait/features: add tool call page
mschristensen Jan 13, 2026
e4ebb87
ait/features: misc. fixes to citations docs
mschristensen Jan 14, 2026
4ed5e53
docs: move AI Transport feature directories to top level
GregHolmes Jan 15, 2026
0e46617
Add an overview page for AI Transport
m-hulbert Jan 12, 2026
43708ba
ait/features: token streaming rate limits
rainbowFi Jan 12, 2026
45ac85a
ait: advise disabling echo
mschristensen Jan 15, 2026
208d73d
chore: Use 2.17.0 in examples
matt423 Jan 16, 2026
a0dbcdc
feat: Add AI Transport message per response example for Javascript
matt423 Jan 16, 2026
0449aee
feat: Add AI Transport message per response example for React
matt423 Jan 16, 2026
01ab0f8
chore: Use single agent reference in AIT examples
matt423 Jan 16, 2026
6fa00a7
docs: writing style guide
mschristensen Jan 16, 2026
1847a6b
ait: misc improvements
mschristensen Jan 16, 2026
42916b2
AIT-149: Add pricing information to docs (#3106)
rainbowFi Jan 16, 2026
6c6f490
ait: fix appendMessage signature
mschristensen Jan 16, 2026
b1947da
Fixup: faulty link
rainbowFi Jan 16, 2026
61 changes: 61 additions & 0 deletions examples/ai-transport-message-per-response/javascript/README.md
@@ -0,0 +1,61 @@
# AI Transport message-per-response streaming

Enable realtime streaming of AI/LLM responses by appending tokens to a single message over Ably.

AI Transport message-per-response streaming allows applications to provide immediate, responsive AI interactions by streaming tokens in realtime. Unlike the message-per-token pattern, it appends all tokens for a response to a single message, which appears as one entry in channel history. This makes it easy to retrieve and display conversation history while still delivering tokens live.

The streaming approach significantly improves perceived performance and user engagement. Instead of waiting 5-10 seconds for a complete AI response, users see tokens appearing progressively, creating a more natural conversation flow similar to watching someone type in realtime.

Token streaming is implemented using [Ably AI Transport](/docs/ai-transport). AI Transport provides purpose-built APIs for realtime AI applications, offering reliable message delivery, automatic ordering, and seamless reconnection handling to ensure no tokens are lost during network interruptions.

## Resources

Use the following methods to implement AI Transport message-per-response streaming; a combined sketch follows the list:

- [`client.channels.get()`](/docs/channels#create): creates a new or retrieves an existing channel for AI Transport token streaming.
- [`channel.publish()`](/docs/channels#publish): publishes the initial message and captures the serial for token appending.
- [`channel.appendMessage()`](/docs/messages#append): appends individual tokens to the message as they arrive from the LLM service.
- [`channel.subscribe()`](/docs/channels#subscribe): subscribes to messages, handling `message.create`, `message.append`, and `message.update` actions.
- [`channel.setOptions()`](/docs/channels/options) with [`rewind`](/docs/channels/options/rewind): enables seamless message recovery during reconnections, delivering historical messages as `message.update` events.

Find out more about [AI Transport](/docs/ai-transport) and [message appending](/docs/ai-transport/token-streaming/message-per-response).
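
For orientation, here is a minimal sketch of how these methods fit together, assuming an ably-js realtime client; `llmTokens` is a hypothetical async iterable of token strings standing in for your LLM stream. The full, runnable version is in `src/agent.ts` and `src/script.ts` below.

```ts
import * as Ably from 'ably';

// Hypothetical stand-in for an LLM token stream
declare const llmTokens: AsyncIterable<string>;

const client = new Ably.Realtime({ key: 'YOUR_ABLY_KEY', clientId: 'ai-agent' });
const channel = client.channels.get('ai:response-example');

// Publisher: create one message per response, then append tokens to it
async function streamResponse() {
  const result = await channel.publish({ name: 'response', data: '' });
  const serial = result.serials[0]; // identifies the message to append to

  for await (const token of llmTokens) {
    await channel.appendMessage({ serial, data: token });
  }
}

// Subscriber: rewind replays recent history as message.update events,
// then live tokens arrive as message.append events
async function watchResponses() {
  const responses = new Map<string, string>();
  await channel.setOptions({ params: { rewind: '2m' } });
  channel.subscribe((message) => {
    if (!message.serial) return;
    if (message.action === 'message.create' || message.action === 'message.update') {
      responses.set(message.serial, message.data || '');
    } else if (message.action === 'message.append') {
      responses.set(message.serial, (responses.get(message.serial) || '') + message.data);
    }
  });
}
```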

## Getting started

1. Clone the [Ably docs](https://github.com/ably/docs) repository where this example can be found:

```sh
git clone [email protected]:ably/docs.git
```

2. Change directory:

```sh
cd examples/
```

3. Rename the environment file:

```sh
mv .env.example .env.local
```

4. In `.env.local`, update the value of `VITE_ABLY_KEY` to your Ably API key.

5. Install dependencies:

```sh
yarn install
```

6. Run the server:

```sh
yarn run ai-transport-message-per-response-javascript
```

7. Try it out by opening [http://localhost:5173/](http://localhost:5173/) with your browser and selecting a prompt to see realtime AI token streaming.

## Open in CodeSandbox

In CodeSandbox, rename the `.env.example` file to `.env.local` and update the value of the `VITE_ABLY_KEY` variable to your Ably API key.
49 changes: 49 additions & 0 deletions examples/ai-transport-message-per-response/javascript/index.html
@@ -0,0 +1,49 @@
<!doctype html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<link rel="stylesheet" href="src/styles.css" />
<title>AI Transport Message Per Response - JavaScript</title>
</head>

<body class="bg-gray-100">
<div class="max-w-6xl mx-auto p-5">
<!-- Response section with always visible status -->
<div class="mb-4">
<div class="flex-1">
<div class="text-sm text-gray-600 mt-4 mb-2 flex justify-between">
<span id="prompt-display"></span>
<div class="flex items-center gap-2">
<span class="text-xs bg-gray-200 px-2 py-1 rounded flex items-center gap-1">
<span id="processing-status">Ready</span>
</span>
<!-- Disconnect/Reconnect button -->
<button id="connection-toggle" class="text-xs bg-blue-500 text-white px-2 py-1 rounded hover:bg-blue-600">
Disconnect
</button>
</div>
</div>
<div class="p-4 border border-gray-300 rounded-lg bg-gray-50 h-48 overflow-y-auto text-base leading-relaxed">
<span id="response-text">Select a prompt below to get started</span>
<span id="cursor" class="text-blue-600"></span>
</div>
</div>
</div>

<!-- Prompt selection -->
<div class="mb-4">
<div class="flex flex-wrap gap-2" id="prompt-buttons">
<button
id="prompt-button"
class="px-3 py-2 text-sm border rounded-md transition-colors bg-white hover:bg-blue-50 border-gray-300 hover:border-blue-300 cursor-pointer"
>
What is Ably AI Transport?
</button>
</div>
</div>
</div>

<script type="module" src="src/script.ts"></script>
</body>
</html>
10 changes: 10 additions & 0 deletions examples/ai-transport-message-per-response/javascript/package.json
@@ -0,0 +1,10 @@
{
"name": "ai-transport-message-per-response-javascript",
"version": "1.0.0",
"type": "module",
"scripts": {
"dev": "vite",
"build": "vite build",
"preview": "vite preview"
}
}
52 changes: 52 additions & 0 deletions examples/ai-transport-message-per-response/javascript/src/agent.ts
@@ -0,0 +1,52 @@
// Agent Service
// This consumes LLM streams and publishes tokens using the message-per-response pattern
// All tokens are appended to a single message, which appears as one entry in channel history

import * as Ably from 'ably';
import { MockLLM } from './llm';

export class Agent {
private client: Ably.Realtime;
private channel: Ably.RealtimeChannel;
private llm: MockLLM;

constructor(ablyKey: string, channelName: string) {
this.client = new Ably.Realtime({
key: ablyKey,
clientId: 'ai-agent',
});
this.channel = this.client.channels.get(channelName);
this.llm = new MockLLM();
}

async processPrompt(prompt: string): Promise<void> {
const stream = await this.llm.responses.create(prompt);
let msgSerial: string | null = null;

for await (const event of stream) {
if (event.type === 'message_start') {
// Create initial empty message and capture its serial
const publishResult = await this.channel.publish({
name: 'response',
data: '',
});
msgSerial = publishResult.serials[0];
} else if (event.type === 'message_delta') {
// Append each token to the same message using its serial
if (msgSerial && event.text) {
await this.channel.appendMessage({
serial: msgSerial,
data: event.text,
});
}
} else if (event.type === 'message_stop') {
// Stream complete - all tokens have been appended
console.log('Response complete');
}
}
}

disconnect(): void {
this.client.close();
}
}
3 changes: 3 additions & 0 deletions examples/ai-transport-message-per-response/javascript/src/config.ts
@@ -0,0 +1,3 @@
export const config = {
ABLY_KEY: import.meta.env.VITE_ABLY_KEY || 'YOUR_ABLY_KEY_HERE',
};
49 changes: 49 additions & 0 deletions examples/ai-transport-message-per-response/javascript/src/llm.ts
@@ -0,0 +1,49 @@
// Mock LLM Service
// This simulates a generic LLM SDK with streaming capabilities

interface StreamEvent {
type: 'message_start' | 'message_delta' | 'message_stop';
text?: string;
responseId: string;
}

export class MockLLM {
private readonly responseText =
'Ably AI Transport is a solution for building stateful, steerable, multi-device AI experiences into new or existing applications. You can use AI Transport as the transport layer with any LLM or agent framework, without rebuilding your existing stack or being locked to a particular vendor.';

responses = {
create: (prompt: string) => this.createStream(prompt),
};

private async *createStream(_prompt: string): AsyncIterable<StreamEvent> {
const responseId = `resp_${crypto.randomUUID()}`;

// Yield start event
yield { type: 'message_start', responseId };

// Chunk text into tokens (simulates LLM tokenization)
const tokens = this.chunkTextLikeAI(this.responseText);

for (const token of tokens) {
// Simulate realistic delay between tokens
await new Promise((resolve) => setTimeout(resolve, Math.random() * 150 + 50));

// Yield token event
yield { type: 'message_delta', text: token, responseId };
}

// Yield stop event
yield { type: 'message_stop', responseId };
}

private chunkTextLikeAI(text: string): string[] {
const chunks: string[] = [];
let pos = 0;
while (pos < text.length) {
const size = Math.floor(Math.random() * 8) + 1;
chunks.push(text.slice(pos, pos + size));
pos += size;
}
return chunks.filter((chunk) => chunk.length > 0);
}
}
96 changes: 96 additions & 0 deletions examples/ai-transport-message-per-response/javascript/src/script.ts
@@ -0,0 +1,96 @@
import * as Ably from 'ably';
import { Agent } from './agent';
import { config } from './config';

// Generate unique channel name for this session
const CHANNEL_NAME = `ai:response-${crypto.randomUUID()}`;
const client = new Ably.Realtime({
key: config.ABLY_KEY,
});

const channel = client.channels.get(CHANNEL_NAME);

// Agent for processing prompts
const agent = new Agent(config.ABLY_KEY, CHANNEL_NAME);

// DOM elements
const responseTextElement = document.getElementById('response-text') as HTMLDivElement;
const connectionToggle = document.getElementById('connection-toggle') as HTMLButtonElement;
const promptButton = document.getElementById('prompt-button') as HTMLButtonElement;
const processingStatus = document.getElementById('processing-status') as HTMLSpanElement;

// Track responses by message serial
const responses = new Map<string, string>();
let currentSerial: string | null = null;

const updateDisplay = () => {
if (currentSerial) {
responseTextElement.innerText = responses.get(currentSerial) || '';
}
};

// Subscribe to messages - rewind delivers history as message.update,
// then seamlessly transitions to live message.append events
channel.subscribe((message: Ably.Message) => {
const serial = message.serial;
if (!serial) {
return;
}

switch (message.action) {
case 'message.create':
responses.set(serial, message.data || '');
currentSerial = serial;
processingStatus.innerText = 'Streaming';
break;
case 'message.append': {
// Only append if this is for the current response
if (currentSerial === serial) {
const current = responses.get(serial) || '';
responses.set(serial, current + (message.data || ''));
}
break;
}
case 'message.update':
// Full state from history or resync - always use it
responses.set(serial, message.data || '');
currentSerial = serial;
break;
}
updateDisplay();
});

const handlePromptClick = () => {
currentSerial = null;
responseTextElement.innerText = '';
processingStatus.innerText = 'Streaming';

agent.processPrompt('What is Ably AI Transport?');
};

const handleConnect = async () => {
// Set rewind option before attaching to get history as message.update events
await channel.setOptions({ params: { rewind: '2m' } });
await channel.attach();
connectionToggle.innerText = 'Disconnect';
processingStatus.innerText = 'Ready';
};

const handleDisconnect = async () => {
await channel.detach();
processingStatus.innerText = 'Paused';
connectionToggle.innerText = 'Connect';
};

const handleConnectionToggle = () => {
if (channel.state === 'attached') {
handleDisconnect();
} else {
handleConnect();
}
};

connectionToggle.onclick = handleConnectionToggle;
promptButton.onclick = handlePromptClick;

handleConnect();
3 changes: 3 additions & 0 deletions examples/ai-transport-message-per-response/javascript/src/styles.css
@@ -0,0 +1,3 @@
@tailwind base;
@tailwind components;
@tailwind utilities;
9 changes: 9 additions & 0 deletions examples/ai-transport-message-per-response/javascript/tailwind.config.ts
@@ -0,0 +1,9 @@
import baseConfig from '../../tailwind.config';
import type { Config } from 'tailwindcss';

const config: Config = {
...baseConfig,
content: ['./src/**/*.{js,ts,tsx}', './index.html'],
};

export default config;
7 changes: 7 additions & 0 deletions examples/ai-transport-message-per-response/javascript/vite.config.ts
@@ -0,0 +1,7 @@
import { defineConfig } from 'vite';
import baseConfig from '../../vite.config';

export default defineConfig({
...baseConfig,
envDir: '../../',
});