// React example: stream AI chat tokens from /ai/chat over Atmosphere.
// NOTE(review): this snippet omits the enclosing component function — the
// hooks below must run inside a React component in real code; confirm
// against the full example.
import { useState } from 'react';
import { useStreaming } from 'atmosphere.js/react';
// Controlled input state for the prompt box.
const [prompt, setPrompt] = useState('');
// Chat transcript, seeded with one assistant greeting.
const [messages, setMessages] = useState([
{ author: 'assistant', text: 'I can join your room in real time.' }
]);
// useStreaming connects to /ai/chat over WebSocket; fullText accumulates the
// streamed answer, progress/status report the stream phase, send starts a
// request, reset clears the previous answer.
const { fullText, progress, status, send, reset } = useStreaming({
request: { url: '/ai/chat', transport: 'websocket' }
});
// Form handler: append the user's message locally, then stream the reply.
const submit = async (e) => {
e.preventDefault();
if (!prompt.trim()) return; // ignore empty / whitespace-only prompts
const next = prompt;
setPrompt(''); // clear the input immediately
setMessages((list) => [...list, { author: 'you', text: next }]);
reset(); // drop the previous answer's fullText before streaming a new one
await send({ prompt: next, room: 'support' });
};
return (
<section>
{messages.map((m, i) => <p key={i}>{m.author}: {m.text}</p>)}
{fullText && <p>assistant: {fullText}</p>}
<small>{progress?.phase || status}</small>
<form onSubmit={submit}><input value={prompt} onChange={(e) => setPrompt(e.target.value)} /><button disabled={status === 'streaming'}>Send</button></form>
</section>
);
Atmosphere — Stream AI to browsers. Keep your stack.
Spring AI and LangChain4j handle inference. Atmosphere handles the other half: streaming tokens to React, Vue, or Svelte over WebSocket with SSE fallback, rooms, presence, and Kafka/Redis clustering. Add one dependency to your Spring Boot or Quarkus app — no rewiring, no custom WebSocket code.
From LLM to browser in minutes
Your AI framework generates tokens. Atmosphere delivers them:
server-side streaming with @AiEndpoint, MCP tools with @McpServer,
and client-side hooks with useStreaming(). No custom WebSocket code.
@ManagedService(path = "/atmosphere/chat")
public class ChatEndpoint {

    /**
     * Echoes each incoming chat message back to the room.
     *
     * @param message the raw payload received from a connected client
     * @return the same payload; Atmosphere broadcasts the returned value
     *         to every client subscribed to this path
     */
    @Message
    public String onMessage(String message) {
        // Plain relay — human users chat with each other in real time.
        return message;
    }
}
@McpServer(name = "chat-participant", path = "/atmosphere/mcp")
public class ChatMcpParticipant {
@Inject private AtmosphereConfig config;
@McpTool(name = "say_in_chat", description = "Send a message as MCP Agent")
public String sayInChat(
@McpParam(name = "text") String text) {
var chat = config.getBroadcasterFactory().lookup("/atmosphere/chat", true);
chat.broadcast("{\"author\":\"MCP Agent\",\"message\":\"" + text + "\"}");
return "sent";
}
@McpResource(uri = "atmosphere://chat/presence", name = "Chat Presence")
public String presence() {
var chat = config.getBroadcasterFactory().lookup("/atmosphere/chat", false);
var online = chat == null ? "0" : String.valueOf(chat.getAtmosphereResources().size());
return "onlineUsers=" + online;
}
} <script setup>
// Vue example: the same streaming chat using the Vue flavor of useStreaming.
// NOTE(review): the surrounding <script setup> / </template> tags are fused
// onto neighboring lines by extraction — confirm against the full example.
import { ref } from 'vue';
import { useStreaming } from 'atmosphere.js/vue';
// Prompt input and chat transcript state.
const prompt = ref('');
const messages = ref([
{ author: 'assistant', text: 'I stream updates in real time.' }
]);
// Connects to /ai/chat over WebSocket; fullText accumulates streamed tokens.
const { fullText, progress, status, send, reset } = useStreaming({
request: { url: '/ai/chat', transport: 'websocket' }
});
// Append the user's message locally, then stream the assistant reply.
const submit = async () => {
if (!prompt.value.trim()) return; // ignore empty / whitespace-only prompts
const next = prompt.value;
prompt.value = ''; // clear the input immediately
messages.value.push({ author: 'you', text: next });
reset(); // discard the previous streamed answer
await send({ prompt: next, room: 'support' });
};
</script>
<template>
<!-- transcript, then the in-flight assistant answer as it streams -->
<p v-for="(m, i) in messages" :key="i">{{ m.author }}: {{ m.text }}</p>
<p v-if="fullText">assistant: {{ fullText }}</p>
<small>{{ progress?.phase || status }}</small>
<form @submit.prevent="submit"><input v-model="prompt" /><button :disabled="status === 'streaming'">Send</button></form>
</template> <script>
// Svelte example: here useStreaming returns stores, read in markup with the
// $ prefix ($fullText, $progress, $status).
import { useStreaming } from 'atmosphere.js/svelte';
let prompt = '';
let messages = [
{ author: 'assistant', text: 'MCP + chat updates arrive in one stream.' }
];
// Connects to /ai/chat over WebSocket.
const { fullText, progress, status, send, reset } = useStreaming({
request: { url: '/ai/chat', transport: 'websocket' }
});
// Append the user's message locally, then stream the assistant reply.
const submit = async () => {
if (!prompt.trim()) return; // ignore empty / whitespace-only prompts
const next = prompt;
prompt = ''; // clear the input immediately
messages = [...messages, { author: 'you', text: next }]; // reassign so Svelte reacts
reset(); // discard the previous streamed answer
await send({ prompt: next, room: 'support' });
};
</script>
<!-- transcript, streamed answer, and current stream phase -->
{#each messages as m}
<p>{m.author}: {m.text}</p>
{/each}
{#if $fullText}<p>assistant: {$fullText}</p>{/if}
<small>{$progress?.phase || $status}</small>
<form on:submit|preventDefault={submit}><input bind:value={prompt} /><button disabled={$status === 'streaming'}>Send</button></form> Works with your stack, not instead of it
One dependency. Zero rewiring. Add a starter to your existing Spring Boot or Quarkus app and Atmosphere handles WebSocket transport, reconnection, and rooms — so your AI framework can focus on inference.
Spring Boot New in 4.0
4.0.2+ — Auto-configured servlet, Spring DI bridge, and Actuator health indicator.
Quarkus New in 4.0
3.21+ — Build-time annotation scanning, Arc CDI, and WebSocket support.
Servlet Container
6.0+ — Tomcat, Jetty, Undertow, GlassFish, Payara — any Servlet 6.0+ (Jakarta EE) container.
AI Streaming AI
Atmosphere AI — Use @AiEndpoint + @Prompt with StreamingSession and the built-in OpenAiCompatibleClient for Gemini, OpenAI, Ollama, and OpenAI-compatible APIs.
MCP Server Agents
Agent-ready — Expose tools, resources, and prompts with @McpServer/@McpTool. Connect Claude Desktop, VS Code Copilot, or Cursor via Streamable HTTP or WebSocket.
Multi-Node Scaling Clustering
Kafka &amp; Redis — Scale across nodes with built-in Kafka and Redis broadcasters. No commercial add-ons — just add a dependency.
What you stop building yourself
Transport fallback, reconnection, rooms, presence, clustering — the plumbing every AI chat app needs but nobody wants to maintain.
Virtual Threads
Every connection runs on a JDK 21 virtual thread. Massive scalability with no thread-pool tuning.
WebSocket + Fallbacks
First-class WebSocket with automatic degradation to SSE and long-polling. Heartbeats and reconnection built in.
Rooms & Presence
Built-in room management with presence tracking, message history, and authorization. No external dependencies.
Simple Annotations
Define endpoints with @ManagedService or @RoomService. Handle lifecycle with @Ready, @Message, @Disconnect. Kotlin DSL available.
AI/LLM Streaming
Stream tokens from any LLM to browsers. Built-in OpenAI-compatible client, Spring AI and LangChain4j adapters.
MCP Server
Expose tools, resources, and prompts to AI agents. Connect Claude Desktop, VS Code Copilot, or Cursor via Streamable HTTP.
Support Subscriptions
Get support from the core team with fast response times. Meet your production schedule and compliance requirements.
Bronze
- Coverage Business Hours
- Response (S1) 7 Business Days
- Response (S2) 10 Business Days
- Incidents 2 / year
- Phone Support —
- Email Support ✓
- Emergency Patches —
Silver
- Coverage Business Hours
- Response (S1) 1 Business Day
- Response (S2) 2 Business Days
- Incidents 10 / year
- Phone Support —
- Email Support ✓
- Emergency Patches —
Gold
- Coverage Business Hours
- Response (S1) 4 hours
- Response (S2) 1 Business Day
- Incidents 15 / year
- Phone Support ✓
- Email Support ✓
- Emergency Patches ✓
Platinum
- Coverage 24x7
- Response (S1) 1 hour
- Response (S2) 4 hours
- Incidents Unlimited
- Phone Support ✓
- Email Support ✓
- Emergency Patches ✓