# axellero-chatbot-ui-react

A library for chatbot integration: a modern, drop‑in chatbot UI component built with React. Works out of the box with the OpenAI API and Open WebUI, or with any compatible backend such as Axellero Io.
---
- Plug & Play — import, pass a token, chat
- Multi-API Support — OpenAI API and OpenWebUI compatibility
- File & Image Handling — upload, preview, and drag‑and‑drop support
- Vision API Support — image analysis with base64 encoding
- Voice Recording — live transcription and audio processing
- Real-time Streaming — SSE support for live chat responses
- Fully Themeable — light/dark themes with granular color overrides
- Composable Components — header & footer slots for total branding control
- TypeScript Support — full type safety and IntelliSense
- SSR‑friendly — works with Next.js, Gatsby, and other frameworks
- WCAG‑compliant — accessibility built-in
---
```bash
npm i axellero-chatbot-ui-react # or: yarn add axellero-chatbot-ui-react
```
---
```tsx
import 'axellero-chatbot-ui/style.css'
import { AxelleroChatProviders, AxelleroChatBotUi } from 'axellero-chatbot-ui-react'

export default function App() {
  return (
    <AxelleroChatProviders
      config={{
        model: 'gpt-4o-mini',
        getAuthToken: () => 'Bearer sk-...',
        apiType: 'openai',
      }}
    >
      <AxelleroChatBotUi />
    </AxelleroChatProviders>
  )
}
```
```tsx
import { AxelleroChatProviders, AxelleroChatBotUi } from 'axellero-chatbot-ui-react'

export default function App() {
  return (
    <AxelleroChatProviders
      config={{
        model: 'gpt-4o-mini',
        getAuthToken: () => 'Bearer sk-...',
        apiType: 'openai',
      }}
      initialMessages={[{ id: '1', role: 'assistant', content: 'Hello!', timestamp: 0 }]}
    >
      <AxelleroChatBotUi />
    </AxelleroChatProviders>
  )
}
```
```tsx
<AxelleroChatProviders
  config={{
    defaultUrl: 'http://localhost:8080/api', // OpenWebUI endpoint
    model: 'llama3.2-3b',
    getAuthToken: () => 'Bearer your-token',
    apiType: 'openwebui',
  }}
>
  <AxelleroChatBotUi />
</AxelleroChatProviders>
```
---
> Copy‑paste the snippet you need and tweak. Each example is self‑contained.
```tsx
<AxelleroChatProviders
  config={{
    defaultUrl: 'https://api.openai.com/v1',
    model: 'gpt-4o-mini',
    getAuthToken: () => 'Bearer sk-...',
  }}
>
  <AxelleroChatBotUi />
</AxelleroChatProviders>
```
```tsx title="App.tsx"
import { AxelleroChatBotUi } from 'axellero-chatbot-ui-react'
import { ChatHeader } from './ChatHeader'

export default function App() {
  return (
    <AxelleroChatBotUi
      config={{
        defaultUrl: 'https://api.openai.com/v1',
        model: 'gpt-4o-mini',
        getAuthToken: () => 'Bearer sk-...',
      }}
      header={<ChatHeader />}
    />
  )
}
```
```tsx title="ChatHeader.tsx"
import { ClearMessagesButton, ChatHeaderProps } from 'axellero-chatbot-ui-react'

export const ChatHeader = ({ clearMessagesIcon }: ChatHeaderProps) => (
  <header>
    {/* your own branding/markup here */}
    <ClearMessagesButton />
  </header>
)
```
```tsx title="Footer.tsx"
import './styles.css'
import {
  ChatFooterProps,
  ChatInput,
  ChatFileUploader,
  FilePreview,
  VoiceRecorder,
  useFilesUpload,
  useMessageSend,
  useRecordingAudio,
} from 'axellero-chatbot-ui-react'
import { FC } from 'react'

export const Footer: FC<ChatFooterProps> = ({ icons }) => {
  // File management hooks
  const {
    files,
    base64Images,
    countLoadingFiles,
    onUploadFiles: handleUploadFiles,
    onRemoveFile: handleRemoveFile,
    onClearFilesAndImages: handleClearFilesAndImages,
    onSetCountLoadingFiles: handleSetCountLoadingFiles,
  } = useFilesUpload()

  // Message handling hooks
  const {
    inputRef,
    message,
    messageGenerating,
    setMessage,
    onChangeMessage: handleChangeMessage,
    onSendMessage: handleSendMessage,
    onAbort: handleAbort,
  } = useMessageSend(files, base64Images, handleClearFilesAndImages)

  // Voice recording hooks
  const {
    isRecording,
    isAudioLoading,
    onTranscriptAudio: handleTranscriptAudio,
    onStartRecording: handleStartRecording,
  } = useRecordingAudio(setMessage, inputRef)

  return (
    <div className="chat-footer">
      {/* File and image preview */}
      {(files.length > 0 || base64Images.length > 0 || countLoadingFiles > 0) && (
        <FilePreview
          filePreviewIcon={icons?.filePreviewIcon}
          fileDownloadIcon={icons?.downloadAttachment}
          fileRemoveIcon={icons?.removeAttachment}
          uploadingCount={countLoadingFiles}
          files={files}
          images={base64Images}
          onDelete={handleRemoveFile}
        />
      )}

      {/* Main footer row */}
      <div className="chat-footer-row">
        {/* File uploader */}
        {!isRecording && (
          <ChatFileUploader
            uploadIcon={icons?.uploadAttachment}
            onFileUploaded={handleUploadFiles}
            onSetCountLoadingFiles={handleSetCountLoadingFiles}
          />
        )}

        {/* Text input */}
        {!isRecording && (
          <ChatInput
            ref={inputRef}
            message={message}
            onChange={handleChangeMessage}
            buttonDisabled={countLoadingFiles > 0}
            messageGenerating={messageGenerating}
            sendIcon={icons?.send}
            stopSendIcon={icons?.sendStop}
            onSendMessage={handleSendMessage}
            onStop={handleAbort}
          />
        )}

        {/* Voice recorder */}
        <VoiceRecorder
          isLoading={isAudioLoading}
          isRecording={isRecording}
          startRecordingIcon={icons?.voiceRecorder}
          stopRecordingIcon={icons?.voiceRecorderStop}
          onStart={handleStartRecording}
          onRecorded={handleTranscriptAudio}
        />
      </div>
    </div>
  )
}
```

### Event Callbacks
```tsx
<AxelleroChatBotUi
  onFilesChange={(files) => console.log('Files changed:', files)}
  onAudioSend={(audio) => console.log('Audio sent:', audio)}
  onAudioTranscript={(transcript) => console.log('Transcript:', transcript)}
  onInputChange={(input) => console.log('Input changed:', input)}
/>
```

---
## 🔌 API Reference

| Prop            | Type                  | Default         | Description                                       |
| --------------- | --------------------- | --------------- | ------------------------------------------------- |
| header          | ReactNode             | —               | Custom element that replaces the default header.  |
| footer          | ComponentType         | —               | Custom footer component with full control.        |
| theme           | Theme                 | _light palette_ | Light/dark + granular overrides.                  |
| icons           | `Partial<IconSet>`    | built‑in icons  | Replace any internal icon.                        |
| messages        | MessageValueContext[] | —               | Controlled messages array.                        |
| initialMessages | MessageValueContext[] | —               | Initial messages to display.                      |
| messageBubble   | ComponentType         | —               | Custom message bubble component.                  |

### header
ReactNode rendered at the top of the chat container. When provided, the stock header is not rendered.

### footer
A ComponentType that replaces the default footer. Provides full control over the chat interface, with helper hooks for file management, message handling, and voice recording.

### config
The provider (`AxelleroChatProviders`) is required. It supplies config and message state to all chat components.
```ts
interface ConfigContextValue {
  getAuthToken: () => string // REQUIRED
  model: string // REQUIRED (e.g., 'gpt-4o-mini', 'llama3.2-3b')
  defaultUrl?: string // Optional: OpenWebUI endpoint; if omitted, the OpenAI default URL is used
  apiType?: 'openai' | 'openwebui' // Optional: auto-detected from defaultUrl
  transport?: Transport // Optional: custom transport
  sendLastUserOnly?: boolean // Optional: if true, only the last user message is sent
}

interface AxelleroChatProvidersProps {
  config: ConfigContextValue
  initialMessages?: MessageValueContext[]
  messages?: MessageValueContext[]
  onSetMessages?: Dispatch<SetStateAction<MessageValueContext[]>>
  withoutMsgHistory?: boolean // alias for sendLastUserOnly
}
```
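If you want to own the message state yourself, the `messages` / `onSetMessages` pair above maps directly onto React state. A minimal sketch, assuming `ConfigContextValue` and `MessageValueContext` are exported by the package; the message fields follow the earlier `initialMessages` example:

```tsx
import { useState } from 'react'
import {
  AxelleroChatProviders,
  AxelleroChatBotUi,
  ConfigContextValue,
  MessageValueContext, // assumed export; adjust the import if it lives elsewhere
} from 'axellero-chatbot-ui-react'

export function ControlledChat({ config }: { config: ConfigContextValue }) {
  // Controlled message state: the library reads `messages` and writes through `onSetMessages`
  const [messages, setMessages] = useState<MessageValueContext[]>([
    { id: '1', role: 'assistant', content: 'Hello!', timestamp: 0 },
  ])

  return (
    <AxelleroChatProviders config={config} messages={messages} onSetMessages={setMessages}>
      <AxelleroChatBotUi />
    </AxelleroChatProviders>
  )
}
```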
**API Detection:**

- If defaultUrl is provided → OpenWebUI API
- If no defaultUrl → OpenAI API (https://api.openai.com/v1)
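For example, the same component can target either backend just by changing the config; `apiType` can be omitted and is inferred from `defaultUrl`. A sketch, assuming `ConfigContextValue` is exported; tokens and model names are placeholders:

```ts
import { ConfigContextValue } from 'axellero-chatbot-ui-react'

// No defaultUrl → OpenAI API at https://api.openai.com/v1
const openAiConfig: ConfigContextValue = {
  model: 'gpt-4o-mini',
  getAuthToken: () => 'Bearer sk-...',
}

// defaultUrl present → treated as an OpenWebUI endpoint
const openWebUiConfig: ConfigContextValue = {
  defaultUrl: 'http://localhost:8080/api',
  model: 'llama3.2-3b',
  getAuthToken: () => 'Bearer your-token',
}
```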
### theme

Provide a plain object; unspecified keys fall back to the default palette.
#### Default Theme (Light)
```ts
const themeProp = {
  'root-width': '100%',
  'root-height': '100%',
  'font-family': 'system-ui, Avenir, Helvetica, Arial, sans-serif',
  'font-weight': 400,
  'line-height': 1.5,
  'font-size': '1rem',
  type: 'light', // or 'dark'
  colors: {
    common: {
      gray50: '#f9fafb',
      gray100: '#f3f4f6',
      gray150: '#f0f0f0',
      gray200: '#e5e7eb',
      gray250: '#e6e6e6',
      gray300: '#d1d5db',
      gray500: '#6b7280',
      gray700: '#374151',
      gray900: '#222',
      blue500: '#2563eb',
      blue600: '#646cff',
      blue700: '#535bf2',
      'shadow-light': 'rgba(0,0,0,0.06)',
      'shadow-medium': 'rgba(0,0,0,0.08)',
      white87: 'rgba(255,255,255,0.87)',
      background: '#f9fafb',
      'font-color': '#222',
    },
    header: {
      border: '#00000017',
      title: '#646464',
    },
    voiceRecorder: {
      'loader-bg': 'rgba(0,0,0,0.10)',
    },
    messageBubble: {
      'bot-bg': '#f3f4f6',
      'bot-text': '#000',
      'user-bg': '#e0f2fe',
      'user-text': '#0c4a6e',
      'user-border': 'none',
      'bot-border': 'none',
      'table-bg': '#ffffff',
      'table-border': '#d1d5db',
      'table-head-bg': '#e2e8f0',
      'table-head-text': '#1e293b',
      'table-cell-text': '#1f2937',
      'table-row-even-bg': '#f9fafb',
      'table-row-hover-bg': '#f1f5f9',
      'table-shadow': 'rgba(0,0,0,0.06)',
    },
    emptyBox: {
      text: '#555',
      title: '#222',
      subtitle: '#666',
    },
    filesPreview: {
      'box-bg': '#f3f4f6',
      'icon-color': '#6b7280',
      'box-shadow': 'rgba(0,0,0,0.08)',
      'remove-bg': '#ef4444',
      'remove-border': '#fff',
      'remove-text': '#fff',
      filename: '#374151',
      'scrollbar-thumb': '#d1d5db',
      'overlay-bg': 'rgba(0,0,0,0.35)',
      'skeleton-mid': '#e5e7eb',
    },
    ui: {
      button: {
        bg: '#f9f9f9',
        text: '#222',
        'hover-bg': '#f0f0f0',
        'active-bg': '#e6e6e6',
        'hover-shadow': 'rgba(0,0,0,0.06)',
        'active-shadow': 'rgba(0,0,0,0.08)',
        'outline-green': '#16a34a',
        'outline-blue': '#2563eb',
        'gray-bg': '#e5e7eb',
        'gray-text': '#374151',
      },
      iconButton: {
        color: '#6b7280',
        'hover-bg': '#f0f0f0',
        'active-bg': '#e6e6e6',
        'hover-shadow': 'rgba(0,0,0,0.06)',
        'active-shadow': 'rgba(0,0,0,0.08)',
      },
      textArea: {
        border: '#e5e7eb',
        bg: '#f9fafb',
        text: '#374151',
        'focus-border': '#2563eb',
        shadow: 'rgba(0,0,0,0.06)',
      },
      loader: {
        'dot-bg': 'rgba(51,51,51,0.49)',
      },
    },
  },
}
```
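Since unspecified keys fall back to the default palette, an override can stay as small as the keys you actually change. A minimal sketch using the prop and key names documented above, assuming the fallback applies to nested keys as well; the colour values are arbitrary:

```tsx
<AxelleroChatBotUi
  theme={{
    type: 'dark',
    colors: {
      messageBubble: {
        'user-bg': '#1d4ed8',
        'user-text': '#ffffff',
      },
    },
  }}
/>
```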
### icons

```ts
interface IconSet {
  clearMessages: ReactNode
  send: ReactNode
  sendStop: ReactNode
  voiceRecorder: ReactNode
  voiceRecorderStop: ReactNode
  uploadAttachment: ReactNode
  downloadAttachment: ReactNode
  removeAttachment: ReactNode
  filePreviewIcon: ReactNode
}
```

Pass only the icons you wish to override; the rest fall back to defaults.
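For example, swapping in your own send and clear icons while keeping the rest (a sketch; `MySendIcon` and `MyTrashIcon` are hypothetical components from your own code):

```tsx
<AxelleroChatBotUi
  icons={{
    send: <MySendIcon />, // any ReactNode of your own
    clearMessages: <MyTrashIcon />, // keys you omit keep the built-in icons
  }}
/>
```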
---
## 🎯 Core Features
### File Handling
- Multiple File Types: Support for images, documents, PDFs, and more
- Image Preview: Base64 encoding for Vision API compatibility
- Drag & Drop: Intuitive file upload interface
- File Management: Upload, preview, download, and remove files
### Voice Recording
- Audio Capture: Record voice messages directly in the chat
- Transcription: Automatic speech-to-text conversion
- Real-time Processing: Live audio feedback and status
### Real-time Streaming
- SSE Support: Real-time streaming responses
- Chunk Processing: Handle partial responses gracefully
- Error Handling: Robust error handling for network issues
### Multi-API Support
- OpenAI API: Full compatibility with GPT models and Vision API
- OpenWebUI: Support for local and remote OpenWebUI instances
- Custom Transport: Extensible transport layer for custom backends
---
## 🏗️ Architecture
The component follows a modular architecture with clear separation of concerns:
- Entities: Core data models (messages, files, images)
- Features: Business logic (chat, file upload)
- Widgets: UI components (header, footer, messages)
- Shared: Utilities, contexts, and common functionality
---
## 📝 License
> © 2025 Axellero LLC
## Custom API Implementation

You can completely replace the default API logic by providing a customCompletionsAPI function in your config. This gives you full control over API requests and responses.

### Basic Example
```typescript
const config: ConfigContextValue = {
  model: 'gpt-4',
  getAuthToken: () => 'your-token',
  customCompletionsAPI: async (config, history) => {
    // Your custom API logic here
    const response = await fetch('https://api.openai.com/v1/chat/completions', {
      method: 'POST',
      headers: {
        Authorization: `Bearer ${config.getAuthToken()}`,
        'Content-Type': 'application/json',
      },
      body: JSON.stringify({
        model: config.model,
        messages: history,
        stream: true,
        temperature: 0.7,
        max_tokens: 1000,
        // Add any additional fields you need
      }),
    })

    // Handle the response as you prefer
    // You have full control over the implementation
  },
}
```

### Advanced Example
```typescript
const config: ConfigContextValue = {
  model: 'gpt-4',
  getAuthToken: () => 'your-token',
  customCompletionsAPI: async (config, history) => {
    const response = await fetch('https://api.openai.com/v1/chat/completions', {
      method: 'POST',
      headers: {
        Authorization: `Bearer ${config.getAuthToken()}`,
        'Content-Type': 'application/json',
      },
      body: JSON.stringify({
        model: config.model,
        messages: history,
        stream: true,
        // Advanced parameters
        temperature: 0.7,
        max_tokens: 1000,
        presence_penalty: 0.1,
        frequency_penalty: 0.1,
        top_p: 0.9,
        user: 'custom-user-id',
        logit_bias: { '1234': -100 }, // Block specific tokens
        // Function calling
        functions: [
          {
            name: 'get_weather',
            description: 'Get current weather',
            parameters: {
              type: 'object',
              properties: {
                location: { type: 'string' },
              },
            },
          },
        ],
        function_call: 'auto',
      }),
    })

    // Custom SSE handling
    const reader = response.body?.getReader()
    if (!reader) return

    const decoder = new TextDecoder()
    while (true) {
      const { done, value } = await reader.read()
      if (done) break
      // Your custom chunk processing logic
      const chunk = decoder.decode(value, { stream: true })
      // Process chunks as needed
    }
  },
}
```

### Handling UI Updates
```typescript
import { nanoid } from 'nanoid'

const config: ConfigContextValue = {
  model: 'gpt-4',
  getAuthToken: () => 'your-token',
  customCompletionsAPI: async (config, history) => {
    // Create an ID for the bot message
    const botMsgId = nanoid()

    // Add the bot message to the UI (this has to happen in your React code).
    // To do that, pass a callback in or use another mechanism.
    const addBotMessage = (content: string, error?: string) => {
      // Update the messages in the UI here,
      // e.g. via a callback or via context
    }

    try {
      const response = await fetch('https://api.openai.com/v1/chat/completions', {
        method: 'POST',
        headers: {
          Authorization: `Bearer ${config.getAuthToken()}`,
          'Content-Type': 'application/json',
        },
        body: JSON.stringify({
          model: config.model,
          messages: history,
          stream: true,
          temperature: 0.7,
          max_tokens: 1000,
        }),
      })

      if (!response.ok) {
        throw new Error(`API request failed: ${response.status}`)
      }

      // Process the SSE stream and update the UI
      const reader = response.body?.getReader()
      if (!reader) return

      const decoder = new TextDecoder()
      let fullText = ''
      while (true) {
        const { done, value } = await reader.read()
        if (done) break
        const chunk = decoder.decode(value, { stream: true })
        fullText += chunk
        // Update the bot message in the UI
        addBotMessage(fullText)
      }
    } catch (error: any) {
      // Handle errors and update the UI
      addBotMessage('', error.message)
    }
  },
}
```

**Important:** When using customCompletionsAPI, you need to handle all UI updates yourself. The library only provides the basic structure.
### Division of Responsibilities

When using customCompletionsAPI, here's the division of responsibilities:

**The library does:**

- ✅ Adds the user message to the chat
- ✅ Clears the input and files
- ✅ Calls your custom function

**You control:**

- ❌ Bot message creation and management
- ❌ Abort controller for cancellation
- ❌ Error handling and display
- ❌ Message content updates
- ❌ Loading states
- ❌ All API logic
### Complete Example
```typescript
import { nanoid } from 'nanoid'

const config: ConfigContextValue = {
  model: 'gpt-4',
  getAuthToken: () => 'your-token',
  customCompletionsAPI: async (config, history) => {
    // 1. Create the bot message (your responsibility)
    const botMsgId = nanoid()
    const addBotMessage = (content: string, error?: string) => {
      // Add/update the bot message in the UI here,
      // e.g. via a callback or via context
    }

    // 2. Create an abort controller (your responsibility)
    const abortController = new AbortController()

    try {
      // 3. Add the bot message to the UI
      addBotMessage('')

      const response = await fetch('https://api.openai.com/v1/chat/completions', {
        method: 'POST',
        headers: {
          Authorization: `Bearer ${config.getAuthToken()}`,
          'Content-Type': 'application/json',
        },
        body: JSON.stringify({
          model: config.model,
          messages: history,
          stream: true,
          temperature: 0.7,
          max_tokens: 1000,
        }),
        signal: abortController.signal, // your abort controller
      })

      if (!response.ok) {
        throw new Error(`API request failed: ${response.status}`)
      }

      // 4. Process the SSE stream and update the UI
      const reader = response.body?.getReader()
      if (!reader) return

      const decoder = new TextDecoder()
      let fullText = ''
      while (true) {
        const { done, value } = await reader.read()
        if (done) break
        const chunk = decoder.decode(value, { stream: true })
        fullText += chunk
        // Update the bot message in the UI
        addBotMessage(fullText)
      }
    } catch (error: any) {
      // 5. Handle errors (your responsibility)
      if (error.name === 'AbortError') {
        addBotMessage('', 'Request cancelled')
      } else {
        addBotMessage('', error.message)
      }
    }
  },
}
```

### Use Cases
- Custom API endpoints: Different API providers or endpoints
- Additional parameters: Temperature, max_tokens, function calling, etc.
- Custom processing: WebSocket integration, database logging, custom chunk handling
- Rate limiting: Custom rate limiting logic
- Authentication: Custom auth flows
- Response transformation: Modify responses before processing
### Important Notes
- When customCompletionsAPI is provided, the default API logic is completely bypassed
- You have full control over everything: request body, headers, response handling, and UI updates
- The function receives the current config and the message history
- You're responsible for implementing your own message handling: adding bot messages, updating content, handling errors
- You're responsible for implementing your own SSE (Server-Sent Events) handling
- Make sure to handle errors appropriately in your custom implementation
- The library only adds the user message to the chat - everything else (bot message, abort controller, error handling) is your responsibility

### Usage
```tsx
import { AxelleroChatBotUi } from 'axellero-chatbot-ui-react'

function App() {
  return (
    <AxelleroChatBotUi
      config={config}
      // ... other props
    />
  )
}
```