diff --git a/.github/workflows/build-helper.yml b/.github/workflows/build-helper.yml index 2f5aad3568..fb158e3358 100644 --- a/.github/workflows/build-helper.yml +++ b/.github/workflows/build-helper.yml @@ -12,6 +12,7 @@ on: env: GO_VERSION: "1.24" NODE_VERSION: 22 + NODE_OPTIONS: --max-old-space-size=4096 jobs: build-app: outputs: @@ -31,7 +32,7 @@ jobs: # runner: "windows-11-arm64-16core" runs-on: ${{ matrix.runner }} steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - name: Install Linux Build Dependencies (Linux only) if: matrix.platform == 'linux' run: | @@ -166,13 +167,13 @@ jobs: AWS_SECRET_ACCESS_KEY: "${{ secrets.ARTIFACTS_KEY_SECRET }}" AWS_DEFAULT_REGION: us-west-2 - name: Upload artifacts - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v5 with: name: ${{ matrix.runner }} path: make - name: Upload Snapcraft logs on failure if: failure() - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v5 with: name: ${{ matrix.runner }}-log path: /home/runner/.local/state/snapcraft/log diff --git a/.github/workflows/bump-version.yml b/.github/workflows/bump-version.yml index 25d9b7149d..1fb15b9bf5 100644 --- a/.github/workflows/bump-version.yml +++ b/.github/workflows/bump-version.yml @@ -34,7 +34,7 @@ jobs: with: app-id: ${{ vars.WAVE_BUILDER_APPID }} private-key: ${{ secrets.WAVE_BUILDER_KEY }} - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 with: token: ${{ steps.app-token.outputs.token }} diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 60930e9c0f..2f159bbc2f 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -63,7 +63,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v5 + uses: actions/checkout@v6 - name: Install Task uses: arduino/setup-task@v2 diff --git a/.github/workflows/copilot-setup-steps.yml b/.github/workflows/copilot-setup-steps.yml index 2a093ff357..14a102c81e 100644 --- a/.github/workflows/copilot-setup-steps.yml +++ b/.github/workflows/copilot-setup-steps.yml @@ -18,7 +18,7 @@ jobs: contents: read steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 # Go + Node versions match your helper - uses: actions/setup-go@v6 diff --git a/.github/workflows/deploy-docsite.yml b/.github/workflows/deploy-docsite.yml index 68857f62f1..377a8b6c14 100644 --- a/.github/workflows/deploy-docsite.yml +++ b/.github/workflows/deploy-docsite.yml @@ -30,7 +30,7 @@ jobs: runs-on: ubuntu-latest if: github.event.pull_request.draft == false steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 with: fetch-depth: 0 - uses: actions/setup-node@v6 diff --git a/.github/workflows/publish-release.yml b/.github/workflows/publish-release.yml index 784a2fbec4..821102d5c1 100644 --- a/.github/workflows/publish-release.yml +++ b/.github/workflows/publish-release.yml @@ -11,7 +11,7 @@ jobs: if: ${{ startsWith(github.ref, 'refs/tags/') }} runs-on: ubuntu-latest steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - name: Install Task uses: arduino/setup-task@v2 with: @@ -30,7 +30,7 @@ jobs: needs: [publish-s3] runs-on: ubuntu-latest steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - name: Install Task uses: arduino/setup-task@v2 with: @@ -55,7 +55,7 @@ jobs: needs: [publish-s3] runs-on: ubuntu-latest steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - name: Install Task uses: arduino/setup-task@v2 with: @@ -80,7 +80,7 @@ jobs: needs: [publish-s3] runs-on: windows-latest steps: - - uses: 
actions/checkout@v5 + - uses: actions/checkout@v6 - name: Install Task uses: arduino/setup-task@v2 with: diff --git a/.github/workflows/testdriver-build.yml b/.github/workflows/testdriver-build.yml index 46cb473bf3..934eb2c756 100644 --- a/.github/workflows/testdriver-build.yml +++ b/.github/workflows/testdriver-build.yml @@ -41,7 +41,7 @@ jobs: runs-on: windows-latest if: github.event.pull_request.draft == false steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 # General build dependencies - uses: actions/setup-go@v6 @@ -77,7 +77,7 @@ jobs: # Upload .exe as an artifact - name: Upload .exe artifact id: upload - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v5 with: name: windows-exe path: make/*.exe diff --git a/README.md b/README.md index ad49c7c2b2..da8224ef5a 100644 --- a/README.md +++ b/README.md @@ -24,11 +24,27 @@ Modern development involves constantly switching between terminals and browsers - Flexible drag & drop interface to organize terminal blocks, editors, web browsers, and AI assistants - Built-in editor for seamlessly editing remote files with syntax highlighting and modern editor features - Rich file preview system for remote files (markdown, images, video, PDFs, CSVs, directories) -- Integrated AI chat with support for multiple models (OpenAI, Claude, Azure, Perplexity, Ollama) +- Quick full-screen toggle for any block - expand terminals, editors, and previews for better visibility, then instantly return to multi-block view +- Wave AI - Context-aware terminal assistant that reads your terminal output, analyzes widgets, and performs file operations +- AI chat widget with support for multiple models (OpenAI, Claude, Azure, Perplexity, Ollama) - Command Blocks for isolating and monitoring individual commands with auto-close options - One-click remote connections with full terminal and file system access +- Secure secret storage using native system backends - store API keys and credentials locally, access them across SSH sessions - Rich customization including tab themes, terminal styles, and background images - Powerful `wsh` command system for managing your workspace from the CLI and sharing data between terminal sessions +- Connected file management with `wsh file` - seamlessly copy and sync files between local, remote SSH hosts, Wave filesystem, and S3 + +## Wave AI + +Wave AI is your context-aware terminal assistant with access to your workspace: + +- **Terminal Context**: Reads terminal output and scrollback for debugging and analysis +- **File Operations**: Read, write, and edit files with automatic backups and user approval +- **CLI Integration**: Use `wsh ai` to pipe output or attach files directly from the command line +- **Free Beta**: Included AI credits while we refine the experience +- **Coming Soon**: Command execution (with approval), local model support, and alternate AI providers (BYOK) + +Learn more in our [Wave AI documentation](https://docs.waveterm.dev/waveai). ## Installation diff --git a/ROADMAP.md b/ROADMAP.md index 20e472b52a..c41bece9ae 100644 --- a/ROADMAP.md +++ b/ROADMAP.md @@ -13,6 +13,10 @@ Wave Terminal's AI assistant is already powerful and continues to evolve. 
Here's ### AI Provider Support - ✅ OpenAI (including gpt-5 and gpt-5-mini models) +- ✅ Google Gemini (v0.13) +- ✅ OpenRouter and custom OpenAI-compatible endpoints (v0.13) +- ✅ Azure OpenAI (modern and legacy APIs) (v0.13) +- ✅ Local AI models via Ollama, LM Studio, vLLM, and other OpenAI-compatible servers (v0.13) ### Context & Input @@ -32,33 +36,28 @@ Wave Terminal's AI assistant is already powerful and continues to evolve. Here's ### AI Configuration & Flexibility -- 🔷 BYOK (Bring Your Own Key) - Use your own API keys for any supported provider +- ✅ BYOK (Bring Your Own Key) - Use your own API keys for any supported provider (v0.13) +- ✅ Local AI agents - Run AI models locally on your machine (v0.13) - 🔧 Enhanced provider configuration options - 🔷 Context (add markdown files to give persistent system context) ### Expanded Provider Support -Top priorities are Claude (for better coding support), and the OpenAI Completions API which will allow us to interface with -many more local/open models. - - 🔷 Anthropic Claude - Full integration with extended thinking and tool use -- 🔷 OpenAI Completions API - Support for older model formats -- 🤞 Google Gemini - Complete integration -- 🤞 Local AI agents - Run AI models locally on your machine ### Advanced AI Tools #### File Operations -- 🔧 AI file writing with intelligent diff previews -- 🔧 Rollback support for AI-made changes +- ✅ AI file writing with intelligent diff previews +- ✅ Rollback support for AI-made changes - 🔷 Multi-file editing workflows - 🔷 Safe file modification patterns #### Terminal Command Execution - 🔧 Execute commands directly from AI -- 🔧 Intelligent terminal state detection +- ✅ Intelligent terminal state detection - 🔧 Command result capture and parsing ### Remote & Advanced Capabilities diff --git a/aiprompts/aimodesconfig.md b/aiprompts/aimodesconfig.md new file mode 100644 index 0000000000..207b6fad88 --- /dev/null +++ b/aiprompts/aimodesconfig.md @@ -0,0 +1,709 @@ +# Wave AI Modes Configuration - Visual Editor Architecture + +## Overview + +Wave Terminal's AI modes configuration system allows users to define custom AI assistants with different providers, models, and capabilities. The configuration is stored in `~/.waveterm/config/waveai.json` and provides a flexible way to configure multiple AI modes that appear in the Wave AI panel. 
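+
+For concreteness, a single entry in `waveai.json` might look like the following. This is an illustrative sketch only: the key follows the `provider@modename` pattern and the field names mirror `AIModeConfigType` below, but the values are made up.
+
+```json
+{
+  "openai@gpt-4o": {
+    "display:name": "GPT-4o",
+    "display:order": 10,
+    "ai:provider": "openai",
+    "ai:apitype": "openai-chat",
+    "ai:model": "gpt-4o",
+    "ai:apitokensecretname": "OPENAI_KEY",
+    "ai:capabilities": ["tools", "images"]
+  }
+}
+```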
+ +**Key Design Decisions:** +- Visual editor works on **valid JSON only** - if JSON is invalid, fall back to JSON editor +- Default modes (`waveai@quick`, `waveai@balanced`, `waveai@deep`) are **read-only** in visual editor +- Edits modify the **in-memory JSON directly** - changes saved via existing save button +- Mode keys are **auto-generated** from provider + model or random ID (last 4-6 chars) +- Secrets use **fixed naming convention** per provider (e.g., `OPENAI_KEY`, `OPENROUTER_KEY`) +- Quick **inline secret editor** instead of complex secret management + +## Current System Architecture + +### Data Structure + +**Location:** `pkg/wconfig/settingsconfig.go:264-284` + +```go +type AIModeConfigType struct { + // Display Configuration + DisplayName string `json:"display:name"` // Required + DisplayOrder float64 `json:"display:order,omitempty"` + DisplayIcon string `json:"display:icon,omitempty"` + DisplayShortDesc string `json:"display:shortdesc,omitempty"` + DisplayDescription string `json:"display:description,omitempty"` + + // Provider & Model + Provider string `json:"ai:provider,omitempty"` // wave, google, openrouter, openai, azure, azure-legacy, custom + APIType string `json:"ai:apitype"` // Required: anthropic-messages, openai-responses, openai-chat + Model string `json:"ai:model"` // Required + + // AI Behavior + ThinkingLevel string `json:"ai:thinkinglevel,omitempty"` // low, medium, high + Capabilities []string `json:"ai:capabilities,omitempty"` // pdfs, images, tools + + // Connection Details + Endpoint string `json:"ai:endpoint,omitempty"` + APIVersion string `json:"ai:apiversion,omitempty"` + APIToken string `json:"ai:apitoken,omitempty"` + APITokenSecretName string `json:"ai:apitokensecretname,omitempty"` + + // Azure-Specific + AzureResourceName string `json:"ai:azureresourcename,omitempty"` + AzureDeployment string `json:"ai:azuredeployment,omitempty"` + + // Wave AI Specific + WaveAICloud bool `json:"waveai:cloud,omitempty"` + WaveAIPremium bool `json:"waveai:premium,omitempty"` +} +``` + +**Storage:** `FullConfigType.WaveAIModes` - `map[string]AIModeConfigType` + +Keys follow pattern: `provider@modename` (e.g., `waveai@quick`, `openai@gpt4`) + +### Provider Types & Defaults + +**Defined in:** `pkg/aiusechat/uctypes/uctypes.go:27-35` + +1. **wave** - Wave AI Cloud service + - Auto-sets: `waveai:cloud = true`, endpoint from env or default + - Default endpoint: `https://cfapi.waveterm.dev/api/waveai` + - Used for Wave's hosted AI modes + +2. **openai** - OpenAI API + - Auto-sets: endpoint `https://api.openai.com/v1` + - Auto-detects API type based on model: + - Legacy models (gpt-4o, gpt-3.5): `openai-chat` + - New models (gpt-5*, gpt-4.1*, o1*, o3*): `openai-responses` + +3. **openrouter** - OpenRouter service + - Auto-sets: endpoint `https://openrouter.ai/api/v1`, API type `openai-chat` + +4. **google** - Google AI (Gemini, etc.) + - No auto-defaults currently + +5. **azure** - Azure OpenAI (new unified API) + - Auto-sets: API version `v1`, endpoint from resource name + - Endpoint pattern: `https://{resource}.openai.azure.com/openai/v1/{responses|chat/completions}` + - Auto-detects API type based on model + +6. **azure-legacy** - Azure OpenAI (legacy chat completions) + - Auto-sets: API version `2025-04-01-preview`, API type `openai-chat` + - Endpoint pattern: `https://{resource}.openai.azure.com/openai/deployments/{deployment}/chat/completions?api-version={version}` + - Requires `AzureResourceName` and `AzureDeployment` + +7. 
**custom** - Custom provider + - No auto-defaults + - User must specify all fields manually + +### Default Configuration + +**Location:** `pkg/wconfig/defaultconfig/waveai.json` + +Ships with three Wave AI modes: +- `waveai@quick` - Fast responses (gpt-5-mini, low thinking) +- `waveai@balanced` - Balanced (gpt-5.1, low thinking) [premium] +- `waveai@deep` - Maximum capability (gpt-5.1, medium thinking) [premium] + +### Current UI State + +**Location:** `frontend/app/view/waveconfig/waveaivisual.tsx` + +Currently shows placeholder: "Visual editor coming soon..." + +The component receives: +- `model: WaveConfigViewModel` - Access to config file operations +- Existing patterns from `SecretsContent` for list/detail views + +## Visual Editor Design Plan + +### High-Level Architecture + +``` +┌─────────────────────────────────────────────────────────┐ +│ Wave AI Modes Configuration │ +│ ┌───────────────┐ ┌──────────────────────────────┐ │ +│ │ │ │ │ │ +│ │ Mode List │ │ Mode Editor/Viewer │ │ +│ │ │ │ │ │ +│ │ [Quick] │ │ Provider: [wave ▼] │ │ +│ │ [Balanced] │ │ │ │ +│ │ [Deep] │ │ Display Configuration │ │ +│ │ [Custom] │ │ ├─ Name: ... │ │ +│ │ │ │ ├─ Icon: ... │ │ +│ │ [+ Add New] │ │ └─ Description: ... │ │ +│ │ │ │ │ │ +│ │ │ │ Provider Configuration │ │ +│ │ │ │ (Provider-specific fields) │ │ +│ │ │ │ │ │ +│ │ │ │ [Save] [Delete] [Cancel] │ │ +│ └───────────────┘ └──────────────────────────────┘ │ +└─────────────────────────────────────────────────────────┘ +``` + +### Component Structure + +```typescript +WaveAIVisualContent +├─ ModeList (left panel) +│ ├─ Header with "Add New Mode" button +│ ├─ List of existing modes (sorted by display:order) +│ │ └─ ModeListItem (icon, name, short desc, provider badge) +│ └─ Empty state if no modes +│ +└─ ModeEditor (right panel) + ├─ Provider selector dropdown (when creating/editing) + ├─ Display section (common to all providers) + │ ├─ Name input (required) + │ ├─ Icon picker (optional) + │ ├─ Display order (optional, number) + │ ├─ Short description (optional) + │ └─ Description textarea (optional) + │ + ├─ Provider Configuration section (dynamic based on provider) + │ └─ [Provider-specific form fields] + │ + └─ Action buttons (Save, Delete, Cancel) +``` + +### Provider-Specific Form Fields + +#### 1. Wave Provider (`wave`) +**Read-only/Auto-managed:** +- Endpoint (shows default or env override) +- Cloud flag (always true) +- Secret: Not applicable (managed by Wave) + +**User-configurable:** +- Model (required, text input with suggestions: gpt-5-mini, gpt-5.1) +- API Type (required, dropdown: openai-responses, openai-chat) +- Thinking Level (optional, dropdown: low, medium, high) +- Capabilities (optional, checkboxes: tools, images, pdfs) +- Premium flag (checkbox) + +#### 2. OpenAI Provider (`openai`) +**Auto-managed:** +- Endpoint (shows: api.openai.com/v1) +- API Type (auto-detected from model, editable) +- Secret Name: Fixed as `OPENAI_KEY` + +**User-configurable:** +- Model (required, text input with suggestions: gpt-4o, gpt-5-mini, gpt-5.1, o1-preview) +- API Key (via secret modal - see Secret Management below) +- Thinking Level (optional) +- Capabilities (optional) + +#### 3. OpenRouter Provider (`openrouter`) +**Auto-managed:** +- Endpoint (shows: openrouter.ai/api/v1) +- API Type (always openai-chat) +- Secret Name: Fixed as `OPENROUTER_KEY` + +**User-configurable:** +- Model (required, text input - OpenRouter model format) +- API Key (via secret modal) +- Thinking Level (optional) +- Capabilities (optional) + +#### 4. 
Azure Provider (`azure`) +**Auto-managed:** +- API Version (always v1) +- Endpoint (computed from resource name) +- API Type (auto-detected from model) +- Secret Name: Fixed as `AZURE_KEY` + +**User-configurable:** +- Azure Resource Name (required, validated format) +- Model (required) +- API Key (via secret modal) +- Thinking Level (optional) +- Capabilities (optional) + +#### 5. Azure Legacy Provider (`azure-legacy`) +**Auto-managed:** +- API Version (default: 2025-04-01-preview, editable) +- API Type (always openai-chat) +- Endpoint (computed from resource + deployment + version) +- Secret Name: Fixed as `AZURE_KEY` + +**User-configurable:** +- Azure Resource Name (required, validated) +- Azure Deployment (required) +- Model (required) +- API Key (via secret modal) +- Thinking Level (optional) +- Capabilities (optional) + +#### 6. Google Provider (`google`) +**Auto-managed:** +- Secret Name: Fixed as `GOOGLE_KEY` + +**User-configurable:** +- Model (required) +- API Type (required dropdown) +- Endpoint (required) +- API Key (via secret modal) +- API Version (optional) +- Thinking Level (optional) +- Capabilities (optional) + +#### 7. Custom Provider (`custom`) +**User must specify everything:** +- Model (required) +- API Type (required dropdown) +- Endpoint (required) +- Secret Name (required text input - user defines their own secret name) +- API Key (via secret modal using custom secret name) +- API Version (optional) +- Thinking Level (optional) +- Capabilities (optional) +- Azure Resource Name (optional) +- Azure Deployment (optional) + +### Data Flow + +``` +Load JSON → Parse → Render Visual Editor + ↓ + User Edits Mode → Update fileContentAtom (JSON string) + ↓ + Click Save → Existing save logic validates & writes +``` + +**Simplified Operations:** +1. **Load:** Parse `fileContentAtom` JSON string into mode objects for display +2. **Edit Mode:** Update parsed object → stringify → set `fileContentAtom` → marks as edited +3. **Add Mode:** + - Generate unique key from provider/model or random ID + - Add new mode to parsed object → stringify → set `fileContentAtom` +4. **Delete Mode:** Remove key from parsed object → stringify → set `fileContentAtom` +5. 
**Save:** Existing `model.saveFile()` handles validation and write + +**Mode Key Generation:** +```typescript +function generateModeKey(provider: string, model: string): string { + // Try semantic key first: provider@model-sanitized + const sanitized = model.toLowerCase() + .replace(/[^a-z0-9]/g, '-') + .replace(/-+/g, '-') + .replace(/^-|-$/g, ''); + const semanticKey = `${provider}@${sanitized}`; + + // Check for collision, if exists append random suffix + if (existingModes[semanticKey]) { + const randomId = crypto.randomUUID().slice(-6); + return `${provider}@${sanitized}-${randomId}`; + } + return semanticKey; +} +// Examples: openai@gpt-4o, openrouter@claude-3-5-sonnet, azure@custom-fb4a2c +``` + +**Secret Naming Convention:** +```typescript +// Fixed secret names per provider (except custom) +const SECRET_NAMES = { + openai: "OPENAI_KEY", + openrouter: "OPENROUTER_KEY", + azure: "AZURE_KEY", + "azure-legacy": "AZURE_KEY", + google: "GOOGLE_KEY", + // custom provider: user specifies their own secret name +} as const; + +function getSecretName(provider: string, customSecretName?: string): string { + if (provider === "custom") { + return customSecretName || "CUSTOM_API_KEY"; + } + return SECRET_NAMES[provider]; +} +``` + +### Secret Management UI + +**Secret Status Indicator:** +Display next to API Key field for providers that need one: +- ✅ Green check icon: Secret exists and is set +- ⚠️ Warning icon (yellow/orange): Secret not set or empty +- Click icon to open secret modal + +**Secret Modal:** +``` +┌─────────────────────────────────────┐ +│ Set API Key for OpenAI │ +│ │ +│ Secret Name: OPENAI_KEY │ +│ [read-only for non-custom] │ +│ │ +│ API Key: │ +│ [********************] [Show/Hide]│ +│ │ +│ [Cancel] [Save] │ +└─────────────────────────────────────┘ +``` + +**Modal Behavior:** +1. **Open Modal:** Click status icon or "Set API Key" button +2. **Show Secret Name:** + - Non-custom providers: Read-only, shows fixed name + - Custom provider: Editable text input (user specifies) +3. **API Key Input:** + - Masked password field + - Show/Hide toggle button + - Load existing value if secret already exists +4. **Save:** + - Validates not empty + - Calls RPC to set secret + - Updates status icon +5. **Cancel:** Close without changes + +**Integration with Mode Editor:** +- Check secret existence on mode load/select +- Update icon based on RPC `GetSecretsCommand` result +- "Save" button for mode only saves JSON config +- Secret is set immediately via modal (separate from JSON save) + +### Key Features + +#### 1. Mode List +- Display modes sorted by `display:order` (ascending) +- Show icon, name, short description +- Badge showing provider type +- Highlight Wave AI premium modes +- Click to edit + +#### 2. Add New Mode Flow +1. Click "Add New Mode" +2. Enter mode key (validated: alphanumeric, @, -, ., _) +3. Select provider from dropdown +4. Form dynamically updates to show provider-specific fields +5. Fill required fields (marked with *) +6. Save → validates → adds to config → refreshes list + +#### 3. Edit Mode Flow +1. Click mode from list +2. Load mode data into form +3. Provider is fixed (show read-only or with warning about changing) +4. Edit fields +5. 
Save → validates → updates config → refreshes list
+
+**Raw JSON Editor Option:**
+- "Edit Raw JSON" button in mode editor (available for all modes)
+- Opens modal with Monaco editor showing just this mode's JSON
+- Validates JSON structure before allowing save
+- Useful for:
+  - Modes without a provider field (edge cases)
+  - Advanced users who want precise control
+  - Copying/modifying complex configurations
+- Validation checks:
+  - Valid JSON syntax
+  - Required fields present (`display:name`, `ai:apitype`, `ai:model`)
+  - Enum values valid
+  - Custom error messages for each validation failure
+
+#### 4. Delete Mode Flow
+1. Click mode from list
+2. Delete button in editor
+3. Confirm dialog
+4. Remove from config → save → refresh list
+
+#### 5. Secret Integration
+- For API Token fields, provide two options:
+  - Direct input (text field, masked)
+  - Secret reference (dropdown of existing secrets + link to secrets page)
+- When secret is selected, store name in `ai:apitokensecretname`
+- When direct token, store in `ai:apitoken`
+
+#### 6. Validation
+- **Mode Key:** Must match pattern `^[a-zA-Z0-9_@.-]+$`
+- **Required Fields:** `display:name`, `ai:apitype`, `ai:model`
+- **Azure Resource Name:** Must match `^[a-z0-9]([a-z0-9-]*[a-z0-9])?$` (1-63 chars)
+- **Provider:** Must be one of the valid enum values
+- **API Type:** Must be valid enum value
+- **Thinking Level:** Must be low/medium/high if present
+- **Capabilities:** Must be from valid enum (pdfs, images, tools)
+
+A client-side sketch of these checks appears after the reordering example below.
+
+#### 7. Smart Defaults
+When provider changes or model changes:
+- Show info about what will be auto-configured
+- Display computed endpoint (read-only with info icon)
+- Display auto-detected API type (editable with warning)
+- Pre-fill common values based on provider
+
+### UI Components Needed
+
+#### New Components
+```typescript
+// Main container
+WaveAIVisualContent
+
+// Left panel
+ModeList
+├─ ModeListItem (icon, name, provider badge, premium badge, drag handle)
+└─ AddModeButton
+
+// Right panel - viewer
+ModeViewer
+├─ ModeHeader (name, icon, actions)
+├─ DisplaySection (read-only view of display fields)
+├─ ProviderSection (read-only view of provider config)
+└─ EditButton
+
+// Right panel - editor
+ModeEditor
+├─ ProviderSelector (dropdown, only for new modes)
+├─ DisplayFieldsForm
+├─ ProviderFieldsForm (dynamic based on provider)
+│  ├─ WaveProviderForm
+│  ├─ OpenAIProviderForm
+│  ├─ OpenRouterProviderForm
+│  ├─ AzureProviderForm
+│  ├─ AzureLegacyProviderForm
+│  ├─ GoogleProviderForm
+│  └─ CustomProviderForm
+└─ ActionButtons (Edit Raw JSON, Delete, Cancel)
+
+// Modals
+RawJSONModal
+├─ Title ("Edit Raw JSON: {mode name}")
+├─ MonacoEditor (JSON, single mode object)
+├─ ValidationErrors (inline display)
+└─ Actions (Cancel, Save)
+
+// Shared components
+SecretSelector (dropdown + link to secrets)
+InfoTooltip (explains auto-configured fields)
+ProviderBadge (visual indicator)
+IconPicker (select from available icons)
+DragHandle (for reordering modes in list)
+```
+
+**Drag & Drop for Reordering:**
+```typescript
+// Reordering updates display:order automatically
+function handleModeReorder(draggedKey: string, targetKey: string) {
+    const modes = parseAIModes(fileContent);
+    const modesList = Object.entries(modes)
+        .sort((a, b) => (a[1]["display:order"] || 0) - (b[1]["display:order"] || 0));
+
+    // Find indices
+    const draggedIndex = modesList.findIndex(([k]) => k === draggedKey);
+    const targetIndex = modesList.findIndex(([k]) => k === targetKey);
+
+    // Recalculate display:order for all modes
+    const newOrder = [...modesList];
+    newOrder.splice(draggedIndex, 1);
+    newOrder.splice(targetIndex, 0, modesList[draggedIndex]);
+
+    // Assign new order values (0, 10, 20, 30...)
+    newOrder.forEach(([key, mode], index) => {
+        modes[key] = { ...mode, "display:order": index * 10 };
+    });
+
+    updateFileContent(JSON.stringify(modes, null, 2));
+}
+```
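+
+As a rough illustration, the field-level rules from the Validation section above could be centralized in one helper. This is a sketch only: `validateMode` is a hypothetical function (not existing Wave code), and `AIModeConfigType` is assumed to be the generated type from gotypes.d.ts with JSON-tag keys.
+
+```typescript
+const MODE_KEY_RE = /^[a-zA-Z0-9_@.-]+$/;
+const AZURE_RESOURCE_RE = /^[a-z0-9]([a-z0-9-]*[a-z0-9])?$/;
+const API_TYPES = ["anthropic-messages", "openai-responses", "openai-chat"];
+const THINKING_LEVELS = ["low", "medium", "high"];
+const CAPABILITIES = ["pdfs", "images", "tools"];
+
+// Returns a list of human-readable validation errors (empty array = valid).
+function validateMode(key: string, mode: AIModeConfigType): string[] {
+    const errors: string[] = [];
+    if (!MODE_KEY_RE.test(key)) {
+        errors.push("mode key may only contain letters, digits, _, @, ., -");
+    }
+    if (!mode["display:name"]) errors.push("display:name is required");
+    if (!mode["ai:model"]) errors.push("ai:model is required");
+    if (!API_TYPES.includes(mode["ai:apitype"])) {
+        errors.push(`ai:apitype must be one of: ${API_TYPES.join(", ")}`);
+    }
+    const tl = mode["ai:thinkinglevel"];
+    if (tl && !THINKING_LEVELS.includes(tl)) {
+        errors.push("ai:thinkinglevel must be low, medium, or high");
+    }
+    for (const cap of mode["ai:capabilities"] ?? []) {
+        if (!CAPABILITIES.includes(cap)) errors.push(`unknown capability: ${cap}`);
+    }
+    const res = mode["ai:azureresourcename"];
+    if (res && (res.length > 63 || !AZURE_RESOURCE_RE.test(res))) {
+        errors.push("ai:azureresourcename must be 1-63 chars: lowercase letters, digits, hyphens");
+    }
+    return errors;
+}
+```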
+
+### Model Extensions (Minimal)
+
+**No new atoms needed!** Visual editor uses existing `fileContentAtom`:
+
+```typescript
+// Use existing atoms from WaveConfigViewModel:
+// - fileContentAtom (contains JSON string)
+// - hasEditedAtom (tracks if modified)
+// - errorMessageAtom (for errors)
+
+// Visual editor parses fileContentAtom on render:
+function parseAIModes(jsonString: string): Record<string, AIModeConfigType> | null {
+    try {
+        return JSON.parse(jsonString);
+    } catch {
+        return null; // Show "invalid JSON" error
+    }
+}
+
+// Updates modify fileContentAtom:
+function updateMode(key: string, mode: AIModeConfigType) {
+    const modes = parseAIModes(globalStore.get(model.fileContentAtom));
+    if (!modes) return;
+
+    modes[key] = mode;
+    const newJson = JSON.stringify(modes, null, 2);
+    globalStore.set(model.fileContentAtom, newJson);
+    globalStore.set(model.hasEditedAtom, true);
+}
+
+// Secrets use existing model methods:
+// - model.refreshSecrets() - already exists
+// - RpcApi.GetSecretsCommand() - check if secret exists
+// - RpcApi.SetSecretsCommand() - set secret value
+```
+
+**Component State (useState):**
+```typescript
+// In WaveAIVisualContent component:
+const [selectedModeKey, setSelectedModeKey] = useState<string | null>(null);
+const [isAddingMode, setIsAddingMode] = useState(false);
+const [showSecretModal, setShowSecretModal] = useState(false);
+const [secretModalProvider, setSecretModalProvider] = useState("");
+```
+
+### Implementation Phases
+
+#### Phase 1: Foundation & List View
+- Parse `fileContentAtom` JSON into modes on render
+- Display mode list (left panel, ~300px)
+  - Built-in modes with 🔒 icon at top
+  - Custom modes below
+  - Sort by `display:order`
+- Select mode → show in right panel (empty state initially)
+- Handle invalid JSON → show error, switch to JSON tab
+
+#### Phase 2: Built-in Mode Viewer
+- Click built-in mode → show read-only details
+- Display all fields (display, provider, config)
+- "Built-in Mode" badge/banner
+- No edit/delete buttons
+
+#### Phase 3: Custom Mode Editor (Basic)
+- Click custom mode → load into editor form
+- Display fields (name, icon, order, description)
+- Provider field (read-only, badge)
+- Model field (text input)
+- Save → update `fileContentAtom` JSON
+- Cancel → revert to previous selection
+
+#### Phase 4: Provider-Specific Fields
+- Dynamic form based on provider type
+- OpenAI: model, thinking level, capabilities
+- Azure: resource name, model, thinking, capabilities
+- Azure Legacy: resource name, deployment, model
+- OpenRouter: model
+- Google: model, API type, endpoint
+- Custom: everything manual
+- Info tooltips for auto-configured fields
+
+#### Phase 5: Secret Integration
+- Check secret existence on mode select
+- Display status icon (✅ / ⚠️)
+- Click icon → open secret modal
+- Secret modal: fixed name (or custom input), password field
+- Save secret → immediate RPC call
+- Update status icon after save
+
+#### Phase 6: Add New Mode
+- "Add New Mode" button
+- Provider dropdown selector
+- Auto-generate mode key from provider + model
+- Form with provider-specific fields
+- Add to modes → update JSON → mark edited
+- Select newly created mode
+
+#### Phase 7: Delete Mode
+- Delete button for custom modes only
+- Simple confirmation dialog
+- Remove from modes → update JSON → deselect
+
+#### Phase 8: Raw JSON Editor
+- "Edit Raw JSON" button in mode editor (all modes)
+- Modal with Monaco editor for single mode
+- JSON validation before save:
+  - Syntax check with error highlighting
+  - Required fields check (`display:name`, `ai:apitype`, `ai:model`)
+  - Enum validation (provider, apitype, thinkinglevel, capabilities)
+  - Display specific error messages per validation failure
+- Parse validated JSON and update mode in main JSON
+- Useful for edge cases (modes without provider) and power users
+
+#### Phase 9: Drag & Drop Reordering
+- Add drag handle icon to custom mode list items
+- Implement drag & drop functionality:
+  - Visual feedback during drag (opacity, cursor)
+  - Drop target highlighting
+  - Smooth reordering animation
+- On drop:
+  - Recalculate `display:order` for all affected modes
+  - Use spacing (0, 10, 20, 30...) for easy manual adjustment
+  - Update JSON with new order values
+  - Built-in modes always stay at top (negative order values)
+
+#### Phase 10: Polish & UX Refinements
+- Field validation with inline error messages
+- Empty state when no mode selected
+- Icon picker dropdown (Font Awesome icons)
+- Capabilities checkboxes with descriptions
+- Thinking level dropdown with explanations
+- Help tooltips throughout
+- Keyboard shortcuts (e.g., Ctrl/Cmd+E for raw JSON)
+- Loading states for secret checks
+- Smooth transitions and animations
+
+### Technical Considerations
+
+1. **JSON Sync:** Parse/stringify from `fileContentAtom` on every read/write
+2. **Validation:** Validate on blur or before updating JSON
+3. **Built-in Detection:** Check if key starts with `waveai@` → read-only
+4. **Type Safety:** Use `AIModeConfigType` from gotypes.d.ts
+5. **State Management:**
+   - Model atoms for shared state (`fileContentAtom`, `hasEditedAtom`)
+   - Component useState for UI state (selected mode, modals)
+6. **Error Handling:**
+   - Invalid JSON → show message, disable visual editor
+   - Parse errors → gracefully handle, don't crash
+7. **Performance:**
+   - Parse JSON on mount and when `fileContentAtom` changes externally
+   - Debounce frequent updates if needed
+8. **Secret Checks:**
+   - Load secret existence on mode select
+   - Cache results to avoid repeated RPC calls
+
+### Testing Strategy
+
+1. **Unit Tests:** Validation functions, key generation
+2. **Integration Tests:** Form submission, backend sync
+3. **E2E Tests:** Full add/edit/delete flows
+4. **Provider Tests:** Each provider form with various inputs
+5. **Edge Cases:** Empty config, invalid JSON, malformed data
+
+### Documentation Needs
+
+1. **In-app help:** Tooltips and info bubbles explaining fields
+2. **Provider guides:** What each provider needs, where to get API keys
+3. **Examples:** Show example configurations for common setups
+4. **Troubleshooting:** Common errors and solutions
+
+## Next Steps
+
+1. Create detailed mockups/wireframes
+2. Implement Phase 1 (basic list view)
+3. Add RPC methods if needed for secrets integration
+4. Iterate on provider forms
+5. Polish and ship
+
+This design provides a user-friendly way to configure AI modes without directly editing JSON, while still maintaining the power and flexibility of the underlying system.
\ No newline at end of file
diff --git a/aiprompts/tailwind-container-queries.md b/aiprompts/tailwind-container-queries.md
index 007cc080cf..646bf970bb 100644
--- a/aiprompts/tailwind-container-queries.md
+++ b/aiprompts/tailwind-container-queries.md
@@ -19,20 +19,52 @@ In v3: install `@tailwindcss/container-queries`.
 - `@container` marks the parent.
 - `@sm:` / `@md:` refer to **container width**, not viewport.
 
+#### Max-Width Container Queries
+
+For max-width queries, use the `@max-` prefix:
+
+```html
+<div class="@container">
+  <!-- shown only when the container is below the sm breakpoint -->
+  <div class="hidden @max-sm:block">Only on containers < sm</div>
+
+  <!-- fixed overlay on small containers, normal flow on large ones -->
+  <div class="@max-sm:fixed @max-sm:inset-0">
+    Fixed overlay on small, normal on large
+  </div>
+</div>
+```
+
+- `@max-sm:` = max-width query (container **below** sm breakpoint)
+- `@sm:` = min-width query (container **at or above** sm breakpoint)
+
+**IMPORTANT**: The syntax is `@max-w600:` NOT `max-@w600:` (the `@` comes first, then the `max-` modifier)
+
 #### Notes
 - Based on native CSS container queries (well supported in modern browsers).
 - Breakpoints for container queries reuse Tailwind’s `sm`, `md`, `lg`, etc. scales.
 - Safe for modern webapps; no IE/legacy support.
 
-we have special breakpoints set up for panels:
+We have special breakpoints set up for panels:
 
+    --container-w600: 600px;
+    --container-w450: 450px;
     --container-xs: 300px;
     --container-xxs: 200px;
     --container-tiny: 120px;
 
 since often sm, md, and lg are too big for panels.
 
-so to use you'd do:
+Usage examples:
+
+```html
-@xs:ml-4
+<!-- margin applies when the container is at least xs (300px) wide -->
+<div class="@container">
+  <div class="@xs:ml-4">...</div>
+</div>
+
+<!-- hidden when the container is narrower than w600 (600px) -->
+<div class="@container">
+  <div class="@max-w600:hidden">...</div>
+</div>
+``` diff --git a/cmd/generateschema/main-generateschema.go b/cmd/generateschema/main-generateschema.go index aa16eeb960..5480f3dd5d 100644 --- a/cmd/generateschema/main-generateschema.go +++ b/cmd/generateschema/main-generateschema.go @@ -18,6 +18,8 @@ const WaveSchemaSettingsFileName = "schema/settings.json" const WaveSchemaConnectionsFileName = "schema/connections.json" const WaveSchemaAiPresetsFileName = "schema/aipresets.json" const WaveSchemaWidgetsFileName = "schema/widgets.json" +const WaveSchemaBgPresetsFileName = "schema/bgpresets.json" +const WaveSchemaWaveAIFileName = "schema/waveai.json" func generateSchema(template any, dir string) error { settingsSchema := jsonschema.Reflect(template) @@ -59,4 +61,16 @@ func main() { if err != nil { log.Fatalf("widgets schema error: %v", err) } + + bgPresetsTemplate := make(map[string]wconfig.BgPresetsType) + err = generateSchema(&bgPresetsTemplate, WaveSchemaBgPresetsFileName) + if err != nil { + log.Fatalf("bg presets schema error: %v", err) + } + + waveAITemplate := make(map[string]wconfig.AIModeConfigType) + err = generateSchema(&waveAITemplate, WaveSchemaWaveAIFileName) + if err != nil { + log.Fatalf("waveai schema error: %v", err) + } } diff --git a/cmd/server/main-server.go b/cmd/server/main-server.go index b52d97491c..5259b60ffa 100644 --- a/cmd/server/main-server.go +++ b/cmd/server/main-server.go @@ -14,6 +14,7 @@ import ( "time" "github.com/joho/godotenv" + "github.com/wavetermdev/waveterm/pkg/aiusechat" "github.com/wavetermdev/waveterm/pkg/authkey" "github.com/wavetermdev/waveterm/pkg/blockcontroller" "github.com/wavetermdev/waveterm/pkg/blocklogger" @@ -22,6 +23,7 @@ import ( "github.com/wavetermdev/waveterm/pkg/panichandler" "github.com/wavetermdev/waveterm/pkg/remote/conncontroller" "github.com/wavetermdev/waveterm/pkg/remote/fileshare/wshfs" + "github.com/wavetermdev/waveterm/pkg/secretstore" "github.com/wavetermdev/waveterm/pkg/service" "github.com/wavetermdev/waveterm/pkg/telemetry" "github.com/wavetermdev/waveterm/pkg/telemetry/telemetrydata" @@ -91,6 +93,9 @@ func doShutdown(reason string) { // watch stdin, kill server if stdin is closed func stdinReadWatch() { + defer func() { + panichandler.PanicHandler("stdinReadWatch", recover()) + }() buf := make([]byte, 1024) for { _, err := os.Stdin.Read(buf) @@ -109,6 +114,9 @@ func startConfigWatcher() { } func telemetryLoop() { + defer func() { + panichandler.PanicHandler("telemetryLoop", recover()) + }() var nextSend int64 time.Sleep(InitialTelemetryWait) for { @@ -120,6 +128,42 @@ func telemetryLoop() { } } +func sendNoTelemetryUpdate(telemetryEnabled bool) { + ctx, cancelFn := context.WithTimeout(context.Background(), 5*time.Second) + defer cancelFn() + clientData, err := wstore.DBGetSingleton[*waveobj.Client](ctx) + if err != nil { + log.Printf("telemetry update: error getting client data: %v\n", err) + return + } + if clientData == nil { + log.Printf("telemetry update: client data is nil\n") + return + } + err = wcloud.SendNoTelemetryUpdate(ctx, clientData.OID, !telemetryEnabled) + if err != nil { + log.Printf("[error] sending no-telemetry update: %v\n", err) + return + } +} + +func setupTelemetryConfigHandler() { + watcher := wconfig.GetWatcher() + if watcher == nil { + return + } + currentConfig := watcher.GetFullConfig() + currentTelemetryEnabled := currentConfig.Settings.TelemetryEnabled + + watcher.RegisterUpdateHandler(func(newConfig wconfig.FullConfigType) { + newTelemetryEnabled := newConfig.Settings.TelemetryEnabled + if newTelemetryEnabled != 
currentTelemetryEnabled { + currentTelemetryEnabled = newTelemetryEnabled + go sendNoTelemetryUpdate(newTelemetryEnabled) + } + }) +} + func backupCleanupLoop() { defer func() { panichandler.PanicHandler("backupCleanupLoop", recover()) @@ -182,18 +226,25 @@ func updateTelemetryCounts(lastCounts telemetrydata.TEventProps) telemetrydata.T customWidgets := fullConfig.CountCustomWidgets() customAIPresets := fullConfig.CountCustomAIPresets() customSettings := wconfig.CountCustomSettings() + customAIModes := fullConfig.CountCustomAIModes() props.UserSet = &telemetrydata.TEventUserProps{ SettingsCustomWidgets: customWidgets, SettingsCustomAIPresets: customAIPresets, SettingsCustomSettings: customSettings, + SettingsCustomAIModes: customAIModes, + } + + secretsCount, err := secretstore.CountSecrets() + if err == nil { + props.UserSet.SettingsSecretsCount = secretsCount } if utilfn.CompareAsMarshaledJson(props, lastCounts) { return lastCounts } tevent := telemetrydata.MakeTEvent("app:counts", props) - err := telemetry.RecordTEvent(ctx, tevent) + err = telemetry.RecordTEvent(ctx, tevent) if err != nil { log.Printf("error recording counts tevent: %v\n", err) } @@ -232,6 +283,9 @@ func beforeSendActivityUpdate(ctx context.Context) { } func startupActivityUpdate(firstLaunch bool) { + defer func() { + panichandler.PanicHandler("startupActivityUpdate", recover()) + }() ctx, cancelFn := context.WithTimeout(context.Background(), 5*time.Second) defer cancelFn() activity := wshrpc.ActivityUpdate{Startup: 1} @@ -465,18 +519,29 @@ func main() { return } + err = shellutil.FixupWaveZshHistory() + if err != nil { + log.Printf("error fixing up wave zsh history: %v\n", err) + } createMainWshClient() sigutil.InstallShutdownSignalHandlers(doShutdown) sigutil.InstallSIGUSR1Handler() startConfigWatcher() + aiusechat.InitAIModeConfigWatcher() maybeStartPprofServer() go stdinReadWatch() go telemetryLoop() + setupTelemetryConfigHandler() go updateTelemetryCountsLoop() go backupCleanupLoop() go startupActivityUpdate(firstLaunch) // must be after startConfigWatcher() blocklogger.InitBlockLogger() - go wavebase.GetSystemSummary() // get this cached (used in AI) + go func() { + defer func() { + panichandler.PanicHandler("GetSystemSummary", recover()) + }() + wavebase.GetSystemSummary() + }() webListener, err := web.MakeTCPListener("web") if err != nil { diff --git a/cmd/testai/main-testai.go b/cmd/testai/main-testai.go index eace7ca61a..2684336e45 100644 --- a/cmd/testai/main-testai.go +++ b/cmd/testai/main-testai.go @@ -24,8 +24,10 @@ import ( var testSchemaJSON string const ( - DefaultAnthropicModel = "claude-sonnet-4-5" - DefaultOpenAIModel = "gpt-5.1" + DefaultAnthropicModel = "claude-sonnet-4-5" + DefaultOpenAIModel = "gpt-5.1" + DefaultOpenRouterModel = "mistralai/mistral-small-3.2-24b-instruct" + DefaultGeminiModel = "gemini-3-pro-preview" ) // TestResponseWriter implements http.ResponseWriter and additional interfaces for testing @@ -113,7 +115,7 @@ func testOpenAI(ctx context.Context, model, message string, tools []uctypes.Tool } opts := &uctypes.AIOptsType{ - APIType: aiusechat.APIType_OpenAI, + APIType: uctypes.APIType_OpenAIResponses, APIToken: apiKey, Model: model, MaxTokens: 4096, @@ -155,6 +157,106 @@ func testOpenAI(ctx context.Context, model, message string, tools []uctypes.Tool } } +func testOpenAIComp(ctx context.Context, model, message string, tools []uctypes.ToolDefinition) { + apiKey := os.Getenv("OPENAI_APIKEY") + if apiKey == "" { + fmt.Println("Error: OPENAI_APIKEY environment variable not set") + 
os.Exit(1) + } + + opts := &uctypes.AIOptsType{ + APIType: uctypes.APIType_OpenAIChat, + APIToken: apiKey, + Endpoint: "https://api.openai.com/v1/chat/completions", + Model: model, + MaxTokens: 4096, + ThinkingLevel: uctypes.ThinkingLevelMedium, + } + + chatID := uuid.New().String() + + aiMessage := &uctypes.AIMessage{ + MessageId: uuid.New().String(), + Parts: []uctypes.AIMessagePart{ + { + Type: uctypes.AIMessagePartTypeText, + Text: message, + }, + }, + } + + fmt.Printf("Testing OpenAI Completions API with WaveAIPostMessageWrap, model: %s\n", model) + fmt.Printf("Message: %s\n", message) + fmt.Printf("Chat ID: %s\n", chatID) + fmt.Println("---") + + testWriter := &TestResponseWriter{} + sseHandler := sse.MakeSSEHandlerCh(testWriter, ctx) + defer sseHandler.Close() + + chatOpts := uctypes.WaveChatOpts{ + ChatId: chatID, + ClientId: uuid.New().String(), + Config: *opts, + Tools: tools, + SystemPrompt: []string{"You are a helpful assistant. Be concise and clear in your responses."}, + } + err := aiusechat.WaveAIPostMessageWrap(ctx, sseHandler, aiMessage, chatOpts) + if err != nil { + fmt.Printf("OpenAI Completions API streaming error: %v\n", err) + } +} + +func testOpenRouter(ctx context.Context, model, message string, tools []uctypes.ToolDefinition) { + apiKey := os.Getenv("OPENROUTER_APIKEY") + if apiKey == "" { + fmt.Println("Error: OPENROUTER_APIKEY environment variable not set") + os.Exit(1) + } + + opts := &uctypes.AIOptsType{ + APIType: uctypes.APIType_OpenAIChat, + APIToken: apiKey, + Endpoint: "https://openrouter.ai/api/v1/chat/completions", + Model: model, + MaxTokens: 4096, + ThinkingLevel: uctypes.ThinkingLevelMedium, + } + + chatID := uuid.New().String() + + aiMessage := &uctypes.AIMessage{ + MessageId: uuid.New().String(), + Parts: []uctypes.AIMessagePart{ + { + Type: uctypes.AIMessagePartTypeText, + Text: message, + }, + }, + } + + fmt.Printf("Testing OpenRouter with WaveAIPostMessageWrap, model: %s\n", model) + fmt.Printf("Message: %s\n", message) + fmt.Printf("Chat ID: %s\n", chatID) + fmt.Println("---") + + testWriter := &TestResponseWriter{} + sseHandler := sse.MakeSSEHandlerCh(testWriter, ctx) + defer sseHandler.Close() + + chatOpts := uctypes.WaveChatOpts{ + ChatId: chatID, + ClientId: uuid.New().String(), + Config: *opts, + Tools: tools, + SystemPrompt: []string{"You are a helpful assistant. 
Be concise and clear in your responses."}, + } + err := aiusechat.WaveAIPostMessageWrap(ctx, sseHandler, aiMessage, chatOpts) + if err != nil { + fmt.Printf("OpenRouter streaming error: %v\n", err) + } +} + func testAnthropic(ctx context.Context, model, message string, tools []uctypes.ToolDefinition) { apiKey := os.Getenv("ANTHROPIC_APIKEY") if apiKey == "" { @@ -163,7 +265,7 @@ func testAnthropic(ctx context.Context, model, message string, tools []uctypes.T } opts := &uctypes.AIOptsType{ - APIType: aiusechat.APIType_Anthropic, + APIType: uctypes.APIType_AnthropicMessages, APIToken: apiKey, Model: model, MaxTokens: 4096, @@ -205,6 +307,57 @@ func testAnthropic(ctx context.Context, model, message string, tools []uctypes.T } } +func testGemini(ctx context.Context, model, message string, tools []uctypes.ToolDefinition) { + apiKey := os.Getenv("GOOGLE_APIKEY") + if apiKey == "" { + fmt.Println("Error: GOOGLE_APIKEY environment variable not set") + os.Exit(1) + } + + opts := &uctypes.AIOptsType{ + APIType: uctypes.APIType_GoogleGemini, + APIToken: apiKey, + Model: model, + MaxTokens: 8192, + Capabilities: []string{uctypes.AICapabilityTools, uctypes.AICapabilityImages, uctypes.AICapabilityPdfs}, + } + + // Generate a chat ID + chatID := uuid.New().String() + + // Convert to AIMessage format for WaveAIPostMessageWrap + aiMessage := &uctypes.AIMessage{ + MessageId: uuid.New().String(), + Parts: []uctypes.AIMessagePart{ + { + Type: uctypes.AIMessagePartTypeText, + Text: message, + }, + }, + } + + fmt.Printf("Testing Google Gemini streaming with WaveAIPostMessageWrap, model: %s\n", model) + fmt.Printf("Message: %s\n", message) + fmt.Printf("Chat ID: %s\n", chatID) + fmt.Println("---") + + testWriter := &TestResponseWriter{} + sseHandler := sse.MakeSSEHandlerCh(testWriter, ctx) + defer sseHandler.Close() + + chatOpts := uctypes.WaveChatOpts{ + ChatId: chatID, + ClientId: uuid.New().String(), + Config: *opts, + Tools: tools, + SystemPrompt: []string{"You are a helpful assistant. Be concise and clear in your responses."}, + } + err := aiusechat.WaveAIPostMessageWrap(ctx, sseHandler, aiMessage, chatOpts) + if err != nil { + fmt.Printf("Google Gemini streaming error: %v\n", err) + } +} + func testT1(ctx context.Context) { tool := aiusechat.GetAdderToolDefinition() tools := []uctypes.ToolDefinition{tool} @@ -217,33 +370,58 @@ func testT2(ctx context.Context) { testOpenAI(ctx, DefaultOpenAIModel, "what is 2+2+8, use the provider adder tool", tools) } +func testT3(ctx context.Context) { + testOpenAIComp(ctx, "gpt-4o", "what is 2+2? 
please be brief", nil)
+}
+
+func testT4(ctx context.Context) {
+	tool := aiusechat.GetAdderToolDefinition()
+	tools := []uctypes.ToolDefinition{tool}
+	testGemini(ctx, DefaultGeminiModel, "what is 2+2+8, use the provider adder tool", tools)
+}
+
 func printUsage() {
-	fmt.Println("Usage: go run main-testai.go [--anthropic] [--tools] [--model <model>] [message]")
+	fmt.Println("Usage: go run main-testai.go [--anthropic|--openaicomp|--openrouter|--gemini] [--tools] [--model <model>] [message]")
 	fmt.Println("Examples:")
 	fmt.Println("  go run main-testai.go 'What is 2+2?'")
 	fmt.Println("  go run main-testai.go --model o4-mini 'What is 2+2?'")
 	fmt.Println("  go run main-testai.go --anthropic 'What is 2+2?'")
 	fmt.Println("  go run main-testai.go --anthropic --model claude-3-5-sonnet-20241022 'What is 2+2?'")
+	fmt.Println("  go run main-testai.go --openaicomp --model gpt-4o 'What is 2+2?'")
+	fmt.Println("  go run main-testai.go --openrouter 'What is 2+2?'")
+	fmt.Println("  go run main-testai.go --openrouter --model anthropic/claude-3.5-sonnet 'What is 2+2?'")
+	fmt.Println("  go run main-testai.go --gemini 'What is 2+2?'")
+	fmt.Println("  go run main-testai.go --gemini --model gemini-1.5-pro 'What is 2+2?'")
 	fmt.Println("  go run main-testai.go --tools 'Help me configure GitHub Actions monitoring'")
 	fmt.Println("")
 	fmt.Println("Default models:")
 	fmt.Printf("  OpenAI: %s\n", DefaultOpenAIModel)
 	fmt.Printf("  Anthropic: %s\n", DefaultAnthropicModel)
+	fmt.Printf("  OpenAI Completions: gpt-4o\n")
+	fmt.Printf("  OpenRouter: %s\n", DefaultOpenRouterModel)
+	fmt.Printf("  Google Gemini: %s\n", DefaultGeminiModel)
 	fmt.Println("")
 	fmt.Println("Environment variables:")
 	fmt.Println("  OPENAI_APIKEY (for OpenAI models)")
 	fmt.Println("  ANTHROPIC_APIKEY (for Anthropic models)")
+	fmt.Println("  OPENROUTER_APIKEY (for OpenRouter models)")
+	fmt.Println("  GOOGLE_APIKEY (for Google Gemini models)")
 }
 
 func main() {
-	var anthropic, tools, help, t1, t2 bool
+	var anthropic, openaicomp, openrouter, gemini, tools, help, t1, t2, t3, t4 bool
 	var model string
 	flag.BoolVar(&anthropic, "anthropic", false, "Use Anthropic API instead of OpenAI")
+	flag.BoolVar(&openaicomp, "openaicomp", false, "Use OpenAI Completions API")
+	flag.BoolVar(&openrouter, "openrouter", false, "Use OpenRouter API")
+	flag.BoolVar(&gemini, "gemini", false, "Use Google Gemini API")
 	flag.BoolVar(&tools, "tools", false, "Enable GitHub Actions Monitor tools for testing")
-	flag.StringVar(&model, "model", "", fmt.Sprintf("AI model to use (defaults: %s for OpenAI, %s for Anthropic)", DefaultOpenAIModel, DefaultAnthropicModel))
+	flag.StringVar(&model, "model", "", fmt.Sprintf("AI model to use (defaults: %s for OpenAI, %s for Anthropic, %s for OpenRouter, %s for Gemini)", DefaultOpenAIModel, DefaultAnthropicModel, DefaultOpenRouterModel, DefaultGeminiModel))
 	flag.BoolVar(&help, "help", false, "Show usage information")
 	flag.BoolVar(&t1, "t1", false, fmt.Sprintf("Run preset T1 test (%s with 'what is 2+2')", DefaultAnthropicModel))
 	flag.BoolVar(&t2, "t2", false, fmt.Sprintf("Run preset T2 test (%s with 'what is 2+2')", DefaultOpenAIModel))
+	flag.BoolVar(&t3, "t3", false, "Run preset T3 test (OpenAI Completions API with gpt-4o)")
+	flag.BoolVar(&t4, "t4", false, "Run preset T4 test (Google Gemini with gemini-3-pro-preview)")
 	flag.Parse()
 
 	if help {
@@ -262,11 +440,25 @@
 		testT2(ctx)
 		return
 	}
+	if t3 {
+		testT3(ctx)
+		return
+	}
+	if t4 {
+		testT4(ctx)
+		return
+	}
 
 	// Set default model based on API type if not provided
 	if model == "" {
 		if anthropic {
			model = 
DefaultAnthropicModel + } else if openaicomp { + model = "gpt-4o" + } else if openrouter { + model = DefaultOpenRouterModel + } else if gemini { + model = DefaultGeminiModel } else { model = DefaultOpenAIModel } @@ -285,6 +477,12 @@ func main() { if anthropic { testAnthropic(ctx, model, message, toolDefs) + } else if openaicomp { + testOpenAIComp(ctx, model, message, toolDefs) + } else if openrouter { + testOpenRouter(ctx, model, message, toolDefs) + } else if gemini { + testGemini(ctx, model, message, toolDefs) } else { testOpenAI(ctx, model, message, toolDefs) } diff --git a/cmd/wsh/cmd/wshcmd-editconfig.go b/cmd/wsh/cmd/wshcmd-editconfig.go index 2adf1b7647..5f2153dd77 100644 --- a/cmd/wsh/cmd/wshcmd-editconfig.go +++ b/cmd/wsh/cmd/wshcmd-editconfig.go @@ -5,12 +5,10 @@ package cmd import ( "fmt" - "path/filepath" "github.com/spf13/cobra" "github.com/wavetermdev/waveterm/pkg/waveobj" "github.com/wavetermdev/waveterm/pkg/wshrpc" - "github.com/wavetermdev/waveterm/pkg/wshrpc/wshclient" ) var editConfigMagnified bool @@ -34,32 +32,23 @@ func editConfigRun(cmd *cobra.Command, args []string) (rtnErr error) { sendActivity("editconfig", rtnErr == nil) }() - // Get config directory from Wave info - resp, err := wshclient.WaveInfoCommand(RpcClient, &wshrpc.RpcOpts{Timeout: 2000}) - if err != nil { - return fmt.Errorf("getting Wave info: %w", err) - } - configFile := "settings.json" // default if len(args) > 0 { configFile = args[0] } - settingsFile := filepath.Join(resp.ConfigDir, configFile) - wshCmd := &wshrpc.CommandCreateBlockData{ BlockDef: &waveobj.BlockDef{ Meta: map[string]interface{}{ - waveobj.MetaKey_View: "preview", - waveobj.MetaKey_File: settingsFile, - waveobj.MetaKey_Edit: true, + waveobj.MetaKey_View: "waveconfig", + waveobj.MetaKey_File: configFile, }, }, Magnified: editConfigMagnified, Focused: true, } - _, err = RpcClient.SendRpcRequest(wshrpc.Command_CreateBlock, wshCmd, &wshrpc.RpcOpts{Timeout: 2000}) + _, err := RpcClient.SendRpcRequest(wshrpc.Command_CreateBlock, wshCmd, &wshrpc.RpcOpts{Timeout: 2000}) if err != nil { return fmt.Errorf("opening config file: %w", err) } diff --git a/cmd/wsh/cmd/wshcmd-secret.go b/cmd/wsh/cmd/wshcmd-secret.go index bce1d0beb9..f2c287579a 100644 --- a/cmd/wsh/cmd/wshcmd-secret.go +++ b/cmd/wsh/cmd/wshcmd-secret.go @@ -9,6 +9,7 @@ import ( "strings" "github.com/spf13/cobra" + "github.com/wavetermdev/waveterm/pkg/waveobj" "github.com/wavetermdev/waveterm/pkg/wshrpc" "github.com/wavetermdev/waveterm/pkg/wshrpc/wshclient" ) @@ -16,6 +17,8 @@ import ( // secretNameRegex must match the validation in pkg/wconfig/secretstore.go var secretNameRegex = regexp.MustCompile(`^[A-Za-z][A-Za-z0-9_]*$`) +var secretUiMagnified bool + var secretCmd = &cobra.Command{ Use: "secret", Short: "manage secrets", @@ -54,12 +57,22 @@ var secretDeleteCmd = &cobra.Command{ PreRunE: preRunSetupRpcClient, } +var secretUiCmd = &cobra.Command{ + Use: "ui", + Short: "open secrets UI", + Args: cobra.NoArgs, + RunE: secretUiRun, + PreRunE: preRunSetupRpcClient, +} + func init() { + secretUiCmd.Flags().BoolVarP(&secretUiMagnified, "magnified", "m", false, "open secrets UI in magnified mode") rootCmd.AddCommand(secretCmd) secretCmd.AddCommand(secretGetCmd) secretCmd.AddCommand(secretSetCmd) secretCmd.AddCommand(secretListCmd) secretCmd.AddCommand(secretDeleteCmd) + secretCmd.AddCommand(secretUiCmd) } func secretGetRun(cmd *cobra.Command, args []string) (rtnErr error) { @@ -156,4 +169,27 @@ func secretDeleteRun(cmd *cobra.Command, args []string) (rtnErr error) { 
WriteStdout("secret deleted: %s\n", name) return nil +} + +func secretUiRun(cmd *cobra.Command, args []string) (rtnErr error) { + defer func() { + sendActivity("secret", rtnErr == nil) + }() + + wshCmd := &wshrpc.CommandCreateBlockData{ + BlockDef: &waveobj.BlockDef{ + Meta: map[string]interface{}{ + waveobj.MetaKey_View: "waveconfig", + waveobj.MetaKey_File: "secrets", + }, + }, + Magnified: secretUiMagnified, + Focused: true, + } + + _, err := RpcClient.SendRpcRequest(wshrpc.Command_CreateBlock, wshCmd, &wshrpc.RpcOpts{Timeout: 2000}) + if err != nil { + return fmt.Errorf("opening secrets UI: %w", err) + } + return nil } \ No newline at end of file diff --git a/docs/docs/ai-presets.mdx b/docs/docs/ai-presets.mdx index b8c7b34546..6321dae3ad 100644 --- a/docs/docs/ai-presets.mdx +++ b/docs/docs/ai-presets.mdx @@ -1,7 +1,7 @@ --- sidebar_position: 3.6 id: "ai-presets" -title: "AI Presets" +title: "AI Presets (Deprecated)" --- :::warning Deprecation Notice The AI Widget and its presets are being replaced by [Wave AI](./waveai.mdx). Please refer to the Wave AI documentation for the latest AI features and configuration options. diff --git a/docs/docs/config.mdx b/docs/docs/config.mdx index dafd8c99ee..a349c4ab51 100644 --- a/docs/docs/config.mdx +++ b/docs/docs/config.mdx @@ -37,6 +37,7 @@ wsh editconfig | app:dismissarchitecturewarning | bool | Disable warnings on app start when you are using a non-native architecture for Wave. For more info, see [Why does Wave warn me about ARM64 translation when it launches?](./faq#why-does-wave-warn-me-about-arm64-translation-when-it-launches). | | app:defaultnewblock | string | Sets the default new block (Cmd:n, Cmd:d). "term" for terminal block, "launcher" for launcher block (default = "term") | | app:showoverlayblocknums | bool | Set to false to disable the Ctrl+Shift block number overlay that appears when holding Ctrl+Shift (defaults to true) | +| app:ctrlvpaste | bool | On Windows/Linux, when null (default) uses Control+V on Windows only. Set to true to force Control+V on all non-macOS platforms, false to disable the accelerator. macOS always uses Command+V regardless of this setting | | ai:preset | string | the default AI preset to use | | ai:baseurl | string | Set the AI Base Url (must be OpenAI compatible) | | ai:apitoken | string | your AI api token | diff --git a/docs/docs/connections.mdx b/docs/docs/connections.mdx index 77dc4aacd6..08a8ac2632 100644 --- a/docs/docs/connections.mdx +++ b/docs/docs/connections.mdx @@ -4,6 +4,8 @@ id: "connections" title: "Connections" --- +import { VersionBadge } from "@site/src/components/versionbadge"; + # Connections Wave allows users to connect to various machines and unify them together in a way that preserves the unique behavior of each. At the moment, this extends to SSH remote connections, local WSL connections, and AWS S3 buckets. @@ -156,6 +158,7 @@ In addition to the regular ssh config file, wave also has its own config file to | ssh:batchmode | A boolean indicating if password and passphrase prompts should be skipped. Can be used to override the value in `~/.ssh/config` or to set it if the ssh config is being ignored.| | ssh:pubkeyauthentication | A boolean indicating if public key authentication is enabled. Can be used to override the value in `~/.ssh/config` or to set it if the ssh config is being ignored.| | ssh:passwordauthentication | A boolean indicating if password authentication is enabled. Can be used to override the value in `~/.ssh/config` or to set it if the ssh config is being ignored. 
| +| ssh:passwordsecretname | A string specifying the name of a secret stored in the [secret store](/secrets) to use as the SSH password. When set, this password will be automatically used for password authentication instead of prompting the user. | | ssh:kbdinteractiveauthentication | A boolean indicating if keyboard interactive authentication is enabled. Can be used to override the value in `~/.ssh/config` or to set it if the ssh config is being ignored. | | ssh:preferredauthentications | A list of strings indicating an ordering of different types of authentications. Each authentication type will be tried in order. This supports `"publickey"`, `"keyboard-interactive"`, and `"password"` as valid types. Other types of authentication are not handled and will be skipped. Can be used to override the value in `~/.ssh/config` or to set it if the ssh config is being ignored.| | ssh:addkeystoagent | A boolean indicating if the keys used for a connection should be added to the ssh agent. Can be used to override the value in `~/.ssh/config` or to set it if the ssh config is being ignored.| diff --git a/docs/docs/faq.mdx b/docs/docs/faq.mdx index 74967cbb91..37c714e610 100644 --- a/docs/docs/faq.mdx +++ b/docs/docs/faq.mdx @@ -6,25 +6,6 @@ title: "FAQ" # FAQ -### How do I enable Claude Code support with Shift+Enter? - -Wave supports Claude Code and similar AI coding tools that expect Shift+Enter to send an escape sequence + newline (`\u001b\n`) instead of a regular carriage return. This can be enabled using the `term:shiftenternewline` configuration setting. - -To enable this globally for all terminals: -```bash -wsh setconfig term:shiftenternewline=true -``` - -To enable this for just a specific terminal block: -```bash -wsh setmeta term:shiftenternewline=true -``` - -You can also set this in your [settings.json](./config) file: -```json -"term:shiftenternewline": true -``` - ### How can I see the block numbers? The block numbers will appear when you hold down Ctrl-Shift (and disappear once you release the key combo). @@ -48,87 +29,6 @@ Just remember in JSON, backslashes need to be escaped. So add this to your [sett `wsh` is an internal CLI for extending control over Wave to the command line, you can learn more about it [here](./wsh). To prevent misuse by other applications, `wsh` requires an access token provided by Wave to work and will not function outside of the app. -### How do I make new blocks or splits inherit my shell’s current directory? - -Wave uses a special escape sequence (OSC 7) to track the shell’s working directory and maintain the working directory of new terminal blocks and splits. Wave listens for these sequences to update its `cmd:cwd` metadata. That metadata is copied to new blocks when you: - -- Open a new terminal block (Alt N / Cmd N) -- Split a pane (Cmd D / Cmd Shift D) - -Not all shells emit this escape sequence, so new blocks or splits may start in your home directory instead. To ensure your shell emits the OSC 7 escape sequence, add the following to your shell startup/config file and restart Wave (or source your config). 
- -#### Bash - -Add to `~/.bashrc` or `~/.bash_profile`: - -```bash -# Emit OSC 7 on each prompt to tell terminal about new working directory -__update_cwd() { - # Only run in interactive shells - [[ $- == *i* ]] || return - # Only run if attached to a terminal - [ -t 1 ] || return - # Redirect to tty so output doesn't show in shell - printf "\033]7;file://%s%s\007" "$HOSTNAME" "${PWD// /%20}" > /dev/tty -} -if [[ -n "$PROMPT_COMMAND" ]]; then - export PROMPT_COMMAND="__update_cwd; $PROMPT_COMMAND" -else - export PROMPT_COMMAND="__update_cwd" -fi -``` - -#### Zsh - -Add to `~/.zshrc`: - -```zsh -# Emit OSC 7 escape on directory change and prompt -function _wave_emit_cwd() { - printf "\033]7;file://%s%s\007" "$HOSTNAME" "${PWD// /%20}" > /dev/tty -} -autoload -U add-zsh-hook -add-zsh-hook chpwd _wave_emit_cwd -add-zsh-hook precmd _wave_emit_cwd -``` - -#### Fish - -> Fish shell (v4.0.0 and later) emits OSC 7 by default—no config required. - -For older Fish versions, add to `~/.config/fish/config.fish`: - -```fish -# Emit OSC 7 on each PWD change -function _wave_emit_cwd --on-variable PWD - printf "\033]7;file://%s%s\007" (hostname) (string replace ' ' '%20' $PWD) > /dev/tty -end -``` - -After configuring, open a new block or split (Alt T / Cmd T, Alt N / Cmd N, Cmd D / Cmd Shift D) and verify blocks start in your current directory. - -#### Verifying Current Directory Preservation - -1. Open a Wave terminal block. -2. `cd` into a project folder, e.g. `cd ~/projects/foo`. -3. Right-click on the block's title bar and select "Copy BlockId" to retrieve the block’s ID. -4. Use the copied BlockId to retrieve the block’s metadata: - - ```bash - # Example: replace BLOCK_ID with your actual block reference - wsh getmeta --block BLOCK_ID - ``` - -5. Confirm the output JSON contains a `cmd:cwd` field, for example: - - ```json - { - "cmd:cwd": "/Users/you/projects/foo", - ... - } - ``` - -6. Open a new block or split the pane—both should start in `/Users/you/projects/foo`. ## Why does Wave warn me about ARM64 translation when it launches? diff --git a/docs/docs/releasenotes.mdx b/docs/docs/releasenotes.mdx index f14b1252c7..b540906e06 100644 --- a/docs/docs/releasenotes.mdx +++ b/docs/docs/releasenotes.mdx @@ -6,6 +6,33 @@ sidebar_position: 200 # Release Notes +### v0.13.0 — Dec 8, 2025 + +**Wave v0.13 Brings Local AI Support, BYOK, and Unified Configuration** + +Wave v0.13 is a major release that opens up Wave AI to local models, third-party providers, and bring-your-own-key (BYOK) configurations. This release also includes a completely redesigned configuration system and several terminal improvements. 
+
+**Local AI & BYOK Support:**
+- **OpenAI-Compatible API** - Wave now supports any provider or local server using the `/v1/chat/completions` endpoint, enabling use of Ollama, LM Studio, vLLM, OpenRouter, and countless other local and hosted models
+- **Google Gemini Integration** - Native support for Google's Gemini models with a dedicated API adapter
+- **Provider Presets** - Simplified configuration with built-in presets for OpenAI, OpenRouter, Google, Azure, and custom endpoints
+- **Multiple AI Modes** - Easily switch between different models and providers with a unified interface
+- See the new [Wave AI Modes documentation](https://docs.waveterm.dev/waveai-modes) for configuration examples and setup guides
+
+**Unified Configuration Widget:**
+- **New Config Interface** - Replaced the basic JSON editor with a dedicated configuration widget accessible from the sidebar
+- **Better Organization** - Browse and edit different configuration types (general settings, AI modes, secrets) with improved validation and error handling
+- **Integrated Secrets Management** - Access Wave's secret store directly from the config widget for secure credential management
+
+**Terminal Improvements:**
+- **Bracketed Paste Mode** - Now enabled by default to improve multi-line paste behavior and compatibility with tools like Claude Code
+- **Windows Paste Fix** - Ctrl+V now works as a standard paste accelerator on Windows
+- **SSH Password Management** - Store SSH connection passwords in Wave's secret store to avoid re-typing credentials
+
+**Other Changes:**
+- Package updates and dependency upgrades
+- Various bug fixes and stability improvements
+
### v0.12.5 — Nov 24, 2025

Quick patch release to fix paste behavior on Linux (prevent raw HTML from getting pasted to the terminal).
diff --git a/docs/docs/secrets.mdx b/docs/docs/secrets.mdx
new file mode 100644
index 0000000000..e01612c5b8
--- /dev/null
+++ b/docs/docs/secrets.mdx
@@ -0,0 +1,147 @@
+---
+sidebar_position: 3.2
+id: "secrets"
+title: "Secrets"
+---
+
+import { VersionBadge } from "@site/src/components/versionbadge";
+
+# Secrets
+
+
+
+Wave Terminal provides a secure way to store sensitive information like passwords, API keys, and tokens. Secrets are stored encrypted in your system's native keychain (macOS Keychain, Windows Credential Manager, or Linux Secret Service), ensuring your sensitive data remains protected.
+
+## Why Use Secrets?
+
+Secrets in Wave Terminal allow you to:
+
+- **Store SSH passwords** - Automatically authenticate to SSH connections without typing passwords
+- **Manage API keys** - Keep API tokens, keys, and credentials secure
+- **Share across sessions** - Access your secrets from any terminal block or remote connection
+- **Avoid plaintext storage** - Never store sensitive data in configuration files or scripts
+
+## Opening the Secrets UI
+
+There are several ways to access the secrets management interface:
+
+1. **From the widgets bar** (recommended):
+   - Click the settings (gear) icon on the widgets bar
+   - Select **Secrets** from the menu
+
+2. **From the command line:**
+   ```bash
+   wsh secret ui
+   ```
+
+The secrets UI provides a visual interface to view, add, edit, and delete secrets.
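+
+As a quick end-to-end sketch (the secret name and the API call are illustrative; the `wsh secret` commands are covered in the next section), a stored secret can be loaded into a single shell session without ever landing in a dotfile or script:
+
+```bash
+# Load a stored secret into the current shell session only
+export GITHUB_TOKEN="$(wsh secret get GITHUB_TOKEN)"
+
+# Use it like any other environment variable
+curl -s -H "Authorization: Bearer $GITHUB_TOKEN" https://api.github.com/user
+```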
+ +## Managing Secrets via CLI + +Wave Terminal provides a complete CLI for managing secrets from any terminal block: + +```bash +# List all secret names (not values) +wsh secret list + +# Get a specific secret value +wsh secret get MY_SECRET_NAME + +# Set a secret (format: name=value, no spaces around =) +wsh secret set GITHUB_TOKEN=ghp_xxxxxxxxxx +wsh secret set DB_PASSWORD=super_secure_password + +# Delete a secret +wsh secret delete MY_SECRET_NAME +``` + +## Secret Naming Rules + +Secret names must match the pattern: `^[A-Za-z][A-Za-z0-9_]*$` + +This means: +- Must start with a letter (A-Z or a-z) +- Can only contain letters, numbers, and underscores +- Cannot contain spaces or special characters + +**Valid names:** `MY_SECRET`, `ApiKey`, `ssh_password_1` +**Invalid names:** `123_SECRET`, `my-secret`, `secret name` + +## Using Secrets with SSH Connections + + + +Secrets can be used to automatically provide passwords for SSH connections, eliminating the need to type passwords repeatedly. + +### Configure in connections.json + +Add the `ssh:passwordsecretname` field to your connection configuration: + +```json +{ + "myserver": { + "ssh:hostname": "example.com", + "ssh:user": "myuser", + "ssh:passwordsecretname": "SERVER_PASSWORD" + } +} +``` + +Then store your password as a secret: + +```bash +wsh secret set SERVER_PASSWORD=my_actual_password +``` + +Now when Wave connects to `myserver`, it will automatically use the password from your secret store instead of prompting you. + +### Benefits + +- **Security**: Password stored encrypted in your system keychain +- **Convenience**: No need to type passwords for each connection +- **Flexibility**: Update passwords by changing the secret, not the configuration + +## Security Considerations + +- **Encrypted Storage**: Secrets are stored encrypted in your Wave configuration directory. The encryption key itself is protected by your operating system's secure credential storage (macOS Keychain, Windows Credential Manager, or Linux Secret Service). + +- **No Plaintext**: Secrets are never stored unencrypted in logs or accessible files. + +- **Access Control**: Secrets are only accessible to Wave Terminal. + + +## Storage Backend + +Wave Terminal automatically detects and uses the appropriate secret storage backend for your operating system: + +- **macOS**: Uses the macOS Keychain +- **Windows**: Uses Windows Credential Manager +- **Linux**: Uses the Secret Service API (freedesktop.org specification) + +:::warning Linux Secret Storage +On Linux systems, Wave requires a compatible secret service backend (typically GNOME Keyring or KWallet). These are usually pre-installed with your desktop environment. If no compatible backend is detected, you won't be able to set secrets, and the UI will display a warning. +::: + +## Troubleshooting + +### "No appropriate secret manager found" + +This error occurs on Linux when no compatible secret service backend is available. Install GNOME Keyring or KWallet and ensure the secret service is running. + +### Secret not found + +Ensure the secret name is spelled correctly (names are case-sensitive) and that the secret exists: + +```bash +wsh secret list +``` + +### Permission denied on Linux + +The secret service may require you to unlock your keyring. This typically happens after login. Consult your desktop environment's documentation for keyring management. 
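+
+To check the backend directly (a Linux-only sketch; assumes the standard `dbus-send` utility is installed), you can ask the session bus whether a Secret Service provider is registered:
+
+```bash
+# A Secret Service backend owns the org.freedesktop.secrets bus name
+dbus-send --session --print-reply --dest=org.freedesktop.DBus \
+  /org/freedesktop/DBus org.freedesktop.DBus.ListNames | grep secrets
+```
+
+If nothing is printed, install and start GNOME Keyring or KWallet before using `wsh secret set`.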
+ +## Related Documentation + +- [Connections](/connections) - Learn about SSH connections and configuration +- [wsh Command Reference](/wsh-reference#secret) - Complete CLI command documentation for secrets \ No newline at end of file diff --git a/docs/docs/waveai-modes.mdx b/docs/docs/waveai-modes.mdx new file mode 100644 index 0000000000..d8b94ee460 --- /dev/null +++ b/docs/docs/waveai-modes.mdx @@ -0,0 +1,472 @@ +--- +sidebar_position: 1.6 +id: "waveai-modes" +title: "Wave AI (Local Models + BYOK)" +--- + +import { VersionBadge } from "@site/src/components/versionbadge"; + + + +Wave AI supports custom AI modes that allow you to use local models, custom API endpoints, and alternative AI providers. This gives you complete control over which models and providers you use with Wave's AI features. + +## Configuration Overview + +AI modes are configured in `~/.config/waveterm/waveai.json`. + +**To edit using the UI:** +1. Click the settings (gear) icon in the widget bar +2. Select "Settings" from the menu +3. Choose "Wave AI Modes" from the settings sidebar + +**Or launch from the command line:** +```bash +wsh editconfig waveai.json +``` + +Each mode defines a complete AI configuration including the model, API endpoint, authentication, and display properties. + +## Provider-Based Configuration + +Wave AI now supports provider-based configuration which automatically applies sensible defaults for common providers. By specifying the `ai:provider` field, you can significantly simplify your configuration as the system will automatically set up endpoints, API types, and secret names. + +### Supported Providers + +- **`openai`** - OpenAI API (automatically configures endpoint and secret name) [[see example](#openai)] +- **`openrouter`** - OpenRouter API (automatically configures endpoint and secret name) [[see example](#openrouter)] +- **`google`** - Google AI (Gemini) [[see example](#google-ai-gemini)] +- **`azure`** - Azure OpenAI Service (modern API) [[see example](#azure-openai-modern-api)] +- **`azure-legacy`** - Azure OpenAI Service (legacy deployment API) [[see example](#azure-openai-legacy-deployment-api)] +- **`custom`** - Custom API endpoint (fully manual configuration) [[see examples](#local-model-examples)] + +### Supported API Types + +Wave AI supports the following API types: + +- **`openai-chat`**: Uses the `/v1/chat/completions` endpoint (most common) +- **`openai-responses`**: Uses the `/v1/responses` endpoint (modern API for GPT-5+ models) +- **`google-gemini`**: Google's Gemini API format (automatically set when using `ai:provider: "google"`, not typically used directly) + +## Global Wave AI Settings + +You can configure global Wave AI behavior in your Wave Terminal settings (separate from the mode configurations in `waveai.json`). + +### Setting a Default AI Mode + +After configuring a local model or custom mode, you can make it the default by setting `waveai:defaultmode` in your Wave Terminal settings. + +:::important +Use the **mode key** (the key in your `waveai.json` configuration), not the display name. For example, use `"ollama-llama"` (the key), not `"Ollama - Llama 3.3"` (the display name). +::: + +**Using the settings command:** +```bash +wsh setconfig waveai:defaultmode="ollama-llama" +``` + +**Or edit settings.json directly:** +1. Click the settings (gear) icon in the widget bar +2. Select "Settings" from the menu +3. 
Add the `waveai:defaultmode` key to your settings.json: +```json + "waveai:defaultmode": "ollama-llama" +``` + +This will make the specified mode the default selection when opening Wave AI features. + +### Hiding Wave Cloud Modes + +If you prefer to use only your local or custom models and want to hide Wave's cloud AI modes from the mode dropdown, set `waveai:showcloudmodes` to `false`: + +**Using the settings command:** +```bash +wsh setconfig waveai:showcloudmodes=false +``` + +**Or edit settings.json directly:** +1. Click the settings (gear) icon in the widget bar +2. Select "Settings" from the menu +3. Add the `waveai:showcloudmodes` key to your settings.json: +```json + "waveai:showcloudmodes": false +``` + +This will hide Wave's built-in cloud AI modes, showing only your custom configured modes. + +## Local Model Examples + +### Ollama + +[Ollama](https://ollama.ai) provides an OpenAI-compatible API for running models locally: + +```json +{ + "ollama-llama": { + "display:name": "Ollama - Llama 3.3", + "display:order": 1, + "display:icon": "microchip", + "display:description": "Local Llama 3.3 70B model via Ollama", + "ai:apitype": "openai-chat", + "ai:model": "llama3.3:70b", + "ai:thinkinglevel": "medium", + "ai:endpoint": "http://localhost:11434/v1/chat/completions", + "ai:apitoken": "ollama" + } +} +``` + +:::tip +The `ai:apitoken` field is required but Ollama ignores it - you can set it to any value like `"ollama"`. +::: + +### LM Studio + +[LM Studio](https://lmstudio.ai) provides a local server that can run various models: + +```json +{ + "lmstudio-qwen": { + "display:name": "LM Studio - Qwen", + "display:order": 2, + "display:icon": "server", + "display:description": "Local Qwen model via LM Studio", + "ai:apitype": "openai-chat", + "ai:model": "qwen/qwen-2.5-coder-32b-instruct", + "ai:thinkinglevel": "medium", + "ai:endpoint": "http://localhost:1234/v1/chat/completions", + "ai:apitoken": "not-needed" + } +} +``` + +### vLLM + +[vLLM](https://docs.vllm.ai) is a high-performance inference server with OpenAI API compatibility: + +```json +{ + "vllm-local": { + "display:name": "vLLM", + "display:order": 3, + "display:icon": "server", + "display:description": "Local model via vLLM", + "ai:apitype": "openai-chat", + "ai:model": "your-model-name", + "ai:thinkinglevel": "medium", + "ai:endpoint": "http://localhost:8000/v1/chat/completions", + "ai:apitoken": "not-needed" + } +} +``` + +## Cloud Provider Examples + +### OpenAI + +Using the `openai` provider automatically configures the endpoint and secret name: + +```json +{ + "openai-gpt4o": { + "display:name": "GPT-4o", + "ai:provider": "openai", + "ai:model": "gpt-4o" + } +} +``` + +The provider automatically sets: +- `ai:endpoint` to `https://api.openai.com/v1/chat/completions` +- `ai:apitype` to `openai-chat` (or `openai-responses` for GPT-5+ models) +- `ai:apitokensecretname` to `OPENAI_KEY` (store your OpenAI API key with this name) +- `ai:capabilities` to `["tools", "images", "pdfs"]` (automatically determined based on model) + +For newer models like GPT-4.1 or GPT-5, the API type is automatically determined: + +```json +{ + "openai-gpt41": { + "display:name": "GPT-4.1", + "ai:provider": "openai", + "ai:model": "gpt-4.1" + } +} +``` + +### OpenRouter + +[OpenRouter](https://openrouter.ai) provides access to multiple AI models. 
Using the `openrouter` provider simplifies configuration: + +```json +{ + "openrouter-qwen": { + "display:name": "OpenRouter - Qwen", + "ai:provider": "openrouter", + "ai:model": "qwen/qwen-2.5-coder-32b-instruct" + } +} +``` + +The provider automatically sets: +- `ai:endpoint` to `https://openrouter.ai/api/v1/chat/completions` +- `ai:apitype` to `openai-chat` +- `ai:apitokensecretname` to `OPENROUTER_KEY` (store your OpenRouter API key with this name) + +:::note +For OpenRouter, you must manually specify `ai:capabilities` based on your model's features. Example: +```json +{ + "openrouter-qwen": { + "display:name": "OpenRouter - Qwen", + "ai:provider": "openrouter", + "ai:model": "qwen/qwen-2.5-coder-32b-instruct", + "ai:capabilities": ["tools"] + } +} +``` +::: + +### Google AI (Gemini) + +[Google AI](https://ai.google.dev) provides the Gemini family of models. Using the `google` provider simplifies configuration: + +```json +{ + "google-gemini": { + "display:name": "Gemini 3 Pro", + "ai:provider": "google", + "ai:model": "gemini-3-pro-preview" + } +} +``` + +The provider automatically sets: +- `ai:endpoint` to `https://generativelanguage.googleapis.com/v1beta/models/{model}:streamGenerateContent` +- `ai:apitype` to `google-gemini` +- `ai:apitokensecretname` to `GOOGLE_AI_KEY` (store your Google AI API key with this name) +- `ai:capabilities` to `["tools", "images", "pdfs"]` (automatically configured) + +### Azure OpenAI (Modern API) + +For the modern Azure OpenAI API, use the `azure` provider: + +```json +{ + "azure-gpt4": { + "display:name": "Azure GPT-4", + "ai:provider": "azure", + "ai:model": "gpt-4", + "ai:azureresourcename": "your-resource-name" + } +} +``` + +The provider automatically sets: +- `ai:endpoint` to `https://your-resource-name.openai.azure.com/openai/v1/chat/completions` (or `/responses` for newer models) +- `ai:apitype` based on the model +- `ai:apitokensecretname` to `AZURE_OPENAI_KEY` (store your Azure OpenAI key with this name) + +:::note +For Azure providers, you must manually specify `ai:capabilities` based on your model's features. Example: +```json +{ + "azure-gpt4": { + "display:name": "Azure GPT-4", + "ai:provider": "azure", + "ai:model": "gpt-4", + "ai:azureresourcename": "your-resource-name", + "ai:capabilities": ["tools", "images"] + } +} +``` +::: + +### Azure OpenAI (Legacy Deployment API) + +For legacy Azure deployments, use the `azure-legacy` provider: + +```json +{ + "azure-legacy-gpt4": { + "display:name": "Azure GPT-4 (Legacy)", + "ai:provider": "azure-legacy", + "ai:azureresourcename": "your-resource-name", + "ai:azuredeployment": "your-deployment-name" + } +} +``` + +The provider automatically constructs the full endpoint URL and sets the API version (defaults to `2025-04-01-preview`). You can override the API version with `ai:azureapiversion` if needed. + +:::note +For Azure Legacy provider, you must manually specify `ai:capabilities` based on your model's features. +::: + +## Using Secrets for API Keys + +Instead of storing API keys directly in the configuration, you should use Wave's secret store to keep your credentials secure. Secrets are stored encrypted using your system's native keychain. + +### Storing an API Key + +**Using the Secrets UI (recommended):** +1. Click the settings (gear) icon in the widget bar +2. Select "Secrets" from the menu +3. Click "Add New Secret" +4. Enter the secret name (e.g., `OPENAI_API_KEY`) and your API key +5. 
Click "Save" + +**Or from the command line:** +```bash +wsh secret set OPENAI_KEY=sk-xxxxxxxxxxxxxxxx +wsh secret set OPENROUTER_KEY=sk-xxxxxxxxxxxxxxxx +``` + +### Referencing the Secret + +When using providers like `openai` or `openrouter`, the secret name is automatically set. Just ensure the secret exists with the correct name: + +```json +{ + "my-openai-mode": { + "display:name": "OpenAI GPT-4o", + "ai:provider": "openai", + "ai:model": "gpt-4o" + } +} +``` + +The `openai` provider automatically looks for the `OPENAI_KEY` secret. See the [Secrets documentation](./secrets.mdx) for more information on managing secrets securely in Wave. + +## Multiple Modes Example + +You can define multiple AI modes and switch between them easily: + +```json +{ + "ollama-llama": { + "display:name": "Ollama - Llama 3.3", + "display:order": 1, + "ai:model": "llama3.3:70b", + "ai:endpoint": "http://localhost:11434/v1/chat/completions", + "ai:apitoken": "ollama" + }, + "ollama-codellama": { + "display:name": "Ollama - CodeLlama", + "display:order": 2, + "ai:model": "codellama:34b", + "ai:endpoint": "http://localhost:11434/v1/chat/completions", + "ai:apitoken": "ollama" + }, + "openai-gpt4o": { + "display:name": "GPT-4o", + "display:order": 10, + "ai:provider": "openai", + "ai:model": "gpt-4o" + } +} +``` + +## Troubleshooting + +### Connection Issues + +If Wave can't connect to your model server: + +1. **For cloud providers with `ai:provider` set**: Ensure you have the correct secret stored (e.g., `OPENAI_KEY`, `OPENROUTER_KEY`) +2. **For local/custom endpoints**: Verify the server is running (`curl http://localhost:11434/v1/models` for Ollama) +3. Check the `ai:endpoint` is the complete endpoint URL including the path (e.g., `http://localhost:11434/v1/chat/completions`) +4. Verify the `ai:apitype` matches your server's API (defaults are usually correct when using providers) +5. Check firewall settings if using a non-localhost address + +### Model Not Found + +If you get "model not found" errors: + +1. Verify the model name matches exactly what your server expects +2. For Ollama, use `ollama list` to see available models +3. 
Some servers require prefixes or specific naming formats + +### API Type Selection + +- The API type defaults to `openai-chat` if not specified, which works for most providers +- Use `openai-chat` for Ollama, LM Studio, custom endpoints, and most cloud providers +- Use `openai-responses` for newer OpenAI models (GPT-5+) or when your provider specifically requires it +- Provider presets automatically set the correct API type when needed + +## Configuration Reference + +### Minimal Configuration (with Provider) + +```json +{ + "mode-key": { + "display:name": "Qwen (OpenRouter)", + "ai:provider": "openrouter", + "ai:model": "qwen/qwen-2.5-coder-32b-instruct" + } +} +``` + +### Full Configuration (all fields) + +```json +{ + "mode-key": { + "display:name": "Display Name", + "display:order": 1, + "display:icon": "icon-name", + "display:description": "Full description", + "ai:provider": "custom", + "ai:apitype": "openai-chat", + "ai:model": "model-name", + "ai:thinkinglevel": "medium", + "ai:endpoint": "http://localhost:11434/v1/chat/completions", + "ai:azureapiversion": "v1", + "ai:apitoken": "your-token", + "ai:apitokensecretname": "PROVIDER_KEY", + "ai:azureresourcename": "your-resource", + "ai:azuredeployment": "your-deployment", + "ai:capabilities": ["tools", "images", "pdfs"] + } +} +``` + +### Field Reference + +| Field | Required | Description | +|-------|----------|-------------| +| `display:name` | Yes | Name shown in the AI mode selector | +| `display:order` | No | Sort order in the selector (lower numbers first) | +| `display:icon` | No | Icon identifier for the mode (can use any [FontAwesome icon](https://fontawesome.com/search), use the name without the "fa-" prefix). Default is "sparkles" | +| `display:description` | No | Full description of the mode | +| `ai:provider` | No | Provider preset: `openai`, `openrouter`, `google`, `azure`, `azure-legacy`, `custom` | +| `ai:apitype` | No | API type: `openai-chat`, `openai-responses`, or `google-gemini` (defaults to `openai-chat` if not specified) | +| `ai:model` | No | Model identifier (required for most providers) | +| `ai:thinkinglevel` | No | Thinking level: `low`, `medium`, or `high` | +| `ai:endpoint` | No | *Full* API endpoint URL (auto-set by provider when available) | +| `ai:azureapiversion` | No | Azure API version (for `azure-legacy` provider, defaults to `2025-04-01-preview`) | +| `ai:apitoken` | No | API key/token (not recommended - use secrets instead) | +| `ai:apitokensecretname` | No | Name of secret containing API token (auto-set by provider) | +| `ai:azureresourcename` | No | Azure resource name (for Azure providers) | +| `ai:azuredeployment` | No | Azure deployment name (for `azure-legacy` provider) | +| `ai:capabilities` | No | Array of supported capabilities: `"tools"`, `"images"`, `"pdfs"` | +| `waveai:cloud` | No | Internal - for Wave Cloud AI configuration only | +| `waveai:premium` | No | Internal - for Wave Cloud AI configuration only | + +### AI Capabilities + +The `ai:capabilities` field specifies what features the AI mode supports: + +- **`tools`** - Enables AI tool usage for file reading/writing, shell integration, and widget interaction +- **`images`** - Allows image attachments in chat (model can view uploaded images) +- **`pdfs`** - Allows PDF file attachments in chat (model can read PDF content) + +**Provider-specific behavior:** +- **OpenAI and Google providers**: Capabilities are automatically configured based on the model. You don't need to specify them. 
+- **OpenRouter, Azure, Azure-Legacy, and Custom providers**: You must manually specify capabilities based on your model's features. + +:::warning +If you don't include `"tools"` in the `ai:capabilities` array, the AI model will not be able to interact with your Wave terminal widgets, read/write files, or execute commands. Most AI modes should include `"tools"` for the best Wave experience. +::: + +Most models support `tools` and can benefit from it. Vision-capable models should include `images`. Not all models support PDFs, so only include `pdfs` if your model can process them. diff --git a/docs/docs/waveai.mdx b/docs/docs/waveai.mdx index e352865ef9..ab9259d5a0 100644 --- a/docs/docs/waveai.mdx +++ b/docs/docs/waveai.mdx @@ -34,7 +34,7 @@ Controls AI's access to your workspace: ## File Attachments -Drag files onto the AI panel to attach: +Drag files onto the AI panel to attach (not supported with all models): | Type | Formats | Size Limit | Notes | |------|---------|------------|-------| @@ -68,21 +68,38 @@ Supports text files, images, PDFs, and directories. Use `-n` for new chat, `-s` - **Navigate Web**: Changes URLs in web browser widgets ### All Widgets -- **Capture Screenshots**: Takes screenshots of any widget for visual analysis +- **Capture Screenshots**: Takes screenshots of any widget for visual analysis (not supported on all models) :::warning Security File system operations require explicit approval. You control all file access. ::: +## Local Models & BYOK + +Wave AI supports using your own AI models and API keys: + +- **Local Models**: Run AI models locally with [Ollama](https://ollama.ai), [LM Studio](https://lmstudio.ai), [vLLM](https://docs.vllm.ai), and other OpenAI-compatible servers +- **BYOK (Bring Your Own Key)**: Use your own API keys with OpenAI, OpenRouter, Google AI (Gemini), Azure OpenAI, and other cloud providers +- **Multiple Modes**: Configure and switch between multiple AI providers and models +- **Privacy**: Keep your data local or use your preferred cloud provider + +See the [**Local Models & BYOK guide**](./waveai-modes.mdx) for complete configuration instructions, examples, and troubleshooting. + ## Privacy +**Default Wave AI Service:** - Messages are proxied through the Wave Cloud AI service (powered by OpenAI's APIs). Please refer to OpenAI's privacy policy for details on how they handle your data. - Wave does not store your chats, attachments, or use them for training - Usage counters included in anonymous telemetry - File access requires explicit approval +**Local Models & BYOK:** +- When using local models, your chat data never leaves your machine +- When using BYOK with cloud providers, requests are sent directly to your chosen provider +- Refer to your provider's privacy policy for details on how they handle your data + :::info Under Active Development -Wave AI is in active beta with included AI credits while we refine the experience. BYOK will be available once we've stabilized core features and gathered feedback on what works best. Share feedback in our [Discord](https://discord.gg/XfvZ334gwU). +Wave AI is in active beta with included AI credits while we refine the experience. Share feedback in our [Discord](https://discord.gg/XfvZ334gwU). 
**Coming Soon:**
- **Remote File Access**: Read files on SSH-connected systems
diff --git a/docs/docs/wsh-reference.mdx b/docs/docs/wsh-reference.mdx
index a6006010d0..b9df30f31d 100644
--- a/docs/docs/wsh-reference.mdx
+++ b/docs/docs/wsh-reference.mdx
@@ -342,19 +342,36 @@ This will connect to a WSL distribution on the local machine. It will use the de

## web

-You can search for a given url using:
+The `web` command opens URLs in a web block within Wave Terminal.

```sh
-wsh web open [url]
+wsh web open [url] [-m] [-r blockid]
```

-Alternatively, you can search with the configured search engine using:
+You can open a specific URL or perform a search using the configured search engine.
+
+Flags:
+
+- `-m, --magnified` - open the web block in magnified mode
+- `-r, --replace <blockid>` - replace an existing block instead of creating a new one
+
+Examples:

```sh
-wsh web open [search-query]
+# Open a URL
+wsh web open https://waveterm.dev
+
+# Search with the configured search engine
+wsh web open "wave terminal documentation"
+
+# Open in magnified mode
+wsh web open -m https://github.com
+
+# Replace an existing block
+wsh web open -r 2 https://example.com
```

-Both of these commands will open a new web block with the desired page.
+The command will open a new web block with the desired page, or replace an existing block if the `-r` flag is used. Note that `--replace` and `--magnified` cannot be used together.

---

@@ -978,6 +995,30 @@ wsh secret delete old_api_key
wsh secret delete temp_token
```

+### ui
+
+```sh
+wsh secret ui [-m]
+```
+
+Open the secrets management interface in a new block. This provides a graphical interface for viewing and managing all your secrets.
+
+Flags:
+
+- `-m, --magnified` - open the secrets UI in magnified mode
+
+Examples:
+
+```sh
+# Open the secrets UI
+wsh secret ui
+
+# Open the secrets UI in magnified mode
+wsh secret ui -m
+```
+
+The secrets UI provides a convenient visual way to browse, add, edit, and delete secrets without needing to use the command-line interface.
+
:::tip
Use secrets in your scripts to avoid hardcoding sensitive values. Secrets work across remote machines - store an API key locally with `wsh secret set`, then access it from any SSH or WSL connection with `wsh secret get`. The secret is securely retrieved from your local machine without needing to duplicate it on remote systems.
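+
+For example (the secret name and value are illustrative):
+
+```bash
+# On your local machine
+wsh secret set API_KEY=xyz123
+
+# Later, inside an SSH or WSL connection opened from Wave
+wsh secret get API_KEY   # retrieved securely from your local store
+```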
:::
diff --git a/docs/package.json b/docs/package.json
index cf09870d02..d1f8a4acc0 100644
--- a/docs/package.json
+++ b/docs/package.json
@@ -47,8 +47,8 @@
        "eslint": "^8.57.0",
        "eslint-config-prettier": "^10.1.8",
        "eslint-plugin-mdx": "^3.6.2",
-        "prettier": "^3.6.2",
-        "prettier-plugin-jsdoc": "^1.5.0",
+        "prettier": "^3.7.4",
+        "prettier-plugin-jsdoc": "^1.7.0",
        "prettier-plugin-organize-imports": "^4.3.0",
        "remark-cli": "^12.0.1",
        "remark-frontmatter": "^5.0.0",
@@ -56,7 +56,7 @@
        "remark-preset-lint-consistent": "^6.0.1",
        "remark-preset-lint-recommended": "^7.0.1",
        "typescript": "^5.9.3",
-        "typescript-eslint": "^8.46.4"
+        "typescript-eslint": "^8.48.1"
    },
    "resolutions": {
        "path-to-regexp@npm:2.2.1": "^3",
diff --git a/docs/src/components/versionbadge.css b/docs/src/components/versionbadge.css
new file mode 100644
index 0000000000..63ac0b3771
--- /dev/null
+++ b/docs/src/components/versionbadge.css
@@ -0,0 +1,22 @@
+.version-badge {
+    display: inline-block;
+    padding: 0.125rem 0.5rem;
+    margin-left: 0.25rem;
+    font-size: 0.75rem;
+    font-weight: 600;
+    line-height: 1.5;
+    border-radius: 0.25rem;
+    background-color: var(--ifm-color-primary-lightest);
+    color: var(--ifm-background-color);
+    vertical-align: middle;
+    white-space: nowrap;
+}
+
+.version-badge.no-left-margin {
+    margin-left: 0;
+}
+
+[data-theme="dark"] .version-badge {
+    background-color: var(--ifm-color-primary-dark);
+    color: var(--ifm-background-color);
+}
diff --git a/docs/src/components/versionbadge.tsx b/docs/src/components/versionbadge.tsx
new file mode 100644
index 0000000000..58c616440c
--- /dev/null
+++ b/docs/src/components/versionbadge.tsx
@@ -0,0 +1,10 @@
+import "./versionbadge.css";
+
+interface VersionBadgeProps {
+    version: string;
+    noLeftMargin?: boolean;
+}
+
+export function VersionBadge({ version, noLeftMargin }: VersionBadgeProps) {
+    return <span className={"version-badge" + (noLeftMargin ? " no-left-margin" : "")}>{version}</span>;
+}
\ No newline at end of file
diff --git a/emain/emain-menu.ts b/emain/emain-menu.ts
index 36efa8ec65..79d7e17362 100644
--- a/emain/emain-menu.ts
+++ b/emain/emain-menu.ts
@@ -73,7 +73,20 @@ async function getWorkspaceMenu(ww?: WaveBrowserWindow): Promise => img.src = url; }); };
+
+
+/**
+ * Filter and organize AI mode configs into Wave and custom provider groups
+ * Returns organized configs that should be displayed based on settings and premium status
+ */
+export interface FilteredAIModeConfigs {
+    waveProviderConfigs: Array<{ mode: string } & AIModeConfigType>;
+    otherProviderConfigs: Array<{ mode: string } & AIModeConfigType>;
+    shouldShowCloudModes: boolean;
+}
+
+export const getFilteredAIModeConfigs = (
+    aiModeConfigs: Record<string, AIModeConfigType>,
+    showCloudModes: boolean,
+    inBuilder: boolean,
+    hasPremium: boolean
+): FilteredAIModeConfigs => {
+    const hideQuick = inBuilder && hasPremium;
+
+    const allConfigs = Object.entries(aiModeConfigs)
+        .map(([mode, config]) => ({ mode, ...config }))
+        .filter((config) => !(hideQuick && config.mode === "waveai@quick"));
+
+    const otherProviderConfigs = allConfigs
+        .filter((config) => config["ai:provider"] !== "wave")
+        .sort(sortByDisplayOrder);
+
+    const hasCustomModels = otherProviderConfigs.length > 0;
+    const shouldShowCloudModes = showCloudModes || !hasCustomModels;
+
+    const waveProviderConfigs = shouldShowCloudModes
+        ? allConfigs.filter((config) => config["ai:provider"] === "wave").sort(sortByDisplayOrder)
+        : [];
+
+    return {
+        waveProviderConfigs,
+        otherProviderConfigs,
+        shouldShowCloudModes,
+    };
+};
+
+/**
+ * Get the display name for an AI mode configuration.
+ * If display:name is set, use that.
Otherwise, construct from model/provider. + * For azure-legacy, show "azureresourcename (azure)". + * For other providers, show "model (provider)". + */ +export function getModeDisplayName(config: AIModeConfigType): string { + if (config["display:name"]) { + return config["display:name"]; + } + + const provider = config["ai:provider"]; + const model = config["ai:model"]; + const azureResourceName = config["ai:azureresourcename"]; + + if (provider === "azure-legacy") { + return `${azureResourceName || "unknown"} (azure)`; + } + + return `${model || "unknown"} (${provider || "custom"})`; +} diff --git a/frontend/app/aipanel/aimessage.tsx b/frontend/app/aipanel/aimessage.tsx index e6fb70ce11..1c9dea2b66 100644 --- a/frontend/app/aipanel/aimessage.tsx +++ b/frontend/app/aipanel/aimessage.tsx @@ -223,7 +223,7 @@ export const AIMessage = memo(({ message, isStreaming }: AIMessageProps) => { className={cn( "px-2 rounded-lg [&>*:first-child]:!mt-0", message.role === "user" - ? "py-2 bg-accent-800 text-white max-w-[calc(100%-50px)] @w450:max-w-[calc(100%-105px)]" + ? "py-2 bg-accent-800 text-white max-w-[calc(100%-50px)]" : "min-w-[min(100%,500px)]" )} > diff --git a/frontend/app/aipanel/aimode.tsx b/frontend/app/aipanel/aimode.tsx new file mode 100644 index 0000000000..5ae1d8a385 --- /dev/null +++ b/frontend/app/aipanel/aimode.tsx @@ -0,0 +1,299 @@ +// Copyright 2025, Command Line Inc. +// SPDX-License-Identifier: Apache-2.0 + +import { Tooltip } from "@/app/element/tooltip"; +import { atoms, getSettingsKeyAtom } from "@/app/store/global"; +import { RpcApi } from "@/app/store/wshclientapi"; +import { TabRpcClient } from "@/app/store/wshrpcutil"; +import { cn, fireAndForget, makeIconClass } from "@/util/util"; +import { useAtomValue } from "jotai"; +import { memo, useRef, useState } from "react"; +import { getFilteredAIModeConfigs, getModeDisplayName } from "./ai-utils"; +import { WaveAIModel } from "./waveai-model"; + +interface AIModeMenuItemProps { + config: AIModeConfigWithMode; + isSelected: boolean; + isDisabled: boolean; + onClick: () => void; + isFirst?: boolean; + isLast?: boolean; +} + +const AIModeMenuItem = memo(({ config, isSelected, isDisabled, onClick, isFirst, isLast }: AIModeMenuItemProps) => { + return ( + + ); +}); + +AIModeMenuItem.displayName = "AIModeMenuItem"; + +interface ConfigSection { + sectionName: string; + configs: AIModeConfigWithMode[]; + isIncompatible?: boolean; +} + +function computeCompatibleSections( + currentMode: string, + aiModeConfigs: Record, + waveProviderConfigs: AIModeConfigWithMode[], + otherProviderConfigs: AIModeConfigWithMode[] +): ConfigSection[] { + const currentConfig = aiModeConfigs[currentMode]; + const allConfigs = [...waveProviderConfigs, ...otherProviderConfigs]; + + if (!currentConfig) { + return [{ sectionName: "Incompatible Modes", configs: allConfigs, isIncompatible: true }]; + } + + const currentSwitchCompat = currentConfig["ai:switchcompat"] || []; + const compatibleConfigs: AIModeConfigWithMode[] = [{ ...currentConfig, mode: currentMode }]; + const incompatibleConfigs: AIModeConfigWithMode[] = []; + + if (currentSwitchCompat.length === 0) { + allConfigs.forEach((config) => { + if (config.mode !== currentMode) { + incompatibleConfigs.push(config); + } + }); + } else { + allConfigs.forEach((config) => { + if (config.mode === currentMode) return; + + const configSwitchCompat = config["ai:switchcompat"] || []; + const hasMatch = currentSwitchCompat.some((currentTag: string) => configSwitchCompat.includes(currentTag)); + + if (hasMatch) { + 
compatibleConfigs.push(config); + } else { + incompatibleConfigs.push(config); + } + }); + } + + const sections: ConfigSection[] = []; + const compatibleSectionName = compatibleConfigs.length === 1 ? "Current" : "Compatible Modes"; + sections.push({ sectionName: compatibleSectionName, configs: compatibleConfigs }); + + if (incompatibleConfigs.length > 0) { + sections.push({ sectionName: "Incompatible Modes", configs: incompatibleConfigs, isIncompatible: true }); + } + + return sections; +} + +function computeWaveCloudSections(waveProviderConfigs: AIModeConfigWithMode[], otherProviderConfigs: AIModeConfigWithMode[]): ConfigSection[] { + const sections: ConfigSection[] = []; + + if (waveProviderConfigs.length > 0) { + sections.push({ sectionName: "Wave AI Cloud", configs: waveProviderConfigs }); + } + if (otherProviderConfigs.length > 0) { + sections.push({ sectionName: "Custom", configs: otherProviderConfigs }); + } + + return sections; +} + +interface AIModeDropdownProps { + compatibilityMode?: boolean; +} + +export const AIModeDropdown = memo(({ compatibilityMode = false }: AIModeDropdownProps) => { + const model = WaveAIModel.getInstance(); + const aiMode = useAtomValue(model.currentAIMode); + const aiModeConfigs = useAtomValue(model.aiModeConfigs); + const waveaiModeConfigs = useAtomValue(atoms.waveaiModeConfigAtom); + const widgetContextEnabled = useAtomValue(model.widgetAccessAtom); + const rateLimitInfo = useAtomValue(atoms.waveAIRateLimitInfoAtom); + const showCloudModes = useAtomValue(getSettingsKeyAtom("waveai:showcloudmodes")); + const defaultMode = useAtomValue(getSettingsKeyAtom("waveai:defaultmode")) ?? "waveai@balanced"; + const [isOpen, setIsOpen] = useState(false); + const dropdownRef = useRef(null); + + const hasPremium = !rateLimitInfo || rateLimitInfo.unknown || rateLimitInfo.preq > 0; + + const { waveProviderConfigs, otherProviderConfigs } = getFilteredAIModeConfigs( + aiModeConfigs, + showCloudModes, + model.inBuilder, + hasPremium + ); + + let currentMode = aiMode || defaultMode; + const currentConfig = aiModeConfigs[currentMode]; + if (currentConfig) { + if (!hasPremium && currentConfig["waveai:premium"]) { + currentMode = "waveai@quick"; + } + if (model.inBuilder && hasPremium && currentMode === "waveai@quick") { + currentMode = "waveai@balanced"; + } + } + + const sections: ConfigSection[] = compatibilityMode + ? computeCompatibleSections(currentMode, aiModeConfigs, waveProviderConfigs, otherProviderConfigs) + : computeWaveCloudSections(waveProviderConfigs, otherProviderConfigs); + + const showSectionHeaders = compatibilityMode || sections.length > 1; + + const handleSelect = (mode: string) => { + const config = aiModeConfigs[mode]; + if (!config) return; + if (!hasPremium && config["waveai:premium"]) { + return; + } + model.setAIMode(mode); + setIsOpen(false); + }; + + const displayConfig = aiModeConfigs[currentMode]; + const displayName = displayConfig ? 
getModeDisplayName(displayConfig) : "Unknown"; + const displayIcon = displayConfig?.["display:icon"] || "sparkles"; + const resolvedConfig = waveaiModeConfigs[currentMode]; + const hasToolsSupport = resolvedConfig && resolvedConfig["ai:capabilities"]?.includes("tools"); + const showNoToolsWarning = widgetContextEnabled && resolvedConfig && !hasToolsSupport; + + const handleConfigureClick = () => { + fireAndForget(async () => { + RpcApi.RecordTEventCommand( + TabRpcClient, + { + event: "action:other", + props: { + "action:type": "waveai:configuremodes:contextmenu", + }, + }, + { noresponse: true } + ); + await model.openWaveAIConfig(); + setIsOpen(false); + }); + }; + + return ( +
+ + + {showNoToolsWarning && ( + + Warning: This custom mode was configured without the "tools" capability in the + "ai:capabilities" array. Without tool support, Wave AI will not be able to interact with + widgets or files. +
+ } + placement="bottom" + > +
+ + No Tools Support +
+ + )} + + {isOpen && ( + <> +
setIsOpen(false)} /> +
+ {sections.map((section, sectionIndex) => { + const isFirstSection = sectionIndex === 0; + const isLastSection = sectionIndex === sections.length - 1; + + return ( +
+ {!isFirstSection &&
} + {showSectionHeaders && ( + <> +
+ {section.sectionName} +
+ {section.isIncompatible && ( +
+ (Start a New Chat to Switch) +
+ )} + + )} + {section.configs.map((config, index) => { + const isFirst = index === 0 && isFirstSection && !showSectionHeaders; + const isLast = index === section.configs.length - 1 && isLastSection; + const isPremiumDisabled = !hasPremium && config["waveai:premium"]; + const isIncompatibleDisabled = section.isIncompatible || false; + const isDisabled = isPremiumDisabled || isIncompatibleDisabled; + const isSelected = currentMode === config.mode; + return ( + handleSelect(config.mode)} + isFirst={isFirst} + isLast={isLast} + /> + ); + })} +
+ ); + })} +
+ +
+ + )} +
+ ); +}); + +AIModeDropdown.displayName = "AIModeDropdown"; diff --git a/frontend/app/aipanel/aipanel-contextmenu.ts b/frontend/app/aipanel/aipanel-contextmenu.ts index b7a7f718d4..ffa9336d8e 100644 --- a/frontend/app/aipanel/aipanel-contextmenu.ts +++ b/frontend/app/aipanel/aipanel-contextmenu.ts @@ -1,9 +1,10 @@ // Copyright 2025, Command Line Inc. // SPDX-License-Identifier: Apache-2.0 +import { getFilteredAIModeConfigs, getModeDisplayName } from "@/app/aipanel/ai-utils"; import { waveAIHasSelection } from "@/app/aipanel/waveai-focus-utils"; import { ContextMenuModel } from "@/app/store/contextmenu"; -import { atoms, isDev } from "@/app/store/global"; +import { atoms, getSettingsKeyAtom, isDev } from "@/app/store/global"; import { globalStore } from "@/app/store/jotaiStore"; import { RpcApi } from "@/app/store/wshclientapi"; import { TabRpcClient } from "@/app/store/wshrpcutil"; @@ -41,49 +42,76 @@ export async function handleWaveAIContextMenu(e: React.MouseEvent, showCopy: boo const rateLimitInfo = globalStore.get(atoms.waveAIRateLimitInfoAtom); const hasPremium = !rateLimitInfo || rateLimitInfo.unknown || rateLimitInfo.preq > 0; - const currentThinkingMode = rtInfo?.["waveai:thinkingmode"] ?? (hasPremium ? "balanced" : "quick"); + const aiModeConfigs = globalStore.get(model.aiModeConfigs); + const showCloudModes = globalStore.get(getSettingsKeyAtom("waveai:showcloudmodes")); + const currentAIMode = rtInfo?.["waveai:mode"] ?? (hasPremium ? "waveai@balanced" : "waveai@quick"); const defaultTokens = model.inBuilder ? 24576 : 4096; const currentMaxTokens = rtInfo?.["waveai:maxoutputtokens"] ?? defaultTokens; - const thinkingModeSubmenu: ContextMenuItem[] = [ - { - label: "Quick (gpt-5-mini)", - type: "checkbox", - checked: currentThinkingMode === "quick", - click: () => { - RpcApi.SetRTInfoCommand(TabRpcClient, { - oref: model.orefContext, - data: { "waveai:thinkingmode": "quick" }, - }); - }, - }, - { - label: hasPremium ? "Balanced (gpt-5.1, low thinking)" : "Balanced (premium)", - type: "checkbox", - checked: currentThinkingMode === "balanced", - enabled: hasPremium, - click: () => { - if (!hasPremium) return; - RpcApi.SetRTInfoCommand(TabRpcClient, { - oref: model.orefContext, - data: { "waveai:thinkingmode": "balanced" }, - }); - }, - }, - { - label: hasPremium ? 
"Deep (gpt-5.1, full thinking)" : "Deep (premium)", - type: "checkbox", - checked: currentThinkingMode === "deep", - enabled: hasPremium, - click: () => { - if (!hasPremium) return; - RpcApi.SetRTInfoCommand(TabRpcClient, { - oref: model.orefContext, - data: { "waveai:thinkingmode": "deep" }, - }); - }, - }, - ]; + const { waveProviderConfigs, otherProviderConfigs } = getFilteredAIModeConfigs( + aiModeConfigs, + showCloudModes, + model.inBuilder, + hasPremium + ); + + const aiModeSubmenu: ContextMenuItem[] = []; + + if (waveProviderConfigs.length > 0) { + aiModeSubmenu.push({ + label: "Wave AI Modes", + type: "header", + enabled: false, + }); + + waveProviderConfigs.forEach(({ mode, ...config }) => { + const isPremium = config["waveai:premium"] === true; + const isEnabled = !isPremium || hasPremium; + aiModeSubmenu.push({ + label: getModeDisplayName(config), + type: "checkbox", + checked: currentAIMode === mode, + enabled: isEnabled, + click: () => { + if (!isEnabled) return; + RpcApi.SetRTInfoCommand(TabRpcClient, { + oref: model.orefContext, + data: { "waveai:mode": mode }, + }); + }, + }); + }); + } + + if (otherProviderConfigs.length > 0) { + if (waveProviderConfigs.length > 0) { + aiModeSubmenu.push({ type: "separator" }); + } + + aiModeSubmenu.push({ + label: "Custom Modes", + type: "header", + enabled: false, + }); + + otherProviderConfigs.forEach(({ mode, ...config }) => { + const isPremium = config["waveai:premium"] === true; + const isEnabled = !isPremium || hasPremium; + aiModeSubmenu.push({ + label: getModeDisplayName(config), + type: "checkbox", + checked: currentAIMode === mode, + enabled: isEnabled, + click: () => { + if (!isEnabled) return; + RpcApi.SetRTInfoCommand(TabRpcClient, { + oref: model.orefContext, + data: { "waveai:mode": mode }, + }); + }, + }); + }); + } const maxTokensSubmenu: ContextMenuItem[] = []; @@ -164,8 +192,8 @@ export async function handleWaveAIContextMenu(e: React.MouseEvent, showCopy: boo } menu.push({ - label: "Thinking Mode", - submenu: thinkingModeSubmenu, + label: "AI Mode", + submenu: aiModeSubmenu, }); menu.push({ @@ -173,6 +201,25 @@ export async function handleWaveAIContextMenu(e: React.MouseEvent, showCopy: boo submenu: maxTokensSubmenu, }); + menu.push({ type: "separator" }); + + menu.push({ + label: "Configure Modes", + click: () => { + RpcApi.RecordTEventCommand( + TabRpcClient, + { + event: "action:other", + props: { + "action:type": "waveai:configuremodes:contextmenu", + }, + }, + { noresponse: true } + ); + model.openWaveAIConfig(); + }, + }); + if (model.canCloseWaveAIPanel()) { menu.push({ type: "separator" }); diff --git a/frontend/app/aipanel/aipanel.tsx b/frontend/app/aipanel/aipanel.tsx index 79ae04fcc1..c386348b32 100644 --- a/frontend/app/aipanel/aipanel.tsx +++ b/frontend/app/aipanel/aipanel.tsx @@ -16,12 +16,14 @@ import { memo, useCallback, useEffect, useRef, useState } from "react"; import { useDrop } from "react-dnd"; import { formatFileSizeError, isAcceptableFile, validateFileSize } from "./ai-utils"; import { AIDroppedFiles } from "./aidroppedfiles"; +import { AIModeDropdown } from "./aimode"; import { AIPanelHeader } from "./aipanelheader"; import { AIPanelInput } from "./aipanelinput"; import { AIPanelMessages } from "./aipanelmessages"; import { AIRateLimitStrip } from "./airatelimitstrip"; +import { WaveUIMessage } from "./aitypes"; +import { BYOKAnnouncement } from "./byokannouncement"; import { TelemetryRequiredMessage } from "./telemetryrequired"; -import { ThinkingLevelDropdown } from "./thinkingmode"; import { 
WaveAIModel } from "./waveai-model"; const AIBlockMask = memo(() => { @@ -82,10 +84,14 @@ KeyCap.displayName = "KeyCap"; const AIWelcomeMessage = memo(() => { const modKey = isMacOS() ? "⌘" : "Alt"; + const fullConfig = jotai.useAtomValue(atoms.fullConfigAtom); + const hasCustomModes = fullConfig?.waveai + ? Object.keys(fullConfig.waveai).some((key) => !key.startsWith("waveai@")) + : false; return (
- +

Welcome to Wave AI

@@ -154,6 +160,7 @@ const AIWelcomeMessage = memo(() => {
+ {!hasCustomModes && }
BETA: Free to use. Daily limits keep our costs in check.
@@ -217,7 +224,7 @@ const AIPanelComponentInner = memo(() => { const telemetryEnabled = jotai.useAtomValue(getSettingsKeyAtom("telemetry:enabled")) ?? false; const isPanelVisible = jotai.useAtomValue(model.getPanelVisibleAtom()); - const { messages, sendMessage, status, setMessages, error, stop } = useChat({ + const { messages, sendMessage, status, setMessages, error, stop } = useChat({ transport: new DefaultChatTransport({ api: model.getUseChatEndpointUrl(), prepareSendMessagesRequest: (opts) => { @@ -246,6 +253,8 @@ const AIPanelComponentInner = memo(() => { model.registerUseChatData(sendMessage, setMessages, status, stop); // console.log("AICHAT messages", messages); + (window as any).aichatmessages = messages; + (window as any).aichatstatus = status; const handleKeyDown = (waveEvent: WaveKeyboardEvent): boolean => { if (checkKeyPressed(waveEvent, "Cmd:k")) { @@ -498,7 +507,7 @@ const AIPanelComponentInner = memo(() => { onContextMenu={(e) => handleWaveAIContextMenu(e, true)} >
- +
{model.inBuilder ? : }
diff --git a/frontend/app/aipanel/aipanelheader.tsx b/frontend/app/aipanel/aipanelheader.tsx index a2c4d586c2..7a54f7cb26 100644 --- a/frontend/app/aipanel/aipanelheader.tsx +++ b/frontend/app/aipanel/aipanelheader.tsx @@ -15,8 +15,15 @@ export const AIPanelHeader = memo(() => { handleWaveAIContextMenu(e, false); }; + const handleContextMenu = (e: React.MouseEvent) => { + handleWaveAIContextMenu(e, false); + }; + return ( -
+

Wave AI diff --git a/frontend/app/aipanel/aipanelinput.tsx b/frontend/app/aipanel/aipanelinput.tsx index 6eb8aff630..19ec4a9be6 100644 --- a/frontend/app/aipanel/aipanelinput.tsx +++ b/frontend/app/aipanel/aipanelinput.tsx @@ -4,6 +4,7 @@ import { formatFileSizeError, isAcceptableFile, validateFileSize } from "@/app/aipanel/ai-utils"; import { waveAIHasFocusWithin } from "@/app/aipanel/waveai-focus-utils"; import { type WaveAIModel } from "@/app/aipanel/waveai-model"; +import { Tooltip } from "@/element/tooltip"; import { cn } from "@/util/util"; import { useAtom, useAtomValue } from "jotai"; import { memo, useCallback, useEffect, useRef } from "react"; @@ -145,31 +146,35 @@ export const AIPanelInput = memo(({ onSubmit, status, model }: AIPanelInputProps style={{ fontSize: "13px" }} rows={2} /> - - + + + + + +

diff --git a/frontend/app/aipanel/aipanelmessages.tsx b/frontend/app/aipanel/aipanelmessages.tsx index a32e3936b4..a0284153da 100644 --- a/frontend/app/aipanel/aipanelmessages.tsx +++ b/frontend/app/aipanel/aipanelmessages.tsx @@ -4,11 +4,12 @@ import { useAtomValue } from "jotai"; import { memo, useEffect, useRef } from "react"; import { AIMessage } from "./aimessage"; -import { ThinkingLevelDropdown } from "./thinkingmode"; +import { AIModeDropdown } from "./aimode"; +import { type WaveUIMessage } from "./aitypes"; import { WaveAIModel } from "./waveai-model"; interface AIPanelMessagesProps { - messages: any[]; + messages: WaveUIMessage[]; status: string; onContextMenu?: (e: React.MouseEvent) => void; } @@ -45,24 +46,24 @@ export const AIPanelMessages = memo(({ messages, status, onContextMenu }: AIPane useEffect(() => { const wasStreaming = prevStatusRef.current === "streaming"; const isNowNotStreaming = status !== "streaming"; - + if (wasStreaming && isNowNotStreaming) { requestAnimationFrame(() => { scrollToBottom(); }); } - + prevStatusRef.current = status; }, [status]); return (
-
- +
+
{messages.map((message, index) => { const isLastMessage = index === messages.length - 1; diff --git a/frontend/app/aipanel/aitypes.ts b/frontend/app/aipanel/aitypes.ts index a1192ec7ed..cc3c73d224 100644 --- a/frontend/app/aipanel/aitypes.ts +++ b/frontend/app/aipanel/aitypes.ts @@ -4,14 +4,14 @@ import { ChatRequestOptions, FileUIPart, UIMessage, UIMessagePart } from "ai"; type WaveUIDataTypes = { - // pkg/aiusechat/uctypes/usechat-types.go UIMessageDataUserFile + // pkg/aiusechat/uctypes/uctypes.go UIMessageDataUserFile userfile: { filename: string; size: number; mimetype: string; previewurl?: string; }; - // pkg/aiusechat/uctypes/usechat-types.go UIMessageDataToolUse + // pkg/aiusechat/uctypes/uctypes.go UIMessageDataToolUse tooluse: { toolcallid: string; toolname: string; diff --git a/frontend/app/aipanel/byokannouncement.tsx b/frontend/app/aipanel/byokannouncement.tsx new file mode 100644 index 0000000000..935cc4a3b0 --- /dev/null +++ b/frontend/app/aipanel/byokannouncement.tsx @@ -0,0 +1,73 @@ +// Copyright 2025, Command Line Inc. +// SPDX-License-Identifier: Apache-2.0 + +import { RpcApi } from "@/app/store/wshclientapi"; +import { TabRpcClient } from "@/app/store/wshrpcutil"; +import { WaveAIModel } from "./waveai-model"; + +const BYOKAnnouncement = () => { + const model = WaveAIModel.getInstance(); + + const handleOpenConfig = async () => { + RpcApi.RecordTEventCommand( + TabRpcClient, + { + event: "action:other", + props: { + "action:type": "waveai:configuremodes:panel", + }, + }, + { noresponse: true } + ); + await model.openWaveAIConfig(); + }; + + const handleViewDocs = () => { + RpcApi.RecordTEventCommand( + TabRpcClient, + { + event: "action:other", + props: { + "action:type": "waveai:viewdocs:panel", + }, + }, + { noresponse: true } + ); + }; + + return ( +
diff --git a/frontend/app/aipanel/byokannouncement.tsx b/frontend/app/aipanel/byokannouncement.tsx
new file mode 100644
index 0000000000..935cc4a3b0
--- /dev/null
+++ b/frontend/app/aipanel/byokannouncement.tsx
@@ -0,0 +1,73 @@
+// Copyright 2025, Command Line Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+import { RpcApi } from "@/app/store/wshclientapi";
+import { TabRpcClient } from "@/app/store/wshrpcutil";
+import { WaveAIModel } from "./waveai-model";
+
+const BYOKAnnouncement = () => {
+    const model = WaveAIModel.getInstance();
+
+    const handleOpenConfig = async () => {
+        RpcApi.RecordTEventCommand(
+            TabRpcClient,
+            {
+                event: "action:other",
+                props: {
+                    "action:type": "waveai:configuremodes:panel",
+                },
+            },
+            { noresponse: true }
+        );
+        await model.openWaveAIConfig();
+    };
+
+    const handleViewDocs = () => {
+        RpcApi.RecordTEventCommand(
+            TabRpcClient,
+            {
+                event: "action:other",
+                props: {
+                    "action:type": "waveai:viewdocs:panel",
+                },
+            },
+            { noresponse: true }
+        );
+    };
+
+    return (
+
+
+
+        New: BYOK & Local AI Support
+
+        Wave AI now supports bring-your-own-key (BYOK) with OpenAI, Google Gemini, Azure, and
+        OpenRouter, plus local models via Ollama, LM Studio, and other OpenAI-compatible providers.
+
+
+        View Docs
+
+
+
+
+    );
+};
+
+BYOKAnnouncement.displayName = "BYOKAnnouncement";
+
+export { BYOKAnnouncement };
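Both handlers in `BYOKAnnouncement` follow the same record-then-act shape, and the call signature is taken verbatim from the file above. If more panels adopt the pattern, a small helper could cut the boilerplate; this is a hypothetical refactor, not part of the diff:

```tsx
import { RpcApi } from "@/app/store/wshclientapi";
import { TabRpcClient } from "@/app/store/wshrpcutil";

// Hypothetical helper: fire-and-forget action telemetry.
// noresponse keeps the UI from waiting on an ack, matching the handlers above.
function recordAction(actionType: string) {
    RpcApi.RecordTEventCommand(
        TabRpcClient,
        { event: "action:other", props: { "action:type": actionType } },
        { noresponse: true }
    );
}

// Usage, mirroring handleViewDocs:
// recordAction("waveai:viewdocs:panel");
```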
diff --git a/frontend/app/aipanel/thinkingmode.tsx b/frontend/app/aipanel/thinkingmode.tsx
deleted file mode 100644
index 1e0fb76be7..0000000000
--- a/frontend/app/aipanel/thinkingmode.tsx
+++ /dev/null
@@ -1,131 +0,0 @@
-// Copyright 2025, Command Line Inc.
-// SPDX-License-Identifier: Apache-2.0
-
-import { atoms } from "@/app/store/global";
-import { cn } from "@/util/util";
-import { useAtomValue } from "jotai";
-import { memo, useRef, useState } from "react";
-import { WaveAIModel } from "./waveai-model";
-
-type ThinkingMode = "quick" | "balanced" | "deep";
-
-interface ThinkingModeMetadata {
-    icon: string;
-    name: string;
-    desc: string;
-    premium: boolean;
-}
-
-const ThinkingModeData: Record<ThinkingMode, ThinkingModeMetadata> = {
-    quick: {
-        icon: "fa-bolt",
-        name: "Quick",
-        desc: "Fastest responses (gpt-5-mini)",
-        premium: false,
-    },
-    balanced: {
-        icon: "fa-sparkles",
-        name: "Balanced",
-        desc: "Good mix of speed and accuracy\n(gpt-5.1 with minimal thinking)",
-        premium: true,
-    },
-    deep: {
-        icon: "fa-lightbulb",
-        name: "Deep",
-        desc: "Slower but most capable\n(gpt-5.1 with full reasoning)",
-        premium: true,
-    },
-};
-
-export const ThinkingLevelDropdown = memo(() => {
-    const model = WaveAIModel.getInstance();
-    const thinkingMode = useAtomValue(model.thinkingMode);
-    const rateLimitInfo = useAtomValue(atoms.waveAIRateLimitInfoAtom);
-    const [isOpen, setIsOpen] = useState(false);
-    const dropdownRef = useRef(null);
-
-    const hasPremium = !rateLimitInfo || rateLimitInfo.unknown || rateLimitInfo.preq > 0;
-    const hideQuick = model.inBuilder && hasPremium;
-
-    const handleSelect = (mode: ThinkingMode) => {
-        const metadata = ThinkingModeData[mode];
-        if (!hasPremium && metadata.premium) {
-            return;
-        }
-        model.setThinkingMode(mode);
-        setIsOpen(false);
-    };
-
-    let currentMode = (thinkingMode as ThinkingMode) || "balanced";
-    const currentMetadata = ThinkingModeData[currentMode];
-    if (!hasPremium && currentMetadata.premium) {
-        currentMode = "quick";
-    }
-    if (hideQuick && currentMode === "quick") {
-        currentMode = "balanced";
-    }
-
-    return (
-
-
-
-            {isOpen && (
-                <>
-                    setIsOpen(false)} />
-
-                        {(Object.keys(ThinkingModeData) as ThinkingMode[])
-                            .filter((mode) => !(hideQuick && mode === "quick"))
-                            .map((mode, index, filteredModes) => {
-                                const metadata = ThinkingModeData[mode];
-                                const isFirst = index === 0;
-                                const isLast = index === filteredModes.length - 1;
-                                const isDisabled = !hasPremium && metadata.premium;
-                                const isSelected = currentMode === mode;
-                                return (
-
-                                );
-                            })}
-
-            )}
-    );
-});
-
-ThinkingLevelDropdown.displayName = "ThinkingLevelDropdown";
diff --git a/frontend/app/aipanel/waveai-model.tsx b/frontend/app/aipanel/waveai-model.tsx
index 7af0914e88..abc2615868 100644
--- a/frontend/app/aipanel/waveai-model.tsx
+++ b/frontend/app/aipanel/waveai-model.tsx
@@ -8,7 +8,7 @@ import {
     WaveUIMessagePart,
 } from "@/app/aipanel/aitypes";
 import { FocusManager } from "@/app/store/focusManager";
-import { atoms, createBlock, getOrefMetaKeyAtom } from "@/app/store/global";
+import { atoms, createBlock, getOrefMetaKeyAtom, getSettingsKeyAtom } from "@/app/store/global";
 import { globalStore } from "@/app/store/jotaiStore";
 import * as WOS from "@/app/store/wos";
 import { RpcApi } from "@/app/store/wshclientapi";
@@ -57,7 +57,8 @@ export class WaveAIModel {
     widgetAccessAtom!: jotai.Atom;
     droppedFiles: jotai.PrimitiveAtom = jotai.atom([]);
     chatId!: jotai.PrimitiveAtom;
-    thinkingMode: jotai.PrimitiveAtom = jotai.atom("balanced");
+    currentAIMode: jotai.PrimitiveAtom = jotai.atom("waveai@balanced");
+    aiModeConfigs!: jotai.Atom;
     errorMessage: jotai.PrimitiveAtom = jotai.atom(null) as jotai.PrimitiveAtom;
     modelAtom!: jotai.Atom;
     containerWidth: jotai.PrimitiveAtom = jotai.atom(0);
@@ -76,12 +77,19 @@
     private constructor(orefContext: ORef, inBuilder: boolean) {
         this.orefContext = orefContext;
         this.inBuilder = inBuilder;
+        const defaultMode = globalStore.get(getSettingsKeyAtom("waveai:defaultmode")) ?? "waveai@balanced";
+        this.currentAIMode = jotai.atom(defaultMode);
         this.chatId = jotai.atom(null) as jotai.PrimitiveAtom;
         this.modelAtom = jotai.atom((get) => {
             const modelMetaAtom = getOrefMetaKeyAtom(this.orefContext, "waveai:model");
             return get(modelMetaAtom) ?? "gpt-5.1";
         });
+        this.aiModeConfigs = jotai.atom((get) => {
+            const fullConfig = get(atoms.fullConfigAtom);
+            return fullConfig?.waveai ?? {};
+        });
+
         this.widgetAccessAtom = jotai.atom((get) => {
             if (this.inBuilder) {
@@ -337,11 +345,11 @@
         });
     }
 
-    setThinkingMode(mode: string) {
-        globalStore.set(this.thinkingMode, mode);
+    setAIMode(mode: string) {
+        globalStore.set(this.currentAIMode, mode);
         RpcApi.SetRTInfoCommand(TabRpcClient, {
             oref: this.orefContext,
-            data: { "waveai:thinkingmode": mode },
+            data: { "waveai:mode": mode },
         });
     }
@@ -359,8 +367,9 @@
         }
         globalStore.set(this.chatId, chatIdValue);
 
-        const thinkingModeValue = rtInfo?.["waveai:thinkingmode"] ?? "balanced";
-        globalStore.set(this.thinkingMode, thinkingModeValue);
+        const defaultMode = globalStore.get(getSettingsKeyAtom("waveai:defaultmode")) ?? "waveai@balanced";
+        const aiModeValue = rtInfo?.["waveai:mode"] ?? defaultMode;
+        globalStore.set(this.currentAIMode, aiModeValue);
 
         try {
             const chatData = await RpcApi.GetWaveAIChatCommand(TabRpcClient, { chatid: chatIdValue });
@@ -538,6 +547,16 @@
         await createBlock(blockDef, false, true);
     }
 
+    async openWaveAIConfig() {
+        const blockDef: BlockDef = {
+            meta: {
+                view: "waveconfig",
+                file: "waveai.json",
+            },
+        };
+        await createBlock(blockDef, false, true);
+    }
+
     openRestoreBackupModal(toolcallid: string) {
         globalStore.set(this.restoreBackupModalToolCallId, toolcallid);
     }
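Mode selection now round-trips through block runtime info: `setAIMode` writes `waveai:mode` via `SetRTInfoCommand`, and on load the model falls back to the `waveai:defaultmode` setting. A sketch of the flow from a caller's point of view, using only names confirmed by the diff (the `provider@mode` ID format follows the defaults above; available IDs come from the user's `waveai` config):

```tsx
import { globalStore } from "@/app/store/jotaiStore";
import { WaveAIModel } from "./waveai-model";

const model = WaveAIModel.getInstance();

// Persist a new mode for this block; also updates the currentAIMode atom.
model.setAIMode("waveai@balanced");

// Reactive read of the configured modes (derived from fullConfig.waveai).
const modeConfigs = globalStore.get(model.aiModeConfigs);
console.log(Object.keys(modeConfigs)); // configured mode IDs
```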
diff --git a/frontend/app/app.scss b/frontend/app/app.scss
index 9cbe55aba8..9ce3bc9f5d 100644
--- a/frontend/app/app.scss
+++ b/frontend/app/app.scss
@@ -35,6 +35,10 @@ body {
 
 a.plain-link {
     color: var(--secondary-text-color);
+
+    &:hover {
+        text-decoration: underline;
+    }
 }
 
 *::-webkit-scrollbar {
diff --git a/frontend/app/block/block.tsx b/frontend/app/block/block.tsx
index d8260965d4..7b307b9f33 100644
--- a/frontend/app/block/block.tsx
+++ b/frontend/app/block/block.tsx
@@ -36,6 +36,7 @@
 import clsx from "clsx";
 import { atom, useAtomValue } from "jotai";
 import { memo, Suspense, useCallback, useEffect, useLayoutEffect, useMemo, useRef, useState } from "react";
 import { QuickTipsViewModel } from "../view/quicktipsview/quicktipsview";
+import { WaveConfigViewModel } from "../view/waveconfig/waveconfig-model";
 import "./block.scss";
 import { BlockFrame } from "./blockframe";
 import { blockViewToIcon, blockViewToName } from "./blockutil";
@@ -54,6 +55,7 @@
 BlockRegistry.set("launcher", LauncherViewModel);
 BlockRegistry.set("tsunami", TsunamiViewModel);
 BlockRegistry.set("aifilediff", AiFileDiffViewModel);
 BlockRegistry.set("secretstore", SecretStoreViewModel);
+BlockRegistry.set("waveconfig", WaveConfigViewModel);
 
 function makeViewModel(blockId: string, blockView: string, nodeModel: BlockNodeModel): ViewModel {
     const ctor = BlockRegistry.get(blockView);
diff --git a/frontend/app/element/toggle.tsx b/frontend/app/element/toggle.tsx
index 29de968e4f..2f09fb1e58 100644
--- a/frontend/app/element/toggle.tsx
+++ b/frontend/app/element/toggle.tsx
@@ -2,6 +2,7 @@
 // SPDX-License-Identifier: Apache-2.0
 
 import { useRef } from "react";
+import { cn } from "@/util/util";
 import "./toggle.scss";
 
 interface ToggleProps {
@@ -9,9 +10,10 @@
     onChange: (value: boolean) => void;
     label?: string;
     id?: string;
+    className?: string;
 }
 
-const Toggle = ({ checked, onChange, label, id }: ToggleProps) => {
+const Toggle = ({ checked, onChange, label, id, className }: ToggleProps) => {
     const inputRef = useRef(null);
 
     const handleChange = (e: any) => {
@@ -29,7 +31,7 @@
     const inputId = id || `toggle-${Math.random().toString(36).substr(2, 9)}`;
 
     return (
-
+
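The new optional `className` prop lets callers adjust a toggle's placement without editing `toggle.scss`; presumably `cn()` merges it with the component's base classes in the (stripped) return JSX. A usage sketch, where the named export and the surrounding component are assumptions rather than part of the diff:

```tsx
import { useState } from "react";
import { Toggle } from "@/element/toggle"; // named export assumed; not shown in the diff

function TelemetrySetting() {
    const [enabled, setEnabled] = useState(false);
    // className is merged with the toggle's own classes via cn()
    return <Toggle checked={enabled} onChange={setEnabled} label="Enable telemetry" className="mt-2" />;
}
```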