diff --git a/AGENTS.md b/AGENTS.md index 0a42143..2c0858d 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -22,7 +22,7 @@ Your content is instantly available to browsers, LLMs, and AI agents.. Write mar - **Total Posts**: 17 - **Total Pages**: 4 - **Latest Post**: 2025-12-29 -- **Last Updated**: 2026-01-06T02:32:19.578Z +- **Last Updated**: 2026-01-06T21:21:00.308Z ## Tech stack diff --git a/CLAUDE.md b/CLAUDE.md index 85c7890..a266f67 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -5,7 +5,7 @@ Project instructions for Claude Code. ## Project context - + Markdown sync framework. Write markdown in `content/`, run sync commands, content appears instantly via Convex real-time database. Built for developers and AI agents. diff --git a/changelog.md b/changelog.md index c267ccd..b47240e 100644 --- a/changelog.md +++ b/changelog.md @@ -4,6 +4,52 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). +## [2.11.0] - 2026-01-06 + +### Added + +- Ask AI header button with RAG-based Q&A about site content + - Header button with sparkle icon (before search button, after social icons) + - Keyboard shortcuts: Cmd+J or Cmd+/ (Mac), Ctrl+J or Ctrl+/ (Windows/Linux) + - Real-time streaming responses via Convex Persistent Text Streaming + - Model selector: Claude Sonnet 4 (default) or GPT-4o + - Markdown rendering with syntax highlighting in responses + - Internal links use React Router for seamless navigation + - Source citations with links to referenced posts/pages + - Copy response button (hover to reveal) for copying AI answers + - Clear chat button to reset conversation +- AskAIConfig in siteConfig.ts for configuration + - `enabled`: Toggle Ask AI feature + - `defaultModel`: Default model ID + - `models`: Array of available models with id, name, and provider + +### How It Works + +1. User question stored in database with session ID +2. Query converted to embedding using OpenAI text-embedding-ada-002 +3. Vector search finds top 5 relevant posts/pages +4. Content sent to selected AI model with RAG system prompt +5. 
Response streams in real-time with source citations appended + +### Technical + +- New component: `src/components/AskAIModal.tsx` with StreamingMessage subcomponent +- New file: `convex/askAI.ts` - Session mutations and queries (regular runtime) +- New file: `convex/askAI.node.ts` - HTTP streaming action (Node.js runtime) +- New table: `askAISessions` with question, streamId, model, createdAt, sources fields +- New HTTP endpoint: `/ask-ai-stream` for streaming responses +- Updated `convex/convex.config.ts` with persistentTextStreaming component +- Updated `convex/http.ts` with /ask-ai-stream route and OPTIONS handler +- Updated `src/components/Layout.tsx` with Ask AI button and modal +- Updated `src/styles/global.css` with Ask AI modal styles + +### Requirements + +- `semanticSearch.enabled: true` in siteConfig (for embeddings) +- `OPENAI_API_KEY` in Convex (for embedding generation) +- `ANTHROPIC_API_KEY` in Convex (for Claude models) +- Run `npm run sync` to generate embeddings for content + ## [2.10.2] - 2026-01-06 ### Added diff --git a/content/pages/about.md b/content/pages/about.md index f9a9866..6336121 100644 --- a/content/pages/about.md +++ b/content/pages/about.md @@ -85,6 +85,7 @@ It's a hybrid: developer workflow for publishing + real-time delivery like a dyn - Dual search modes: Keyword (exact match) and Semantic (meaning-based) with Cmd+K toggle - Semantic search uses OpenAI embeddings for finding conceptually similar content +- Ask AI header button (Cmd+J) for RAG-based Q&A about site content with streaming responses - Full text search with Command+K shortcut and result highlighting - Static raw markdown files at `/raw/{slug}.md` - RSS feeds (`/rss.xml` and `/rss-full.xml`) and sitemap for SEO diff --git a/content/pages/changelog-page.md b/content/pages/changelog-page.md index a8391e9..df7afae 100644 --- a/content/pages/changelog-page.md +++ b/content/pages/changelog-page.md @@ -11,6 +11,68 @@ docsSectionOrder: 4 All notable changes to this project. +## v2.11.0 + +Released January 6, 2026 + +**Ask AI header button with RAG-based Q&A** + +New header button that opens a chat modal for asking questions about site content. Uses semantic search to find relevant posts and pages, then generates AI responses with source citations. + +**Features:** + +- Header button with sparkle icon (before search button) +- Keyboard shortcuts: Cmd+J or Cmd+/ (Mac), Ctrl+J or Ctrl+/ (Windows/Linux) +- Real-time streaming responses via Convex Persistent Text Streaming +- Model selector: Claude Sonnet 4 (default) or GPT-4o +- Markdown rendering with syntax highlighting +- Internal links use React Router for seamless navigation +- Source citations with links to referenced content +- Copy response button (hover to reveal) for copying AI answers +- Chat history within session (clears on page refresh) +- Clear chat button to reset conversation + +**How it works:** + +1. User question is stored in database with session ID +2. Query is converted to embedding using OpenAI text-embedding-ada-002 +3. Vector search finds top 5 relevant posts/pages +4. Content is sent to selected AI model with RAG system prompt +5. 
Response streams in real-time with source citations appended + +**Configuration:** + +Enable in `src/config/siteConfig.ts`: + +```typescript +askAI: { + enabled: true, + defaultModel: "claude-sonnet-4-20250514", + models: [ + { id: "claude-sonnet-4-20250514", name: "Claude Sonnet 4", provider: "anthropic" }, + { id: "gpt-4o", name: "GPT-4o", provider: "openai" }, + ], +}, +``` + +**Requirements:** + +- `semanticSearch.enabled: true` (for embeddings) +- `OPENAI_API_KEY` in Convex (for embeddings) +- `ANTHROPIC_API_KEY` in Convex (for Claude models) +- Run `npm run sync` to generate embeddings + +**Technical details:** + +- New component: `src/components/AskAIModal.tsx` +- New Convex files: `convex/askAI.ts` (mutations/queries), `convex/askAI.node.ts` (HTTP action) +- New table: `askAISessions` with `by_stream` index +- HTTP endpoint: `/ask-ai-stream` for streaming responses +- Uses `@convex-dev/persistent-text-streaming` component +- Separated Node.js runtime (askAI.node.ts) from regular runtime (askAI.ts) + +Updated files: `convex/schema.ts`, `convex/askAI.ts`, `convex/askAI.node.ts`, `convex/http.ts`, `convex/convex.config.ts`, `src/components/AskAIModal.tsx`, `src/components/Layout.tsx`, `src/config/siteConfig.ts`, `src/styles/global.css` + ## v2.10.2 Released January 6, 2026 diff --git a/content/pages/docs-ask-ai.md b/content/pages/docs-ask-ai.md new file mode 100644 index 0000000..139298f --- /dev/null +++ b/content/pages/docs-ask-ai.md @@ -0,0 +1,171 @@ +--- +title: "Ask AI" +slug: "docs-ask-ai" +published: true +order: 2 +showInNav: false +layout: "sidebar" +rightSidebar: true +showImageAtTop: true +authorName: "Markdown" +authorImage: "/images/authors/markdown.png" +image: "/images/askai.png" +showFooter: true +docsSection: true +docsSectionOrder: 5 +docsSectionGroup: "Setup" +docsSectionGroupIcon: "Rocket" +--- + +## Ask AI + +Ask AI is a header button that opens a chat modal for asking questions about your site content. It uses RAG (Retrieval-Augmented Generation) to find relevant content and generate AI responses with source citations. + +Press `Cmd+J` or `Cmd+/` (Mac) or `Ctrl+J` or `Ctrl+/` (Windows/Linux) to open the Ask AI modal. + +--- + +### How Ask AI works + +``` ++------------------+ +-------------------+ +------------------+ +| User question |--->| OpenAI Embedding |--->| Vector Search | +| "How do I..." | | text-embedding- | | Find top 5 | +| | | ada-002 | | relevant pages | ++------------------+ +-------------------+ +--------+---------+ + | + v ++------------------+ +-------------------+ +------------------+ +| Streaming |<---| AI Model |<---| RAG Context | +| Response with | | Claude/GPT-4o | | Build prompt | +| Source Links | | generates answer | | with content | ++------------------+ +-------------------+ +------------------+ +``` + +1. Your question is stored in the database with a session ID +2. Query is converted to a vector embedding using OpenAI +3. Convex vector search finds the 5 most relevant posts and pages +4. Content is combined into a RAG prompt with system instructions +5. AI model generates an answer based only on your site content +6. 
Response streams in real-time with source citations appended + +### Features + +| Feature | Description | +| ------------------ | ------------------------------------------------------ | +| Streaming | Responses appear word-by-word in real-time | +| Model Selection | Choose between Claude Sonnet 4 or GPT-4o | +| Source Citations | Every response includes links to source content | +| Markdown Rendering | Responses support full markdown formatting | +| Internal Links | Links to your pages use React Router (no page reload) | +| Copy Response | Hover over any response to copy it to clipboard | +| Keyboard Shortcuts | Cmd+J or Cmd+/ to open, Escape to close, Enter to send | + +### Configuration + +Ask AI requires semantic search to be enabled (for embeddings): + +```typescript +// src/config/siteConfig.ts +semanticSearch: { + enabled: true, +}, + +askAI: { + enabled: true, + defaultModel: "claude-sonnet-4-20250514", + models: [ + { id: "claude-sonnet-4-20250514", name: "Claude Sonnet 4", provider: "anthropic" }, + { id: "gpt-4o", name: "GPT-4o", provider: "openai" }, + ], +}, +``` + +### Environment variables + +Set these in your Convex dashboard: + +```bash +# Required for embeddings (vector search) +npx convex env set OPENAI_API_KEY sk-your-key-here + +# Required for Claude models +npx convex env set ANTHROPIC_API_KEY sk-ant-your-key-here +``` + +After setting environment variables, run `npm run sync` to generate embeddings for your content. + +### When to use Ask AI vs Search + +| Use Case | Tool | +| -------------------------------- | ----------------------- | +| Quick navigation to a known page | Keyword Search (Cmd+K) | +| Find exact code or commands | Keyword Search | +| "How do I do X?" questions | Ask AI (Cmd+J or Cmd+/) | +| Understanding a concept | Ask AI | +| Need highlighted matches on page | Keyword Search | +| Want AI-synthesized answers | Ask AI | + +### Technical details + +**Frontend:** + +| File | Purpose | +| ------------------------------- | ------------------------------------ | +| `src/components/AskAIModal.tsx` | Chat modal with streaming messages | +| `src/components/Layout.tsx` | Header button and keyboard shortcuts | +| `src/config/siteConfig.ts` | AskAIConfig interface and settings | + +**Backend (Convex):** + +| File | Purpose | +| ------------------------- | ----------------------------------------------- | +| `convex/askAI.ts` | Session mutations and queries (regular runtime) | +| `convex/askAI.node.ts` | HTTP streaming action (Node.js runtime) | +| `convex/schema.ts` | askAISessions table definition | +| `convex/http.ts` | /ask-ai-stream endpoint registration | +| `convex/convex.config.ts` | persistentTextStreaming component | + +**Database:** + +The `askAISessions` table stores: + +- `question`: The user's question +- `streamId`: Persistent Text Streaming ID +- `model`: Selected AI model ID +- `createdAt`: Timestamp +- `sources`: Optional array of cited sources + +### Limitations + +- **Requires semantic search**: Embeddings must be generated for content +- **API costs**: Each query costs embedding generation (~$0.0001) plus AI model usage +- **Latency**: ~1-3 seconds for initial response (embedding + search + AI) +- **Content scope**: Only searches published posts and pages +- **No conversation history**: Each session starts fresh (no multi-turn context) + +### Troubleshooting + +**"Failed to load response" error:** + +1. Check that `ANTHROPIC_API_KEY` or `OPENAI_API_KEY` is set in Convex +2. Verify the API key is valid and has credits +3. 
Check browser console for specific error messages + +**Empty or irrelevant responses:** + +1. Run `npm run sync` to ensure embeddings are generated +2. Check that `semanticSearch.enabled: true` in siteConfig +3. Verify content exists in your posts/pages + +**Modal doesn't open:** + +1. Check that `askAI.enabled: true` in siteConfig +2. Check that `semanticSearch.enabled: true` in siteConfig +3. Both conditions must be true for the button to appear + +### Resources + +- [Semantic Search Documentation](/docs-semantic-search) - How embeddings work +- [Convex Persistent Text Streaming](https://github.com/get-convex/persistent-text-streaming) - Streaming component +- [Convex Vector Search](https://docs.convex.dev/search/vector-search) - Vector search documentation diff --git a/convex/_generated/api.d.ts b/convex/_generated/api.d.ts index 53e06e5..5b408d5 100644 --- a/convex/_generated/api.d.ts +++ b/convex/_generated/api.d.ts @@ -11,6 +11,7 @@ import type * as aiChatActions from "../aiChatActions.js"; import type * as aiChats from "../aiChats.js"; import type * as aiImageGeneration from "../aiImageGeneration.js"; +import type * as askAI from "../askAI.js"; import type * as cms from "../cms.js"; import type * as contact from "../contact.js"; import type * as contactActions from "../contactActions.js"; @@ -39,6 +40,7 @@ declare const fullApi: ApiFromModules<{ aiChatActions: typeof aiChatActions; aiChats: typeof aiChats; aiImageGeneration: typeof aiImageGeneration; + askAI: typeof askAI; cms: typeof cms; contact: typeof contact; contactActions: typeof contactActions; @@ -643,4 +645,39 @@ export declare const components: { >; }; }; + persistentTextStreaming: { + lib: { + addChunk: FunctionReference< + "mutation", + "internal", + { final: boolean; streamId: string; text: string }, + any + >; + createStream: FunctionReference<"mutation", "internal", {}, any>; + getStreamStatus: FunctionReference< + "query", + "internal", + { streamId: string }, + "pending" | "streaming" | "done" | "error" | "timeout" + >; + getStreamText: FunctionReference< + "query", + "internal", + { streamId: string }, + { + status: "pending" | "streaming" | "done" | "error" | "timeout"; + text: string; + } + >; + setStreamStatus: FunctionReference< + "mutation", + "internal", + { + status: "pending" | "streaming" | "done" | "error" | "timeout"; + streamId: string; + }, + any + >; + }; + }; }; diff --git a/convex/askAI.node.ts b/convex/askAI.node.ts new file mode 100644 index 0000000..0ced2f9 --- /dev/null +++ b/convex/askAI.node.ts @@ -0,0 +1,317 @@ +"use node"; + +import { httpAction, action } from "./_generated/server"; +import { internal } from "./_generated/api"; +import { components } from "./_generated/api"; +import { PersistentTextStreaming, StreamId } from "@convex-dev/persistent-text-streaming"; +import Anthropic from "@anthropic-ai/sdk"; +import OpenAI from "openai"; +import { v } from "convex/values"; + +// Initialize Persistent Text Streaming component +const streaming = new PersistentTextStreaming(components.persistentTextStreaming); + +// System prompt for RAG-based Q&A +const RAG_SYSTEM_PROMPT = `You are a helpful assistant that answers questions about this website's content. 
+ +Guidelines: +- Answer questions based ONLY on the provided context +- If the context doesn't contain relevant information, say so honestly +- Cite sources by mentioning the page/post title when referencing specific content +- Be concise but thorough +- Format responses in markdown when appropriate +- Do not make up information not present in the context`; + +// CORS headers for all responses +const corsHeaders = { + "Access-Control-Allow-Origin": "*", + "Access-Control-Allow-Methods": "POST, OPTIONS", + "Access-Control-Allow-Headers": "Content-Type", +}; + +// HTTP action for streaming AI responses +export const streamResponse = httpAction(async (ctx, request) => { + let body: { streamId?: string }; + + try { + body = await request.json(); + } catch { + return new Response(JSON.stringify({ error: "Invalid JSON body" }), { + status: 400, + headers: { "Content-Type": "application/json", ...corsHeaders }, + }); + } + + const { streamId } = body; + + // Validate streamId + if (!streamId) { + return new Response(JSON.stringify({ error: "Missing streamId" }), { + status: 400, + headers: { "Content-Type": "application/json", ...corsHeaders }, + }); + } + + // Get the question and model from the database + const session = await ctx.runQuery(internal.askAI.getSessionByStreamId, { streamId }); + + if (!session) { + return new Response(JSON.stringify({ error: "Session not found" }), { + status: 404, + headers: { "Content-Type": "application/json", ...corsHeaders }, + }); + } + + const { question, model } = session; + + console.log("Ask AI received:", { + streamId: streamId.slice(0, 20), + question: question.slice(0, 50), + model + }); + + // Pre-fetch search results before starting the stream + let searchResults: Array<{ title: string; slug: string; type: string; content: string }> = []; + let searchError: string | null = null; + + try { + const apiKey = process.env.OPENAI_API_KEY; + if (!apiKey) { + searchError = "OPENAI_API_KEY not configured. 
Please add it to your Convex dashboard environment variables."; + } else { + const openai = new OpenAI({ apiKey }); + + console.log("Generating embedding for query:", question.trim().slice(0, 50)); + + const embeddingResponse = await openai.embeddings.create({ + model: "text-embedding-ada-002", + input: question.trim(), + }); + const queryEmbedding = embeddingResponse.data[0].embedding; + + console.log("Embedding generated, searching..."); + + // Search posts + const postResults = await ctx.vectorSearch("posts", "by_embedding", { + vector: queryEmbedding, + limit: 5, + filter: (q) => q.eq("published", true), + }); + + // Search pages + const pageResults = await ctx.vectorSearch("pages", "by_embedding", { + vector: queryEmbedding, + limit: 5, + filter: (q) => q.eq("published", true), + }); + + console.log("Found:", postResults.length, "posts,", pageResults.length, "pages"); + + // Fetch full documents + const posts = await ctx.runQuery(internal.semanticSearchQueries.fetchPostsByIds, { + ids: postResults.map((r) => r._id), + }); + const pages = await ctx.runQuery(internal.semanticSearchQueries.fetchPagesByIds, { + ids: pageResults.map((r) => r._id), + }); + + // Build results + const results: Array<{ title: string; slug: string; type: string; content: string; score: number }> = []; + + for (const result of postResults) { + const post = posts.find((p) => p._id === result._id); + if (post) { + results.push({ + title: post.title, + slug: post.slug, + type: "post", + content: post.content, + score: result._score, + }); + } + } + + for (const result of pageResults) { + const page = pages.find((p) => p._id === result._id); + if (page) { + results.push({ + title: page.title, + slug: page.slug, + type: "page", + content: page.content, + score: result._score, + }); + } + } + + results.sort((a, b) => b.score - a.score); + searchResults = results.slice(0, 5); + + console.log("Search completed, found", searchResults.length, "relevant results"); + } + } catch (error) { + console.error("Search error:", error); + searchError = error instanceof Error ? error.message : "Search failed"; + } + + // Now start the streaming with pre-fetched results + const generateAnswer = async ( + _ctx: unknown, + _request: unknown, + _streamId: unknown, + appendChunk: (chunk: string) => Promise + ) => { + try { + // Handle search errors + if (searchError) { + await appendChunk(`**Error:** ${searchError}`); + return; + } + + if (searchResults.length === 0) { + await appendChunk("I couldn't find any relevant content to answer your question. Please make sure:\n\n1. Semantic search is enabled in siteConfig.ts\n2. Content has been synced with `npm run sync`\n3. 
OPENAI_API_KEY is configured in Convex dashboard"); + return; + } + + // Build context from search results + const contextParts = searchResults.map( + (r) => `## ${r.title}\nURL: /${r.slug}\n\n${r.content.slice(0, 2000)}` + ); + const context = contextParts.join("\n\n---\n\n"); + + const fullPrompt = `Based on the following content from the website, answer this question: "${question}" + +CONTEXT: +${context} + +Please provide a helpful answer based on the context above.`; + + // Generate response with selected model + if (model === "gpt-4o") { + const openaiApiKey = process.env.OPENAI_API_KEY; + if (!openaiApiKey) { + await appendChunk("**Error:** OPENAI_API_KEY not configured."); + return; + } + + const openai = new OpenAI({ apiKey: openaiApiKey }); + const stream = await openai.chat.completions.create({ + model: "gpt-4o", + messages: [ + { role: "system", content: RAG_SYSTEM_PROMPT }, + { role: "user", content: fullPrompt }, + ], + stream: true, + }); + + for await (const chunk of stream) { + const content = chunk.choices[0]?.delta?.content; + if (content) { + await appendChunk(content); + } + } + } else { + // Use Anthropic (default) + const anthropicApiKey = process.env.ANTHROPIC_API_KEY; + if (!anthropicApiKey) { + await appendChunk("**Error:** ANTHROPIC_API_KEY not configured in Convex dashboard."); + return; + } + + const anthropic = new Anthropic({ apiKey: anthropicApiKey }); + + // Use non-streaming for more reliable error handling + const response = await anthropic.messages.create({ + model: "claude-sonnet-4-20250514", + max_tokens: 2048, + system: RAG_SYSTEM_PROMPT, + messages: [{ role: "user", content: fullPrompt }], + }); + + // Extract text from response + for (const block of response.content) { + if (block.type === "text") { + // Stream word by word for better UX + const words = block.text.split(/(\s+)/); + for (const word of words) { + await appendChunk(word); + } + } + } + } + + // Add source citations + await appendChunk("\n\n---\n\n**Sources:**\n"); + for (const source of searchResults) { + await appendChunk(`- [${source.title}](/${source.slug})\n`); + } + } catch (error) { + const errorMessage = error instanceof Error ? 
error.message : "Unknown error"; + console.error("Generation error:", error); + + try { + await appendChunk(`\n\n**Error:** ${errorMessage}`); + } catch { + // Stream may already be closed, ignore + } + } + }; + + const response = await streaming.stream( + ctx, + request, + streamId as StreamId, + generateAnswer + ); + + // Set CORS headers + response.headers.set("Access-Control-Allow-Origin", "*"); + response.headers.set("Access-Control-Allow-Methods", "POST, OPTIONS"); + response.headers.set("Access-Control-Allow-Headers", "Content-Type"); + response.headers.set("Vary", "Origin"); + + return response; +}); + +// CORS preflight handler +export const streamResponseOptions = httpAction(async () => { + return new Response(null, { + status: 204, + headers: { + "Access-Control-Allow-Origin": "*", + "Access-Control-Allow-Methods": "POST, OPTIONS", + "Access-Control-Allow-Headers": "Content-Type", + "Access-Control-Max-Age": "86400", + }, + }); +}); + +// Check if Ask AI is properly configured (environment variables set) +export const checkConfiguration = action({ + args: {}, + returns: v.object({ + configured: v.boolean(), + hasOpenAI: v.boolean(), + hasAnthropic: v.boolean(), + missingKeys: v.array(v.string()), + }), + handler: async () => { + const hasOpenAI = !!process.env.OPENAI_API_KEY; + const hasAnthropic = !!process.env.ANTHROPIC_API_KEY; + + const missingKeys: string[] = []; + if (!hasOpenAI) missingKeys.push("OPENAI_API_KEY"); + if (!hasAnthropic) missingKeys.push("ANTHROPIC_API_KEY"); + + // Ask AI requires at least OPENAI_API_KEY for embeddings + // and either ANTHROPIC_API_KEY or OPENAI_API_KEY for LLM + const configured = hasOpenAI && (hasAnthropic || hasOpenAI); + + return { + configured, + hasOpenAI, + hasAnthropic, + missingKeys, + }; + }, +}); diff --git a/convex/askAI.ts b/convex/askAI.ts new file mode 100644 index 0000000..de83afa --- /dev/null +++ b/convex/askAI.ts @@ -0,0 +1,61 @@ +import { v } from "convex/values"; +import { mutation, query, internalQuery } from "./_generated/server"; +import { components } from "./_generated/api"; +import { PersistentTextStreaming, StreamIdValidator, StreamId } from "@convex-dev/persistent-text-streaming"; + +// Initialize Persistent Text Streaming component (works in Convex runtime) +const streaming = new PersistentTextStreaming(components.persistentTextStreaming); + +// Create a new Ask AI session with streaming +export const createSession = mutation({ + args: { + question: v.string(), + model: v.optional(v.string()), + }, + returns: v.object({ + sessionId: v.id("askAISessions"), + streamId: v.string(), + }), + handler: async (ctx, { question, model }) => { + const streamId = await streaming.createStream(ctx); + const sessionId = await ctx.db.insert("askAISessions", { + question, + streamId, + model: model || "claude-sonnet-4-20250514", + createdAt: Date.now(), + }); + return { sessionId, streamId }; + }, +}); + +// Get stream body for database fallback (used by useStream hook) +export const getStreamBody = query({ + args: { + streamId: StreamIdValidator, + }, + handler: async (ctx, { streamId }) => { + return await streaming.getStreamBody(ctx, streamId as StreamId); + }, +}); + +// Internal query to get session by streamId (used by HTTP action) +export const getSessionByStreamId = internalQuery({ + args: { + streamId: v.string(), + }, + returns: v.union( + v.object({ + question: v.string(), + model: v.optional(v.string()), + }), + v.null() + ), + handler: async (ctx, { streamId }) => { + const session = await ctx.db + 
.query("askAISessions") + .withIndex("by_stream", (q) => q.eq("streamId", streamId)) + .first(); + if (!session) return null; + return { question: session.question, model: session.model }; + }, +}); diff --git a/convex/convex.config.ts b/convex/convex.config.ts index 53031cb..6370927 100644 --- a/convex/convex.config.ts +++ b/convex/convex.config.ts @@ -1,5 +1,6 @@ import { defineApp } from "convex/server"; import aggregate from "@convex-dev/aggregate/convex.config.js"; +import persistentTextStreaming from "@convex-dev/persistent-text-streaming/convex.config"; const app = defineApp(); @@ -12,5 +13,8 @@ app.use(aggregate, { name: "totalPageViews" }); // Aggregate component for unique visitors count app.use(aggregate, { name: "uniqueVisitors" }); +// Persistent text streaming for real-time AI responses in Ask AI feature +app.use(persistentTextStreaming); + export default app; diff --git a/convex/http.ts b/convex/http.ts index 156044e..e8d71dc 100644 --- a/convex/http.ts +++ b/convex/http.ts @@ -2,6 +2,7 @@ import { httpRouter } from "convex/server"; import { httpAction } from "./_generated/server"; import { api } from "./_generated/api"; import { rssFeed, rssFullFeed } from "./rss"; +import { streamResponse, streamResponseOptions } from "./askAI.node"; const http = httpRouter(); @@ -399,4 +400,18 @@ http.route({ }), }); +// Ask AI streaming endpoint for RAG-based Q&A +http.route({ + path: "/ask-ai-stream", + method: "POST", + handler: streamResponse, +}); + +// CORS preflight for Ask AI endpoint +http.route({ + path: "/ask-ai-stream", + method: "OPTIONS", + handler: streamResponseOptions, +}); + export default http; diff --git a/convex/schema.ts b/convex/schema.ts index ee3e2c5..1c95585 100644 --- a/convex/schema.ts +++ b/convex/schema.ts @@ -226,4 +226,22 @@ export default defineSchema({ createdAt: v.number(), // Timestamp when submitted emailSentAt: v.optional(v.number()), // Timestamp when email was sent (if applicable) }).index("by_createdAt", ["createdAt"]), + + // Ask AI sessions for header AI chat feature + // Stores questions and stream IDs for RAG-based Q&A + askAISessions: defineTable({ + question: v.string(), // User's question + streamId: v.string(), // Persistent text streaming ID + model: v.optional(v.string()), // Selected AI model + createdAt: v.number(), // Timestamp when session was created + sources: v.optional( + v.array( + v.object({ + title: v.string(), + slug: v.string(), + type: v.string(), + }) + ) + ), // Optional sources cited in the response + }).index("by_stream", ["streamId"]), }); diff --git a/files.md b/files.md index b26df77..e7a9d8a 100644 --- a/files.md +++ b/files.md @@ -35,7 +35,7 @@ A brief description of each file in the codebase. 
| File | Description | | --------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `siteConfig.ts` | Centralized site configuration (name, logo, blog page, posts display with homepage post limit and read more link, featured section with configurable title via featuredTitle, GitHub contributions, nav order, inner page logo settings, hardcoded navigation items for React routes, GitHub repository config for AI service raw URLs, font family configuration, right sidebar configuration, footer configuration with markdown support, social footer configuration, homepage configuration, AI chat configuration, aiDashboard configuration with multi-model support for text chat and image generation, newsletter configuration with admin and notifications, contact form configuration, weekly digest configuration, stats page configuration with public/private toggle, dashboard configuration with optional WorkOS authentication via requireAuth, image lightbox configuration with enabled toggle, semantic search configuration with enabled toggle and disabled by default to avoid blocking forks without OPENAI_API_KEY, twitter configuration for Twitter Cards meta tags) | +| `siteConfig.ts` | Centralized site configuration (name, logo, blog page, posts display with homepage post limit and read more link, featured section with configurable title via featuredTitle, GitHub contributions, nav order, inner page logo settings, hardcoded navigation items for React routes, GitHub repository config for AI service raw URLs, font family configuration, right sidebar configuration, footer configuration with markdown support, social footer configuration, homepage configuration, AI chat configuration, aiDashboard configuration with multi-model support for text chat and image generation, newsletter configuration with admin and notifications, contact form configuration, weekly digest configuration, stats page configuration with public/private toggle, dashboard configuration with optional WorkOS authentication via requireAuth, image lightbox configuration with enabled toggle, semantic search configuration with enabled toggle and disabled by default to avoid blocking forks without OPENAI_API_KEY, twitter configuration for Twitter Cards meta tags, askAI configuration with enabled toggle, default model, and available models for header Ask AI feature) | ### Pages (`src/pages/`) @@ -76,6 +76,7 @@ A brief description of each file in the codebase. | `NewsletterSignup.tsx` | Newsletter signup form component for email-only subscriptions. Displays configurable title/description, validates email, and submits to Convex. Shows on home, blog page, and posts based on siteConfig.newsletter settings. Supports frontmatter override via newsletter: true/false. Includes honeypot field for bot protection. | | `ContactForm.tsx` | Contact form component with name, email, and message fields. Displays when contactForm: true in frontmatter. Submits to Convex which sends email via AgentMail to configured recipient. Requires AGENTMAIL_API_KEY and AGENTMAIL_INBOX environment variables. Includes honeypot field for bot protection. 
| | `SocialFooter.tsx` | Social footer component with social icons on left (GitHub, Twitter/X, LinkedIn, Instagram, YouTube, TikTok, Discord, Website) and copyright on right. Configurable via siteConfig.socialFooter. Shows below main footer on homepage, blog posts, and pages. Supports frontmatter override via showSocialFooter: true/false. Auto-updates copyright year. Exports `platformIcons` for reuse in header. | +| `AskAIModal.tsx` | Ask AI chat modal for RAG-based Q&A about site content. Opens via header button (Cmd+J) when enabled. Uses Convex Persistent Text Streaming for real-time responses. Supports model selection (Claude, GPT-4o). Features streaming messages with markdown rendering, internal link handling via React Router, and source citations. Requires siteConfig.askAI.enabled and siteConfig.semanticSearch.enabled. | ### Context (`src/context/`) @@ -109,7 +110,7 @@ A brief description of each file in the codebase. | File | Description | | ------------------ | ------------------------------------------------------------------------------------------------------------------ | -| `schema.ts` | Database schema (posts, pages, viewCounts, pageViews, activeSessions, aiChats, newsletterSubscribers, newsletterSentPosts, contactMessages) with indexes for tag queries (by_tags), AI queries, blog featured posts (by_blogFeatured), source tracking (by_source), and vector search (by_embedding). Posts and pages include showSocialFooter, showImageAtTop, blogFeatured, contactForm, source, and embedding fields for frontmatter control, cloud CMS tracking, and semantic search. | +| `schema.ts` | Database schema (posts, pages, viewCounts, pageViews, activeSessions, aiChats, aiGeneratedImages, newsletterSubscribers, newsletterSentPosts, contactMessages, askAISessions) with indexes for tag queries (by_tags), AI queries, blog featured posts (by_blogFeatured), source tracking (by_source), and vector search (by_embedding). Posts and pages include showSocialFooter, showImageAtTop, blogFeatured, contactForm, source, and embedding fields for frontmatter control, cloud CMS tracking, and semantic search. askAISessions stores question, streamId, model, and sources for Ask AI RAG feature. | | `cms.ts` | CRUD mutations for dashboard cloud CMS: createPost, updatePost, deletePost, createPage, updatePage, deletePage, exportPostAsMarkdown, exportPageAsMarkdown. Posts/pages created via dashboard have `source: "dashboard"` (protected from sync overwrites). | | `importAction.ts` | Server-side Convex action for direct URL import via Firecrawl API. Scrapes URL, converts to markdown, saves directly to database with `source: "dashboard"`. Requires FIRECRAWL_API_KEY environment variable. | | `posts.ts` | Queries and mutations for blog posts, view counts, getAllTags, getPostsByTag, getRelatedPosts, and getBlogFeaturedPosts. Includes tag-based queries for tag pages and related posts functionality. | @@ -130,7 +131,9 @@ A brief description of each file in the codebase. | `newsletter.ts` | Newsletter mutations and queries: subscribe, unsubscribe, getSubscriberCount, getActiveSubscribers, getAllSubscribers (admin), deleteSubscriber (admin), getNewsletterStats, getPostsForNewsletter, wasPostSent, recordPostSent, scheduleSendPostNewsletter, scheduleSendCustomNewsletter, scheduleSendStatsSummary, getStatsForSummary. | | `newsletterActions.ts` | Newsletter actions (Node.js runtime): sendPostNewsletter, sendCustomNewsletter, sendWeeklyDigest, notifyNewSubscriber, sendWeeklyStatsSummary. Uses AgentMail SDK for email delivery. 
Includes markdown-to-HTML conversion for custom emails. | | `contact.ts` | Contact form mutations and actions: submitContact, sendContactEmail (AgentMail API), markEmailSent. | -| `convex.config.ts` | Convex app configuration with aggregate component registrations (pageViewsByPath, totalPageViews, uniqueVisitors) | +| `askAI.ts` | Ask AI session management: createSession mutation (creates streaming session with question/model in DB), getStreamBody query (for database fallback), getSessionByStreamId internal query (retrieves question/model for HTTP action). Uses Persistent Text Streaming component. | +| `askAI.node.ts` | Ask AI HTTP action for streaming responses (Node.js runtime). Retrieves question from database, performs vector search using existing semantic search embeddings, generates AI response via Anthropic Claude or OpenAI GPT-4o, streams via appendChunk. Includes CORS headers and source citations. | +| `convex.config.ts` | Convex app configuration with aggregate component registrations (pageViewsByPath, totalPageViews, uniqueVisitors) and persistentTextStreaming component | | `tsconfig.json` | Convex TypeScript configuration | ### HTTP Endpoints (defined in `http.ts`) @@ -148,6 +151,7 @@ A brief description of each file in the codebase. | `/.well-known/ai-plugin.json` | AI plugin manifest | | `/openapi.yaml` | OpenAPI 3.0 specification | | `/llms.txt` | AI agent discovery | +| `/ask-ai-stream` | Ask AI streaming endpoint for RAG-based Q&A (POST with streamId) | ## Content (`content/blog/`) diff --git a/package-lock.json b/package-lock.json index c916741..1ac1ed2 100644 --- a/package-lock.json +++ b/package-lock.json @@ -10,6 +10,7 @@ "dependencies": { "@anthropic-ai/sdk": "^0.71.2", "@convex-dev/aggregate": "^0.2.0", + "@convex-dev/persistent-text-streaming": "^0.3.0", "@convex-dev/workos": "^0.0.1", "@google/genai": "^1.0.1", "@mendable/firecrawl-js": "^1.21.1", @@ -391,6 +392,17 @@ "convex": "^1.24.8" } }, + "node_modules/@convex-dev/persistent-text-streaming": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/@convex-dev/persistent-text-streaming/-/persistent-text-streaming-0.3.0.tgz", + "integrity": "sha512-y7CteewFHrBKhVSoLxTMEwWPEmc/3J+BTJ+x+8pvh5DCUlwN80eWmfojpmGOQr7xSc6UC/c7DxlZXQPN7dVlKg==", + "license": "Apache-2.0", + "peerDependencies": { + "convex": "^1.24.8", + "react": "~18.3.1 || ^19.0.0", + "react-dom": "~18.3.1 || ^19.0.0" + } + }, "node_modules/@convex-dev/workos": { "version": "0.0.1", "resolved": "https://registry.npmjs.org/@convex-dev/workos/-/workos-0.0.1.tgz", diff --git a/package.json b/package.json index ec82225..7a9615f 100644 --- a/package.json +++ b/package.json @@ -29,6 +29,7 @@ "dependencies": { "@anthropic-ai/sdk": "^0.71.2", "@convex-dev/aggregate": "^0.2.0", + "@convex-dev/persistent-text-streaming": "^0.3.0", "@convex-dev/workos": "^0.0.1", "@google/genai": "^1.0.1", "@mendable/firecrawl-js": "^1.21.1", diff --git a/public/images/askai.png b/public/images/askai.png new file mode 100644 index 0000000..90a6b73 Binary files /dev/null and b/public/images/askai.png differ diff --git a/public/llms.txt b/public/llms.txt index fb752da..f1e24e8 100644 --- a/public/llms.txt +++ b/public/llms.txt @@ -1,6 +1,6 @@ # llms.txt - Information for AI assistants and LLMs # Learn more: https://llmstxt.org/ -# Last updated: 2026-01-06T02:32:19.579Z +# Last updated: 2026-01-06T21:21:00.309Z > Your content is instantly available to browsers, LLMs, and AI agents. 
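For anyone wiring a custom client against the new endpoint, here is a minimal sketch of driving `/ask-ai-stream` directly with `fetch`. It is illustrative only and assumes a placeholder `.convex.site` deployment URL and plain-text streaming chunks; in the app itself, the `useStream` hook from `@convex-dev/persistent-text-streaming/react` issues this request automatically once `createSession` returns a `streamId`.

```typescript
// Illustrative sketch only: call the Ask AI streaming endpoint directly.
// Assumes a placeholder deployment URL; the HTTP action expects a JSON
// body of { streamId } and streams the answer back as text chunks.
async function askDirect(streamId: string): Promise<string> {
  const convexSiteUrl = "https://your-deployment.convex.site"; // placeholder

  const response = await fetch(`${convexSiteUrl}/ask-ai-stream`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ streamId }),
  });
  if (!response.ok || !response.body) {
    throw new Error(`Ask AI request failed: ${response.status}`);
  }

  // Accumulate streamed chunks as they arrive.
  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  let answer = "";
  for (;;) {
    const { value, done } = await reader.read();
    if (done) break;
    answer += decoder.decode(value, { stream: true });
  }
  return answer;
}
```

The accumulated text is the markdown answer with the appended `**Sources:**` list, matching what the HTTP action streams via `appendChunk`.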
diff --git a/public/raw/about.md b/public/raw/about.md index 0e77eb9..6381b8a 100644 --- a/public/raw/about.md +++ b/public/raw/about.md @@ -2,7 +2,7 @@ --- Type: page -Date: 2026-01-06 +Date: 2026-01-07 --- An open-source publishing framework built for AI agents and developers to ship websites, docs, or blogs. Write markdown, sync from the terminal. Your content is instantly available to browsers, LLMs, and AI agents. Built on Convex and Netlify. @@ -84,6 +84,7 @@ It's a hybrid: developer workflow for publishing + real-time delivery like a dyn - Dual search modes: Keyword (exact match) and Semantic (meaning-based) with Cmd+K toggle - Semantic search uses OpenAI embeddings for finding conceptually similar content +- Ask AI header button (Cmd+J) for RAG-based Q&A about site content with streaming responses - Full text search with Command+K shortcut and result highlighting - Static raw markdown files at `/raw/{slug}.md` - RSS feeds (`/rss.xml` and `/rss-full.xml`) and sitemap for SEO diff --git a/public/raw/changelog.md b/public/raw/changelog.md index ac56d5c..9a77950 100644 --- a/public/raw/changelog.md +++ b/public/raw/changelog.md @@ -2,11 +2,102 @@ --- Type: page -Date: 2026-01-06 +Date: 2026-01-07 --- All notable changes to this project. +## v2.11.0 + +Released January 6, 2026 + +**Ask AI header button with RAG-based Q&A** + +New header button that opens a chat modal for asking questions about site content. Uses semantic search to find relevant posts and pages, then generates AI responses with source citations. + +**Features:** + +- Header button with sparkle icon (before search button) +- Keyboard shortcuts: Cmd+J or Cmd+/ (Mac), Ctrl+J or Ctrl+/ (Windows/Linux) +- Real-time streaming responses via Convex Persistent Text Streaming +- Model selector: Claude Sonnet 4 (default) or GPT-4o +- Markdown rendering with syntax highlighting +- Internal links use React Router for seamless navigation +- Source citations with links to referenced content +- Copy response button (hover to reveal) for copying AI answers +- Chat history within session (clears on page refresh) +- Clear chat button to reset conversation + +**How it works:** + +1. User question is stored in database with session ID +2. Query is converted to embedding using OpenAI text-embedding-ada-002 +3. Vector search finds top 5 relevant posts/pages +4. Content is sent to selected AI model with RAG system prompt +5. 
Response streams in real-time with source citations appended + +**Configuration:** + +Enable in `src/config/siteConfig.ts`: + +```typescript +askAI: { + enabled: true, + defaultModel: "claude-sonnet-4-20250514", + models: [ + { id: "claude-sonnet-4-20250514", name: "Claude Sonnet 4", provider: "anthropic" }, + { id: "gpt-4o", name: "GPT-4o", provider: "openai" }, + ], +}, +``` + +**Requirements:** + +- `semanticSearch.enabled: true` (for embeddings) +- `OPENAI_API_KEY` in Convex (for embeddings) +- `ANTHROPIC_API_KEY` in Convex (for Claude models) +- Run `npm run sync` to generate embeddings + +**Technical details:** + +- New component: `src/components/AskAIModal.tsx` +- New Convex files: `convex/askAI.ts` (mutations/queries), `convex/askAI.node.ts` (HTTP action) +- New table: `askAISessions` with `by_stream` index +- HTTP endpoint: `/ask-ai-stream` for streaming responses +- Uses `@convex-dev/persistent-text-streaming` component +- Separated Node.js runtime (askAI.node.ts) from regular runtime (askAI.ts) + +Updated files: `convex/schema.ts`, `convex/askAI.ts`, `convex/askAI.node.ts`, `convex/http.ts`, `convex/convex.config.ts`, `src/components/AskAIModal.tsx`, `src/components/Layout.tsx`, `src/config/siteConfig.ts`, `src/styles/global.css` + +## v2.10.2 + +Released January 6, 2026 + +**SEO fixes for GitHub Issue #4** + +Seven SEO issues resolved to improve search engine optimization: + +1. **Canonical URL** - Dynamic canonical link tags added client-side for posts and pages +2. **Single H1 per page** - Markdown H1s demoted to H2 elements with `.blog-h1-demoted` class (maintains H1 visual styling) +3. **DOM order fix** - Article now loads before sidebar in DOM for better SEO (CSS `order` property maintains visual layout) +4. **X-Robots-Tag** - HTTP header added via netlify.toml (public routes indexed, dashboard/API routes noindexed) +5. **Hreflang tags** - Self-referencing hreflang (en, x-default) for language targeting +6. **og:url consistency** - Uses same canonicalUrl variable as canonical link tag +7. **twitter:site** - New `TwitterConfig` in siteConfig.ts for Twitter Cards + +**Configuration:** + +Add your Twitter handle in `src/config/siteConfig.ts`: + +```typescript +twitter: { + site: "@yourhandle", + creator: "@yourhandle", +}, +``` + +**Updated files:** `src/config/siteConfig.ts`, `src/pages/Post.tsx`, `src/components/BlogPost.tsx`, `src/styles/global.css`, `convex/http.ts`, `netlify.toml`, `index.html`, `fork-config.json.example` + ## v2.10.1 Released January 5, 2026 diff --git a/public/raw/contact.md b/public/raw/contact.md index 0b6bc53..62ae28f 100644 --- a/public/raw/contact.md +++ b/public/raw/contact.md @@ -2,7 +2,7 @@ --- Type: page -Date: 2026-01-06 +Date: 2026-01-07 --- You found the contact page. Nice diff --git a/public/raw/docs-ask-ai.md b/public/raw/docs-ask-ai.md new file mode 100644 index 0000000..9d599ca --- /dev/null +++ b/public/raw/docs-ask-ai.md @@ -0,0 +1,159 @@ +# Ask AI + +--- +Type: page +Date: 2026-01-07 +--- + +## Ask AI + +Ask AI is a header button that opens a chat modal for asking questions about your site content. It uses RAG (Retrieval-Augmented Generation) to find relevant content and generate AI responses with source citations. + +Press `Cmd+J` or `Cmd+/` (Mac) or `Ctrl+J` or `Ctrl+/` (Windows/Linux) to open the Ask AI modal. + +--- + +### How Ask AI works + +``` ++------------------+ +-------------------+ +------------------+ +| User question |--->| OpenAI Embedding |--->| Vector Search | +| "How do I..." 
| | text-embedding- | | Find top 5 | +| | | ada-002 | | relevant pages | ++------------------+ +-------------------+ +--------+---------+ + | + v ++------------------+ +-------------------+ +------------------+ +| Streaming |<---| AI Model |<---| RAG Context | +| Response with | | Claude/GPT-4o | | Build prompt | +| Source Links | | generates answer | | with content | ++------------------+ +-------------------+ +------------------+ +``` + +1. Your question is stored in the database with a session ID +2. Query is converted to a vector embedding using OpenAI +3. Convex vector search finds the 5 most relevant posts and pages +4. Content is combined into a RAG prompt with system instructions +5. AI model generates an answer based only on your site content +6. Response streams in real-time with source citations appended + +### Features + +| Feature | Description | +| ------------------ | ------------------------------------------------------ | +| Streaming | Responses appear word-by-word in real-time | +| Model Selection | Choose between Claude Sonnet 4 or GPT-4o | +| Source Citations | Every response includes links to source content | +| Markdown Rendering | Responses support full markdown formatting | +| Internal Links | Links to your pages use React Router (no page reload) | +| Copy Response | Hover over any response to copy it to clipboard | +| Keyboard Shortcuts | Cmd+J or Cmd+/ to open, Escape to close, Enter to send | + +### Configuration + +Ask AI requires semantic search to be enabled (for embeddings): + +```typescript +// src/config/siteConfig.ts +semanticSearch: { + enabled: true, +}, + +askAI: { + enabled: true, + defaultModel: "claude-sonnet-4-20250514", + models: [ + { id: "claude-sonnet-4-20250514", name: "Claude Sonnet 4", provider: "anthropic" }, + { id: "gpt-4o", name: "GPT-4o", provider: "openai" }, + ], +}, +``` + +### Environment variables + +Set these in your Convex dashboard: + +```bash +# Required for embeddings (vector search) +npx convex env set OPENAI_API_KEY sk-your-key-here + +# Required for Claude models +npx convex env set ANTHROPIC_API_KEY sk-ant-your-key-here +``` + +After setting environment variables, run `npm run sync` to generate embeddings for your content. + +### When to use Ask AI vs Search + +| Use Case | Tool | +| -------------------------------- | ----------------------- | +| Quick navigation to a known page | Keyword Search (Cmd+K) | +| Find exact code or commands | Keyword Search | +| "How do I do X?" 
questions | Ask AI (Cmd+J or Cmd+/) | +| Understanding a concept | Ask AI | +| Need highlighted matches on page | Keyword Search | +| Want AI-synthesized answers | Ask AI | + +### Technical details + +**Frontend:** + +| File | Purpose | +| ------------------------------- | ------------------------------------ | +| `src/components/AskAIModal.tsx` | Chat modal with streaming messages | +| `src/components/Layout.tsx` | Header button and keyboard shortcuts | +| `src/config/siteConfig.ts` | AskAIConfig interface and settings | + +**Backend (Convex):** + +| File | Purpose | +| ------------------------- | ----------------------------------------------- | +| `convex/askAI.ts` | Session mutations and queries (regular runtime) | +| `convex/askAI.node.ts` | HTTP streaming action (Node.js runtime) | +| `convex/schema.ts` | askAISessions table definition | +| `convex/http.ts` | /ask-ai-stream endpoint registration | +| `convex/convex.config.ts` | persistentTextStreaming component | + +**Database:** + +The `askAISessions` table stores: + +- `question`: The user's question +- `streamId`: Persistent Text Streaming ID +- `model`: Selected AI model ID +- `createdAt`: Timestamp +- `sources`: Optional array of cited sources + +### Limitations + +- **Requires semantic search**: Embeddings must be generated for content +- **API costs**: Each query costs embedding generation (~$0.0001) plus AI model usage +- **Latency**: ~1-3 seconds for initial response (embedding + search + AI) +- **Content scope**: Only searches published posts and pages +- **No conversation history**: Each session starts fresh (no multi-turn context) + +### Troubleshooting + +**"Failed to load response" error:** + +1. Check that `ANTHROPIC_API_KEY` or `OPENAI_API_KEY` is set in Convex +2. Verify the API key is valid and has credits +3. Check browser console for specific error messages + +**Empty or irrelevant responses:** + +1. Run `npm run sync` to ensure embeddings are generated +2. Check that `semanticSearch.enabled: true` in siteConfig +3. Verify content exists in your posts/pages + +**Modal doesn't open:** + +1. Check that `askAI.enabled: true` in siteConfig +2. Check that `semanticSearch.enabled: true` in siteConfig +3. 
Both conditions must be true for the button to appear + +### Resources + +- [Semantic Search Documentation](/docs-semantic-search) - How embeddings work +- [Convex Persistent Text Streaming](https://github.com/get-convex/persistent-text-streaming) - Streaming component +- [Convex Vector Search](https://docs.convex.dev/search/vector-search) - Vector search documentation \ No newline at end of file diff --git a/public/raw/docs-configuration.md b/public/raw/docs-configuration.md index cc4c23a..72f7d2b 100644 --- a/public/raw/docs-configuration.md +++ b/public/raw/docs-configuration.md @@ -2,7 +2,7 @@ --- Type: page -Date: 2026-01-06 +Date: 2026-01-07 --- ## Configuration diff --git a/public/raw/docs-content.md b/public/raw/docs-content.md index ed94144..1f40316 100644 --- a/public/raw/docs-content.md +++ b/public/raw/docs-content.md @@ -2,7 +2,7 @@ --- Type: page -Date: 2026-01-06 +Date: 2026-01-07 --- ## Content diff --git a/public/raw/docs-dashboard.md b/public/raw/docs-dashboard.md index 992cfbe..418b1e5 100644 --- a/public/raw/docs-dashboard.md +++ b/public/raw/docs-dashboard.md @@ -2,7 +2,7 @@ --- Type: page -Date: 2026-01-06 +Date: 2026-01-07 --- ## Dashboard diff --git a/public/raw/docs-deployment.md b/public/raw/docs-deployment.md index aa557dd..63bc0c9 100644 --- a/public/raw/docs-deployment.md +++ b/public/raw/docs-deployment.md @@ -2,7 +2,7 @@ --- Type: page -Date: 2026-01-06 +Date: 2026-01-07 --- ## Deployment diff --git a/public/raw/docs-frontmatter.md b/public/raw/docs-frontmatter.md index edce2cd..d411afe 100644 --- a/public/raw/docs-frontmatter.md +++ b/public/raw/docs-frontmatter.md @@ -2,7 +2,7 @@ --- Type: page -Date: 2026-01-06 +Date: 2026-01-07 --- ## Frontmatter diff --git a/public/raw/docs-search.md b/public/raw/docs-search.md index 57c34b3..88e71a4 100644 --- a/public/raw/docs-search.md +++ b/public/raw/docs-search.md @@ -2,7 +2,7 @@ --- Type: page -Date: 2026-01-06 +Date: 2026-01-07 --- ## Keyword Search diff --git a/public/raw/docs-semantic-search.md b/public/raw/docs-semantic-search.md index 42914a9..fa673e4 100644 --- a/public/raw/docs-semantic-search.md +++ b/public/raw/docs-semantic-search.md @@ -2,7 +2,7 @@ --- Type: page -Date: 2026-01-06 +Date: 2026-01-07 --- ## Semantic Search diff --git a/public/raw/documentation.md b/public/raw/documentation.md index 235df36..d5faa9e 100644 --- a/public/raw/documentation.md +++ b/public/raw/documentation.md @@ -2,7 +2,7 @@ --- Type: page -Date: 2026-01-06 +Date: 2026-01-07 --- ## Getting started diff --git a/public/raw/footer.md b/public/raw/footer.md index 12fd6ca..03eab29 100644 --- a/public/raw/footer.md +++ b/public/raw/footer.md @@ -2,7 +2,7 @@ --- Type: page -Date: 2026-01-06 +Date: 2026-01-07 --- Built with [Convex](https://convex.dev) for real-time sync and deployed on [Netlify](https://netlify.com). Read the [project on GitHub](https://github.com/waynesutton/markdown-site) to fork and deploy your own. View [real-time site stats](/stats). diff --git a/public/raw/home-intro.md b/public/raw/home-intro.md index b48fc2a..00ecefa 100644 --- a/public/raw/home-intro.md +++ b/public/raw/home-intro.md @@ -2,7 +2,7 @@ --- Type: page -Date: 2026-01-06 +Date: 2026-01-07 --- An open-source publishing framework built for AI agents and developers to ship **[docs](/docs)**, or **[blogs](/blog)** or **[websites](/)**. diff --git a/public/raw/index.md b/public/raw/index.md index 99f8746..b8e7b9f 100644 --- a/public/raw/index.md +++ b/public/raw/index.md @@ -67,12 +67,13 @@ agents. 
--> - **[Using Images in Blog Posts](/raw/using-images-in-posts.md)** - Learn how to add header images, inline images, and Open Graph images to your markdown posts. - Date: 2025-12-14 | Reading time: 4 min read | Tags: images, tutorial, markdown, open-graph -## Pages (15) +## Pages (16) - **[Footer](/raw/footer.md)** - **[Home Intro](/raw/home-intro.md)** - **[Documentation](/raw/documentation.md)** - **[About](/raw/about.md)** - An open-source publishing framework built for AI agents and developers to ship websites, docs, or blogs. +- **[Ask AI](/raw/docs-ask-ai.md)** - **[Content](/raw/docs-content.md)** - **[Search](/raw/docs-search.md)** - **[Semantic Search](/raw/docs-semantic-search.md)** @@ -87,7 +88,7 @@ agents. --> --- -**Total Content:** 18 posts, 15 pages +**Total Content:** 18 posts, 16 pages All content is available as raw markdown files at `/raw/{slug}.md` diff --git a/public/raw/newsletter.md b/public/raw/newsletter.md index c7a7406..a86256c 100644 --- a/public/raw/newsletter.md +++ b/public/raw/newsletter.md @@ -2,7 +2,7 @@ --- Type: page -Date: 2026-01-06 +Date: 2026-01-07 --- # Newsletter Demo Page diff --git a/public/raw/projects.md b/public/raw/projects.md index fbb9f7d..747fcf0 100644 --- a/public/raw/projects.md +++ b/public/raw/projects.md @@ -2,7 +2,7 @@ --- Type: page -Date: 2026-01-06 +Date: 2026-01-07 --- This markdown framework is open source and built to be extended. Here is what ships out of the box. diff --git a/src/components/AskAIModal.tsx b/src/components/AskAIModal.tsx new file mode 100644 index 0000000..ed212eb --- /dev/null +++ b/src/components/AskAIModal.tsx @@ -0,0 +1,423 @@ +import { useState, useEffect, useRef, useCallback } from "react"; +import { useMutation, useAction } from "convex/react"; +import { useStream } from "@convex-dev/persistent-text-streaming/react"; +import { StreamId } from "@convex-dev/persistent-text-streaming"; +import { api } from "../../convex/_generated/api"; +import { Link } from "react-router-dom"; +import ReactMarkdown from "react-markdown"; +import remarkGfm from "remark-gfm"; +import { + X, + PaperPlaneTilt, + Sparkle, + SpinnerGap, + Trash, + Copy, + Check, + Warning, +} from "@phosphor-icons/react"; +import { siteConfig } from "../config/siteConfig"; + +interface AskAIModalProps { + isOpen: boolean; + onClose: () => void; +} + +interface Message { + id: string; + role: "user" | "assistant"; + content: string; + streamId?: string; + isDriven?: boolean; +} + +// Streaming message component that uses useStream hook +function StreamingMessage({ + streamId, + isDriven, + convexUrl, + onCopy, + isCopied, +}: { + streamId: string; + isDriven: boolean; + convexUrl: string; + onCopy: (text: string) => void; + isCopied: boolean; +}) { + const { text, status } = useStream( + api.askAI.getStreamBody, + new URL(`${convexUrl}/ask-ai-stream`), + isDriven, + streamId as StreamId + ); + + const isLoading = status === "pending" || status === "streaming"; + // Show copy button when not loading and we have text (status could be "complete", "done", etc.) + const showCopyButton = !isLoading && status !== "error" && !!text; + + return ( +
+
+        {text ? (
+          <ReactMarkdown
+            remarkPlugins={[remarkGfm]}
+            components={{
+              a: ({ href, children }) => {
+                // Check if it's an internal link
+                if (href?.startsWith("/")) {
+                  return (
+                    <Link to={href}>
+                      {children}
+                    </Link>
+                  );
+                }
+                return (
+                  <a href={href}>
+                    {children}
+                  </a>
+                );
+              },
+            }}
+          >
+            {text}
+          </ReactMarkdown>
+        ) : (
+ + Searching and thinking... +
+ )} + {isLoading && text && |} +
+ {showCopyButton && ( + + )} + {status === "error" && ( +
Failed to load response
+ )} +
+ ); +} + +// Configuration status interface +interface ConfigStatus { + configured: boolean; + hasOpenAI: boolean; + hasAnthropic: boolean; + missingKeys: string[]; +} + +export default function AskAIModal({ isOpen, onClose }: AskAIModalProps) { + const [messages, setMessages] = useState([]); + const [inputValue, setInputValue] = useState(""); + const [isSubmitting, setIsSubmitting] = useState(false); + const [selectedModel, setSelectedModel] = useState( + siteConfig.askAI?.defaultModel || "claude-sonnet-4-20250514" + ); + const [drivenIds, setDrivenIds] = useState>(new Set()); + const [copiedId, setCopiedId] = useState(null); + const [configStatus, setConfigStatus] = useState(null); + const [configChecked, setConfigChecked] = useState(false); + + const inputRef = useRef(null); + const messagesEndRef = useRef(null); + + const createSession = useMutation(api.askAI.createSession); + const checkConfiguration = useAction(api.askAI.checkConfiguration); + + // Check configuration when modal opens + useEffect(() => { + if (isOpen && !configChecked) { + checkConfiguration({}) + .then((status) => { + setConfigStatus(status); + setConfigChecked(true); + }) + .catch((err) => { + console.error("Failed to check Ask AI configuration:", err); + setConfigChecked(true); + }); + } + }, [isOpen, configChecked, checkConfiguration]); + + // Handle copy message + const handleCopy = useCallback(async (content: string, messageId: string) => { + await navigator.clipboard.writeText(content); + setCopiedId(messageId); + setTimeout(() => setCopiedId(null), 2000); + }, []); + + // Get Convex URL from environment and convert to site URL for HTTP routes + // VITE_CONVEX_URL is like https://xxx.convex.cloud + // HTTP routes are served from https://xxx.convex.site + const convexCloudUrl = import.meta.env.VITE_CONVEX_URL as string; + const convexUrl = convexCloudUrl.replace(".convex.cloud", ".convex.site"); + + // Focus input when modal opens + useEffect(() => { + if (isOpen && inputRef.current) { + inputRef.current.focus(); + } + }, [isOpen]); + + // Auto-scroll to bottom on new messages + useEffect(() => { + messagesEndRef.current?.scrollIntoView({ behavior: "smooth" }); + }, [messages]); + + // Handle send message + const handleSend = useCallback(async () => { + if (!inputValue.trim() || isSubmitting) return; + + const question = inputValue.trim(); + setInputValue(""); + setIsSubmitting(true); + + // Add user message + const userMessageId = `user-${Date.now()}`; + setMessages((prev) => [ + ...prev, + { id: userMessageId, role: "user", content: question }, + ]); + + try { + // Create session with question and model stored in database + // The useStream hook will trigger the HTTP action which retrieves these from DB + const { streamId } = await createSession({ question, model: selectedModel }); + + // Add assistant message with stream + const assistantMessageId = `assistant-${Date.now()}`; + setMessages((prev) => [ + ...prev, + { + id: assistantMessageId, + role: "assistant", + content: "", + streamId, + isDriven: true, + }, + ]); + + // Mark this stream as driven by this client + // The useStream hook will make the HTTP POST request automatically + setDrivenIds((prev) => new Set(prev).add(streamId)); + } catch (error) { + console.error("Failed to create session:", error); + // Add error message + setMessages((prev) => [ + ...prev, + { + id: `error-${Date.now()}`, + role: "assistant", + content: "**Error:** Failed to start conversation. 
Please try again.", + }, + ]); + } finally { + setIsSubmitting(false); + } + }, [inputValue, isSubmitting, createSession, selectedModel]); + + // Handle keyboard events + const handleKeyDown = useCallback( + (e: React.KeyboardEvent) => { + if (e.key === "Escape") { + onClose(); + return; + } + + if (e.key === "Enter" && !e.shiftKey) { + e.preventDefault(); + handleSend(); + } + }, + [onClose, handleSend] + ); + + // Clear conversation + const handleClear = () => { + setMessages([]); + setDrivenIds(new Set()); + }; + + // Handle backdrop click + const handleBackdropClick = (e: React.MouseEvent) => { + if (e.target === e.currentTarget) { + onClose(); + } + }; + + if (!isOpen) return null; + + return ( +
+
+ {/* Header */} +
+
+ + Ask AI +
+
+ {/* Model selector */} + {siteConfig.askAI?.models && siteConfig.askAI.models.length > 1 && ( + + )} + + +
+
+ + {/* Configuration warning banner */} + {configChecked && configStatus && !configStatus.configured && ( +
+ +
+ Ask AI is not fully configured +

+ Missing environment variables in Convex dashboard:{" "} + {configStatus.missingKeys.join(", ")} +

+

+ Add these keys via: npx convex env set KEY_NAME value +

+
+
+ )} + + {/* Messages */} +
+ {messages.length === 0 && configStatus?.configured !== false && ( +
+ +

Ask a question about this site

+

+ I'll search the content and provide an answer with sources +

+
+ )} + + {messages.map((msg) => ( +
+ {msg.role === "user" ? ( +
+
+

{msg.content}

+
+
+ ) : msg.streamId ? ( + handleCopy(text, msg.id)} + isCopied={copiedId === msg.id} + /> + ) : ( +
+
+ + {msg.content} + +
+ {msg.content && ( + + )} +
+ )} +
+ ))} + +
+
+ + {/* Input */} +
+