mirror of https://github.com/waynesutton/markdown-site.git
synced 2026-01-12 04:09:14 +00:00
feat: Multi-model AI chat and image generation in Dashboard
convex/_generated/api.d.ts (vendored, 2 changes)
@@ -10,6 +10,7 @@
 import type * as aiChatActions from "../aiChatActions.js";
 import type * as aiChats from "../aiChats.js";
+import type * as aiImageGeneration from "../aiImageGeneration.js";
 import type * as contact from "../contact.js";
 import type * as contactActions from "../contactActions.js";
 import type * as crons from "../crons.js";
@@ -31,6 +32,7 @@ import type {
 declare const fullApi: ApiFromModules<{
   aiChatActions: typeof aiChatActions;
   aiChats: typeof aiChats;
+  aiImageGeneration: typeof aiImageGeneration;
   contact: typeof contact;
   contactActions: typeof contactActions;
   crons: typeof crons;
convex/aiChatActions.ts
@@ -9,8 +9,20 @@ import type {
   TextBlockParam,
   ImageBlockParam,
 } from "@anthropic-ai/sdk/resources/messages/messages";
+import OpenAI from "openai";
+import { GoogleGenAI, Content } from "@google/genai";
 import FirecrawlApp from "@mendable/firecrawl-js";
 import type { Id } from "./_generated/dataModel";
+import type { ChatCompletionMessageParam } from "openai/resources/chat/completions";
+
+// Model validator for multi-model support
+const modelValidator = v.union(
+  v.literal("claude-sonnet-4-20250514"),
+  v.literal("gpt-4o"),
+  v.literal("gemini-2.0-flash")
+);
+
+// Type for model selection
+type AIModel = "claude-sonnet-4-20250514" | "gpt-4o" | "gemini-2.0-flash";
+
 // Default system prompt for writing assistant
 const DEFAULT_SYSTEM_PROMPT = `You are a helpful writing assistant. Help users write clearly and concisely.
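// ---- Aside, not part of the commit: modelValidator and AIModel repeat the
// same three literals. A minimal sketch of deriving the type from the
// validator with Convex's Infer helper keeps them in sync automatically:
import { Infer } from "convex/values";
type AIModelDerived = Infer<typeof modelValidator>; // same union as AIModel, stated once
// ----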
@@ -82,14 +94,229 @@ async function scrapeUrl(url: string): Promise<{
   }
 }

+/**
+ * Get provider from model ID
+ */
+function getProviderFromModel(model: AIModel): "anthropic" | "openai" | "google" {
+  if (model.startsWith("claude")) return "anthropic";
+  if (model.startsWith("gpt")) return "openai";
+  if (model.startsWith("gemini")) return "google";
+  return "anthropic"; // Default fallback
+}
+
+/**
+ * Get the API key for a provider; returns null if not configured
+ */
+function getApiKeyForProvider(provider: "anthropic" | "openai" | "google"): string | null {
+  switch (provider) {
+    case "anthropic":
+      return process.env.ANTHROPIC_API_KEY || null;
+    case "openai":
+      return process.env.OPENAI_API_KEY || null;
+    case "google":
+      return process.env.GOOGLE_AI_API_KEY || null;
+  }
+}
+
+/**
+ * Build the "not configured" message for a provider
+ */
+function getNotConfiguredMessage(provider: "anthropic" | "openai" | "google"): string {
+  const configs = {
+    anthropic: {
+      name: "Claude (Anthropic)",
+      envVar: "ANTHROPIC_API_KEY",
+      consoleUrl: "https://console.anthropic.com/",
+      consoleName: "Anthropic Console",
+    },
+    openai: {
+      name: "GPT (OpenAI)",
+      envVar: "OPENAI_API_KEY",
+      consoleUrl: "https://platform.openai.com/api-keys",
+      consoleName: "OpenAI Platform",
+    },
+    google: {
+      name: "Gemini (Google)",
+      envVar: "GOOGLE_AI_API_KEY",
+      consoleUrl: "https://aistudio.google.com/apikey",
+      consoleName: "Google AI Studio",
+    },
+  };
+
+  const config = configs[provider];
+  return (
+    `**${config.name} is not configured.**\n\n` +
+    `To enable this model, add your \`${config.envVar}\` to the Convex environment variables.\n\n` +
+    `**Setup steps:**\n` +
+    `1. Get an API key from [${config.consoleName}](${config.consoleUrl})\n` +
+    `2. Add it to Convex: \`npx convex env set ${config.envVar} your-key-here\`\n` +
+    `3. For production, set it in the [Convex Dashboard](https://dashboard.convex.dev/)\n\n` +
+    `See the [Convex environment variables docs](https://docs.convex.dev/production/environment-variables) for more details.`
+  );
+}
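// ---- Aside, not part of the commit: how the three helpers compose, as a
// minimal illustrative sketch (the comments assume the matching env vars):
const exampleModel: AIModel = "gemini-2.0-flash";
const exampleProvider = getProviderFromModel(exampleModel); // "google"
const exampleKey = getApiKeyForProvider(exampleProvider); // GOOGLE_AI_API_KEY or null
const exampleReply = exampleKey ?? getNotConfiguredMessage(exampleProvider);
// ----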
+/**
+ * Call Anthropic Claude API
+ */
+async function callAnthropicApi(
+  apiKey: string,
+  model: string,
+  systemPrompt: string,
+  messages: Array<{
+    role: "user" | "assistant";
+    content: string | Array<ContentBlockParam>;
+  }>
+): Promise<string> {
+  const anthropic = new Anthropic({ apiKey });
+
+  const response = await anthropic.messages.create({
+    model,
+    max_tokens: 2048,
+    system: systemPrompt,
+    messages,
+  });
+
+  const textContent = response.content.find((block) => block.type === "text");
+  if (!textContent || textContent.type !== "text") {
+    throw new Error("No text content in Claude response");
+  }
+
+  return textContent.text;
+}
+
+/**
+ * Call OpenAI GPT API
+ */
+async function callOpenAIApi(
+  apiKey: string,
+  model: string,
+  systemPrompt: string,
+  messages: Array<{
+    role: "user" | "assistant";
+    content: string | Array<ContentBlockParam>;
+  }>
+): Promise<string> {
+  const openai = new OpenAI({ apiKey });
+
+  // Convert messages to OpenAI format
+  const openaiMessages: ChatCompletionMessageParam[] = [
+    { role: "system", content: systemPrompt },
+  ];
+
+  for (const msg of messages) {
+    if (typeof msg.content === "string") {
+      if (msg.role === "user") {
+        openaiMessages.push({ role: "user", content: msg.content });
+      } else {
+        openaiMessages.push({ role: "assistant", content: msg.content });
+      }
+    } else {
+      // Convert content blocks to OpenAI format
+      const content: Array<
+        { type: "text"; text: string } | { type: "image_url"; image_url: { url: string } }
+      > = [];
+      for (const block of msg.content) {
+        if (block.type === "text") {
+          content.push({ type: "text", text: block.text });
+        } else if (block.type === "image" && "source" in block && block.source.type === "url") {
+          content.push({ type: "image_url", image_url: { url: block.source.url } });
+        }
+      }
+      if (msg.role === "user") {
+        openaiMessages.push({
+          role: "user",
+          content: content.length === 1 && content[0].type === "text" ? content[0].text : content,
+        });
+      } else {
+        // Assistant messages only support string content in OpenAI
+        const textContent = content
+          .filter((c) => c.type === "text")
+          .map((c) => (c as { type: "text"; text: string }).text)
+          .join("\n");
+        openaiMessages.push({ role: "assistant", content: textContent });
+      }
+    }
+  }
+
+  const response = await openai.chat.completions.create({
+    model,
+    max_tokens: 2048,
+    messages: openaiMessages,
+  });
+
+  const textContent = response.choices[0]?.message?.content;
+  if (!textContent) {
+    throw new Error("No text content in OpenAI response");
+  }
+
+  return textContent;
+}
+
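// ---- Aside, not part of the commit: the inline cast when collapsing
// assistant content to plain text could be replaced by a type predicate,
// an illustrative refactor:
const isTextPart = (c: { type: string }): c is { type: "text"; text: string } =>
  c.type === "text";
// usage: content.filter(isTextPart).map((c) => c.text).join("\n")
// ----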
+/**
+ * Call Google Gemini API
+ */
+async function callGeminiApi(
+  apiKey: string,
+  model: string,
+  systemPrompt: string,
+  messages: Array<{
+    role: "user" | "assistant";
+    content: string | Array<ContentBlockParam>;
+  }>
+): Promise<string> {
+  const ai = new GoogleGenAI({ apiKey });
+
+  // Convert messages to Gemini format
+  const geminiMessages: Content[] = [];
+
+  for (const msg of messages) {
+    const role = msg.role === "assistant" ? "model" : "user";
+
+    if (typeof msg.content === "string") {
+      geminiMessages.push({
+        role,
+        parts: [{ text: msg.content }],
+      });
+    } else {
+      // Convert content blocks to Gemini format
+      const parts: Array<{ text: string } | { inlineData: { mimeType: string; data: string } }> = [];
+      for (const block of msg.content) {
+        if (block.type === "text") {
+          parts.push({ text: block.text });
+        }
+        // Note: Gemini handles images differently, would need base64 encoding
+        // For now, skip image blocks in Gemini
+      }
+      if (parts.length > 0) {
+        geminiMessages.push({ role, parts });
+      }
+    }
+  }
+
+  const response = await ai.models.generateContent({
+    model,
+    contents: geminiMessages,
+    config: {
+      systemInstruction: systemPrompt,
+      maxOutputTokens: 2048,
+    },
+  });
+
+  const textContent = response.candidates?.[0]?.content?.parts?.find(
+    (part: { text?: string }) => part.text
+  );
+
+  if (!textContent || !("text" in textContent)) {
+    throw new Error("No text content in Gemini response");
+  }
+
+  return textContent.text as string;
+}
+
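// ---- Aside, not part of the commit: the Gemini path skips image blocks, as
// the comment above notes. A sketch of forwarding a URL image block as
// base64 inlineData (assumes fetch and btoa exist in the action runtime):
async function toGeminiImagePart(url: string, mimeType: string) {
  const res = await fetch(url);
  const bytes = new Uint8Array(await res.arrayBuffer());
  let binary = "";
  for (const b of bytes) binary += String.fromCharCode(b);
  return { inlineData: { mimeType, data: btoa(binary) } };
}
// ----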
 /**
  * Generate AI response for a chat
- * Calls Claude API and saves the response
+ * Supports multiple AI providers: Anthropic, OpenAI, Google
  */
 export const generateResponse = action({
   args: {
     chatId: v.id("aiChats"),
     userMessage: v.string(),
+    model: v.optional(modelValidator),
     pageContext: v.optional(v.string()),
     attachments: v.optional(
       v.array(
@@ -105,17 +332,14 @@ export const generateResponse = action({
   },
   returns: v.string(),
   handler: async (ctx, args) => {
-    // Get API key - return friendly message if not configured
-    const apiKey = process.env.ANTHROPIC_API_KEY;
+    // Use default model if not specified
+    const selectedModel: AIModel = args.model || "claude-sonnet-4-20250514";
+    const provider = getProviderFromModel(selectedModel);
+
+    // Get API key for the selected provider - checked lazily, only when the model is used
+    const apiKey = getApiKeyForProvider(provider);
     if (!apiKey) {
-      const notConfiguredMessage =
-        "**AI chat is not configured on production.**\n\n" +
-        "To enable AI responses, add your `ANTHROPIC_API_KEY` to the Convex environment variables.\n\n" +
-        "**Setup steps:**\n" +
-        "1. Get an API key from [Anthropic Console](https://console.anthropic.com/)\n" +
-        "2. Add it to Convex: `npx convex env set ANTHROPIC_API_KEY your-key-here`\n" +
-        "3. For production, set it in the [Convex Dashboard](https://dashboard.convex.dev/)\n\n" +
-        "See the [Convex environment variables docs](https://docs.convex.dev/production/environment-variables) for more details.";
+      const notConfiguredMessage = getNotConfiguredMessage(provider);

       // Save the message to chat history so it appears in the conversation
       await ctx.runMutation(internal.aiChats.addAssistantMessage, {
@@ -172,15 +396,15 @@ export const generateResponse = action({

     // Build messages array from chat history (last 20 messages)
     const recentMessages = chat.messages.slice(-20);
-    const claudeMessages: Array<{
+    const formattedMessages: Array<{
       role: "user" | "assistant";
       content: string | Array<ContentBlockParam>;
     }> = [];

-    // Convert chat messages to Claude format
+    // Convert chat messages to provider-agnostic format
     for (const msg of recentMessages) {
       if (msg.role === "assistant") {
-        claudeMessages.push({
+        formattedMessages.push({
           role: "assistant",
           content: msg.content,
         });
@@ -230,7 +454,7 @@ export const generateResponse = action({
       }
     }

-    claudeMessages.push({
+    formattedMessages.push({
       role: "user",
       content:
         contentParts.length === 1 && contentParts[0].type === "text"
@@ -282,7 +506,7 @@ export const generateResponse = action({
       }
     }

-    claudeMessages.push({
+    formattedMessages.push({
       role: "user",
       content:
         newMessageContent.length === 1 && newMessageContent[0].type === "text"
@@ -290,27 +514,26 @@ export const generateResponse = action({
           : newMessageContent,
     });

-    // Initialize Anthropic client
-    const anthropic = new Anthropic({
-      apiKey,
-    });
+    // Call the appropriate AI provider
+    let assistantMessage: string;

-    // Call Claude API
-    const response = await anthropic.messages.create({
-      model: "claude-sonnet-4-20250514",
-      max_tokens: 2048,
-      system: systemPrompt,
-      messages: claudeMessages,
-    });
-
-    // Extract text content from response
-    const textContent = response.content.find((block) => block.type === "text");
-    if (!textContent || textContent.type !== "text") {
-      throw new Error("No text content in Claude response");
-    }
+    try {
+      switch (provider) {
+        case "anthropic":
+          assistantMessage = await callAnthropicApi(apiKey, selectedModel, systemPrompt, formattedMessages);
+          break;
+        case "openai":
+          assistantMessage = await callOpenAIApi(apiKey, selectedModel, systemPrompt, formattedMessages);
+          break;
+        case "google":
+          assistantMessage = await callGeminiApi(apiKey, selectedModel, systemPrompt, formattedMessages);
+          break;
+      }
+    } catch (error) {
+      const errorMessage = error instanceof Error ? error.message : "Unknown error";
+      assistantMessage = `**Error from ${provider}:** ${errorMessage}`;
+    }

-    const assistantMessage = textContent.text;
-
     // Save the assistant message to the chat
     await ctx.runMutation(internal.aiChats.addAssistantMessage, {
       chatId: args.chatId,
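// ---- Aside, not part of the commit: a minimal sketch of invoking the action
// from the React client (hook wiring and names are illustrative):
import { useAction } from "convex/react";
import { api } from "../convex/_generated/api";
import type { Id } from "../convex/_generated/dataModel";

function useSendMessage(chatId: Id<"aiChats">) {
  const generate = useAction(api.aiChatActions.generateResponse);
  return (text: string, model?: "claude-sonnet-4-20250514" | "gpt-4o" | "gemini-2.0-flash") =>
    generate({ chatId, userMessage: text, model });
}
// ----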
convex/aiChats.ts
@@ -354,3 +354,60 @@ export const getChatsBySession = query({
   },
 });
+
+/**
+ * Save generated image metadata (internal - called from action)
+ */
+export const saveGeneratedImage = internalMutation({
+  args: {
+    sessionId: v.string(),
+    prompt: v.string(),
+    model: v.string(),
+    storageId: v.id("_storage"),
+    mimeType: v.string(),
+  },
+  returns: v.id("aiGeneratedImages"),
+  handler: async (ctx, args) => {
+    const imageId = await ctx.db.insert("aiGeneratedImages", {
+      sessionId: args.sessionId,
+      prompt: args.prompt,
+      model: args.model,
+      storageId: args.storageId,
+      mimeType: args.mimeType,
+      createdAt: Date.now(),
+    });
+
+    return imageId;
+  },
+});
+
+/**
+ * Get recent generated images for a session (internal - called from action)
+ */
+export const getRecentImagesInternal = internalQuery({
+  args: {
+    sessionId: v.string(),
+    limit: v.number(),
+  },
+  returns: v.array(
+    v.object({
+      _id: v.id("aiGeneratedImages"),
+      _creationTime: v.number(),
+      sessionId: v.string(),
+      prompt: v.string(),
+      model: v.string(),
+      storageId: v.id("_storage"),
+      mimeType: v.string(),
+      createdAt: v.number(),
+    })
+  ),
+  handler: async (ctx, args) => {
+    const images = await ctx.db
+      .query("aiGeneratedImages")
+      .withIndex("by_session", (q) => q.eq("sessionId", args.sessionId))
+      .order("desc")
+      .take(args.limit);
+
+    return images;
+  },
+});
convex/aiImageGeneration.ts (new file, 230 lines)
@@ -0,0 +1,230 @@
"use node";
|
||||
|
||||
import { v } from "convex/values";
|
||||
import type { Id } from "./_generated/dataModel";
|
||||
import { action } from "./_generated/server";
|
||||
import { internal } from "./_generated/api";
|
||||
|
||||
// Type for images returned from internal query
|
||||
type GeneratedImageRecord = {
|
||||
_id: Id<"aiGeneratedImages">;
|
||||
_creationTime: number;
|
||||
sessionId: string;
|
||||
prompt: string;
|
||||
model: string;
|
||||
storageId: Id<"_storage">;
|
||||
mimeType: string;
|
||||
createdAt: number;
|
||||
};
|
||||
import { GoogleGenAI } from "@google/genai";
|
||||
|
||||
// Image model validator
|
||||
const imageModelValidator = v.union(
|
||||
v.literal("gemini-2.0-flash-exp-image-generation"),
|
||||
v.literal("imagen-3.0-generate-002")
|
||||
);
|
||||
|
||||
// Aspect ratio validator
|
||||
const aspectRatioValidator = v.union(
|
||||
v.literal("1:1"),
|
||||
v.literal("16:9"),
|
||||
v.literal("9:16"),
|
||||
v.literal("4:3"),
|
||||
v.literal("3:4")
|
||||
);
|
||||
|
||||
/**
 * Generate an image using Gemini's image generation API
 * Stores the result in Convex storage and returns metadata
 */
export const generateImage = action({
  args: {
    sessionId: v.string(),
    prompt: v.string(),
    model: imageModelValidator,
    aspectRatio: v.optional(aspectRatioValidator),
  },
  returns: v.object({
    success: v.boolean(),
    storageId: v.optional(v.id("_storage")),
    url: v.optional(v.string()),
    error: v.optional(v.string()),
  }),
  handler: async (ctx, args) => {
    // Check for API key - return friendly error if not configured
    const apiKey = process.env.GOOGLE_AI_API_KEY;
    if (!apiKey) {
      return {
        success: false,
        error:
          "**Gemini Image Generation is not configured.**\n\n" +
          "To use image generation, add your `GOOGLE_AI_API_KEY` to the Convex environment variables.\n\n" +
          "**Setup steps:**\n" +
          "1. Get an API key from [Google AI Studio](https://aistudio.google.com/apikey)\n" +
          "2. Add it to Convex: `npx convex env set GOOGLE_AI_API_KEY your-key-here`\n" +
          "3. For production, set it in the [Convex Dashboard](https://dashboard.convex.dev/)\n\n" +
          "See the [Convex environment variables docs](https://docs.convex.dev/production/environment-variables) for more details.",
      };
    }

    try {
      const ai = new GoogleGenAI({ apiKey });

      // Configure generation based on model
      let imageBytes: Uint8Array;
      let mimeType = "image/png";

      if (args.model === "gemini-2.0-flash-exp-image-generation") {
        // Gemini Flash experimental image generation
        const response = await ai.models.generateContent({
          model: args.model,
          contents: [{ role: "user", parts: [{ text: args.prompt }] }],
          config: {
            responseModalities: ["image", "text"],
          },
        });

        // Extract image from response
        const parts = response.candidates?.[0]?.content?.parts;
        const imagePart = parts?.find((part) => {
          const inlineData = part.inlineData as { mimeType?: string; data?: string } | undefined;
          return inlineData?.mimeType?.startsWith("image/");
        });

        const inlineData = imagePart?.inlineData as { mimeType?: string; data?: string } | undefined;
        if (!imagePart || !inlineData || !inlineData.mimeType || !inlineData.data) {
          return {
            success: false,
            error: "No image was generated. Try a different prompt.",
          };
        }

        mimeType = inlineData.mimeType;
        imageBytes = base64ToBytes(inlineData.data);
      } else {
        // Imagen 3.0 model
        const response = await ai.models.generateImages({
          model: args.model,
          prompt: args.prompt,
          config: {
            numberOfImages: 1,
            aspectRatio: args.aspectRatio || "1:1",
          },
        });

        const image = response.generatedImages?.[0];
        if (!image || !image.image?.imageBytes) {
          return {
            success: false,
            error: "No image was generated. Try a different prompt.",
          };
        }

        mimeType = "image/png";
        imageBytes = base64ToBytes(image.image.imageBytes);
      }

      // Store the image in Convex storage
      const blob = new Blob([imageBytes as BlobPart], { type: mimeType });
      const storageId = await ctx.storage.store(blob);

      // Get the URL for the stored image
      const url = await ctx.storage.getUrl(storageId);

      // Save metadata to database
      await ctx.runMutation(internal.aiChats.saveGeneratedImage, {
        sessionId: args.sessionId,
        prompt: args.prompt,
        model: args.model,
        storageId,
        mimeType,
      });

      return {
        success: true,
        storageId,
        url: url || undefined,
      };
    } catch (error) {
      const errorMessage = error instanceof Error ? error.message : "Unknown error";

      // Check for specific API errors
      if (errorMessage.includes("quota") || errorMessage.includes("rate")) {
        return {
          success: false,
          error: "**Rate limit exceeded.** Please try again in a few moments.",
        };
      }

      if (errorMessage.includes("safety") || errorMessage.includes("blocked")) {
        return {
          success: false,
          error: "**Image generation blocked.** The prompt may have triggered content safety filters. Try rephrasing your prompt.",
        };
      }

      return {
        success: false,
        error: `**Image generation failed:** ${errorMessage}`,
      };
    }
  },
});
/**
 * Get recent generated images for a session
 */
export const getRecentImages = action({
  args: {
    sessionId: v.string(),
    limit: v.optional(v.number()),
  },
  returns: v.array(
    v.object({
      _id: v.id("aiGeneratedImages"),
      prompt: v.string(),
      model: v.string(),
      url: v.union(v.string(), v.null()),
      createdAt: v.number(),
    })
  ),
  handler: async (ctx, args): Promise<Array<{
    _id: Id<"aiGeneratedImages">;
    prompt: string;
    model: string;
    url: string | null;
    createdAt: number;
  }>> => {
    const images: GeneratedImageRecord[] = await ctx.runQuery(internal.aiChats.getRecentImagesInternal, {
      sessionId: args.sessionId,
      limit: args.limit || 10,
    });

    // Get URLs for each image
    const imagesWithUrls = await Promise.all(
      images.map(async (image: GeneratedImageRecord) => ({
        _id: image._id,
        prompt: image.prompt,
        model: image.model,
        url: await ctx.storage.getUrl(image.storageId),
        createdAt: image.createdAt,
      }))
    );

    return imagesWithUrls;
  },
});
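// ---- Aside, not part of the commit: a sketch of loading the gallery from
// the React client. getRecentImages is an action, so it is fetched
// imperatively rather than subscribed to like a query (names and the
// useAction/api imports match the chat sketch above):
function useGallery(sessionId: string) {
  const getRecent = useAction(api.aiImageGeneration.getRecentImages);
  return () => getRecent({ sessionId, limit: 12 });
}
// ----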
/**
 * Helper to convert base64 string to Uint8Array
 */
function base64ToBytes(base64: string): Uint8Array {
  const binaryString = atob(base64);
  const bytes = new Uint8Array(binaryString.length);
  for (let i = 0; i < binaryString.length; i++) {
    bytes[i] = binaryString.charCodeAt(i);
  }
  return bytes;
}
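// ---- Aside, not part of the commit: this file runs under "use node", so
// Node's Buffer gives an equivalent one-line decode (illustrative alternative):
function base64ToBytesViaBuffer(base64: string): Uint8Array {
  return new Uint8Array(Buffer.from(base64, "base64"));
}
// ----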
convex/http.ts
@@ -5,7 +5,7 @@ import { rssFeed, rssFullFeed } from "./rss";

 const http = httpRouter();

-// Site configuration
+// Site configuration - update these for your site (or run npm run configure)
 const SITE_URL = process.env.SITE_URL || "https://www.markdown.fast";
 const SITE_NAME = "markdown sync framework";

@@ -100,7 +100,7 @@ http.route({
       site: SITE_NAME,
       url: SITE_URL,
       description:
-        "An open-source publishing framework built for AI agents and developers to ship websites, docs, or blogs.. Write markdown, sync from the terminal. Your content is instantly available to browsers, LLMs, and AI agents. Built on Convex and Netlify.",
+        "An open-source publishing framework built for AI agents and developers to ship websites, docs, or blogs. Write markdown, sync from the terminal. Your content is instantly available to browsers, LLMs, and AI agents. Built on Convex and Netlify.",
       posts: posts.map((post: { title: string; slug: string; description: string; date: string; readTime?: string; tags: string[] }) => ({
         title: post.title,
         slug: post.slug,
@@ -223,7 +223,7 @@ http.route({
       site: SITE_NAME,
       url: SITE_URL,
       description:
-        "An open-source publishing framework built for AI agents and developers to ship websites, docs, or blogs.. Write markdown, sync from the terminal. Your content is instantly available to browsers, LLMs, and AI agents. Built on Convex and Netlify.",
+        "An open-source publishing framework built for AI agents and developers to ship websites, docs, or blogs. Write markdown, sync from the terminal. Your content is instantly available to browsers, LLMs, and AI agents. Built on Convex and Netlify.",
       exportedAt: new Date().toISOString(),
       totalPosts: fullPosts.length,
       posts: fullPosts,
convex/rss.ts
@@ -1,11 +1,11 @@
 import { httpAction } from "./_generated/server";
 import { api } from "./_generated/api";

-// Site configuration for RSS feed
+// Site configuration for RSS feed - update these for your site (or run npm run configure)
 const SITE_URL = process.env.SITE_URL || "https://www.markdown.fast";
 const SITE_TITLE = "markdown sync framework";
 const SITE_DESCRIPTION =
-  "An open-source publishing framework built for AI agents and developers to ship websites, docs, or blogs.. Write markdown, sync from the terminal. Your content is instantly available to browsers, LLMs, and AI agents. Built on Convex and Netlify.";
+  "An open-source publishing framework built for AI agents and developers to ship websites, docs, or blogs. Write markdown, sync from the terminal. Your content is instantly available to browsers, LLMs, and AI agents. Built on Convex and Netlify.";

 // Escape XML special characters
 function escapeXml(text: string): string {
convex/schema.ts
@@ -149,6 +149,18 @@ export default defineSchema({
     .index("by_session_and_context", ["sessionId", "contextId"])
     .index("by_session", ["sessionId"]),

+  // AI-generated images from Gemini image generation
+  aiGeneratedImages: defineTable({
+    sessionId: v.string(), // Anonymous session ID from localStorage
+    prompt: v.string(), // User's image prompt
+    model: v.string(), // Model used: "gemini-2.0-flash-exp-image-generation" or "imagen-3.0-generate-002"
+    storageId: v.id("_storage"), // Convex storage ID for the generated image
+    mimeType: v.string(), // Image MIME type: "image/png" or "image/jpeg"
+    createdAt: v.number(), // Timestamp when the image was generated
+  })
+    .index("by_session", ["sessionId"])
+    .index("by_createdAt", ["createdAt"]),

   // Newsletter subscribers table
   // Stores email subscriptions with unsubscribe tokens
   newsletterSubscribers: defineTable({
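// ---- Aside, not part of the commit: by_createdAt is not queried anywhere in
// this commit. A sketch of what it enables, e.g. a pruning step inside a
// hypothetical cleanup mutation (ctx and the 30-day cutoff are illustrative):
const cutoff = Date.now() - 30 * 24 * 60 * 60 * 1000;
const stale = await ctx.db
  .query("aiGeneratedImages")
  .withIndex("by_createdAt", (q) => q.lt("createdAt", cutoff))
  .collect();
for (const image of stale) {
  await ctx.storage.delete(image.storageId);
  await ctx.db.delete(image._id);
}
// ----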