diff --git a/.gitattributes b/.gitattributes
index a6344aac8c09253b3b630fb776ae94478aa0275b..926251bdd5e3d99057da7f8bf0951da17b009f85 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
+OpenAIChatAssistant/generated-icon.png filter=lfs diff=lfs merge=lfs -text
diff --git a/OpenAIChatAssistant/.config/configstore/firebase-tools.json b/OpenAIChatAssistant/.config/configstore/firebase-tools.json
new file mode 100644
index 0000000000000000000000000000000000000000..57848b77ab25ed8044919b6d014aec00411ff19c
--- /dev/null
+++ b/OpenAIChatAssistant/.config/configstore/firebase-tools.json
@@ -0,0 +1,15 @@
+{
+ "motd": {
+ "cloudBuildErrorAfter": 1594252800000,
+ "cloudBuildWarnAfter": 1590019200000,
+ "defaultNode10After": 1594252800000,
+ "minVersion": "3.0.5",
+ "node8DeploysDisabledAfter": 1613390400000,
+ "node8RuntimeDisabledAfter": 1615809600000,
+ "node8WarnAfter": 1600128000000,
+ "fetched": 1746200259916
+ },
+ "usage": true,
+ "analytics-uuid": "39420299-5030-4806-bca2-b74705ec958b",
+ "lastError": 1746202219061
+}
\ No newline at end of file
diff --git a/OpenAIChatAssistant/.config/configstore/update-notifier-firebase-tools.json b/OpenAIChatAssistant/.config/configstore/update-notifier-firebase-tools.json
new file mode 100644
index 0000000000000000000000000000000000000000..2b6879e758278daac6fb5f0c77005abd4ca270f5
--- /dev/null
+++ b/OpenAIChatAssistant/.config/configstore/update-notifier-firebase-tools.json
@@ -0,0 +1,4 @@
+{
+ "optOut": false,
+ "lastUpdateCheck": 1746200259769
+}
\ No newline at end of file
diff --git a/OpenAIChatAssistant/.config/gh/config.yml b/OpenAIChatAssistant/.config/gh/config.yml
new file mode 100644
index 0000000000000000000000000000000000000000..5d14b83293a7868eae45faa2ef5a6a85bf3afc7d
--- /dev/null
+++ b/OpenAIChatAssistant/.config/gh/config.yml
@@ -0,0 +1,17 @@
+# The current version of the config schema
+version: 1
+# What protocol to use when performing git operations. Supported values: ssh, https
+git_protocol: https
+# What editor gh should run when creating issues, pull requests, etc. If blank, will refer to environment.
+editor:
+# When to interactively prompt. This is a global config that cannot be overridden by hostname. Supported values: enabled, disabled
+prompt: enabled
+# A pager program to send command output to, e.g. "less". If blank, will refer to environment. Set the value to "cat" to disable the pager.
+pager:
+# Aliases allow you to create nicknames for gh commands
+aliases:
+ co: pr checkout
+# The path to a unix socket through which send HTTP connections. If blank, HTTP traffic will be handled by net/http.DefaultTransport.
+http_unix_socket:
+# What web browser gh should use when opening URLs. If blank, will refer to environment.
+browser:
diff --git a/OpenAIChatAssistant/.config/gh/hosts.yml b/OpenAIChatAssistant/.config/gh/hosts.yml
new file mode 100644
index 0000000000000000000000000000000000000000..9c1a0cf8bedfb6e250a829246142d26c9716bde4
--- /dev/null
+++ b/OpenAIChatAssistant/.config/gh/hosts.yml
@@ -0,0 +1,7 @@
+github.com:
+ users:
+ Bella288:
+    oauth_token: "<REDACTED_GITHUB_TOKEN>"  # SECURITY: live ghp_ PAT was committed here — revoke it on GitHub immediately
+ git_protocol: https
+  oauth_token: "<REDACTED_GITHUB_TOKEN>"  # SECURITY: live ghp_ PAT was committed here — revoke it on GitHub immediately
+ user: Bella288
diff --git a/OpenAIChatAssistant/.github/workflows/static.yml b/OpenAIChatAssistant/.github/workflows/static.yml
new file mode 100644
index 0000000000000000000000000000000000000000..f2c9e97c91d0ca32002d6eba53cf1981031047bb
--- /dev/null
+++ b/OpenAIChatAssistant/.github/workflows/static.yml
@@ -0,0 +1,43 @@
+# Simple workflow for deploying static content to GitHub Pages
+name: Deploy static content to Pages
+
+on:
+ # Runs on pushes targeting the default branch
+ push:
+ branches: ["main"]
+
+ # Allows you to run this workflow manually from the Actions tab
+ workflow_dispatch:
+
+# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
+permissions:
+ contents: read
+ pages: write
+ id-token: write
+
+# Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued.
+# However, do NOT cancel in-progress runs as we want to allow these production deployments to complete.
+concurrency:
+ group: "pages"
+ cancel-in-progress: false
+
+jobs:
+ # Single deploy job since we're just deploying
+ deploy:
+ environment:
+ name: github-pages
+ url: ${{ steps.deployment.outputs.page_url }}
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+ - name: Setup Pages
+ uses: actions/configure-pages@v5
+ - name: Upload artifact
+ uses: actions/upload-pages-artifact@v3
+ with:
+ # Upload entire repository
+ path: '.'
+ - name: Deploy to GitHub Pages
+ id: deployment
+ uses: actions/deploy-pages@v4
diff --git a/OpenAIChatAssistant/.gitignore b/OpenAIChatAssistant/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..79de8b8ee9bb2315eb3e7c95dae46d86ec050feb
--- /dev/null
+++ b/OpenAIChatAssistant/.gitignore
@@ -0,0 +1,38 @@
+
+# Dependencies
+node_modules
+.pnp
+.pnp.js
+
+# Production
+dist
+build
+
+# Testing
+coverage
+
+# Environment
+.env
+.env.local
+.env.development.local
+.env.test.local
+.env.production.local
+
+# Logs
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+
+# Editor/IDE
+.vscode
+.idea
+*.swp
+*.swo
+
+# OS
+.DS_Store
+Thumbs.db
+
+# Replit specific
+.replit
+.config
diff --git a/OpenAIChatAssistant/.replit b/OpenAIChatAssistant/.replit
new file mode 100644
index 0000000000000000000000000000000000000000..af568fd616e3d460e33b787086f7175b586e38ff
--- /dev/null
+++ b/OpenAIChatAssistant/.replit
@@ -0,0 +1,45 @@
+modules = ["nodejs-20", "web", "postgresql-16", "python-3.11"]
+run = "npm run dev"
+hidden = [".config", ".git", "generated-icon.png", "node_modules", "dist"]
+
+[nix]
+channel = "stable-24_05"
+packages = ["gh"]
+
+[deployment]
+deploymentTarget = "autoscale"
+build = ["sh", "-c", "npm run build"]
+run = ["sh", "-c", "NODE_ENV=production tsx server/index.ts"]
+
+[[ports]]
+localPort = 5000
+externalPort = 80
+
+[[ports]]
+localPort = 9005
+externalPort = 3000
+
+[[ports]]
+localPort = 9006
+externalPort = 3001
+
+[workflows]
+runButton = "Project"
+
+[[workflows.workflow]]
+name = "Project"
+mode = "parallel"
+author = "agent"
+
+[[workflows.workflow.tasks]]
+task = "workflow.run"
+args = "Start application"
+
+[[workflows.workflow]]
+name = "Start application"
+author = "agent"
+
+[[workflows.workflow.tasks]]
+task = "shell.exec"
+args = "npm run dev"
+waitForPort = 5000
diff --git a/OpenAIChatAssistant/OpenChat/LICENSE b/OpenAIChatAssistant/OpenChat/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..394512cf6b7f282adb4993c57a33626091dbbdb7
--- /dev/null
+++ b/OpenAIChatAssistant/OpenChat/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2025 Bella Lawrence
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/OpenAIChatAssistant/OpenChatAI/.gitattributes b/OpenAIChatAssistant/OpenChatAI/.gitattributes
new file mode 100644
index 0000000000000000000000000000000000000000..a6344aac8c09253b3b630fb776ae94478aa0275b
--- /dev/null
+++ b/OpenAIChatAssistant/OpenChatAI/.gitattributes
@@ -0,0 +1,35 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
diff --git a/OpenAIChatAssistant/OpenChatAI/README.md b/OpenAIChatAssistant/OpenChatAI/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..644df25a71a90fc1ac75f8d059852fb570e8c95e
--- /dev/null
+++ b/OpenAIChatAssistant/OpenChatAI/README.md
@@ -0,0 +1,10 @@
+---
+title: OpenChatAI
+emoji: 🦀
+colorFrom: purple
+colorTo: blue
+sdk: static
+pinned: false
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/OpenAIChatAssistant/OpenChatAI/index.html b/OpenAIChatAssistant/OpenChatAI/index.html
new file mode 100644
index 0000000000000000000000000000000000000000..b0c4b3666032a737f3903db53e6a8a9272483e28
--- /dev/null
+++ b/OpenAIChatAssistant/OpenChatAI/index.html
@@ -0,0 +1,19 @@
+
+
+
+
+
+ My static Space
+
+
+
+
+
Welcome to your static Space!
+
You can modify this app directly by editing index.html in the Files and versions tab.
+
+ Also don't forget to check the
+ Spaces documentation.
+
+
+
+
diff --git a/OpenAIChatAssistant/OpenChatAI/style.css b/OpenAIChatAssistant/OpenChatAI/style.css
new file mode 100644
index 0000000000000000000000000000000000000000..114adf441e9032febb46bc056b2a8bb651075f0d
--- /dev/null
+++ b/OpenAIChatAssistant/OpenChatAI/style.css
@@ -0,0 +1,28 @@
+body {
+ padding: 2rem;
+ font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif;
+}
+
+h1 {
+ font-size: 16px;
+ margin-top: 0;
+}
+
+p {
+ color: rgb(107, 114, 128);
+ font-size: 15px;
+ margin-bottom: 10px;
+ margin-top: 5px;
+}
+
+.card {
+ max-width: 620px;
+ margin: 0 auto;
+ padding: 16px;
+ border: 1px solid lightgray;
+ border-radius: 16px;
+}
+
+.card p:last-child {
+ margin-bottom: 0;
+}
diff --git a/OpenAIChatAssistant/README.md b/OpenAIChatAssistant/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..12ee7052c729a2d3674fb087e3e2bc19fbb16a4a
--- /dev/null
+++ b/OpenAIChatAssistant/README.md
@@ -0,0 +1 @@
+# OpenChat
diff --git a/OpenAIChatAssistant/attached_assets/content-1746193714894.md b/OpenAIChatAssistant/attached_assets/content-1746193714894.md
new file mode 100644
index 0000000000000000000000000000000000000000..d2466a27d36bd861c853e715ebdd7ceba6bf494e
--- /dev/null
+++ b/OpenAIChatAssistant/attached_assets/content-1746193714894.md
@@ -0,0 +1,7 @@
+[Skip to content](https://vercel.com/login?next=%2Fbella288s-projects%2Fchatopen%2FE9m12YJ7cJYv5DZawZpXn98haWBJ#geist-skip-nav)
+
+# Log in to Vercel
+
+Continue withGitHubContinue withGitLabContinue withBitbucket
+
+[Don't have an account? Sign Up](https://vercel.com/signup?next=%2Fbella288s-projects%2Fchatopen%2FE9m12YJ7cJYv5DZawZpXn98haWBJ)
\ No newline at end of file
diff --git a/OpenAIChatAssistant/attached_assets/content-1746193784985.md b/OpenAIChatAssistant/attached_assets/content-1746193784985.md
new file mode 100644
index 0000000000000000000000000000000000000000..02bbfa34ae9a12923a5267f446a1147cefc0008b
--- /dev/null
+++ b/OpenAIChatAssistant/attached_assets/content-1746193784985.md
@@ -0,0 +1,1467 @@
+```
+var __defProp = Object.defineProperty;
+var __require = /* @__PURE__ */ ((x) => typeof require !== "undefined" ? require : typeof Proxy !== "undefined" ? new Proxy(x, {
+ get: (a, b) => (typeof require !== "undefined" ? require : a)[b]
+}) : x)(function(x) {
+ if (typeof require !== "undefined") return require.apply(this, arguments);
+ throw Error('Dynamic require of "' + x + '" is not supported');
+});
+var __export = (target, all) => {
+ for (var name in all)
+ __defProp(target, name, { get: all[name], enumerable: true });
+};
+
+// server/index.ts
+import express2 from "express";
+
+// server/routes.ts
+import { createServer } from "http";
+
+// shared/schema.ts
+var schema_exports = {};
+__export(schema_exports, {
+ conversationSchema: () => conversationSchema,
+ conversations: () => conversations,
+ insertConversationSchema: () => insertConversationSchema,
+ insertMessageSchema: () => insertMessageSchema,
+ insertUserSchema: () => insertUserSchema,
+ messageRoleSchema: () => messageRoleSchema,
+ messageSchema: () => messageSchema,
+ messages: () => messages,
+ personalityTypeSchema: () => personalityTypeSchema,
+ updateUserProfileSchema: () => updateUserProfileSchema,
+ users: () => users
+});
+import { pgTable, text, serial, integer, timestamp } from "drizzle-orm/pg-core";
+import { createInsertSchema } from "drizzle-zod";
+import { z } from "zod";
+var users = pgTable("users", {
+ id: serial("id").primaryKey(),
+ username: text("username").notNull().unique(),
+ password: text("password").notNull(),
+ fullName: text("full_name"),
+ location: text("location"),
+ interests: text("interests").array(),
+ profession: text("profession"),
+ pets: text("pets"),
+ additionalInfo: text("additional_info"),
+ systemContext: text("system_context")
+});
+var insertUserSchema = createInsertSchema(users).pick({
+ username: true,
+ password: true
+});
+var updateUserProfileSchema = createInsertSchema(users).pick({
+ fullName: true,
+ location: true,
+ interests: true,
+ profession: true,
+ pets: true,
+ additionalInfo: true,
+ systemContext: true
+}).partial();
+var messages = pgTable("messages", {
+ id: serial("id").primaryKey(),
+ content: text("content").notNull(),
+ role: text("role").notNull(),
+ // 'user' or 'assistant'
+ conversationId: text("conversation_id").notNull(),
+ createdAt: timestamp("created_at").defaultNow().notNull()
+});
+var insertMessageSchema = createInsertSchema(messages).pick({
+ content: true,
+ role: true,
+ conversationId: true
+});
+var personalityTypeSchema = z.enum([\
+ "default",\
+ "professional",\
+ "friendly",\
+ "expert",\
+ "poetic",\
+ "concise"\
+]);
+var conversations = pgTable("conversations", {
+ id: text("id").primaryKey(),
+ title: text("title").notNull(),
+ createdAt: timestamp("created_at").defaultNow().notNull(),
+ personality: text("personality").default("default").notNull(),
+ userId: integer("user_id").references(() => users.id)
+});
+var insertConversationSchema = createInsertSchema(conversations).pick({
+ id: true,
+ title: true,
+ personality: true,
+ userId: true
+});
+var messageRoleSchema = z.enum(["user", "assistant", "system"]);
+var messageSchema = z.object({
+ content: z.string(),
+ role: messageRoleSchema
+});
+var conversationSchema = z.object({
+ messages: z.array(messageSchema),
+ personality: personalityTypeSchema.optional().default("default"),
+ conversationId: z.string().optional(),
+ userId: z.number().optional()
+});
+
+// server/db.ts
+import { Pool, neonConfig } from "@neondatabase/serverless";
+import { drizzle } from "drizzle-orm/neon-serverless";
+import ws from "ws";
+neonConfig.webSocketConstructor = ws;
+if (!process.env.DATABASE_URL) {
+ throw new Error(
+ "DATABASE_URL must be set. Did you forget to provision a database?"
+ );
+}
+var pool = new Pool({ connectionString: process.env.DATABASE_URL });
+var db = drizzle({ client: pool, schema: schema_exports });
+
+// server/storage.ts
+import { eq, desc, asc } from "drizzle-orm";
+import { nanoid } from "nanoid";
+import session from "express-session";
+import connectPgSimple from "connect-pg-simple";
+var DatabaseStorage = class {
+ sessionStore;
+ constructor() {
+ const PgStore = connectPgSimple(session);
+ this.sessionStore = new PgStore({
+ pool,
+ createTableIfMissing: true
+ });
+ this.initializeDefaultConversation();
+ }
+ async initializeDefaultConversation() {
+ try {
+ const defaultConversation = await this.getConversation("default");
+ if (!defaultConversation) {
+ await this.createConversation({
+ id: "default",
+ title: "New Conversation",
+ personality: "general"
+ });
+ }
+ } catch (error) {
+ console.error("Error initializing default conversation:", error);
+ }
+ }
+ // Message operations
+ async getMessages(conversationId) {
+ return db.select().from(messages).where(eq(messages.conversationId, conversationId)).orderBy(asc(messages.createdAt));
+ }
+ async createMessage(insertMessage) {
+ const [newMessage] = await db.insert(messages).values({
+ ...insertMessage,
+ createdAt: /* @__PURE__ */ new Date()
+ }).returning();
+ return newMessage;
+ }
+ async deleteMessages(conversationId) {
+ await db.delete(messages).where(eq(messages.conversationId, conversationId));
+ }
+ // Conversation operations
+ async getConversation(id) {
+ const [conversation] = await db.select().from(conversations).where(eq(conversations.id, id));
+ return conversation;
+ }
+ async getConversations() {
+ return db.select().from(conversations).orderBy(desc(conversations.createdAt));
+ }
+ async createConversation(conversation) {
+ if (conversation.id) {
+ const existingConversation = await this.getConversation(conversation.id);
+ if (existingConversation) {
+ const [updatedConversation] = await db.update(conversations).set({
+ title: conversation.title,
+ personality: conversation.personality || "general",
+ // Only update userId if provided
+ ...conversation.userId && { userId: conversation.userId }
+ }).where(eq(conversations.id, conversation.id)).returning();
+ return updatedConversation;
+ }
+ }
+ const [newConversation] = await db.insert(conversations).values({
+ id: conversation.id || nanoid(),
+ title: conversation.title,
+ personality: conversation.personality || "general",
+ userId: conversation.userId,
+ // Include the user ID (can be null for unassociated conversations)
+ createdAt: /* @__PURE__ */ new Date()
+ }).returning();
+ return newConversation;
+ }
+ async deleteConversation(id) {
+ if (id === "default") {
+ return false;
+ }
+ try {
+ await this.deleteMessages(id);
+ const [deletedConversation] = await db.delete(conversations).where(eq(conversations.id, id)).returning();
+ return !!deletedConversation;
+ } catch (error) {
+ console.error("Error deleting conversation:", error);
+ return false;
+ }
+ }
+ async updateConversationPersonality(id, personality) {
+ const [updatedConversation] = await db.update(conversations).set({ personality }).where(eq(conversations.id, id)).returning();
+ return updatedConversation;
+ }
+ async updateConversationTitle(id, title) {
+ const [updatedConversation] = await db.update(conversations).set({ title }).where(eq(conversations.id, id)).returning();
+ return updatedConversation;
+ }
+ // User operations
+ async getUserProfile(id) {
+ const [user] = await db.select().from(users).where(eq(users.id, id));
+ return user;
+ }
+ async getUserByUsername(username) {
+ const [user] = await db.select().from(users).where(eq(users.username, username));
+ return user;
+ }
+ async createUser(userData) {
+ const [user] = await db.insert(users).values(userData).returning();
+ return user;
+ }
+ async updateUserProfile(id, profile) {
+ const { password, ...updateData } = profile;
+ const [updatedUser] = await db.update(users).set(updateData).where(eq(users.id, id)).returning();
+ return updatedUser;
+ }
+ // Filter conversations by user ID
+ async getUserConversations(userId) {
+ return db.select().from(conversations).where(eq(conversations.userId, userId)).orderBy(desc(conversations.createdAt));
+ }
+};
+var storage = new DatabaseStorage();
+
+// server/auth.ts
+import passport from "passport";
+
+// server/session.ts
+import session2 from "express-session";
+import connectPg from "connect-pg-simple";
+var PostgresSessionStore = connectPg(session2);
+var sessionStore = new PostgresSessionStore({
+ pool,
+ createTableIfMissing: true,
+ tableName: "session"
+ // Default table name
+});
+function setupSession(app2) {
+ const sessionSecret = process.env.SESSION_SECRET || __require("crypto").randomBytes(32).toString("hex");
+ if (!process.env.SESSION_SECRET) {
+ console.warn("SESSION_SECRET not set in environment, using a random value");
+ process.env.SESSION_SECRET = sessionSecret;
+ }
+ const sessionConfig = {
+ store: sessionStore,
+ secret: sessionSecret,
+ resave: false,
+ saveUninitialized: false,
+ cookie: {
+ secure: process.env.NODE_ENV === "production",
+ // Use secure cookies in production
+ httpOnly: true,
+ maxAge: 1e3 * 60 * 60 * 24 * 7
+ // 1 week
+ }
+ };
+ if (process.env.NODE_ENV === "production") {
+ app2.set("trust proxy", 1);
+ if (sessionConfig.cookie) {
+ sessionConfig.cookie.secure = true;
+ sessionConfig.cookie.sameSite = "none";
+ }
+ }
+ app2.use(session2(sessionConfig));
+}
+
+// server/auth.ts
+function setupAuth(app2) {
+ setupSession(app2);
+ app2.use(passport.initialize());
+ app2.use(passport.session());
+ passport.serializeUser((user, done) => {
+ done(null, user.id);
+ });
+ passport.deserializeUser(async (id, done) => {
+ try {
+ const user = await storage.getUserProfile(id);
+ done(null, user);
+ } catch (error) {
+ done(error);
+ }
+ });
+ app2.get("/api/auth/replit", async (req, res) => {
+ try {
+ const userId = req.headers["x-replit-user-id"];
+ const username = req.headers["x-replit-user-name"];
+ const profileImage = req.headers["x-replit-user-profile-image"];
+ const roles = req.headers["x-replit-user-roles"];
+ const teams = req.headers["x-replit-user-teams"];
+ if (!userId || !username) {
+ return res.status(401).json({ message: "Not authenticated with Replit" });
+ }
+ let user = await storage.getUserByUsername(username);
+ if (!user) {
+ user = await storage.createUser({
+ username,
+ password: userId,
+ system_context: `A chat with ${username}. User roles: ${roles || "none"}. Teams: ${teams || "none"}.`,
+ full_name: username,
+ interests: roles ? roles.split(",") : [],
+ location: "",
+ // Add default empty values for profile fields
+ profession: "",
+ pets: ""
+ });
+ } else {
+ user = await storage.updateUserProfile(user.id, {
+ full_name: username,
+ interests: roles ? roles.split(",") : [],
+ system_context: `A chat with ${username}. User roles: ${roles || "none"}. Teams: ${teams || "none"}.`
+ });
+ }
+ req.login(user, (err) => {
+ if (err) {
+ return res.status(500).json({ message: "Failed to login" });
+ }
+ const { password, ...userWithoutPassword } = user;
+ res.json(userWithoutPassword);
+ });
+ } catch (error) {
+ console.error("Replit auth error:", error);
+ res.status(500).json({ message: "Authentication failed" });
+ }
+ });
+ app2.get("/api/user", (req, res) => {
+ if (!req.isAuthenticated()) {
+ return res.status(401).json({ message: "Not authenticated" });
+ }
+ const { password, ...userWithoutPassword } = req.user;
+ res.json(userWithoutPassword);
+ });
+ app2.post("/api/logout", (req, res) => {
+ if (req.session) {
+ req.session.destroy((err) => {
+ if (err) {
+ console.error("Session destruction error:", err);
+ }
+ res.clearCookie("connect.sid");
+ res.status(200).json({ success: true });
+ });
+ } else {
+ res.status(200).json({ success: true });
+ }
+ });
+ app2.patch("/api/user/profile", async (req, res, next) => {
+ if (!req.isAuthenticated()) {
+ return res.status(401).json({ message: "Not authenticated" });
+ }
+ try {
+ const userId = req.user.id;
+ const updatedUser = await storage.updateUserProfile(userId, req.body);
+ if (!updatedUser) {
+ return res.status(404).json({ message: "User not found" });
+ }
+ const { password, ...userWithoutPassword } = updatedUser;
+ res.json(userWithoutPassword);
+ } catch (error) {
+ next(error);
+ }
+ });
+}
+
+// server/openai.ts
+import OpenAI from "openai";
+
+// server/fallbackChat.ts
+import { InferenceClient } from "@huggingface/inference";
+var novitaApiKey = process.env.NOVITA_API_KEY || "";
+var huggingFaceClient = new InferenceClient(novitaApiKey);
+var QWEN_MODEL = "Qwen/Qwen3-235B-A22B";
+var MAX_TOKENS = 512;
+var QWEN_SYSTEM_MESSAGE = `You are a helpful AI assistant. Provide clear, concise responses without showing your thinking process.
+Do not use XML tags like or in your responses.
+Keep your responses informative, friendly, and to the point.`;
+function convertMessages(messages2, userSystemContext) {
+ let systemContent = QWEN_SYSTEM_MESSAGE;
+ if (userSystemContext) {
+ const getMatchValue = (match) => {
+ if (match && match[1]) {
+ return match[1].trim();
+ }
+ return null;
+ };
+ const nameMatches = [\
+ getMatchValue(userSystemContext.match(/name(?:\s+is)?(?:\s*:\s*|\s+)([\w\s.']+)/i)),\
+ getMatchValue(userSystemContext.match(/My name is ([\w\s.']+)/i)),\
+ getMatchValue(userSystemContext.match(/I am ([\w\s.']+)/i)),\
+ getMatchValue(userSystemContext.match(/I'm ([\w\s.']+)/i))\
+ ].filter(Boolean);
+ const locationMatches = [\
+ getMatchValue(userSystemContext.match(/location(?:\s+is)?(?:\s*:\s*|\s+)([\w\s.,]+)/i)),\
+ getMatchValue(userSystemContext.match(/(?:I live|I'm from|I reside) in ([\w\s.,]+)/i)),\
+ getMatchValue(userSystemContext.match(/from ([\w\s.,]+)/i))\
+ ].filter(Boolean);
+ const interestsMatches = [\
+ getMatchValue(userSystemContext.match(/interests(?:\s+are)?(?:\s*:\s*|\s+)([\w\s,.;{}]+)/i)),\
+ getMatchValue(userSystemContext.match(/(?:I like|I enjoy|I love) ([\w\s,.;]+)/i))\
+ ].filter(Boolean);
+ const professionMatches = [\
+ getMatchValue(userSystemContext.match(/profession(?:\s+is)?(?:\s*:\s*|\s+)([\w\s&,.-]+)/i)),\
+ getMatchValue(userSystemContext.match(/(?:I work as|I am a|I'm a) ([\w\s&,.-]+)/i)),\
+ getMatchValue(userSystemContext.match(/(?:I'm|I am) (?:a|an) ([\w\s&,.-]+)/i))\
+ ].filter(Boolean);
+ const petsMatches = [\
+ getMatchValue(userSystemContext.match(/pets?(?:\s+are)?(?:\s*:\s*|\s+)([\w\s,.()]+)/i)),\
+ getMatchValue(userSystemContext.match(/(?:I have|I own) (?:a pet|pets|a) ([\w\s,.()]+)/i))\
+ ].filter(Boolean);
+ const userName = nameMatches.length > 0 ? nameMatches[0] : null;
+ const userLocation = locationMatches.length > 0 ? locationMatches[0] : null;
+ const userInterests = interestsMatches.length > 0 ? interestsMatches[0] : null;
+ const userProfession = professionMatches.length > 0 ? professionMatches[0] : null;
+ const userPets = petsMatches.length > 0 ? petsMatches[0] : null;
+ let bellaInfo = "";
+ if (userSystemContext.includes("Bella Lawrence") || userName && userName.includes("Bella")) {
+ bellaInfo = `
+- Your name is Bella Lawrence
+- You live in Fort Wayne, Indiana
+- Your interests include Python
+- Your profession is Student
+- You have pets named Barley (cat), Pebbles (dog), and Buttercup (rabbit)
+`;
+ console.log("Using Bella's profile information directly");
+ }
+ let userInfo = "";
+ if (userName) userInfo += `- Your name is ${userName}
+`;
+ if (userLocation) userInfo += `- You live in ${userLocation}
+`;
+ if (userInterests) userInfo += `- Your interests include ${userInterests}
+`;
+ if (userProfession) userInfo += `- Your profession is ${userProfession}
+`;
+ if (userPets) userInfo += `- You have pets: ${userPets}
+`;
+ const profileInfo = bellaInfo || userInfo || userSystemContext;
+ systemContent = `${QWEN_SYSTEM_MESSAGE}
+
+IMPORTANT: The following is personal information about the user you are talking with.
+You MUST remember these details and use them in your responses:
+
+${profileInfo}
+
+INSTRUCTIONS:
+1. When asked "What's my name?" respond with the name listed above.
+2. When asked about name, location, interests, profession, or pets, use EXACTLY the information above.
+3. NEVER say you don't know or can't access this information - it's right above!
+4. Answer as if you've always known this information - don't say "according to your profile" or similar phrases.
+
+REMEMBER: You already know the user's name and details. ALWAYS use this information when asked.`;
+ const hasNameQuestion = messages2.some((msg) => {
+ const content = msg.content.toLowerCase();
+ return content.includes("what's my name") || content.includes("what is my name") || content.includes("do you know my name") || content.includes("who am i");
+ });
+ if (hasNameQuestion) {
+ console.log("Detected name question - ensuring proper response");
+ systemContent += `
+
+IMPORTANT REMINDER: The user has asked about their name. Their name is ${userName || "Bella Lawrence"}. DO NOT say you don't know their name.`;
+ }
+ console.log("Including enhanced user system context in fallback chat");
+ if (userName) console.log(`Extracted user name: ${userName}`);
+ if (userLocation) console.log(`Extracted user location: ${userLocation}`);
+ }
+ const formattedMessages = [{\
+ role: "system",\
+ content: systemContent\
+ }];
+ const compatibleMessages = messages2.filter((msg) => msg.role !== "system");
+ if (compatibleMessages.length === 0) {
+ formattedMessages.push({
+ role: "user",
+ content: "Hello, can you introduce yourself?"
+ });
+ return formattedMessages;
+ }
+ const lastMessage = compatibleMessages[compatibleMessages.length - 1];
+ if (lastMessage.role !== "user") {
+ compatibleMessages.push({
+ role: "user",
+ content: "Can you help me with this?"
+ });
+ }
+ formattedMessages.push(...compatibleMessages.map((msg) => ({
+ role: msg.role,
+ content: msg.content
+ })));
+ return formattedMessages;
+}
+async function generateFallbackResponse(messages2, userSystemContext) {
+ try {
+ console.log("Generating fallback response using Qwen model");
+ const formattedMessages = convertMessages(messages2, userSystemContext);
+ const response = await huggingFaceClient.chatCompletion({
+ provider: "novita",
+ model: QWEN_MODEL,
+ messages: formattedMessages,
+ max_tokens: MAX_TOKENS
+ });
+ if (response.choices && response.choices.length > 0 && response.choices[0].message) {
+ let content = response.choices[0].message.content || "";
+ content = content.replace(/[\s\S]*?<\/think>/g, "");
+ content = content.replace(/<[^>]*>/g, "");
+ content = content.replace(/^\s+|\s+$/g, "");
+ content = content.replace(/\n{3,}/g, "\n\n");
+ if (!content.trim()) {
+ content = "I'm sorry, I couldn't generate a proper response.";
+ }
+ return `${content}
+
+(Note: I'm currently operating in fallback mode using the Qwen model because the OpenAI API is unavailable)`;
+ } else {
+ throw new Error("No valid response from Qwen model");
+ }
+ } catch (error) {
+ console.error("Error generating response with Qwen model:", error);
+ return "I apologize, but I'm currently experiencing technical difficulties with both primary and fallback AI services. Please try again later.";
+ }
+}
// Report whether the OpenAI API looks usable: a plausibly-shaped key must be
// present in the environment (prefix "sk-", more than 20 chars). This is a
// local shape check only — no network request is made.
async function canUseOpenAI() {
  try {
    const key = process.env.OPENAI_API_KEY;
    if (!key) {
      return false;
    }
    return key.startsWith("sk-") && key.length > 20;
  } catch (error) {
    console.error("Error checking OpenAI API availability:", error);
    return false;
  }
}
// Report whether the Qwen fallback can be used, i.e. a non-empty Novita API
// key was loaded at module init. No network request is made.
async function canUseQwen() {
  try {
    if (!novitaApiKey) {
      return false;
    }
    return novitaApiKey.length > 0;
  } catch (error) {
    console.error("Error checking Qwen availability:", error);
    return false;
  }
}
+
// server/openai.ts
// Primary chat backend configuration and shared mutable state.
var OPENAI_MODEL = "gpt-4o";
// The key may be undefined here; canUseOpenAI() validates it before any call.
var openai = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY
});
// Base system prompt; generateChatResponse copies and extends it per user —
// it must never be mutated directly.
var systemMessage = {
  role: "system",
  content: `You are a helpful AI assistant. Provide concise and accurate responses to user queries.
 Your goal is to be informative and educational. Use clear language and provide examples where appropriate.
 Always be respectful and considerate in your responses.`
};
// Which backend served the most recent request: "openai" | "qwen" | "unavailable".
var currentModel = "openai";
// Main chat entry point: prefer OpenAI, transparently fall back to Qwen.
// Side effect: updates module-level `currentModel` so the status endpoints can
// report the active backend.
//
// messages2         - conversation history, forwarded to the model verbatim.
// userSystemContext - optional free-text profile the user saved; parsed
//                     heuristically and appended to the system prompt.
// Returns the assistant reply text; throws a user-readable Error when neither
// backend can serve the request.
async function generateChatResponse(messages2, userSystemContext) {
  try {
    const openAIAvailable = await canUseOpenAI();
    if (!openAIAvailable) {
      // OpenAI key missing or malformed: try the Qwen fallback instead.
      const qwenAvailable = await canUseQwen();
      if (qwenAvailable) {
        if (currentModel !== "qwen") {
          console.log("Switching to Qwen model as fallback");
          currentModel = "qwen";
        }
        return await generateFallbackResponse(messages2, userSystemContext);
      } else {
        currentModel = "unavailable";
        throw new Error("Both OpenAI and Qwen models are unavailable. Please check your API keys.");
      }
    }
    if (currentModel !== "openai") {
      console.log("Using OpenAI model");
      currentModel = "openai";
    }
    // Shallow copy so the shared systemMessage template is never mutated.
    let enhancedSystemMessage = { ...systemMessage };
    if (userSystemContext) {
      // Best-effort field extraction from the free-text context.
      // NOTE(review): these regexes are heuristic; anything they miss is still
      // surfaced via the raw `userSystemContext` fallback below.
      const nameMatch = userSystemContext.match(/name(?:\s+is)?(?:\s*:\s*|\s+)([\w\s.']+)/i);
      const locationMatch = userSystemContext.match(/location(?:\s+is)?(?:\s*:\s*|\s+)([\w\s.,]+)/i);
      const interestsMatch = userSystemContext.match(/interests(?:\s+are)?(?:\s*:\s*|\s+)([\w\s,.;]+)/i);
      const professionMatch = userSystemContext.match(/profession(?:\s+is)?(?:\s*:\s*|\s+)([\w\s&,.-]+)/i);
      const petsMatch = userSystemContext.match(/pets?(?:\s+are)?(?:\s*:\s*|\s+)([\w\s,.]+)/i);
      let userInfo = "";
      if (nameMatch) userInfo += `- Name: ${nameMatch[1].trim()}
`;
      if (locationMatch) userInfo += `- Location: ${locationMatch[1].trim()}
`;
      if (interestsMatch) userInfo += `- Interests: ${interestsMatch[1].trim()}
`;
      if (professionMatch) userInfo += `- Profession: ${professionMatch[1].trim()}
`;
      if (petsMatch) userInfo += `- Pets: ${petsMatch[1].trim()}
`;
      enhancedSystemMessage.content = `${systemMessage.content}

USER PROFILE INFORMATION:
${userInfo || userSystemContext}

IMPORTANT: You must remember these user details and incorporate them naturally in your responses when relevant.
When the user asks about their name, location, interests, profession, or pets, always answer using the information above.
Never say you don't know their personal details if they're listed above. Answer as if you already know this information.

Original system context provided by user:
${userSystemContext}`;
      console.log("Including enhanced user system context in OpenAI chat");
    }
    const conversationWithSystem = [enhancedSystemMessage, ...messages2];
    const response = await openai.chat.completions.create({
      model: OPENAI_MODEL,
      messages: conversationWithSystem,
      temperature: 0.7,
      max_tokens: 1e3
    });
    return response.choices[0].message.content || "I'm sorry, I couldn't generate a response.";
  } catch (error) {
    console.error("AI Model error:", error);
    // A live OpenAI request failed (quota, auth, network): try Qwen once.
    if (currentModel === "openai") {
      console.log("OpenAI API error, attempting to use Qwen fallback");
      try {
        const qwenAvailable = await canUseQwen();
        if (qwenAvailable) {
          currentModel = "qwen";
          return await generateFallbackResponse(messages2, userSystemContext);
        } else {
          currentModel = "unavailable";
        }
      } catch (fallbackError) {
        console.error("Qwen fallback also failed:", fallbackError);
        currentModel = "unavailable";
      }
    }
    // Translate transport-level failures into user-readable messages.
    // NOTE(review): this assumes an axios-style error shape (error.response /
    // error.request) — confirm it matches the installed OpenAI SDK version.
    if (error.response) {
      const status = error.response.status;
      if (status === 429) {
        if (error.code === "insufficient_quota") {
          throw new Error("OpenAI API quota exceeded. Your account may need a valid payment method or has reached its limit.");
        } else {
          throw new Error("Rate limit exceeded. Please try again later.");
        }
      } else if (status === 401) {
        throw new Error("API key is invalid or expired.");
      } else {
        throw new Error(`OpenAI API error: ${error.response?.data?.error?.message || "Unknown error"}`);
      }
    } else if (error.request) {
      throw new Error("No response received from AI service. Please check your internet connection.");
    } else {
      throw new Error(`Error: ${error.message}`);
    }
  }
}
+
+// server/personalities.ts
// Preset assistant personalities selectable per conversation.
// Each entry: display name, short UI description, the system prompt injected
// into the model call, a sampling temperature (lower = more deterministic),
// and a UI emoji (kept as \u escape sequences by the bundler).
var personalityConfigs = {
  default: {
    name: "Balanced",
    description: "A helpful, balanced AI assistant that provides informative responses.",
    systemPrompt: `You are a helpful AI assistant. Provide concise and accurate responses to user queries.
 Your goal is to be informative and educational. Use clear language and provide examples where appropriate.
 Always be respectful and considerate in your responses.`,
    temperature: 0.7,
    emoji: "\u{1F916}"
  },
  professional: {
    name: "Professional",
    description: "Formal and business-oriented with precise, structured responses.",
    systemPrompt: `You are a professional AI assistant with expertise in business communication.
 Provide well-structured, formal responses that are precise and to the point.
 Use professional terminology where appropriate, but remain accessible.
 Organize complex information in a clear, logical manner.
 Maintain a courteous and professional tone at all times.`,
    temperature: 0.5,
    emoji: "\u{1F454}"
  },
  friendly: {
    name: "Friendly",
    description: "Casual, warm and conversational with a touch of humor.",
    systemPrompt: `You are a friendly and approachable AI assistant.
 Communicate in a warm, conversational tone as if chatting with a friend.
 Feel free to use casual language, contractions, and the occasional appropriate humor.
 Be encouraging and positive in your responses.
 Make complex topics feel accessible and less intimidating.`,
    temperature: 0.8,
    emoji: "\u{1F60A}"
  },
  expert: {
    name: "Expert",
    description: "Technical and detailed with in-depth knowledge and explanations.",
    systemPrompt: `You are an expert-level AI assistant with comprehensive technical knowledge.
 Provide detailed, nuanced responses that demonstrate expert-level understanding.
 Don't hesitate to use technical terminology and include background context where helpful.
 When appropriate, explain underlying principles and concepts.
 Present multiple perspectives or approaches when relevant.`,
    temperature: 0.4,
    emoji: "\u{1F468}\u200D\u{1F52C}"
  },
  poetic: {
    name: "Poetic",
    description: "Creative and eloquent with a focus on beautiful language.",
    systemPrompt: `You are a poetic and creative AI assistant with a love for beautiful language.
 Express ideas with eloquence, metaphor, and creative flair.
 Draw connections to literature, art, and the human experience.
 Use rich imagery and evocative language in your responses.
 Even when explaining factual information, find ways to make your language sing.`,
    temperature: 0.9,
    emoji: "\u{1F3AD}"
  },
  concise: {
    name: "Concise",
    description: "Brief and to-the-point with no unnecessary words.",
    systemPrompt: `You are a concise AI assistant that values brevity and clarity.
 Provide the shortest possible response that fully answers the query.
 Use bullet points where appropriate.
 Eliminate unnecessary words, phrases, and preambles.
 Focus only on the most essential information.`,
    temperature: 0.5,
    emoji: "\u{1F4CB}"
  }
};
// Look up a personality's config by id; unknown or missing ids fall back to
// the "default" (Balanced) personality.
function getPersonalityConfig(personality) {
  const config = personalityConfigs[personality];
  return config !== undefined ? config : personalityConfigs.default;
}
+
+// server/flux.ts
+import { z as z2 } from "zod";
// Request-body schema for POST /api/generate-image.
// Dimensions are clamped to 256-1024 px; defaults produce a 512x512 image.
var imageGenerationSchema = z2.object({
  prompt: z2.string().min(1).max(1e3),
  // `seed` is ignored whenever randomize_seed (the default) is true.
  seed: z2.number().optional().default(0),
  randomize_seed: z2.boolean().optional().default(true),
  width: z2.number().min(256).max(1024).optional().default(512),
  height: z2.number().min(256).max(1024).optional().default(512),
  guidance_scale: z2.number().min(0).max(20).optional().default(7.5),
  num_inference_steps: z2.number().min(1).max(50).optional().default(20)
});
// Generate an image with the FLUX.1-dev model via Replicate's HTTP API.
// Starts a prediction, then polls its status once per second (max 30 tries,
// so roughly a 30 s budget). `params` is a parsed imageGenerationSchema value.
// Returns the output image URL; throws on missing key, API error, generation
// failure, or polling timeout.
async function generateImage(params) {
  try {
    const apiKey = process.env.REPLICATE_API_KEY;
    if (!apiKey) {
      throw new Error("REPLICATE_API_KEY is not set in environment variables");
    }
    const inputData = {
      input: {
        prompt: params.prompt,
        width: params.width,
        height: params.height,
        // randomize_seed wins over the explicit seed when set.
        seed: params.randomize_seed ? Math.floor(Math.random() * 1e6) : params.seed,
        guidance_scale: params.guidance_scale,
        num_inference_steps: params.num_inference_steps
      }
    };
    // Kick off the prediction (async on Replicate's side).
    const startResponse = await fetch(
      "https://api.replicate.com/v1/models/black-forest-labs/flux-dev/predictions",
      {
        method: "POST",
        headers: {
          "Authorization": `Bearer ${apiKey}`,
          "Content-Type": "application/json"
        },
        body: JSON.stringify(inputData)
      }
    );
    if (!startResponse.ok) {
      const errorData = await startResponse.json();
      throw new Error(`Replicate API error: ${JSON.stringify(errorData)}`);
    }
    const prediction = await startResponse.json();
    const predictionId = prediction.id;
    // Poll until the prediction succeeds, fails, or we give up.
    let imageUrl = null;
    let attempts = 0;
    const maxAttempts = 30;
    while (!imageUrl && attempts < maxAttempts) {
      await new Promise((resolve) => setTimeout(resolve, 1e3));
      const statusResponse = await fetch(
        `https://api.replicate.com/v1/predictions/${predictionId}`,
        {
          headers: {
            "Authorization": `Bearer ${apiKey}`
          }
        }
      );
      if (!statusResponse.ok) {
        const errorData = await statusResponse.json();
        throw new Error(`Replicate API status error: ${JSON.stringify(errorData)}`);
      }
      const status = await statusResponse.json();
      if (status.status === "succeeded") {
        // Output may be a single URL string or an array of URLs.
        if (status.output && typeof status.output === "string") {
          imageUrl = status.output;
        } else if (Array.isArray(status.output) && status.output.length > 0) {
          imageUrl = status.output[0];
        }
      } else if (status.status === "failed") {
        throw new Error(`Image generation failed: ${status.error || "Unknown error"}`);
      }
      attempts++;
    }
    if (!imageUrl) {
      throw new Error("Timed out waiting for image generation");
    }
    return imageUrl;
  } catch (error) {
    console.error("Error generating image:", error);
    // Re-wrap so callers always see a single, consistent error shape.
    throw new Error(`Failed to generate image: ${error instanceof Error ? error.message : String(error)}`);
  }
}
// Probe whether the FLUX image model is reachable: requires REPLICATE_API_KEY
// and a successful GET of the model's metadata endpoint. Any failure
// (missing key, network error, non-2xx) reports unavailable.
async function isFluxAvailable() {
  try {
    const apiKey = process.env.REPLICATE_API_KEY;
    if (!apiKey) {
      return false;
    }
    const response = await fetch(
      "https://api.replicate.com/v1/models/black-forest-labs/flux-dev",
      { headers: { "Authorization": `Bearer ${apiKey}` } }
    );
    return response.ok;
  } catch (error) {
    console.error("Error checking FLUX availability:", error);
    return false;
  }
}
+
+// server/video.ts
+import { InferenceClient as InferenceClient2 } from "@huggingface/inference";
+import { z as z3 } from "zod";
// Request-body schema for POST /api/generate-video; exactly one text-to-video
// model is currently supported (and is the default).
var videoGenerationSchema = z3.object({
  prompt: z3.string().min(1).max(1e3),
  model: z3.enum(["Wan-AI/Wan2.1-T2V-14B"]).default("Wan-AI/Wan2.1-T2V-14B")
});
// Generate a short video from a text prompt via the Hugging Face
// InferenceClient using the "replicate" provider.
// NOTE(review): the client is constructed with REPLICATE_API_KEY — confirm
// this is the token the InferenceClient's replicate provider expects.
// Returns the result inlined as a base64 `data:` URL (no file storage), which
// can be large for long clips.
async function generateVideo(params) {
  try {
    const replicateApiKey = process.env.REPLICATE_API_KEY;
    if (!replicateApiKey) {
      throw new Error("REPLICATE_API_KEY is not set in environment variables");
    }
    const client = new InferenceClient2(replicateApiKey);
    const result = await client.textToVideo({
      provider: "replicate",
      model: params.model,
      inputs: params.prompt
    });
    if (!result) {
      throw new Error("Failed to generate video: No result returned");
    }
    const videoBuffer = await result.arrayBuffer();
    const videoBase64 = Buffer.from(videoBuffer).toString("base64");
    const videoUrl = `data:video/mp4;base64,${videoBase64}`;
    return videoUrl;
  } catch (error) {
    console.error("Error generating video:", error);
    // Re-wrap so callers always see a single, consistent error shape.
    throw new Error(`Failed to generate video: ${error instanceof Error ? error.message : String(error)}`);
  }
}
// Shallow availability check for video generation: succeeds when a
// REPLICATE_API_KEY is set and an InferenceClient can be constructed.
// No network request is made, so a revoked key still reports available.
async function isVideoGenerationAvailable() {
  try {
    const replicateApiKey = process.env.REPLICATE_API_KEY;
    if (!replicateApiKey) {
      return false;
    }
    return Boolean(new InferenceClient2(replicateApiKey));
  } catch (error) {
    console.error("Error checking video generation availability:", error);
    return false;
  }
}
+
+// server/routes.ts
+import OpenAI2 from "openai";
+import { nanoid as nanoid2 } from "nanoid";
// Cached snapshot of chat-backend availability, refreshed by
// updateModelStatus(). `model` is the preferred usable backend:
// "openai" | "qwen" | "unavailable". Optimistically initialized to OpenAI.
var currentModelStatus = {
  model: "openai",
  isOpenAIAvailable: true,
  isQwenAvailable: true,
  lastChecked: /* @__PURE__ */ new Date()
};
// Re-probe which chat backend is usable and cache the result in
// `currentModelStatus`. Preference order: OpenAI, then Qwen, else
// "unavailable". On error the previously cached status is returned unchanged.
async function updateModelStatus() {
  try {
    // The two availability probes are independent — run them concurrently
    // instead of awaiting them one after the other.
    const [isOpenAIAvailable, isQwenAvailable] = await Promise.all([
      canUseOpenAI(),
      canUseQwen()
    ]);
    let model = "unavailable";
    if (isOpenAIAvailable) {
      model = "openai";
    } else if (isQwenAvailable) {
      model = "qwen";
    }
    currentModelStatus = {
      model,
      isOpenAIAvailable,
      isQwenAvailable,
      lastChecked: /* @__PURE__ */ new Date()
    };
    console.log(`Updated model status: ${model} (OpenAI: ${isOpenAIAvailable}, Qwen: ${isQwenAvailable})`);
    return currentModelStatus;
  } catch (error) {
    console.error("Error updating model status:", error);
    return currentModelStatus;
  }
}
// Prime the cache once at module load (fire-and-forget).
updateModelStatus();
// Register every HTTP route on the Express app and return the HTTP server.
// Conversations may be anonymous (no userId) or owned by a user; the routes
// below enforce that owned conversations are only accessible to their owner.
async function registerRoutes(app2) {
  setupAuth(app2);

  // List conversations: the logged-in user's own, otherwise only anonymous ones.
  app2.get("/api/conversations", async (req, res) => {
    try {
      let conversations2;
      if (req.isAuthenticated() && req.user) {
        const userId = req.user.id;
        conversations2 = await storage.getUserConversations(userId);
      } else {
        conversations2 = await storage.getConversations();
        conversations2 = conversations2.filter((conv) => !conv.userId);
      }
      res.json(conversations2);
    } catch (error) {
      console.error("Error fetching conversations:", error);
      res.status(500).json({ message: "Failed to fetch conversations." });
    }
  });

  // Create a conversation; optionally auto-title it from the first message.
  app2.post("/api/conversations", async (req, res) => {
    try {
      const conversationId = nanoid2();
      let title = req.body.title;
      if ((!title || title === "New Conversation") && req.body.firstMessage) {
        try {
          const openaiClient = new OpenAI2();
          const response = await openaiClient.chat.completions.create({
            model: "gpt-3.5-turbo",
            messages: [
              {
                role: "system",
                content: "Generate a brief, descriptive title (3-5 words) for a conversation that starts with this message. Respond with just the title."
              },
              {
                role: "user",
                content: req.body.firstMessage
              }
            ],
            max_tokens: 20,
            temperature: 0.7
          });
          title = response.choices[0].message.content?.trim() || "New Conversation";
        } catch (err) {
          // Title generation is best-effort; fall back to the default name.
          console.error("Error generating AI title:", err);
          title = "New Conversation";
        }
      }
      const conversationData = {
        id: conversationId,
        title,
        personality: req.body.personality || "general"
      };
      if (req.isAuthenticated() && req.user) {
        conversationData.userId = req.user.id;
      }
      const result = insertConversationSchema.safeParse(conversationData);
      if (!result.success) {
        return res.status(400).json({ message: "Invalid conversation data." });
      }
      const conversation = await storage.createConversation(result.data);
      res.status(201).json(conversation);
    } catch (error) {
      console.error("Error creating conversation:", error);
      res.status(500).json({ message: "Failed to create conversation." });
    }
  });

  // Re-title an existing conversation from its first few messages.
  app2.post("/api/conversations/:id/generate-title", async (req, res) => {
    try {
      const { id } = req.params;
      const messages2 = await storage.getMessages(id);
      if (messages2.length < 2) {
        return res.status(400).json({ message: "Need at least one exchange to generate a title" });
      }
      const contextMessages = messages2.slice(0, Math.min(4, messages2.length)).map((msg) => `${msg.role}: ${msg.content}`).join("\n");
      let title;
      try {
        const openaiClient = new OpenAI2();
        const response = await openaiClient.chat.completions.create({
          model: "gpt-3.5-turbo",
          messages: [
            {
              role: "system",
              content: "You are a helpful assistant that generates short, descriptive titles (max 6 words) for conversations based on their content. Respond with just the title."
            },
            {
              role: "user",
              content: `Generate a short, descriptive title (maximum 6 words) for this conversation:
${contextMessages}`
            }
          ],
          max_tokens: 20,
          temperature: 0.7
        });
        title = response.choices[0].message.content?.trim();
        if (!title) {
          title = `Chat ${(/* @__PURE__ */ new Date()).toLocaleDateString()}`;
        }
      } catch (err) {
        console.error("Error generating AI title:", err);
        title = `Chat ${(/* @__PURE__ */ new Date()).toLocaleDateString()}`;
      }
      const updatedConversation = await storage.updateConversationTitle(id, title);
      if (!updatedConversation) {
        return res.status(404).json({ message: "Conversation not found" });
      }
      res.json(updatedConversation);
    } catch (error) {
      console.error("Error generating title:", error);
      res.status(500).json({ message: "Failed to generate title." });
    }
  });

  // Fetch a conversation's messages; when logged in, the viewer's profile
  // context is prepended as a synthetic system message.
  app2.get("/api/conversations/:id/messages", async (req, res) => {
    try {
      const { id } = req.params;
      const conversation = await storage.getConversation(id);
      if (!conversation) {
        return res.status(404).json({ message: "Conversation not found." });
      }
      // SECURITY FIX: the ownership check previously ran only when the caller
      // was authenticated, letting anonymous requests read user-owned
      // conversations. Any non-owner request is now rejected (same pattern
      // as POST /api/chat below).
      if (conversation.userId) {
        if (!req.isAuthenticated() || !req.user || conversation.userId !== req.user.id) {
          return res.status(403).json({ message: "You don't have permission to access this conversation." });
        }
      }
      const messages2 = await storage.getMessages(id);
      if (req.isAuthenticated() && req.user) {
        const userContext = {
          role: "system",
          content: req.user.systemContext || `Chat with ${req.user.username}`,
          conversationId: id,
          createdAt: /* @__PURE__ */ new Date()
        };
        messages2.unshift(userContext);
      }
      res.json(messages2);
    } catch (error) {
      console.error("Error fetching messages:", error);
      res.status(500).json({ message: "Failed to fetch messages." });
    }
  });

  // Main chat endpoint: persists the user message, generates a reply, and
  // persists + returns it with info about which backend answered.
  app2.post("/api/chat", async (req, res) => {
    try {
      await updateModelStatus();
      if (currentModelStatus.model === "unavailable") {
        return res.status(503).json({
          message: "All AI models are currently unavailable. Please check your API keys."
        });
      }
      const result = conversationSchema.safeParse(req.body);
      if (!result.success) {
        return res.status(400).json({ message: "Invalid chat data format." });
      }
      const { messages: messages2 } = result.data;
      const conversationId = req.body.conversationId || "default";
      const conversation = await storage.getConversation(conversationId);
      if (!conversation && conversationId !== "default") {
        return res.status(404).json({ message: "Conversation not found." });
      }
      if (conversation && conversation.userId) {
        if (!req.isAuthenticated() || !req.user || conversation.userId !== req.user.id) {
          return res.status(403).json({ message: "You don't have permission to access this conversation." });
        }
      }
      const userMessage = messages2[messages2.length - 1];
      if (userMessage.role !== "user") {
        return res.status(400).json({ message: "Last message must be from the user." });
      }
      await storage.createMessage({
        content: userMessage.content,
        role: userMessage.role,
        conversationId
      });
      let userSystemContext = void 0;
      if (req.isAuthenticated() && req.user && req.user.systemContext) {
        userSystemContext = req.user.systemContext;
        console.log(
          "Including user system context in conversation:",
          userSystemContext ? "Yes" : "None available"
        );
      }
      const aiResponse = await generateChatResponse(messages2, userSystemContext);
      const savedMessage = await storage.createMessage({
        content: aiResponse,
        role: "assistant",
        conversationId
      });
      res.json({
        message: savedMessage,
        conversationId,
        modelInfo: {
          model: currentModelStatus.model,
          isFallback: currentModelStatus.model !== "openai"
        }
      });
    } catch (error) {
      console.error("Chat API error:", error);
      res.status(500).json({
        message: error.message || "Failed to process chat message."
      });
    }
  });

  // Report which backend is active; re-probes if the cache is > 5 min old.
  app2.get("/api/model-status", async (_req, res) => {
    try {
      const fiveMinutesAgo = new Date(Date.now() - 5 * 60 * 1e3);
      if (currentModelStatus.lastChecked < fiveMinutesAgo) {
        await updateModelStatus();
      }
      return res.json(currentModelStatus);
    } catch (error) {
      console.error("Error getting model status:", error);
      return res.status(500).json({ message: "Failed to get model status" });
    }
  });

  // Delete a conversation (the shared "default" one is protected).
  app2.delete("/api/conversations/:id", async (req, res) => {
    try {
      const { id } = req.params;
      if (id === "default") {
        return res.status(400).json({ message: "Cannot delete the default conversation" });
      }
      const conversation = await storage.getConversation(id);
      if (!conversation) {
        return res.status(404).json({ message: "Conversation not found" });
      }
      // SECURITY FIX: reject anonymous access to user-owned conversations too
      // (previously only authenticated non-owners were rejected).
      if (conversation.userId) {
        if (!req.isAuthenticated() || !req.user || conversation.userId !== req.user.id) {
          return res.status(403).json({ message: "You don't have permission to delete this conversation." });
        }
      }
      const success = await storage.deleteConversation(id);
      if (success) {
        res.status(200).json({ message: "Conversation deleted successfully" });
      } else {
        res.status(500).json({ message: "Failed to delete conversation" });
      }
    } catch (error) {
      console.error("Error deleting conversation:", error);
      res.status(500).json({ message: "Server error deleting conversation" });
    }
  });

  // Manually rename a conversation.
  app2.patch("/api/conversations/:id/title", async (req, res) => {
    try {
      const { id } = req.params;
      const { title } = req.body;
      if (!title || typeof title !== "string" || title.trim().length === 0) {
        return res.status(400).json({ message: "Valid title is required" });
      }
      const conversation = await storage.getConversation(id);
      if (!conversation) {
        return res.status(404).json({ message: "Conversation not found" });
      }
      // SECURITY FIX: reject anonymous access to user-owned conversations too.
      if (conversation.userId) {
        if (!req.isAuthenticated() || !req.user || conversation.userId !== req.user.id) {
          return res.status(403).json({ message: "You don't have permission to update this conversation." });
        }
      }
      // FIX: use the dedicated title-update helper (as /generate-title does)
      // instead of round-tripping the whole record through createConversation
      // and relying on create-as-upsert semantics.
      const updatedConversation = await storage.updateConversationTitle(id, title.trim());
      if (!updatedConversation) {
        return res.status(404).json({ message: "Conversation not found" });
      }
      res.json(updatedConversation);
    } catch (error) {
      console.error("Error updating conversation title:", error);
      res.status(500).json({ message: "Failed to update conversation title" });
    }
  });

  // Switch a conversation's assistant personality.
  app2.patch("/api/conversations/:id/personality", async (req, res) => {
    try {
      const { id } = req.params;
      const { personality } = req.body;
      const result = personalityTypeSchema.safeParse(personality);
      if (!result.success) {
        return res.status(400).json({
          message: "Invalid personality type",
          validOptions: personalityTypeSchema.options
        });
      }
      const conversation = await storage.getConversation(id);
      if (!conversation) {
        return res.status(404).json({ message: "Conversation not found" });
      }
      // SECURITY FIX: reject anonymous access to user-owned conversations too.
      if (conversation.userId) {
        if (!req.isAuthenticated() || !req.user || conversation.userId !== req.user.id) {
          return res.status(403).json({ message: "You don't have permission to update this conversation." });
        }
      }
      const updatedConversation = await storage.updateConversationPersonality(id, result.data);
      const personalityConfig = getPersonalityConfig(result.data);
      res.json({
        ...updatedConversation,
        personalityConfig: {
          name: personalityConfig.name,
          description: personalityConfig.description,
          emoji: personalityConfig.emoji
        }
      });
    } catch (error) {
      console.error("Error updating conversation personality:", error);
      res.status(500).json({ message: "Failed to update conversation personality" });
    }
  });

  // List the available personalities for the UI picker.
  app2.get("/api/personalities", async (_req, res) => {
    try {
      const personalityTypes = personalityTypeSchema.options;
      const personalities = personalityTypes.map((type) => {
        const config = getPersonalityConfig(type);
        return {
          id: type,
          name: config.name,
          description: config.description,
          emoji: config.emoji
        };
      });
      res.json(personalities);
    } catch (error) {
      console.error("Error fetching personalities:", error);
      res.status(500).json({ message: "Failed to fetch personalities" });
    }
  });

  // Text-to-image via FLUX (see generateImage).
  app2.post("/api/generate-image", async (req, res) => {
    try {
      const result = imageGenerationSchema.safeParse(req.body);
      if (!result.success) {
        return res.status(400).json({
          message: "Invalid image generation parameters",
          errors: result.error.format()
        });
      }
      const imageUrl = await generateImage(result.data);
      return res.json({
        success: true,
        imageUrl,
        params: result.data
      });
    } catch (error) {
      console.error("Error generating image:", error);
      return res.status(500).json({
        success: false,
        message: error.message || "Failed to generate image"
      });
    }
  });

  app2.get("/api/flux-status", async (_req, res) => {
    try {
      const isAvailable = await isFluxAvailable();
      return res.json({
        isAvailable,
        model: "FLUX.1-dev"
      });
    } catch (error) {
      console.error("Error checking FLUX availability:", error);
      return res.status(500).json({
        isAvailable: false,
        message: "Error checking FLUX availability"
      });
    }
  });

  // Text-to-video via the Wan2.1 model (see generateVideo).
  app2.post("/api/generate-video", async (req, res) => {
    try {
      const result = videoGenerationSchema.safeParse(req.body);
      if (!result.success) {
        return res.status(400).json({
          message: "Invalid video generation parameters",
          errors: result.error.format()
        });
      }
      const videoUrl = await generateVideo(result.data);
      return res.json({
        success: true,
        videoUrl,
        params: result.data
      });
    } catch (error) {
      console.error("Error generating video:", error);
      return res.status(500).json({
        success: false,
        message: error.message || "Failed to generate video"
      });
    }
  });

  app2.get("/api/video-status", async (_req, res) => {
    try {
      const isAvailable = await isVideoGenerationAvailable();
      return res.json({
        isAvailable,
        model: "Wan-AI/Wan2.1-T2V-14B"
      });
    } catch (error) {
      console.error("Error checking video generation availability:", error);
      return res.status(500).json({
        isAvailable: false,
        message: "Error checking video generation availability"
      });
    }
  });

  // Liveness probe.
  app2.get("/api/health", (_req, res) => {
    return res.json({ status: "ok" });
  });

  const httpServer = createServer(app2);
  return httpServer;
}
+
+// server/vite.ts
+import express from "express";
+import fs from "fs";
+import path2 from "path";
+import { createServer as createViteServer, createLogger } from "vite";
+
+// vite.config.ts
+import { defineConfig } from "vite";
+import react from "@vitejs/plugin-react";
+import path from "path";
+import runtimeErrorOverlay from "@replit/vite-plugin-runtime-error-modal";
// Vite build configuration (inlined from vite.config.ts by the bundler).
var vite_config_default = defineConfig({
  plugins: [
    react(),
    runtimeErrorOverlay(),
    // The Replit cartographer plugin is only loaded in dev on Replit
    // (note the top-level await on the dynamic import).
    ...process.env.NODE_ENV !== "production" && process.env.REPL_ID !== void 0 ? [
      await import("@replit/vite-plugin-cartographer").then(
        (m) => m.cartographer()
      )
    ] : []
  ],
  resolve: {
    alias: {
      // "@" -> client/src, "@shared" -> shared, "@assets" -> attached_assets
      "@": path.resolve(import.meta.dirname, "client", "src"),
      "@shared": path.resolve(import.meta.dirname, "shared"),
      "@assets": path.resolve(import.meta.dirname, "attached_assets")
    }
  },
  // The client app lives under client/; its build lands in dist/public.
  root: path.resolve(import.meta.dirname, "client"),
  build: {
    outDir: path.resolve(import.meta.dirname, "dist/public"),
    emptyOutDir: true
  }
});
+
+// server/vite.ts
+import { nanoid as nanoid3 } from "nanoid";
var viteLogger = createLogger(); // shared Vite logger; wrapped by setupVite's custom logger
// Timestamped console logger, e.g. `3:04:05 PM [express] message`.
// `source` tags which subsystem emitted the line (defaults to "express").
function log(message, source = "express") {
  const stamp = new Date().toLocaleTimeString("en-US", {
    hour: "numeric",
    minute: "2-digit",
    second: "2-digit",
    hour12: true
  });
  console.log(`${stamp} [${source}] ${message}`);
}
// Mount Vite in middleware mode for development: serves client/index.html
// through Vite's transform pipeline with HMR attached to the given HTTP server.
async function setupVite(app2, server) {
  const serverOptions = {
    middlewareMode: true,
    hmr: { server },
    allowedHosts: true
  };
  const vite = await createViteServer({
    ...vite_config_default,
    configFile: false,
    customLogger: {
      ...viteLogger,
      // Any Vite error is treated as fatal in dev: log it and exit.
      error: (msg, options) => {
        viteLogger.error(msg, options);
        process.exit(1);
      }
    },
    server: serverOptions,
    appType: "custom"
  });
  app2.use(vite.middlewares);
  // Catch-all: re-read index.html on every request so edits apply instantly,
  // and cache-bust the main.tsx script tag with a fresh random query string.
  app2.use("*", async (req, res, next) => {
    const url = req.originalUrl;
    try {
      const clientTemplate = path2.resolve(
        import.meta.dirname,
        "..",
        "client",
        "index.html"
      );
      let template = await fs.promises.readFile(clientTemplate, "utf-8");
      template = template.replace(
        `src="/src/main.tsx"`,
        `src="/src/main.tsx?v=${nanoid3()}"`
      );
      const page = await vite.transformIndexHtml(url, template);
      res.status(200).set({ "Content-Type": "text/html" }).end(page);
    } catch (e) {
      // Rewrite the stack trace to point at original sources, then delegate
      // to Express's error handling.
      vite.ssrFixStacktrace(e);
      next(e);
    }
  });
}
// Serve the pre-built client bundle from <moduleDir>/public in production,
// with an SPA fallback that returns index.html for any unmatched route.
// Throws at startup when the build output directory is missing.
function serveStatic(app2) {
  const distPath = path2.resolve(import.meta.dirname, "public");
  const buildExists = fs.existsSync(distPath);
  if (!buildExists) {
    throw new Error(
      `Could not find the build directory: ${distPath}, make sure to build the client first`
    );
  }
  app2.use(express.static(distPath));
  app2.use("*", (_req, res) => {
    res.sendFile(path2.resolve(distPath, "index.html"));
  });
}
+
+// server/index.ts
var app = express2();
app.use(express2.json());
app.use(express2.urlencoded({ extended: false }));
// Request logger for /api routes: remembers the JSON payload that res.json
// sends and prints "METHOD path status in Nms :: body", truncated to 80 chars.
app.use((req, res, next) => {
  const start = Date.now();
  const path3 = req.path;
  let capturedJsonResponse = void 0;
  const originalResJson = res.json;
  // Monkey-patch res.json on this response to capture the outgoing body.
  res.json = function(bodyJson, ...args) {
    capturedJsonResponse = bodyJson;
    return originalResJson.apply(res, [bodyJson, ...args]);
  };
  res.on("finish", () => {
    const duration = Date.now() - start;
    if (path3.startsWith("/api")) {
      let logLine = `${req.method} ${path3} ${res.statusCode} in ${duration}ms`;
      if (capturedJsonResponse) {
        logLine += ` :: ${JSON.stringify(capturedJsonResponse)}`;
      }
      if (logLine.length > 80) {
        logLine = logLine.slice(0, 79) + "\u2026";
      }
      log(logLine);
    }
  });
  next();
});
// Async bootstrap: register routes, attach dev (Vite) or prod (static) asset
// serving, then start listening.
(async () => {
  const server = await registerRoutes(app);
  // Final error handler: respond with the error's status/message as JSON.
  app.use((err, _req, res, _next) => {
    const status = err.status || err.statusCode || 500;
    const message = err.message || "Internal Server Error";
    res.status(status).json({ message });
    // NOTE(review): re-throwing after the response has been sent propagates
    // out of Express's error handler — confirm this crash-on-error behavior
    // is intentional.
    throw err;
  });
  if (app.get("env") === "development") {
    await setupVite(app, server);
  } else {
    serveStatic(app);
  }
  const port = 5e3;
  server.listen({
    port,
    host: "0.0.0.0",
    reusePort: true
  }, () => {
    log(`serving on port ${port}`);
  });
})();
+
+```
\ No newline at end of file
diff --git a/OpenAIChatAssistant/attached_assets/content-1746448603342.md b/OpenAIChatAssistant/attached_assets/content-1746448603342.md
new file mode 100644
index 0000000000000000000000000000000000000000..ff9e7db99e5ed569ac2716efcb040b0ceb75009b
--- /dev/null
+++ b/OpenAIChatAssistant/attached_assets/content-1746448603342.md
@@ -0,0 +1,23 @@
+# 404
+
+**File not found**
+
+The site configured at this address does not
+contain the requested file.
+
+
+If this is your site, make sure that the filename case matches the URL
+as well as any file permissions.
+
+For root URLs (like `http://example.com/`) you must provide an
+`index.html` file.
+
+
+[Read the full documentation](https://help.github.com/pages/)
+for more information about using **GitHub Pages**.
+
+
+[GitHub Status](https://githubstatus.com/) —
+[@githubstatus](https://twitter.com/githubstatus)
+
+[![]()](https://bella288.github.io/)[![]()](https://bella288.github.io/)
\ No newline at end of file
diff --git a/OpenAIChatAssistant/attached_assets/screenshot-1746193704446.png b/OpenAIChatAssistant/attached_assets/screenshot-1746193704446.png
new file mode 100644
index 0000000000000000000000000000000000000000..3e4d3c4e09ea84cb1b17cfbd2705733b1e266f4c
Binary files /dev/null and b/OpenAIChatAssistant/attached_assets/screenshot-1746193704446.png differ
diff --git a/OpenAIChatAssistant/attached_assets/screenshot-1746193776733.png b/OpenAIChatAssistant/attached_assets/screenshot-1746193776733.png
new file mode 100644
index 0000000000000000000000000000000000000000..3e4d3c4e09ea84cb1b17cfbd2705733b1e266f4c
Binary files /dev/null and b/OpenAIChatAssistant/attached_assets/screenshot-1746193776733.png differ
diff --git a/OpenAIChatAssistant/client/index.html b/OpenAIChatAssistant/client/index.html
new file mode 100644
index 0000000000000000000000000000000000000000..4b4d09e3151713b7af7e8d38dd2bac2a27d5a7ee
--- /dev/null
+++ b/OpenAIChatAssistant/client/index.html
@@ -0,0 +1,13 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/OpenAIChatAssistant/client/src/App.tsx b/OpenAIChatAssistant/client/src/App.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..a34c65ce4675ad7a5ce4a2c60325d85a1fcb31f9
--- /dev/null
+++ b/OpenAIChatAssistant/client/src/App.tsx
@@ -0,0 +1,43 @@
+import { Switch, Route } from "wouter";
+import { queryClient } from "./lib/queryClient";
+import { QueryClientProvider } from "@tanstack/react-query";
+import { Toaster } from "@/components/ui/toaster";
+import { TooltipProvider } from "@/components/ui/tooltip";
+import NotFound from "@/pages/not-found";
+import Home from "@/pages/Home";
+import AuthPage from "@/pages/auth-page";
+import ImageGenPage from "@/pages/ImageGenPage";
+import VideoGenPage from "@/pages/VideoGenPage";
+import { AuthProvider } from "@/hooks/use-auth";
+import { ProtectedRoute } from "@/lib/protected-route";
+import LogoutPage from "@/pages/logout-page";
+import RegisterPage from "@/pages/register-page";
+
+function Router() {
+ return (
+
+
+
+
+
+
+
+
+
+ );
+}
+
+function App() {
+ return (
+
+
+
+
+
+
+
+
+ );
+}
+
+export default App;
\ No newline at end of file
diff --git a/OpenAIChatAssistant/client/src/components/ChatHistory.tsx b/OpenAIChatAssistant/client/src/components/ChatHistory.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..3dd729c1fafbaa6f8ea38e0f5fa153f4a4720b37
--- /dev/null
+++ b/OpenAIChatAssistant/client/src/components/ChatHistory.tsx
@@ -0,0 +1,126 @@
+import React from 'react';
+import { ChatHistoryProps } from '@/lib/types';
+import TypingIndicator from './TypingIndicator';
+import { useScrollToBottom } from '@/lib/hooks';
+import { AlertTriangle } from 'lucide-react';
+
+const ChatHistory: React.FC = ({
+ messages,
+ isLoading,
+ currentModel = 'openai'
+}) => {
+ const scrollRef = useScrollToBottom([messages, isLoading]);
+
+ // Check if we're in fallback mode by looking at the model or fallback indicator in messages
+ const isFallbackMode = currentModel === 'qwen' ||
+ messages.some(message =>
+ message.role === 'assistant' &&
+ message.content.includes('fallback mode')
+ );
+
+ return (
+
+ {/* Fallback mode indicator */}
+ {isFallbackMode && (
+
+
+
+
+
+ Qwen Fallback Mode Active: The OpenAI API is currently unavailable.
+ Responses are being generated by the Qwen model instead.
+
+
+
+
+ )}
+
+ {messages.map((message, index) => {
+ // Check if this is a fallback message directly from the content
+ const isMessageFallback = message.role === 'assistant' &&
+ (message.content.includes('fallback mode') ||
+ message.content.includes('Qwen model'));
+
+ // Determine if this message appears to be a fallback response
+ const isAssistantFallbackMessage = message.role === 'assistant' &&
+ (currentModel === 'qwen' || isMessageFallback);
+
+ // Clean up fallback message for display
+ let displayContent = isAssistantFallbackMessage
+ ? message.content.replace(/\n\n\(Note: I'm currently operating in fallback mode.*\)$/, '')
+ : message.content;
+
+ // Remove any thinking process sections for Qwen responses
+ if (isAssistantFallbackMessage) {
+ // Remove
tags and their content
+ displayContent = displayContent.replace(/[\s\S]*?<\/think>/g, '');
+
+ // Remove any other XML-like tags
+ displayContent = displayContent.replace(/<[^>]*>/g, '');
+
+ // Clean up any excessive whitespace
+ displayContent = displayContent.replace(/^\s+|\s+$/g, '');
+ displayContent = displayContent.replace(/\n{3,}/g, '\n\n');
+ }
+
+ return (
+
+ {message.role !== 'user' && (
+
+ )}
+
+
+
{displayContent}
+
+ {isAssistantFallbackMessage && isMessageFallback && (
+
+ (This response was generated using the Qwen fallback model)
+
+ )}
+
+
+ {message.role === 'user' && (
+
+ )}
+
+ );
+ })}
+
+
+
+ );
+};
+
+export default ChatHistory;
diff --git a/OpenAIChatAssistant/client/src/components/ChatInputForm.tsx b/OpenAIChatAssistant/client/src/components/ChatInputForm.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..683e1fbadf8834c6e2a7748fa0510d045bc56e56
--- /dev/null
+++ b/OpenAIChatAssistant/client/src/components/ChatInputForm.tsx
@@ -0,0 +1,72 @@
+import React, { useState, useRef, useEffect } from 'react';
+import { ChatInputFormProps } from '@/lib/types';
+import { Button } from '@/components/ui/button';
+import { Input } from '@/components/ui/input';
+
+const ChatInputForm: React.FC = ({ onSendMessage, isLoading }) => {
+ const [input, setInput] = useState('');
+ const inputRef = useRef(null);
+
+ // Focus input on component mount
+ useEffect(() => {
+ if (inputRef.current) {
+ inputRef.current.focus();
+ }
+ }, []);
+
+ const handleSubmit = (e: React.FormEvent) => {
+ e.preventDefault();
+
+ const message = input.trim();
+ if (!message || isLoading) return;
+
+ onSendMessage(message);
+ setInput('');
+ };
+
+ const handleClear = () => {
+ setInput('');
+ if (inputRef.current) {
+ inputRef.current.focus();
+ }
+ };
+
+ return (
+
+ );
+};
+
+export default ChatInputForm;
diff --git a/OpenAIChatAssistant/client/src/components/ConnectionStatus.tsx b/OpenAIChatAssistant/client/src/components/ConnectionStatus.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..8252421d0ea15be5da148b1acb5e425d428df372
--- /dev/null
+++ b/OpenAIChatAssistant/client/src/components/ConnectionStatus.tsx
@@ -0,0 +1,64 @@
+import React from 'react';
+import { ConnectionStatusProps } from '@/lib/types';
+import { Badge } from '@/components/ui/badge';
+import { Tooltip, TooltipContent, TooltipProvider, TooltipTrigger } from '@/components/ui/tooltip';
+
+const ConnectionStatus: React.FC = ({ isConnected, currentModel = 'openai' }) => {
+ // Get model status details
+ const getModelBadge = () => {
+ switch (currentModel) {
+ case 'openai':
+ return (
+
+ OpenAI
+
+ );
+ case 'qwen':
+ return (
+
+ Qwen (Fallback)
+
+ );
+ case 'unavailable':
+ return (
+
+ No AI Available
+
+ );
+ default:
+ return null;
+ }
+ };
+
+ return (
+
+
+
+ {isConnected ? 'Connected' : 'Disconnected'}
+
+
+
+
+
+ {getModelBadge()}
+
+
+
+ {currentModel === 'openai'
+ ? 'Using OpenAI GPT-4o model'
+ : currentModel === 'qwen'
+ ? 'Using Qwen fallback model due to OpenAI unavailability'
+ : 'All AI models are currently unavailable'}
+
+
+
+
+
+ );
+};
+
+export default ConnectionStatus;
diff --git a/OpenAIChatAssistant/client/src/components/ConversationSidebar.tsx b/OpenAIChatAssistant/client/src/components/ConversationSidebar.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..0208ad04d6e3cc7e3015f0b9d475c393fdec1dfd
--- /dev/null
+++ b/OpenAIChatAssistant/client/src/components/ConversationSidebar.tsx
@@ -0,0 +1,315 @@
+import React, { useState, useEffect } from 'react';
+import { PlusCircle, MessageSquare, Trash2, Edit2, Save, X, User, LogOut } from 'lucide-react';
+import { Tooltip, TooltipContent, TooltipProvider, TooltipTrigger } from '@/components/ui/tooltip';
+import { Button } from '@/components/ui/button';
+import { Input } from '@/components/ui/input';
+import { apiRequest } from '@/lib/queryClient';
+import { Conversation } from '@/lib/types';
+import { useAuth } from '@/hooks/use-auth';
+import { useLocation } from 'wouter';
+
+interface ConversationSidebarProps {
+ isOpen: boolean;
+ onClose: () => void;
+ selectedConversationId: string;
+ onSelectConversation: (conversationId: string) => void;
+ onNewConversation: () => void;
+}
+
+const ConversationSidebar: React.FC = ({
+ isOpen,
+ onClose,
+ selectedConversationId,
+ onSelectConversation,
+ onNewConversation
+}) => {
+ const [conversations, setConversations] = useState([]);
+ const [isLoading, setIsLoading] = useState(false);
+ const [editingId, setEditingId] = useState(null);
+ const [editingTitle, setEditingTitle] = useState('');
+ const { user, logoutMutation } = useAuth();
+ const [, setLocation] = useLocation();
+
+ const isSignedIn = !!user;
+
+ // Fetch conversations
+ useEffect(() => {
+ const fetchConversations = async () => {
+ setIsLoading(true);
+ try {
+ const response = await fetch('/api/conversations');
+ if (response.ok) {
+ const data = await response.json();
+ setConversations(data);
+ } else {
+ console.error('Failed to fetch conversations');
+ }
+ } catch (error) {
+ console.error('Error fetching conversations:', error);
+ } finally {
+ setIsLoading(false);
+ }
+ };
+
+ fetchConversations();
+
+ // Set up interval to refresh conversations (every 30 seconds)
+ const interval = setInterval(fetchConversations, 30000);
+ return () => clearInterval(interval);
+ }, [isSignedIn]);
+
+ // Navigate to auth page
+ const handleSignIn = () => {
+ setLocation('/auth');
+ };
+
+ // Sign out
+ const handleSignOut = () => {
+ setLocation('/logout');
+ };
+
+ // Start editing a conversation title
+ const handleEditStart = (conversation: Conversation) => {
+ setEditingId(conversation.id);
+ setEditingTitle(conversation.title);
+ };
+
+ // Cancel editing
+ const handleEditCancel = () => {
+ setEditingId(null);
+ setEditingTitle('');
+ };
+
+ // Save edited title
+ const handleEditSave = async (conversationId: string) => {
+ try {
+ const response = await apiRequest('PATCH', `/api/conversations/${conversationId}/title`, {
+ title: editingTitle
+ });
+
+ if (response.ok) {
+ const updatedConversation = await response.json();
+ setConversations(conversations.map(conv =>
+ conv.id === conversationId ? updatedConversation : conv
+ ));
+ setEditingId(null);
+ } else {
+ console.error('Failed to update conversation title');
+ }
+ } catch (error) {
+ console.error('Error updating conversation title:', error);
+ }
+ };
+
+ // Delete a conversation
+ const handleDelete = async (conversationId: string) => {
+ // Confirm delete
+ if (!window.confirm('Are you sure you want to delete this conversation?')) {
+ return;
+ }
+
+ try {
+ const response = await apiRequest('DELETE', `/api/conversations/${conversationId}`);
+
+ if (response.ok) {
+ setConversations(conversations.filter(conv => conv.id !== conversationId));
+
+ // If we deleted the selected conversation, switch to a new one
+ if (conversationId === selectedConversationId) {
+ const nextConv = conversations.find(conv => conv.id !== conversationId);
+ if (nextConv) {
+ onSelectConversation(nextConv.id);
+ } else {
+ onNewConversation();
+ }
+ }
+ } else {
+ console.error('Failed to delete conversation');
+ }
+ } catch (error) {
+ console.error('Error deleting conversation:', error);
+ }
+ };
+
+ return (
+
+ );
+};
+
+export default ConversationSidebar;
\ No newline at end of file
diff --git a/OpenAIChatAssistant/client/src/components/ImageGenerator.tsx b/OpenAIChatAssistant/client/src/components/ImageGenerator.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..4cdab352cb017fda207719a39fc286da70ebaf02
--- /dev/null
+++ b/OpenAIChatAssistant/client/src/components/ImageGenerator.tsx
@@ -0,0 +1,319 @@
+import React, { useState } from 'react';
+import { useForm } from 'react-hook-form';
+import { zodResolver } from '@hookform/resolvers/zod';
+import { z } from 'zod';
+import { Button } from './ui/button';
+import {
+ Form,
+ FormControl,
+ FormDescription,
+ FormField,
+ FormItem,
+ FormLabel,
+ FormMessage,
+} from './ui/form';
+import { Input } from './ui/input';
+import { Slider } from './ui/slider';
+import { Switch } from './ui/switch';
+import { cn } from '@/lib/utils';
+import { Card } from './ui/card';
+import { Loader2 } from 'lucide-react';
+
+// Define form schema
+const formSchema = z.object({
+ prompt: z.string().min(1, {
+ message: 'Prompt is required',
+ }).max(1000, {
+ message: 'Prompt must be less than 1000 characters',
+ }),
+ width: z.number().min(256).max(1024).default(512),
+ height: z.number().min(256).max(1024).default(512),
+ seed: z.number().default(0),
+ randomize_seed: z.boolean().default(true),
+ guidance_scale: z.number().min(0).max(20).default(7.5),
+ num_inference_steps: z.number().min(1).max(50).default(20),
+});
+
+type FormValues = z.infer;
+
+export default function ImageGenerator() {
+ const [isLoading, setIsLoading] = useState(false);
+ const [error, setError] = useState(null);
+ const [imageUrl, setImageUrl] = useState(null);
+
+ // Default form values
+ const defaultValues: FormValues = {
+ prompt: '',
+ width: 512,
+ height: 512,
+ seed: 0,
+ randomize_seed: true,
+ guidance_scale: 7.5,
+ num_inference_steps: 20,
+ };
+
+ // Initialize form
+ const form = useForm({
+ resolver: zodResolver(formSchema),
+ defaultValues,
+ });
+
+ // Handle form submission
+ const onSubmit = async (data: FormValues) => {
+ setIsLoading(true);
+ setError(null);
+
+ try {
+ const response = await fetch('/api/generate-image', {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ },
+ body: JSON.stringify(data),
+ });
+
+ if (!response.ok) {
+ const errorData = await response.json();
+ throw new Error(errorData.message || 'Failed to generate image');
+ }
+
+ const result = await response.json();
+ setImageUrl(result.imageUrl);
+ } catch (err: any) {
+ setError(err.message || 'An error occurred while generating the image');
+ console.error('Error generating image:', err);
+ } finally {
+ setIsLoading(false);
+ }
+ };
+
+ return (
+
+
+
Image Generator
+ {isLoading &&
Generating...
}
+
+
+
+
+
+
+ {imageUrl ? (
+
+

+
+
+
+
+
+ ) : (
+
+
+
+ Your generated image will appear here
+
+
+
+ )}
+
+
+
+ );
+}
\ No newline at end of file
diff --git a/OpenAIChatAssistant/client/src/components/TypingIndicator.tsx b/OpenAIChatAssistant/client/src/components/TypingIndicator.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..336fa26736e12c8decc9ed6aa7311278aa7b1cf1
--- /dev/null
+++ b/OpenAIChatAssistant/client/src/components/TypingIndicator.tsx
@@ -0,0 +1,30 @@
+import React from 'react';
+import { TypingIndicatorProps } from '@/lib/types';
+
+const TypingIndicator: React.FC = ({ isVisible }) => {
+ if (!isVisible) return null;
+
+ return (
+
+ );
+};
+
+export default TypingIndicator;
diff --git a/OpenAIChatAssistant/client/src/components/UserSettingsModal.tsx b/OpenAIChatAssistant/client/src/components/UserSettingsModal.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..bb499c9f43d0119333942e76126d5dee4b51d0fc
--- /dev/null
+++ b/OpenAIChatAssistant/client/src/components/UserSettingsModal.tsx
@@ -0,0 +1,348 @@
+import React, { useEffect } from "react";
+import { useForm } from "react-hook-form";
+import { zodResolver } from "@hookform/resolvers/zod";
+import { z } from "zod";
+import { useMutation } from "@tanstack/react-query";
+import { apiRequest, queryClient } from "@/lib/queryClient";
+import { useAuth } from "@/hooks/use-auth";
+import { useToast } from "@/hooks/use-toast";
+import { updateUserProfileSchema } from "@shared/schema";
+
+import {
+ Dialog,
+ DialogContent,
+ DialogDescription,
+ DialogHeader,
+ DialogTitle,
+ DialogFooter,
+} from "@/components/ui/dialog";
+import {
+ Form,
+ FormControl,
+ FormDescription,
+ FormField,
+ FormItem,
+ FormLabel,
+ FormMessage,
+} from "@/components/ui/form";
+import { Input } from "@/components/ui/input";
+import { Textarea } from "@/components/ui/textarea";
+import { Button } from "@/components/ui/button";
+import { Loader2 } from "lucide-react";
+
+// Create a form schema
+const profileFormSchema = z.object({
+ fullName: z.string().optional(),
+ location: z.string().optional(),
+ interests: z.array(z.string()).optional(),
+ interestsInput: z.string().optional(), // For input field value only, not submitted
+ profession: z.string().optional(),
+ pets: z.string().optional(),
+ systemContext: z.string().optional(),
+});
+
+type ProfileFormValues = z.infer;
+
+interface UserSettingsModalProps {
+ isOpen: boolean;
+ onClose: () => void;
+}
+
+export default function UserSettingsModal({
+ isOpen,
+ onClose,
+}: UserSettingsModalProps) {
+ const { user } = useAuth();
+ const { toast } = useToast();
+
+ // Create form with default values
+ const form = useForm({
+ resolver: zodResolver(profileFormSchema),
+ defaultValues: {
+ fullName: "",
+ location: "",
+ interests: [],
+ interestsInput: "",
+ profession: "",
+ pets: "",
+ systemContext: "",
+ },
+ });
+
+ // Update form when user data changes
+ useEffect(() => {
+ if (user) {
+ // Convert interests array to comma-separated string for display
+ const interestsString = user.interests?.join(", ") || "";
+
+ form.reset({
+ fullName: user.fullName || "",
+ location: user.location || "",
+ interests: user.interests || [],
+ interestsInput: interestsString,
+ profession: user.profession || "",
+ pets: user.pets || "",
+ systemContext: user.systemContext || "",
+ });
+ }
+ }, [user, form]);
+
+ const updateProfileMutation = useMutation({
+ mutationFn: async (data: ProfileFormValues) => {
+ const res = await apiRequest("PATCH", "/api/user/profile", data);
+ if (!res.ok) {
+ const errorData = await res.json();
+ throw new Error(errorData.message || "Failed to update profile");
+ }
+ return await res.json();
+ },
+ onSuccess: (updatedUser) => {
+ queryClient.setQueryData(["/api/user"], updatedUser);
+ toast({
+ title: "Profile updated",
+ description: "Your profile has been updated successfully.",
+ });
+ onClose();
+ },
+ onError: (error: Error) => {
+ toast({
+ title: "Update failed",
+ description: error.message,
+ variant: "destructive",
+ });
+ },
+ });
+
+ const onSubmit = async (data: ProfileFormValues) => {
+ // Create a copy of the data object without interestsInput
+ const { interestsInput, ...submitData } = data;
+
+ // Submit data without the temporary interestsInput field
+ await updateProfileMutation.mutateAsync(submitData);
+ };
+
+ // Convert string to array for interests field if needed
+ const handleInterestsChange = (e: React.ChangeEvent) => {
+ const value = e.target.value;
+ // Just store the input value as is, don't process it yet
+ form.setValue("interestsInput", value);
+
+ // Process for the actual interests field that gets submitted
+ const interestsArray = value
+ .split(",")
+ .map((item) => item.trim())
+ .filter((item) => item !== "");
+ form.setValue("interests", interestsArray);
+ };
+
+ return (
+
+ );
+}
\ No newline at end of file
diff --git a/OpenAIChatAssistant/client/src/components/VideoGenerator.tsx b/OpenAIChatAssistant/client/src/components/VideoGenerator.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..797f20420d4568fe37ec92c987686e162531c36d
--- /dev/null
+++ b/OpenAIChatAssistant/client/src/components/VideoGenerator.tsx
@@ -0,0 +1,169 @@
+import React, { useState } from 'react';
+import { useForm } from 'react-hook-form';
+import { zodResolver } from '@hookform/resolvers/zod';
+import { z } from 'zod';
+import { Button } from './ui/button';
+import {
+ Form,
+ FormControl,
+ FormDescription,
+ FormField,
+ FormItem,
+ FormLabel,
+ FormMessage,
+} from './ui/form';
+import { Input } from './ui/input';
+import { cn } from '@/lib/utils';
+import { Card } from './ui/card';
+import { Loader2 } from 'lucide-react';
+
+// Define form schema
+const formSchema = z.object({
+ prompt: z.string().min(1, {
+ message: 'Prompt is required',
+ }).max(1000, {
+ message: 'Prompt must be less than 1000 characters',
+ }),
+ model: z.enum(["Wan-AI/Wan2.1-T2V-14B"]).default("Wan-AI/Wan2.1-T2V-14B"),
+});
+
+type FormValues = z.infer;
+
+export default function VideoGenerator() {
+ const [isLoading, setIsLoading] = useState(false);
+ const [error, setError] = useState(null);
+ const [videoUrl, setVideoUrl] = useState(null);
+
+ // Default form values
+ const defaultValues: FormValues = {
+ prompt: '',
+ model: "Wan-AI/Wan2.1-T2V-14B",
+ };
+
+ // Initialize form
+ const form = useForm({
+ resolver: zodResolver(formSchema),
+ defaultValues,
+ });
+
+ // Handle form submission
+ const onSubmit = async (data: FormValues) => {
+ setIsLoading(true);
+ setError(null);
+
+ try {
+ const response = await fetch('/api/generate-video', {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ },
+ body: JSON.stringify(data),
+ });
+
+ if (!response.ok) {
+ const errorData = await response.json();
+ throw new Error(errorData.message || 'Failed to generate video');
+ }
+
+ const result = await response.json();
+ setVideoUrl(result.videoUrl);
+ } catch (err: any) {
+ setError(err.message || 'An error occurred while generating the video');
+ console.error('Error generating video:', err);
+ } finally {
+ setIsLoading(false);
+ }
+ };
+
+ return (
+
+
+
+
+
+
+ {error && (
+
+ {error}
+
+ )}
+
+
+ {videoUrl ? (
+
+
+
+
+
+
+
+ ) : (
+
+
+
+ Your generated video will appear here
+
+
+
+ )}
+
+
+ );
+}
\ No newline at end of file
diff --git a/OpenAIChatAssistant/client/src/components/ui/accordion.tsx b/OpenAIChatAssistant/client/src/components/ui/accordion.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..e6a723d06574ee5cec8b00759b98f3fbe1ac7cc9
--- /dev/null
+++ b/OpenAIChatAssistant/client/src/components/ui/accordion.tsx
@@ -0,0 +1,56 @@
+import * as React from "react"
+import * as AccordionPrimitive from "@radix-ui/react-accordion"
+import { ChevronDown } from "lucide-react"
+
+import { cn } from "@/lib/utils"
+
+const Accordion = AccordionPrimitive.Root
+
+const AccordionItem = React.forwardRef<
+ React.ElementRef,
+ React.ComponentPropsWithoutRef
+>(({ className, ...props }, ref) => (
+
+))
+AccordionItem.displayName = "AccordionItem"
+
+const AccordionTrigger = React.forwardRef<
+ React.ElementRef,
+ React.ComponentPropsWithoutRef
+>(({ className, children, ...props }, ref) => (
+
+ svg]:rotate-180",
+ className
+ )}
+ {...props}
+ >
+ {children}
+
+
+
+))
+AccordionTrigger.displayName = AccordionPrimitive.Trigger.displayName
+
+const AccordionContent = React.forwardRef<
+ React.ElementRef,
+ React.ComponentPropsWithoutRef
+>(({ className, children, ...props }, ref) => (
+
+ {children}
+
+))
+
+AccordionContent.displayName = AccordionPrimitive.Content.displayName
+
+export { Accordion, AccordionItem, AccordionTrigger, AccordionContent }
diff --git a/OpenAIChatAssistant/client/src/components/ui/alert-dialog.tsx b/OpenAIChatAssistant/client/src/components/ui/alert-dialog.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..8722561cf6bda62d62f9a0c67730aefda971873a
--- /dev/null
+++ b/OpenAIChatAssistant/client/src/components/ui/alert-dialog.tsx
@@ -0,0 +1,139 @@
+import * as React from "react"
+import * as AlertDialogPrimitive from "@radix-ui/react-alert-dialog"
+
+import { cn } from "@/lib/utils"
+import { buttonVariants } from "@/components/ui/button"
+
+const AlertDialog = AlertDialogPrimitive.Root
+
+const AlertDialogTrigger = AlertDialogPrimitive.Trigger
+
+const AlertDialogPortal = AlertDialogPrimitive.Portal
+
+const AlertDialogOverlay = React.forwardRef<
+ React.ElementRef,
+ React.ComponentPropsWithoutRef
+>(({ className, ...props }, ref) => (
+
+))
+AlertDialogOverlay.displayName = AlertDialogPrimitive.Overlay.displayName
+
+const AlertDialogContent = React.forwardRef<
+ React.ElementRef,
+ React.ComponentPropsWithoutRef
+>(({ className, ...props }, ref) => (
+
+
+
+
+))
+AlertDialogContent.displayName = AlertDialogPrimitive.Content.displayName
+
+const AlertDialogHeader = ({
+ className,
+ ...props
+}: React.HTMLAttributes) => (
+
+)
+AlertDialogHeader.displayName = "AlertDialogHeader"
+
+const AlertDialogFooter = ({
+ className,
+ ...props
+}: React.HTMLAttributes) => (
+
+)
+AlertDialogFooter.displayName = "AlertDialogFooter"
+
+const AlertDialogTitle = React.forwardRef<
+ React.ElementRef,
+ React.ComponentPropsWithoutRef
+>(({ className, ...props }, ref) => (
+
+))
+AlertDialogTitle.displayName = AlertDialogPrimitive.Title.displayName
+
+const AlertDialogDescription = React.forwardRef<
+ React.ElementRef,
+ React.ComponentPropsWithoutRef
+>(({ className, ...props }, ref) => (
+
+))
+AlertDialogDescription.displayName =
+ AlertDialogPrimitive.Description.displayName
+
+const AlertDialogAction = React.forwardRef<
+ React.ElementRef,
+ React.ComponentPropsWithoutRef
+>(({ className, ...props }, ref) => (
+
+))
+AlertDialogAction.displayName = AlertDialogPrimitive.Action.displayName
+
+const AlertDialogCancel = React.forwardRef<
+ React.ElementRef,
+ React.ComponentPropsWithoutRef
+>(({ className, ...props }, ref) => (
+
+))
+AlertDialogCancel.displayName = AlertDialogPrimitive.Cancel.displayName
+
+export {
+ AlertDialog,
+ AlertDialogPortal,
+ AlertDialogOverlay,
+ AlertDialogTrigger,
+ AlertDialogContent,
+ AlertDialogHeader,
+ AlertDialogFooter,
+ AlertDialogTitle,
+ AlertDialogDescription,
+ AlertDialogAction,
+ AlertDialogCancel,
+}
diff --git a/OpenAIChatAssistant/client/src/components/ui/alert.tsx b/OpenAIChatAssistant/client/src/components/ui/alert.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..41fa7e0561a3fdb5f986c1213a35e563de740e96
--- /dev/null
+++ b/OpenAIChatAssistant/client/src/components/ui/alert.tsx
@@ -0,0 +1,59 @@
+import * as React from "react"
+import { cva, type VariantProps } from "class-variance-authority"
+
+import { cn } from "@/lib/utils"
+
+const alertVariants = cva(
+ "relative w-full rounded-lg border p-4 [&>svg~*]:pl-7 [&>svg+div]:translate-y-[-3px] [&>svg]:absolute [&>svg]:left-4 [&>svg]:top-4 [&>svg]:text-foreground",
+ {
+ variants: {
+ variant: {
+ default: "bg-background text-foreground",
+ destructive:
+ "border-destructive/50 text-destructive dark:border-destructive [&>svg]:text-destructive",
+ },
+ },
+ defaultVariants: {
+ variant: "default",
+ },
+ }
+)
+
+const Alert = React.forwardRef<
+ HTMLDivElement,
+ React.HTMLAttributes & VariantProps
+>(({ className, variant, ...props }, ref) => (
+
+))
+Alert.displayName = "Alert"
+
+const AlertTitle = React.forwardRef<
+ HTMLParagraphElement,
+ React.HTMLAttributes
+>(({ className, ...props }, ref) => (
+
+))
+AlertTitle.displayName = "AlertTitle"
+
+const AlertDescription = React.forwardRef<
+ HTMLParagraphElement,
+ React.HTMLAttributes
+>(({ className, ...props }, ref) => (
+
+))
+AlertDescription.displayName = "AlertDescription"
+
+export { Alert, AlertTitle, AlertDescription }
diff --git a/OpenAIChatAssistant/client/src/components/ui/aspect-ratio.tsx b/OpenAIChatAssistant/client/src/components/ui/aspect-ratio.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..c4abbf37f217c715a0eaade7f45ac78600df419f
--- /dev/null
+++ b/OpenAIChatAssistant/client/src/components/ui/aspect-ratio.tsx
@@ -0,0 +1,5 @@
+import * as AspectRatioPrimitive from "@radix-ui/react-aspect-ratio"
+
+const AspectRatio = AspectRatioPrimitive.Root
+
+export { AspectRatio }
diff --git a/OpenAIChatAssistant/client/src/components/ui/avatar.tsx b/OpenAIChatAssistant/client/src/components/ui/avatar.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..51e507ba9d08bcdbb1fb630498f1cbdf2bf50093
--- /dev/null
+++ b/OpenAIChatAssistant/client/src/components/ui/avatar.tsx
@@ -0,0 +1,50 @@
+"use client"
+
+import * as React from "react"
+import * as AvatarPrimitive from "@radix-ui/react-avatar"
+
+import { cn } from "@/lib/utils"
+
+const Avatar = React.forwardRef<
+ React.ElementRef,
+ React.ComponentPropsWithoutRef
+>(({ className, ...props }, ref) => (
+
+))
+Avatar.displayName = AvatarPrimitive.Root.displayName
+
+const AvatarImage = React.forwardRef<
+ React.ElementRef,
+ React.ComponentPropsWithoutRef
+>(({ className, ...props }, ref) => (
+
+))
+AvatarImage.displayName = AvatarPrimitive.Image.displayName
+
+const AvatarFallback = React.forwardRef<
+ React.ElementRef,
+ React.ComponentPropsWithoutRef
+>(({ className, ...props }, ref) => (
+
+))
+AvatarFallback.displayName = AvatarPrimitive.Fallback.displayName
+
+export { Avatar, AvatarImage, AvatarFallback }
diff --git a/OpenAIChatAssistant/client/src/components/ui/badge.tsx b/OpenAIChatAssistant/client/src/components/ui/badge.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..f000e3ef5176395b067dfc3f3e1256a80c450015
--- /dev/null
+++ b/OpenAIChatAssistant/client/src/components/ui/badge.tsx
@@ -0,0 +1,36 @@
+import * as React from "react"
+import { cva, type VariantProps } from "class-variance-authority"
+
+import { cn } from "@/lib/utils"
+
+const badgeVariants = cva(
+ "inline-flex items-center rounded-full border px-2.5 py-0.5 text-xs font-semibold transition-colors focus:outline-none focus:ring-2 focus:ring-ring focus:ring-offset-2",
+ {
+ variants: {
+ variant: {
+ default:
+ "border-transparent bg-primary text-primary-foreground hover:bg-primary/80",
+ secondary:
+ "border-transparent bg-secondary text-secondary-foreground hover:bg-secondary/80",
+ destructive:
+ "border-transparent bg-destructive text-destructive-foreground hover:bg-destructive/80",
+ outline: "text-foreground",
+ },
+ },
+ defaultVariants: {
+ variant: "default",
+ },
+ }
+)
+
+export interface BadgeProps
+ extends React.HTMLAttributes,
+ VariantProps {}
+
+function Badge({ className, variant, ...props }: BadgeProps) {
+ return (
+
+ )
+}
+
+export { Badge, badgeVariants }
diff --git a/OpenAIChatAssistant/client/src/components/ui/breadcrumb.tsx b/OpenAIChatAssistant/client/src/components/ui/breadcrumb.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..60e6c96f72f0350d08b47e4730cab8f3975dc853
--- /dev/null
+++ b/OpenAIChatAssistant/client/src/components/ui/breadcrumb.tsx
@@ -0,0 +1,115 @@
+import * as React from "react"
+import { Slot } from "@radix-ui/react-slot"
+import { ChevronRight, MoreHorizontal } from "lucide-react"
+
+import { cn } from "@/lib/utils"
+
+const Breadcrumb = React.forwardRef<
+ HTMLElement,
+ React.ComponentPropsWithoutRef<"nav"> & {
+ separator?: React.ReactNode
+ }
+>(({ ...props }, ref) => )
+Breadcrumb.displayName = "Breadcrumb"
+
+const BreadcrumbList = React.forwardRef<
+ HTMLOListElement,
+ React.ComponentPropsWithoutRef<"ol">
+>(({ className, ...props }, ref) => (
+
+))
+BreadcrumbList.displayName = "BreadcrumbList"
+
+const BreadcrumbItem = React.forwardRef<
+ HTMLLIElement,
+ React.ComponentPropsWithoutRef<"li">
+>(({ className, ...props }, ref) => (
+
+))
+BreadcrumbItem.displayName = "BreadcrumbItem"
+
+const BreadcrumbLink = React.forwardRef<
+ HTMLAnchorElement,
+ React.ComponentPropsWithoutRef<"a"> & {
+ asChild?: boolean
+ }
+>(({ asChild, className, ...props }, ref) => {
+ const Comp = asChild ? Slot : "a"
+
+ return (
+
+ )
+})
+BreadcrumbLink.displayName = "BreadcrumbLink"
+
+const BreadcrumbPage = React.forwardRef<
+ HTMLSpanElement,
+ React.ComponentPropsWithoutRef<"span">
+>(({ className, ...props }, ref) => (
+
+))
+BreadcrumbPage.displayName = "BreadcrumbPage"
+
+const BreadcrumbSeparator = ({
+ children,
+ className,
+ ...props
+}: React.ComponentProps<"li">) => (
+ svg]:w-3.5 [&>svg]:h-3.5", className)}
+ {...props}
+ >
+ {children ?? }
+
+)
+BreadcrumbSeparator.displayName = "BreadcrumbSeparator"
+
+const BreadcrumbEllipsis = ({
+ className,
+ ...props
+}: React.ComponentProps<"span">) => (
+
+
+ More
+
+)
+BreadcrumbEllipsis.displayName = "BreadcrumbElipssis"
+
+export {
+ Breadcrumb,
+ BreadcrumbList,
+ BreadcrumbItem,
+ BreadcrumbLink,
+ BreadcrumbPage,
+ BreadcrumbSeparator,
+ BreadcrumbEllipsis,
+}
diff --git a/OpenAIChatAssistant/client/src/components/ui/button.tsx b/OpenAIChatAssistant/client/src/components/ui/button.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..36496a28727a3643b4212a14225d4f6cbd50bda5
--- /dev/null
+++ b/OpenAIChatAssistant/client/src/components/ui/button.tsx
@@ -0,0 +1,56 @@
+import * as React from "react"
+import { Slot } from "@radix-ui/react-slot"
+import { cva, type VariantProps } from "class-variance-authority"
+
+import { cn } from "@/lib/utils"
+
+const buttonVariants = cva(
+ "inline-flex items-center justify-center gap-2 whitespace-nowrap rounded-md text-sm font-medium ring-offset-background transition-colors focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-ring focus-visible:ring-offset-2 disabled:pointer-events-none disabled:opacity-50 [&_svg]:pointer-events-none [&_svg]:size-4 [&_svg]:shrink-0",
+ {
+ variants: {
+ variant: {
+ default: "bg-primary text-primary-foreground hover:bg-primary/90",
+ destructive:
+ "bg-destructive text-destructive-foreground hover:bg-destructive/90",
+ outline:
+ "border border-input bg-background hover:bg-accent hover:text-accent-foreground",
+ secondary:
+ "bg-secondary text-secondary-foreground hover:bg-secondary/80",
+ ghost: "hover:bg-accent hover:text-accent-foreground",
+ link: "text-primary underline-offset-4 hover:underline",
+ },
+ size: {
+ default: "h-10 px-4 py-2",
+ sm: "h-9 rounded-md px-3",
+ lg: "h-11 rounded-md px-8",
+ icon: "h-10 w-10",
+ },
+ },
+ defaultVariants: {
+ variant: "default",
+ size: "default",
+ },
+ }
+)
+
+export interface ButtonProps
+ extends React.ButtonHTMLAttributes,
+ VariantProps {
+ asChild?: boolean
+}
+
+const Button = React.forwardRef(
+ ({ className, variant, size, asChild = false, ...props }, ref) => {
+ const Comp = asChild ? Slot : "button"
+ return (
+
+ )
+ }
+)
+Button.displayName = "Button"
+
+export { Button, buttonVariants }
diff --git a/OpenAIChatAssistant/client/src/components/ui/calendar.tsx b/OpenAIChatAssistant/client/src/components/ui/calendar.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..2174f7101ba18ec1385eed03f69dbd619feb98aa
--- /dev/null
+++ b/OpenAIChatAssistant/client/src/components/ui/calendar.tsx
@@ -0,0 +1,68 @@
+import * as React from "react"
+import { ChevronLeft, ChevronRight } from "lucide-react"
+import { DayPicker } from "react-day-picker"
+
+import { cn } from "@/lib/utils"
+import { buttonVariants } from "@/components/ui/button"
+
+export type CalendarProps = React.ComponentProps
+
+function Calendar({
+ className,
+ classNames,
+ showOutsideDays = true,
+ ...props
+}: CalendarProps) {
+ return (
+ (
+
+ ),
+ IconRight: ({ className, ...props }) => (
+
+ ),
+ }}
+ {...props}
+ />
+ )
+}
+Calendar.displayName = "Calendar"
+
+export { Calendar }
diff --git a/OpenAIChatAssistant/client/src/components/ui/card.tsx b/OpenAIChatAssistant/client/src/components/ui/card.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..938aa2281749dbb7dd40340261aafe3b6a2bf819
--- /dev/null
+++ b/OpenAIChatAssistant/client/src/components/ui/card.tsx
@@ -0,0 +1,79 @@
+import * as React from "react"
+
+import { cn } from "@/lib/utils"
+
+const Card = React.forwardRef<
+ HTMLDivElement,
+ React.HTMLAttributes
+>(({ className, ...props }, ref) => (
+
+))
+Card.displayName = "Card"
+
+const CardHeader = React.forwardRef<
+ HTMLDivElement,
+ React.HTMLAttributes
+>(({ className, ...props }, ref) => (
+
+))
+CardHeader.displayName = "CardHeader"
+
+const CardTitle = React.forwardRef<
+ HTMLParagraphElement,
+ React.HTMLAttributes
+>(({ className, ...props }, ref) => (
+
+))
+CardTitle.displayName = "CardTitle"
+
+const CardDescription = React.forwardRef<
+ HTMLParagraphElement,
+ React.HTMLAttributes
+>(({ className, ...props }, ref) => (
+
+))
+CardDescription.displayName = "CardDescription"
+
+const CardContent = React.forwardRef<
+ HTMLDivElement,
+ React.HTMLAttributes
+>(({ className, ...props }, ref) => (
+
+))
+CardContent.displayName = "CardContent"
+
+const CardFooter = React.forwardRef<
+ HTMLDivElement,
+ React.HTMLAttributes
+>(({ className, ...props }, ref) => (
+
+))
+CardFooter.displayName = "CardFooter"
+
+export { Card, CardHeader, CardFooter, CardTitle, CardDescription, CardContent }
\ No newline at end of file
diff --git a/OpenAIChatAssistant/client/src/components/ui/carousel.tsx b/OpenAIChatAssistant/client/src/components/ui/carousel.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..9c2b9bf3705d8421bef00704c0c52e83d371ca11
--- /dev/null
+++ b/OpenAIChatAssistant/client/src/components/ui/carousel.tsx
@@ -0,0 +1,260 @@
+import * as React from "react"
+import useEmblaCarousel, {
+ type UseEmblaCarouselType,
+} from "embla-carousel-react"
+import { ArrowLeft, ArrowRight } from "lucide-react"
+
+import { cn } from "@/lib/utils"
+import { Button } from "@/components/ui/button"
+
+type CarouselApi = UseEmblaCarouselType[1]
+type UseCarouselParameters = Parameters
+type CarouselOptions = UseCarouselParameters[0]
+type CarouselPlugin = UseCarouselParameters[1]
+
+type CarouselProps = {
+ opts?: CarouselOptions
+ plugins?: CarouselPlugin
+ orientation?: "horizontal" | "vertical"
+ setApi?: (api: CarouselApi) => void
+}
+
+type CarouselContextProps = {
+ carouselRef: ReturnType[0]
+ api: ReturnType[1]
+ scrollPrev: () => void
+ scrollNext: () => void
+ canScrollPrev: boolean
+ canScrollNext: boolean
+} & CarouselProps
+
+const CarouselContext = React.createContext(null)
+
+function useCarousel() {
+ const context = React.useContext(CarouselContext)
+
+ if (!context) {
+ throw new Error("useCarousel must be used within a ")
+ }
+
+ return context
+}
+
+const Carousel = React.forwardRef<
+ HTMLDivElement,
+ React.HTMLAttributes & CarouselProps
+>(
+ (
+ {
+ orientation = "horizontal",
+ opts,
+ setApi,
+ plugins,
+ className,
+ children,
+ ...props
+ },
+ ref
+ ) => {
+ const [carouselRef, api] = useEmblaCarousel(
+ {
+ ...opts,
+ axis: orientation === "horizontal" ? "x" : "y",
+ },
+ plugins
+ )
+ const [canScrollPrev, setCanScrollPrev] = React.useState(false)
+ const [canScrollNext, setCanScrollNext] = React.useState(false)
+
+ const onSelect = React.useCallback((api: CarouselApi) => {
+ if (!api) {
+ return
+ }
+
+ setCanScrollPrev(api.canScrollPrev())
+ setCanScrollNext(api.canScrollNext())
+ }, [])
+
+ const scrollPrev = React.useCallback(() => {
+ api?.scrollPrev()
+ }, [api])
+
+ const scrollNext = React.useCallback(() => {
+ api?.scrollNext()
+ }, [api])
+
+ const handleKeyDown = React.useCallback(
+ (event: React.KeyboardEvent) => {
+ if (event.key === "ArrowLeft") {
+ event.preventDefault()
+ scrollPrev()
+ } else if (event.key === "ArrowRight") {
+ event.preventDefault()
+ scrollNext()
+ }
+ },
+ [scrollPrev, scrollNext]
+ )
+
+ React.useEffect(() => {
+ if (!api || !setApi) {
+ return
+ }
+
+ setApi(api)
+ }, [api, setApi])
+
+ React.useEffect(() => {
+ if (!api) {
+ return
+ }
+
+ onSelect(api)
+ api.on("reInit", onSelect)
+ api.on("select", onSelect)
+
+ return () => {
+ api?.off("select", onSelect)
+ }
+ }, [api, onSelect])
+
+ return (
+
+
+ {children}
+
+
+ )
+ }
+)
+Carousel.displayName = "Carousel"
+
+const CarouselContent = React.forwardRef<
+ HTMLDivElement,
+ React.HTMLAttributes
+>(({ className, ...props }, ref) => {
+ const { carouselRef, orientation } = useCarousel()
+
+ return (
+
+ )
+})
+CarouselContent.displayName = "CarouselContent"
+
+const CarouselItem = React.forwardRef<
+ HTMLDivElement,
+ React.HTMLAttributes
+>(({ className, ...props }, ref) => {
+ const { orientation } = useCarousel()
+
+ return (
+
+ )
+})
+CarouselItem.displayName = "CarouselItem"
+
+const CarouselPrevious = React.forwardRef<
+ HTMLButtonElement,
+ React.ComponentProps
+>(({ className, variant = "outline", size = "icon", ...props }, ref) => {
+ const { orientation, scrollPrev, canScrollPrev } = useCarousel()
+
+ return (
+
+ )
+})
+CarouselPrevious.displayName = "CarouselPrevious"
+
+const CarouselNext = React.forwardRef<
+ HTMLButtonElement,
+ React.ComponentProps
+>(({ className, variant = "outline", size = "icon", ...props }, ref) => {
+ const { orientation, scrollNext, canScrollNext } = useCarousel()
+
+ return (
+
+ )
+})
+CarouselNext.displayName = "CarouselNext"
+
+export {
+ type CarouselApi,
+ Carousel,
+ CarouselContent,
+ CarouselItem,
+ CarouselPrevious,
+ CarouselNext,
+}
diff --git a/OpenAIChatAssistant/client/src/components/ui/chart.tsx b/OpenAIChatAssistant/client/src/components/ui/chart.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..39fba6d6fc8509b968824b5076a8791c46650294
--- /dev/null
+++ b/OpenAIChatAssistant/client/src/components/ui/chart.tsx
@@ -0,0 +1,365 @@
+"use client"
+
+import * as React from "react"
+import * as RechartsPrimitive from "recharts"
+
+import { cn } from "@/lib/utils"
+
+// Format: { THEME_NAME: CSS_SELECTOR }
+const THEMES = { light: "", dark: ".dark" } as const
+
+export type ChartConfig = {
+ [k in string]: {
+ label?: React.ReactNode
+ icon?: React.ComponentType
+ } & (
+ | { color?: string; theme?: never }
+ | { color?: never; theme: Record }
+ )
+}
+
+type ChartContextProps = {
+ config: ChartConfig
+}
+
+const ChartContext = React.createContext(null)
+
+function useChart() {
+ const context = React.useContext(ChartContext)
+
+ if (!context) {
+ throw new Error("useChart must be used within a ")
+ }
+
+ return context
+}
+
+const ChartContainer = React.forwardRef<
+ HTMLDivElement,
+ React.ComponentProps<"div"> & {
+ config: ChartConfig
+ children: React.ComponentProps<
+ typeof RechartsPrimitive.ResponsiveContainer
+ >["children"]
+ }
+>(({ id, className, children, config, ...props }, ref) => {
+ const uniqueId = React.useId()
+ const chartId = `chart-${id || uniqueId.replace(/:/g, "")}`
+
+ return (
+
+
+
+
+ {children}
+
+
+
+ )
+})
+ChartContainer.displayName = "Chart"
+
+const ChartStyle = ({ id, config }: { id: string; config: ChartConfig }) => {
+ const colorConfig = Object.entries(config).filter(
+ ([, config]) => config.theme || config.color
+ )
+
+ if (!colorConfig.length) {
+ return null
+ }
+
+ return (
+