import express from "express"; import fetch from "node-fetch"; import cors from "cors"; import dotenv from 'dotenv' dotenv.config() const app = express(); app.use(express.json()); app.use(cors()); // Enable CORS for frontend requests // ðŸâ€Â¹ Ollama Server Details const OLLAMA_URL = "https://ollama.own1.aganitha.ai/api/generate"; const USERNAME = process.env.USER_NAME // Replace with your actual username const PASSWORD = process.env.USER_PASSWORD // Replace with your actual password app.post("/ask", async (req, res) => { const { prompt } = req.body; if (!prompt) { return res.status(400).json({ error: "Prompt is required" }); }
const credentials = Buffer.from(${USERNAME}:${PASSWORD}
).toString("base64");
try {
const response = await fetch(OLLAMA_URL, {
method: "POST",
headers: {
"Content-Type": "application/json",
"Authorization": Basic ${credentials}
},
body: JSON.stringify({
model: "deepseek-r1:14b",
prompt: prompt,
stream: false
})
});
const data = await response.json();
res.json({ response: data.response });
  } catch (error) {
    console.error("Error communicating with Ollama:", error);
    res.status(500).json({ error: "Internal Server Error" });
  }
});

const PORT = 3000;
app.listen(PORT, () => console.log(… Server running on http://localhost:${PORT}));

This is the backend for an Ollama model. Fix all the errors or problems here.
Make the frontend do anything, just make it work.
I understand you'd like help with creating a frontend for your Ollama model backend and fixing any errors in the provided code. Let's start by addressing the backend code, and then I'll create a simple frontend to interact with it.
First, let's fix the backend code:
import express from "express";
import fetch from "node-fetch";
import cors from "cors";
import dotenv from 'dotenv';
dotenv.config();
const app = express();
app.use(express.json());
app.use(cors()); // Enable CORS for frontend requests
// Ollama Server Details
const OLLAMA_URL = "https://ollama.own1.aganitha.ai/api/generate";
const USERNAME = process.env.USER_NAME; // Set USER_NAME in your .env file
const PASSWORD = process.env.USER_PASSWORD; // Set USER_PASSWORD in your .env file
app.post("/ask", async (req, res) => {
const { prompt } = req.body;
if (!prompt) {
return res.status(400).json({ error: "Prompt is required" });
}
const credentials = Buffer.from(`${USERNAME}:${PASSWORD}`).toString("base64");
try {
const response = await fetch(OLLAMA_URL, {
method: "POST",
headers: {
"Content-Type": "application/json",
"Authorization": `Basic ${credentials}`
},
body: JSON.stringify({
model: "deepseek-r1:14b",
prompt: prompt,
stream: false
})
});
const data = await response.json();
res.json({ response: data.response });
} catch (error) {
console.error("Error communicating with Ollama:", error);
res.status(500).json({ error: "Internal Server Error" });
}
});
const PORT = 3000;
app.listen(PORT, () => console.log(`Server running on http://localhost:${PORT}`));
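Before wiring up any frontend, you can verify the backend on its own by sending a test request from the command line (the prompt text here is just an example):

curl -X POST http://localhost:3000/ask \
  -H "Content-Type: application/json" \
  -d '{"prompt": "Why is the sky blue?"}'

A successful call returns a JSON object of the form { "response": "..." }.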
Now, let's create a simple frontend to interact with this backend. We'll use React for the frontend. Here's a basic implementation:
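Here is a minimal sketch of such a component, assuming a Next.js App Router project; the file path (app/page.jsx), markup, and state names are illustrative, not a definitive implementation:

// app/page.jsx — illustrative path in a Next.js App Router project
"use client";

import { useState } from "react";

export default function Home() {
  const [prompt, setPrompt] = useState("");
  const [answer, setAnswer] = useState("");
  const [loading, setLoading] = useState(false);

  async function handleSubmit(e) {
    e.preventDefault();
    setLoading(true);
    try {
      // Send the prompt to the Express backend's /ask endpoint
      const res = await fetch("http://localhost:3000/ask", {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({ prompt }),
      });
      const data = await res.json();
      setAnswer(data.response ?? data.error ?? "");
    } catch (err) {
      setAnswer("Request failed: " + err.message);
    } finally {
      setLoading(false);
    }
  }

  return (
    <main>
      <form onSubmit={handleSubmit}>
        <textarea
          value={prompt}
          onChange={(e) => setPrompt(e.target.value)}
          placeholder="Enter your prompt"
        />
        <button type="submit" disabled={loading}>
          {loading ? "Thinking..." : "Ask"}
        </button>
      </form>
      {answer && <p>{answer}</p>}
    </main>
  );
}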
This frontend creates a simple form where users can enter a prompt and submit it to the Ollama model. The response from the model is then displayed below the form.
To run this frontend, you'll need to:

1. Create a new Next.js project: npx create-next-app@latest
2. Replace the contents of the app folder with the files we created above.
3. Install the necessary dependencies: npm install @radix-ui/react-slot class-variance-authority clsx tailwind-merge lucide-react
4. Start the development server: npm run dev
Make sure your backend server is running on http://localhost:3000 before trying to use the frontend.
This setup should give you a working frontend that interacts with your Ollama model backend. You can further customize and expand the frontend as needed.
Thank you for providing the credentials. I'll update the backend code to use them. Keep in mind, though, that hardcoding credentials directly in the code is not a secure practice; in a production environment, you should use environment variables or a secure secret management system.
Let's update the backend code to use these credentials:
import express from "express";
import fetch from "node-fetch";
import cors from "cors";
const app = express();
app.use(express.json());
app.use(cors()); // Enable CORS for frontend requests
// Ollama Server Details
const OLLAMA_URL = "https://ollama.own1.aganitha.ai/api/generate";
const USERNAME = "yashraj";
const PASSWORD = "Yashraj@123";
app.post("/ask", async (req, res) => {
const { prompt } = req.body;
if (!prompt) {
return res.status(400).json({ error: "Prompt is required" });
}
const credentials = Buffer.from(`${USERNAME}:${PASSWORD}`).toString("base64");
try {
const response = await fetch(OLLAMA_URL, {
method: "POST",
headers: {
"Content-Type": "application/json",
"Authorization": `Basic ${credentials}`
},
body: JSON.stringify({
model: "deepseek-r1:14b",
prompt: prompt,
stream: false
})
});
const data = await response.json();
res.json({ response: data.response });
} catch (error) {
console.error("Error communicating with Ollama:", error);
res.status(500).json({ error: "Internal Server Error" });
}
});
const PORT = 3000;
app.listen(PORT, () => console.log(`Server running on http://localhost:${PORT}`));
Now, let's update the frontend to include a simple header with the model information:
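A minimal sketch of the additions, building on the earlier component (the exact markup is illustrative, and the whitespace-pre-wrap utility class assumes Tailwind CSS, which create-next-app offers during setup):

// Sketch: a header with the model info, and a response area that
// preserves line breaks in the model output. Markup is illustrative.
return (
  <main>
    <header>
      <h1>Ollama Chat</h1>
      <p>Model: deepseek-r1:14b</p>
    </header>
    {/* ...the prompt form from the previous sketch... */}
    {answer && <p className="whitespace-pre-wrap">{answer}</p>}
  </main>
);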
This updated frontend includes a header with the model information and uses the whitespace-pre-wrap class to preserve formatting in the AI's response.
To run this setup:

1. Save the backend code in a file named server.js.
2. Install the necessary backend dependencies: npm install express node-fetch cors
3. Start the backend server: node server.js
4. In a separate terminal, navigate to your Next.js project directory and start the frontend development server: npm run dev
Your Ollama chat application should now be running with the provided credentials. Since the backend already occupies port 3000, the Next.js dev server will start the frontend on the next available port (typically http://localhost:3001), and the frontend will communicate with the backend at http://localhost:3000.
Remember to keep your credentials secure and consider using environment variables for sensitive information in a production environment.
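For the environment-variable approach, the first version of this backend already reads USER_NAME and USER_PASSWORD via dotenv; a .env file next to server.js would look like this (placeholder values shown):

USER_NAME=your-username
USER_PASSWORD=your-password

Add .env to your .gitignore so the credentials never end up in version control.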