I'll create a new file in the auth directory to handle token validation. This endpoint will verify if a JWT token is valid and not expired.
from datetime import datetime, timezone
from typing import Optional
import os

from fastapi import APIRouter, Header, HTTPException, status
from jose import ExpiredSignatureError, JWTError, jwt

from utils.response import success_response, error_response
from .jwt import SECRET_KEY, ALGORITHM
router = APIRouter()
@router.get("/validate", response_description="Validate JWT token")
async def validate_token(authorization: Optional[str] = Header(None)):
"""
Validates if the provided JWT token is valid and not expired.
Args:
authorization: The Authorization header containing the JWT token
Returns:
JSON response with status 1 if token is valid, 0 if invalid or expired
"""
if not authorization:
return error_response(
message="Authorization header is missing",
status_code=status.HTTP_401_UNAUTHORIZED
)
try:
# Extract token from Authorization header
scheme, token = authorization.split()
if scheme.lower() != "bearer":
return error_response(
message="Invalid authentication scheme. Use Bearer",
status_code=status.HTTP_401_UNAUTHORIZED
)
# Decode and validate token
payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
# Check if token has expired
exp = payload.get("exp")
if exp is None:
return error_response(
message="Token is missing expiration",
status_code=status.HTTP_401_UNAUTHORIZED
)
# Convert exp to datetime for comparison
exp_datetime = datetime.fromtimestamp(exp)
current_datetime = datetime.utcnow()
# Check if token has expired
if current_datetime > exp_datetime:
return error_response(
message="Token has expired",
status_code=status.HTTP_401_UNAUTHORIZED
)
# Token is valid
email = payload.get("sub")
# Calculate remaining time
remaining_seconds = int((exp_datetime - current_datetime).total_seconds())
remaining_minutes = remaining_seconds // 60
return success_response(
data={
"email": email,
"expires_at": exp_datetime.isoformat(),
"remaining_minutes": remaining_minutes,
"remaining_seconds": remaining_seconds
},
message="Token is valid"
)
except JWTError:
return error_response(
message="Invalid token",
status_code=status.HTTP_401_UNAUTHORIZED
)
except ValueError:
return error_response(
message="Invalid authorization header format",
status_code=status.HTTP_401_UNAUTHORIZED
)
except Exception as e:
return error_response(
message=f"Error validating token: {str(e)}",
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR
)Add the following import and router inclusion to your main.py file:
from auth.token_validation import router as token_validation_router
# Include the token validation router
app.include_router(token_validation_router, prefix="/auth/token", tags=["Authentication"])Endpoint: GET /auth/token/validate
Headers:
Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiJ1c2VyQGV4YW1wbGUuY29tIiwiZXhwIjoxNzE2MzgzMDQ1fQ.8B4...Response (Valid Token):
{
"status": 1,
"data": {
"email": "user@example.com",
"expires_at": "2025-05-22T10:30:45.000Z",
"remaining_minutes": 120,
"remaining_seconds": 7200
},
"message": "Token is valid",
"status_code": 200
}Endpoint: GET /auth/token/validate
Headers:
Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiJ1c2VyQGV4YW1wbGUuY29tIiwiZXhwIjoxNjE2MzgzMDQ1fQ.8B4...Response (Expired Token):
{
"status": 0,
"data": {},
"message": "Token has expired",
"status_code": 401
}Endpoint: GET /auth/token/validate
Headers:
Authorization: Bearer invalid.token.hereResponse (Invalid Token):
{
"status": 0,
"data": {},
"message": "Invalid token",
"status_code": 401
}Endpoint: GET /auth/token/validate
Response (Missing Header):
{
"status": 0,
"data": {},
"message": "Authorization header is missing",
"status_code": 401
}Token Validation:
Response Information:
For valid tokens:
For invalid tokens:
Error Handling:
This endpoint provides a simple way for clients to validate their JWT tokens without having to make a request to a protected endpoint. It's useful for implementing features like automatic token refresh in frontend applications.
Now I have merged my admin panel backend into my chatbot backend, so my folder structure is:
Directory: G:\chatbot-backend
Mode LastWriteTime Length Name
d----- 5/21/2025 3:51 PM app
d----- 5/8/2025 3:21 PM data
d----- 4/16/2025 11:21 AM env
-a---- 5/13/2025 2:27 PM 685 .env
-a---- 5/14/2025 7:09 PM 453 .gitignore
-a---- 5/14/2025 7:09 PM 8120 createEmbedding.py
-a---- 5/21/2025 3:21 PM 420 ecosystem.config.js
-a---- 4/17/2025 2:33 PM 6362 README.md
-a---- 5/21/2025 4:07 PM 5046 requirements.txt
Directory: G:\chatbot-backend\app
Mode LastWriteTime Length Name
d----- 5/21/2025 3:48 PM auth
d----- 5/21/2025 3:50 PM chatbot
d----- 5/21/2025 3:51 PM config
d----- 5/21/2025 3:50 PM credentials
d----- 5/21/2025 3:50 PM faq
d----- 5/21/2025 3:50 PM leads
d----- 5/21/2025 3:50 PM utils
d----- 5/21/2025 4:02 PM pycache
-a---- 5/14/2025 7:09 PM 51677 doctors.json
-a---- 5/14/2025 7:09 PM 2593 email_template.html
-a---- 5/14/2025 7:09 PM 9126 login.py
-a---- 5/21/2025 5:28 PM 160915 main.py
-a---- 5/14/2025 7:09 PM 2478 otp_email_template.py
-a---- 5/14/2025 7:09 PM 7099 sendEmail.py
-a---- 4/17/2025 2:33 PM 0 init.py
now i want
in my main.py file SYSTEM_PROMPT = """
You are Medi-Aid, a compassionate and knowledgeable medical doctor.
Rules:
Base answers ONLY on the database CONTEXT supplied (no web search).
Write in a natural, conversational tone like a real doctor would speak to a patient.
NEVER use headers or labels like "Follow-up Questions:", "Answer:", or "Response:".
ALWAYS use Markdown to bold important information, medication names, and key advice.
Never add disclaimers like "I'm not a doctor".
If CONTEXT lacks relevant info, say you don't have enough information rather than using general medical knowledge.
For patient questions:
• FIRST gather complete information about the patient's condition through focused follow-up questions.
• Ask questions about symptoms, duration, severity, and other relevant details.
• Only after gathering sufficient information, provide a diagnosis with the disease name.
• Then suggest primary solutions like rest or specific medications.
• ONLY THEN recommend appropriate doctors based on the patient's specific condition.
• NEVER suggest doctors or referrals prematurely before understanding the full condition.
ALWAYS complete your sentences and thoughts fully. NEVER cut off mid-sentence.
Pay close attention to the patient's emotional state and respond with appropriate empathy.
Keep your responses concise and to the point (2-3 sentences maximum).
For treatment recommendations, provide only the most important 2-3 options.
When providing medication recommendations, ALWAYS bold the medication names and include both generic and brand names when possible.
When medical advice is needed, recommend specific doctors from our team who can help with the condition ONLY AFTER providing a diagnosis and primary solutions.
ALWAYS use the full name of doctors and put their names in bold, for example: Dr. Anjali Patel instead of just Dr. Patel.
""" is not static — I want it to come from the database, as you know.
in chatbot dir i have a model class ChatbotConfig(BaseModel):
system_prompt: str = Field(..., description="System prompt for the chatbot")
min_words_response: int = Field(..., description="Minimum number of words in response", ge=1)
max_words_response: int = Field(..., description="Maximum number of words in response", ge=1)
maximum_response_count: int = Field(..., description="Maximum number of responses", ge=1)
class Config:
schema_extra = {
"example": {
"system_prompt": "You are a helpful assistant for an e-commerce store.",
"min_words_response": 20,
"max_words_response": 250,
"maximum_response_count": 3
}
}
I want the system_prompt used in that place, and whenever it changes in the DB, the variable in main.py should update instantly. Build solid logic for this — you can use Redis or whatever you prefer — so that when the system_prompt is updated, main.py knows the DB changed and refetches it. Also store "min_words_response": 20 and
"max_words_response": 250 in the DB, create new variables holding them, and append a line to SYSTEM_PROMPT saying my chatbot's minimum response is {min_words_response} words and maximum is {max_words_response} words. Just like the system_prompt, if "min_words_response" or
"max_words_response" are updated in the DB, they should update instantly in the file. Please give me the code.
I'll help you modify your main.py file to fetch the system prompt and other configuration parameters from the database instead of hardcoding them.
Looking at your code, you need to:
Fetch system_prompt, min_words_response, and max_words_response from the database. Here's how we can implement this using Redis for caching:
from fastapi import Depends
from motor.motor_asyncio import AsyncIOMotorClient
import redis
import json
from datetime import datetime
# Redis setup for caching.
# decode_responses=True makes get() return str instead of bytes, so cached
# values can be used without an explicit .decode().
# NOTE(review): redis.Redis() does not open a connection here — failures
# surface lazily on the first command (set/get), which is why a down Redis
# can crash much later, e.g. during the startup cache warm-up.
redis_client = redis.Redis(
    host=os.getenv("REDIS_HOST", "localhost"),
    port=int(os.getenv("REDIS_PORT", 6379)),
    db=0,
    decode_responses=True
)
# MongoDB connection (async driver; all collection calls must be awaited).
mongo_client = AsyncIOMotorClient(os.getenv("MONGODB_URI", "mongodb://localhost:27017"))
db = mongo_client[os.getenv("MONGODB_DB", "chatbot_db")]
# Collection holding ChatbotConfig documents (system_prompt, word limits).
chatbot_config_collection = db["chatbot_config"]
# Default system prompt in case DB fetch fails.
# Served verbatim whenever the chatbot_config collection is empty/unreachable
# or the Redis cache cannot provide a value.
DEFAULT_SYSTEM_PROMPT = """
You are Medi-Aid, a compassionate and knowledgeable medical doctor.
Rules:
1. Base answers ONLY on the database CONTEXT supplied (no web search).
2. Write in a natural, conversational tone like a real doctor would speak to a patient.
3. NEVER use headers or labels like "Follow-up Questions:", "Answer:", or "Response:".
4. ALWAYS use Markdown to **bold** important information, medication names, and key advice.
5. Never add disclaimers like "I'm not a doctor".
6. If CONTEXT lacks relevant info, say you don't have enough information rather than using general medical knowledge.
7. For patient questions:
• FIRST gather complete information about the patient's condition through focused follow-up questions.
• Ask questions about symptoms, duration, severity, and other relevant details.
• Only after gathering sufficient information, provide a diagnosis with the disease name.
• Then suggest primary solutions like rest or specific medications.
• ONLY THEN recommend appropriate doctors based on the patient's specific condition.
• NEVER suggest doctors or referrals prematurely before understanding the full condition.
8. ALWAYS complete your sentences and thoughts fully. NEVER cut off mid-sentence.
9. Pay close attention to the patient's emotional state and respond with appropriate empathy.
10. Keep your responses concise and to the point (2-3 sentences maximum).
11. For treatment recommendations, provide only the most important 2-3 options.
12. When providing medication recommendations, ALWAYS bold the medication names and include both generic and brand names when possible.
13. When medical advice is needed, recommend specific doctors from our team who can help with the condition ONLY AFTER providing a diagnosis and primary solutions.
14. ALWAYS use the full name of doctors and put their names in bold, for example: **Dr. Anjali Patel** instead of just Dr. Patel.
"""
# Default config values: fallback word-count bounds appended to the prompt
# by get_current_system_prompt() when the DB/cache is unavailable.
DEFAULT_MIN_WORDS_RESPONSE = 20
DEFAULT_MAX_WORDS_RESPONSE = 250
# Cache keys under the "chatbot:" Redis namespace. CONFIG_LAST_UPDATED_KEY
# holds an ISO-8601 timestamp that check_config_update() compares (as a
# string) against the DB's updated_at to detect a stale cache.
SYSTEM_PROMPT_CACHE_KEY = "chatbot:system_prompt"
MIN_WORDS_CACHE_KEY = "chatbot:min_words_response"
MAX_WORDS_CACHE_KEY = "chatbot:max_words_response"
CONFIG_LAST_UPDATED_KEY = "chatbot:config_last_updated"
async def get_chatbot_config_from_db():
    """Fetch the latest chatbot configuration from the database.

    Returns:
        A dict with keys system_prompt, min_words_response,
        max_words_response and last_updated. Falls back to the
        module-level defaults when the collection is empty or the
        query fails.
    """
    try:
        # Newest document wins: ObjectIds are roughly insertion-ordered,
        # so sorting on _id descending yields the most recent config.
        config = await chatbot_config_collection.find_one({}, sort=[("_id", -1)])
        if config:
            # Prefer the document's own updated_at so the cached timestamp
            # matches what check_config_update() later compares against.
            # Using local-time datetime.now() here would skew that string
            # comparison on servers whose local timezone is not UTC and
            # silently suppress cache refreshes.
            raw_updated = config.get("updated_at")
            if isinstance(raw_updated, datetime):
                last_updated = raw_updated.isoformat()
            elif raw_updated:
                last_updated = str(raw_updated)
            else:
                last_updated = datetime.now().isoformat()
            return {
                "system_prompt": config.get("system_prompt", DEFAULT_SYSTEM_PROMPT),
                "min_words_response": config.get("min_words_response", DEFAULT_MIN_WORDS_RESPONSE),
                "max_words_response": config.get("max_words_response", DEFAULT_MAX_WORDS_RESPONSE),
                "last_updated": last_updated
            }
    except Exception as e:
        logger.error(f"Error fetching chatbot config from database: {e}")
    # No config document yet, or the DB fetch failed: serve defaults.
    return {
        "system_prompt": DEFAULT_SYSTEM_PROMPT,
        "min_words_response": DEFAULT_MIN_WORDS_RESPONSE,
        "max_words_response": DEFAULT_MAX_WORDS_RESPONSE,
        "last_updated": datetime.now().isoformat()
    }
async def update_cache_from_db():
    """Refresh the Redis cache from the database (best-effort).

    Returns the freshly fetched config dict even when Redis is
    unavailable, so callers can still use the values directly.
    """
    config = await get_chatbot_config_from_db()
    try:
        # Update Redis cache. This is best-effort: the startup traceback in
        # this project showed an unguarded set() here aborting application
        # startup when Redis was down (ConnectionError 10061).
        redis_client.set(SYSTEM_PROMPT_CACHE_KEY, config["system_prompt"])
        redis_client.set(MIN_WORDS_CACHE_KEY, str(config["min_words_response"]))
        redis_client.set(MAX_WORDS_CACHE_KEY, str(config["max_words_response"]))
        redis_client.set(CONFIG_LAST_UPDATED_KEY, config["last_updated"])
        logger.info(f"Updated chatbot config cache from database at {config['last_updated']}")
    except Exception as e:
        # A cache failure must never take the app down; the getters fall
        # back to the module-level defaults.
        logger.error(f"Could not update Redis cache, continuing without it: {e}")
    return config
def get_system_prompt():
    """Return the cached system prompt, or the default on a cache miss."""
    try:
        prompt = redis_client.get(SYSTEM_PROMPT_CACHE_KEY)
        if not prompt:
            # Cache miss: kick off a background refresh and serve the
            # default for this call.
            asyncio.create_task(update_cache_from_db())
            return DEFAULT_SYSTEM_PROMPT
        return prompt
    except Exception as e:
        logger.error(f"Error getting system prompt from cache: {e}")
        return DEFAULT_SYSTEM_PROMPT
def get_min_words_response():
    """Return the cached minimum word count, or the default on a miss."""
    try:
        raw = redis_client.get(MIN_WORDS_CACHE_KEY)
        if not raw:
            # Cache miss: refresh in the background, serve the default now.
            asyncio.create_task(update_cache_from_db())
            return DEFAULT_MIN_WORDS_RESPONSE
        return int(raw)
    except Exception as e:
        logger.error(f"Error getting min words response from cache: {e}")
        return DEFAULT_MIN_WORDS_RESPONSE
def get_max_words_response():
    """Return the cached maximum word count, or the default on a miss."""
    try:
        raw = redis_client.get(MAX_WORDS_CACHE_KEY)
        if not raw:
            # Cache miss: refresh in the background, serve the default now.
            asyncio.create_task(update_cache_from_db())
            return DEFAULT_MAX_WORDS_RESPONSE
        return int(raw)
    except Exception as e:
        logger.error(f"Error getting max words response from cache: {e}")
        return DEFAULT_MAX_WORDS_RESPONSE
# Function to check if config needs updating
async def check_config_update():
    """Check if the config in the DB has been updated; refresh cache if so.

    Compares the newest document's updated_at against the ISO-8601
    timestamp cached in Redis and triggers update_cache_from_db()
    when the DB copy is newer. Errors are logged, never raised.
    """
    try:
        # Latest config document (see get_chatbot_config_from_db).
        db_config = await chatbot_config_collection.find_one({}, sort=[("_id", -1)])
        if not db_config:
            return
        cached_timestamp = redis_client.get(CONFIG_LAST_UPDATED_KEY)
        updated_at = db_config.get("updated_at")
        if updated_at is None:
            # Document carries no update marker; nothing to compare.
            return
        # updated_at may be a datetime (BSON date) or an ISO-8601 string if
        # it was written as text. Calling .isoformat() unconditionally would
        # raise AttributeError on strings and silently disable live updates,
        # so normalize to a string before the lexicographic comparison.
        db_timestamp = updated_at.isoformat() if isinstance(updated_at, datetime) else str(updated_at)
        if not cached_timestamp or db_timestamp > cached_timestamp:
            await update_cache_from_db()
            logger.info("Chatbot config cache updated due to newer version in database")
    except Exception as e:
        logger.error(f"Error checking for config updates: {e}")
# Initialize cache on startup
@app.on_event("startup")
async def initialize_cache():
    """Initialize the cache with values from the database on startup.

    Failures (e.g. Redis or MongoDB unreachable) are logged but never
    propagated: a cache problem must not abort application startup —
    this handler previously crashed the app with ConnectionError 10061
    when no Redis server was running, and the config getters already
    fall back to the module-level defaults.
    """
    try:
        await update_cache_from_db()
        logger.info("Initialized chatbot config cache from database")
    except Exception as e:
        logger.error(f"Failed to initialize chatbot config cache: {e}")
# Set up a background task to periodically check for config updates
@app.on_event("startup")
async def setup_config_update_task():
    """Set up a background task to periodically check for config updates."""

    async def _poll_forever():
        # check_config_update() catches its own exceptions, so a single
        # failed poll cannot kill this loop.
        while True:
            await check_config_update()
            await asyncio.sleep(30)  # poll interval: 30 seconds

    asyncio.create_task(_poll_forever())
    logger.info("Started background task for config updates")
# Replace the hardcoded SYSTEM_PROMPT with a function call
def get_current_system_prompt():
    """Build the current system prompt with word-count limits appended."""
    prompt = get_system_prompt()
    lo = get_min_words_response()
    hi = get_max_words_response()
    # Avoid appending a duplicate rule when the DB-stored prompt already
    # carries its own word-count instruction.
    if "Keep your responses between" in prompt:
        return prompt
    return prompt + f"\n15. Keep your responses between {lo} and {hi} words."
# Replace the hardcoded SYSTEM_PROMPT variable with the dynamic function
# SYSTEM_PROMPT = get_current_system_prompt() # Don't use a variable, call the function directly when neededNow, you need to modify your existing code to use these functions instead of the hardcoded SYSTEM_PROMPT. Here's how to update the relevant parts:
main.py
get_current_system_prompt() function dynamically builds the system prompt with the word count limits appended.This implementation ensures that:
Make sure to add Redis to your project dependencies and configure the Redis connection parameters in your environment variables.
INFO: Started server process [2868]
INFO: Waiting for application startup.
2025-05-21 18:00:58,587 INFO Loaded FAISS index with 53355 documents
ERROR: Traceback (most recent call last):
File "G:\chatbot-backend\env\Lib\site-packages\redis\connection.py", line 385, in connect_check_health
sock = self.retry.call_with_retry(
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "G:\chatbot-backend\env\Lib\site-packages\redis\retry.py", line 87, in call_with_retry
return do()
^^^^
File "G:\chatbot-backend\env\Lib\site-packages\redis\connection.py", line 386, in <lambda>
lambda: self._connect(), lambda error: self.disconnect(error)
^^^^^^^^^^^^^^^
File "G:\chatbot-backend\env\Lib\site-packages\redis\connection.py", line 797, in _connect
raise err
File "G:\chatbot-backend\env\Lib\site-packages\redis\connection.py", line 781, in _connect
sock.connect(socket_address)
ConnectionRefusedError: [WinError 10061] No connection could be made because the target machine actively refused it
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "G:\chatbot-backend\env\Lib\site-packages\starlette\routing.py", line 692, in lifespan
async with self.lifespan_context(app) as maybe_state:
^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Python312\Lib\contextlib.py", line 210, in aenter
return await anext(self.gen)
^^^^^^^^^^^^^^^^^^^^^
File "G:\chatbot-backend\env\Lib\site-packages\fastapi\routing.py", line 133, in merged_lifespan
async with original_context(app) as maybe_original_state:
^^^^^^^^^^^^^^^^^^^^^
File "C:\Python312\Lib\contextlib.py", line 210, in aenter
return await anext(self.gen)
^^^^^^^^^^^^^^^^^^^^^
File "G:\chatbot-backend\env\Lib\site-packages\fastapi\routing.py", line 133, in merged_lifespan
async with original_context(app) as maybe_original_state:
^^^^^^^^^^^^^^^^^^^^^
File "C:\Python312\Lib\contextlib.py", line 210, in aenter
return await anext(self.gen)
^^^^^^^^^^^^^^^^^^^^^
File "G:\chatbot-backend\env\Lib\site-packages\fastapi\routing.py", line 133, in merged_lifespan
async with original_context(app) as maybe_original_state:
^^^^^^^^^^^^^^^^^^^^^
File "C:\Python312\Lib\contextlib.py", line 210, in aenter
return await anext(self.gen)
^^^^^^^^^^^^^^^^^^^^^
File "G:\chatbot-backend\env\Lib\site-packages\fastapi\routing.py", line 133, in merged_lifespan
async with original_context(app) as maybe_original_state:
^^^^^^^^^^^^^^^^^^^^^
File "C:\Python312\Lib\contextlib.py", line 210, in aenter
return await anext(self.gen)
^^^^^^^^^^^^^^^^^^^^^
File "G:\chatbot-backend\env\Lib\site-packages\fastapi\routing.py", line 133, in merged_lifespan
async with original_context(app) as maybe_original_state:
^^^^^^^^^^^^^^^^^^^^^
File "C:\Python312\Lib\contextlib.py", line 210, in aenter
return await anext(self.gen)
^^^^^^^^^^^^^^^^^^^^^
File "G:\chatbot-backend\env\Lib\site-packages\fastapi\routing.py", line 133, in merged_lifespan
async with original_context(app) as maybe_original_state:
^^^^^^^^^^^^^^^^^^^^^
File "C:\Python312\Lib\contextlib.py", line 210, in aenter
return await anext(self.gen)
^^^^^^^^^^^^^^^^^^^^^
File "G:\chatbot-backend\env\Lib\site-packages\fastapi\routing.py", line 133, in merged_lifespan
async with original_context(app) as maybe_original_state:
^^^^^^^^^^^^^^^^^^^^^
File "C:\Python312\Lib\contextlib.py", line 210, in aenter
return await anext(self.gen)
^^^^^^^^^^^^^^^^^^^^^
File "G:\chatbot-backend\env\Lib\site-packages\fastapi\routing.py", line 133, in merged_lifespan
async with original_context(app) as maybe_original_state:
^^^^^^^^^^^^^^^^^^^^^
File "G:\chatbot-backend\env\Lib\site-packages\starlette\routing.py", line 569, in aenter
await self._router.startup()
File "G:\chatbot-backend\env\Lib\site-packages\starlette\routing.py", line 669, in startup
await handler()
File "G:\chatbot-backend\app\main.py", line 265, in initialize_cache
await update_cache_from_db()
File "G:\chatbot-backend\app\main.py", line 123, in update_cache_from_db
redis_client.set(SYSTEM_PROMPT_CACHE_KEY, config["system_prompt"])
File "G:\chatbot-backend\env\Lib\site-packages\redis\commands\core.py", line 2305, in set
return self.execute_command("SET", *pieces, **options)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "G:\chatbot-backend\env\Lib\site-packages\redis\client.py", line 622, in execute_command
return self._execute_command(*args, **options)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "G:\chatbot-backend\env\Lib\site-packages\redis\client.py", line 628, in _execute_command
conn = self.connection or pool.get_connection()
^^^^^^^^^^^^^^^^^^^^^
File "G:\chatbot-backend\env\Lib\site-packages\redis\utils.py", line 188, in wrapper
return func(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^
File "G:\chatbot-backend\env\Lib\site-packages\redis\connection.py", line 1522, in get_connection
connection.connect()
File "G:\chatbot-backend\env\Lib\site-packages\redis\connection.py", line 379, in connect
self.connect_check_health(check_health=True)
File "G:\chatbot-backend\env\Lib\site-packages\redis\connection.py", line 391, in connect_check_health
raise ConnectionError(self._error_message(e))
redis.exceptions.ConnectionError: Error 10061 connecting to localhost:6379. No connection could be made because the target machine actively refused it.
ERROR: Application startup failed. Exiting.
I get this error — explain why it occurs and fix it.
The error you're seeing is because the application is trying to connect to Redis at localhost:6379, but there's no Redis server running at that address. Let's modify the implementation to make Redis optional and provide a fallback mechanism that works without Redis.
Here's an updated implementation that will work even if Redis is not available:
main.py
Now, you need to modify the generate_response function to use the dynamic system prompt instead of the hardcoded one. Find this line in your code:
# Use custom prompt if provided, otherwise use default
system_prompt = custom_prompt if custom_prompt else SYSTEM_PROMPTAnd replace it with:
# Use custom prompt if provided, otherwise use dynamic system prompt
system_prompt = custom_prompt if custom_prompt else get_current_system_prompt()The error occurred because:
This new implementation makes Redis optional, so the application will start successfully even if Redis is not available.