// queue.js
const Queue = require('bull');
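
// Both queues share one Redis connection. Port 6380 and db 1 are specific to
// this setup; adjust them to match your own Redis instance (a stock Redis
// install listens on port 6379 and defaults to db 0).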
const emailQueue = new Queue('email processing', {
  redis: { port: 6380, host: 'localhost', db: 1 }
});

const aiQueue = new Queue('ai processing', {
  redis: { port: 6380, host: 'localhost', db: 1 }
});
// Define job processors
// Bull routes named jobs to a processor registered under the same name,
// so the name here must match the one used in queueEmail() below.
emailQueue.process('send-email', async (job) => {
  const { to, subject, body } = job.data;
  console.log(`Processing email job: ${to}`);

  // Email sending logic: sendEmail() is assumed to be defined or imported
  // elsewhere (e.g. a nodemailer wrapper); it is not part of this file.
  await sendEmail(to, subject, body);

  return { status: 'sent', to };
});
aiQueue.process('ai-request', async (job) => {
  const { prompt, model } = job.data;
  console.log(`Processing AI request: ${prompt.substring(0, 50)}...`);

  // Call the local AI service on Ollama's default port (assumes Node 18+,
  // which provides a global fetch)
  const response = await fetch('http://localhost:11434/api/generate', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ model, prompt, stream: false })
  });

  // Throwing on a bad status lets Bull retry the job according to the
  // `attempts` option set in queueAIRequest() below
  if (!response.ok) {
    throw new Error(`AI service responded with status ${response.status}`);
  }

  const result = await response.json();
  return result.response;
});
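
// Optional monitoring hooks: an illustrative sketch. The 'completed' and
// 'failed' events are part of Bull's queue API; the logging below is just
// one example of what you might do with them.
emailQueue.on('failed', (job, err) => {
  console.error(`Email job ${job.id} failed: ${err.message}`);
});

aiQueue.on('completed', (job, result) => {
  console.log(`AI job ${job.id} completed, ${String(result).length} characters returned`);
});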
// Add jobs to queue
async function queueEmail(to, subject, body) {
  const job = await emailQueue.add('send-email', { to, subject, body });
  return job.id;
}
async function queueAIRequest(prompt, model = 'llama3.2') {
  const job = await aiQueue.add('ai-request', { prompt, model }, {
    delay: 1000,  // wait 1 second before the job becomes available for processing
    attempts: 3,  // try the job at most 3 times in total if it keeps failing
  });
  return job.id;
}
module.exports = { queueEmail, queueAIRequest };
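
// Example usage from another module (illustrative only; assumes an async
// context and placeholder recipient/prompt values):
//
//   const { queueEmail, queueAIRequest } = require('./queue');
//
//   const emailJobId = await queueEmail('user@example.com', 'Welcome', 'Thanks for signing up!');
//   const aiJobId = await queueAIRequest('Summarize the benefits of background job queues.');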