import { generateObject, embed } from "ai";
import { ollama } from "ollama-ai-provider";
import { pipe } from "@screenpipe/js";
import { z } from "zod";
// define your output schema
// runtime-validated shape of the model's structured output:
// a short title, a prose description, hashtag-style labels, and
// optional embedded-media HTML snippets
const workLog = z.object({
  title: z.string(),
  description: z.string(),
  tags: z.string().array(),
  mediaLinks: z.string().array().optional(),
});

// a persisted log entry = the model output widened with the time window
// it covers (formatted timestamps, added after generation)
type WorkLog = {
  startTime: string;
  endTime: string;
} & z.infer<typeof workLog>;
async function generateWorkLog(screenData, model, startTime, endTime) {
// configure the prompt with context and instructions
const prompt = `You are analyzing screen recording data from Screenpipe.
Based on the following screen data, generate a concise work activity log entry.
Screen data: ${JSON.stringify(screenData)}
Return a JSON object with:
{
"title": "Brief title describing the main activity",
"description": "Clear description of what was accomplished",
"tags": ["#relevant-tool", "#activity-type"],
"mediaLinks": ["<video src=\"file:///path/to/video.mp4\" controls></video>"]
}`;
// use ollama provider with vercel ai sdk
const provider = ollama(model);
const response = await generateObject({
model: provider,
messages: [{ role: "user", content: prompt }],
schema: workLog, // zod schema for type safety
});
return {
...response.object,
startTime: formatDate(startTime),
endTime: formatDate(endTime),
};
}
// api endpoint implementation
async function handleRequest() {
// get last hour of activity
const now = new Date();
const oneHourAgo = new Date(now.getTime() - 3600000);
// query context from screenpipe
const screenData = await pipe.queryScreenpipe({
startTime: oneHourAgo.toISOString(),
endTime: now.toISOString(),
limit: 100,
contentType: "all",
});
// generate structured log with ai
const logEntry = await generateWorkLog(
screenData.data,
"llama3.2", // ollama model name
oneHourAgo,
now
);
return { message: "log generated successfully", logEntry };
}