Skip to main content
bun add @screenpipe/js
For browser environments, use @screenpipe/browser instead.
import { ScreenpipeClient } from "@screenpipe/js";

const client = new ScreenpipeClient();

const results = await client.search({
  q: "meeting notes",
  contentType: "all",        // "all" | "vision" | "audio" | "input"
  limit: 10,
  startTime: "2026-01-10T12:00:00Z",
  endTime: "2026-01-10T13:00:00Z",
  appName: "chrome",         // optional
  windowName: "meeting",     // optional
  speakerName: "John",       // optional
  browserUrl: "github.com",  // optional
  includeFrames: true,       // optional, include screenshots
});

for (const item of results.data) {
  if (item.type === "OCR") {
    console.log(item.content.text);
    console.log(item.content.appName);
  } else if (item.type === "Audio") {
    console.log(item.content.transcription);
    console.log(item.content.speakerId);
  }
}

Streaming (real-time)

// stream transcriptions: consume live audio transcription chunks as they arrive
for await (const transcription of client.streamTranscriptions()) {
  const [firstChoice] = transcription.choices;
  console.log(firstChoice.text);
  console.log(transcription.metadata); // { timestamp, device, isInput }
}

// stream vision events
// NOTE(review): the boolean arg presumably toggles including frame images — confirm against SDK docs
for await (const visionEvent of client.streamVision(true)) {
  const { text, appName } = visionEvent.data;
  console.log(text);
  console.log(appName);
}

Pipes

Pipes are scheduled AI agents defined as markdown files (pipe.md) that run on your screenpipe data. See the pipes documentation for details.