feat: track token usage and estimated cost per filter run in filter_runs.json

This commit is contained in:
2026-03-06 16:22:14 +00:00
parent 3575f06018
commit 37b95b6b85
2 changed files with 35 additions and 4 deletions

View File

@@ -146,6 +146,14 @@ export async function checkBatch(batchId, apiKey) {
/**
* Download and parse batch results. Returns array of { jobId, score, reason, error }
*/
// Sonnet batch API pricing (per million tokens)
const PRICING = {
input: 1.50,
output: 7.50,
cache_write: 1.875,
cache_read: 0.15,
};
// Fetches the JSONL results of a completed batch, parses each line,
// accumulates token usage across all entries, and estimates total cost.
// NOTE(review): this is a diff-view excerpt — the `@@` hunk markers below
// mark omitted lines, so the code between hunks is not contiguous.
export async function downloadResults(batchId, apiKey, idMap = {}) {
const res = await fetch(`${BATCH_API}/${batchId}/results`, {
headers: apiHeaders(apiKey),
@@ -156,13 +164,22 @@ export async function downloadResults(batchId, apiKey, idMap = {}) {
// Response body is JSONL: one result entry per non-empty line.
const text = await res.text();
const lines = text.trim().split('\n').filter(Boolean);
const results = [];
// Running totals over every entry; individual fields default to 0 below
// when absent from an entry's usage record.
const usage = { input_tokens: 0, output_tokens: 0, cache_creation_input_tokens: 0, cache_read_input_tokens: 0 };
for (const line of lines) {
try {
const entry = JSON.parse(line);
// Resolve truncated custom_id back to original job ID
const jobId = idMap[entry.custom_id] || entry.custom_id;
// Accumulate token usage (usage may be absent on errored entries)
const u = entry.result?.message?.usage;
if (u) {
usage.input_tokens += u.input_tokens || 0;
usage.output_tokens += u.output_tokens || 0;
usage.cache_creation_input_tokens += u.cache_creation_input_tokens || 0;
usage.cache_read_input_tokens += u.cache_read_input_tokens || 0;
}
if (entry.result?.type === 'succeeded') {
const content = entry.result.message?.content?.[0]?.text || '';
try {
@@ -180,5 +197,13 @@ export async function downloadResults(batchId, apiKey, idMap = {}) {
}
}
// NOTE(review): the diff rendering shows both the old `return results;` and
// the new object return below. The bare return is presumably the deleted
// line — confirm it is removed in the applied change, otherwise the cost
// computation below is unreachable.
return results;
// Calculate estimated cost
// PRICING rates are per million tokens, hence the final division.
const cost = (
(usage.input_tokens * PRICING.input) +
(usage.output_tokens * PRICING.output) +
(usage.cache_creation_input_tokens * PRICING.cache_write) +
(usage.cache_read_input_tokens * PRICING.cache_read)
) / 1_000_000;
// Round to cents for storage/display.
return { results, usage, cost: Math.round(cost * 100) / 100 };
}