Adds cost calculations per task and workflow.
darielnoel committed Aug 2, 2024
1 parent d7f53ef commit 3026d74
Showing 6 changed files with 272 additions and 11 deletions.
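
The cost numbers introduced in this commit come from a simple per-million-token formula: cost = (tokens / 1,000,000) * pricePerMillionTokens. A quick worked example, using the gpt-4o-mini rates from the pricing table added in src/utils/llmCostCalculator.js and hypothetical token counts:

// Illustrative only; token counts are hypothetical, prices come from the pricing table below.
const inputTokens = 500000;
const outputTokens = 200000;
const costInputTokens = (inputTokens / 1000000) * 0.150;   // 0.075
const costOutputTokens = (outputTokens / 1000000) * 0.600; // 0.12
const totalCost = costInputTokens + costOutputTokens;      // 0.195
console.log({ costInputTokens, costOutputTokens, totalCost });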
10 changes: 8 additions & 2 deletions src/stores/taskStore.js
@@ -2,6 +2,7 @@ import { TASK_STATUS_enum, AGENT_STATUS_enum} from "../utils/enums";
import { getTaskTitleForLogs} from '../utils/tasks';
import { logger } from "../utils/logger";
import { PrettyError } from "../utils/errors";
import { calculateTaskCost } from "../utils/llmCostCalculator";

export const useTaskStore = (set, get) => ({
// state
@@ -10,7 +11,7 @@ export const useTaskStore = (set, get) => ({
getTaskStats(task) {
const endTime = Date.now();
const lastDoingLog = get().workflowLogs.slice().reverse().find(log =>
- log.task.id === task.id && log.logType === "TaskStatusUpdate" && log.task.status === TASK_STATUS_enum.DOING
+ log.task && log.task.id === task.id && log.logType === "TaskStatusUpdate" && log.task.status === TASK_STATUS_enum.DOING
);
const startTime = lastDoingLog ? lastDoingLog.timestamp : endTime; // Use endTime if no DOING log is found
const duration = (endTime - startTime) / 1000; // Calculate duration in seconds
@@ -25,7 +26,7 @@ export const useTaskStore = (set, get) => ({
let iterationCount = 0;

get().workflowLogs.forEach(log => {
- if (log.task.id === task.id && log.timestamp >= startTime && log.logType === 'AgentStatusUpdate') {
+ if (log.task && log.task.id === task.id && log.timestamp >= startTime && log.logType === 'AgentStatusUpdate') {
if (log.agentStatus === AGENT_STATUS_enum.THINKING_END) {
llmUsageStats.inputTokens += log.metadata.output.llmUsageStats.inputTokens;
llmUsageStats.outputTokens += log.metadata.output.llmUsageStats.outputTokens;
@@ -56,13 +57,18 @@ export const useTaskStore = (set, get) => ({
const stats = get().getTaskStats(task, get);
task.status = TASK_STATUS_enum.DONE;
task.result = result;

const modelCode = agent.llmConfig.model; // Assuming this is where the model code is stored
// Calculate costs directly using stats
const costDetails = calculateTaskCost(modelCode, stats.llmUsageStats);

const taskLog = get().prepareNewLog({
agent,
task,
logDescription: `Task completed: ${getTaskTitleForLogs(task)}, Duration: ${stats.duration} seconds`,
metadata: {
...stats,
costDetails,
result
},
logType: 'TaskStatusUpdate'
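
For reference, the costDetails object attached to the task log metadata above is the return value of calculateTaskCost (defined in src/utils/llmCostCalculator.js later in this commit). A minimal sketch with hypothetical usage numbers; an unknown model code yields -1 for every cost field:

// Hypothetical usage stats for a single task running on gpt-4o-mini
const costDetails = calculateTaskCost('gpt-4o-mini', {
  inputTokens: 120000,
  outputTokens: 45000,
  callsCount: 3,
  callsErrorCount: 0,
  parsingErrors: 0
});
// => { costInputTokens: 0.018, costOutputTokens: 0.027, totalCost: 0.045 }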
91 changes: 86 additions & 5 deletions src/stores/teamStore.js
@@ -13,9 +13,10 @@ import {create} from 'zustand';
import { devtools, subscribeWithSelector } from 'zustand/middleware';
import { useAgentStore } from './agentStore';
import { useTaskStore } from './taskStore';
- import { TASK_STATUS_enum, WORKFLOW_STATUS_enum} from '../utils/enums';
+ import { TASK_STATUS_enum, AGENT_STATUS_enum, WORKFLOW_STATUS_enum} from '../utils/enums';
import { getTaskTitleForLogs, interpolateTaskDescription} from '../utils/tasks';
import {logger, setLogLevel} from '../utils/logger';
import {calculateTotalWorkflowCost} from '../utils/llmCostCalculator';
import { subscribeWorkflowStatusUpdates } from '../subscribers/teamSubscriber';
import { subscribeTaskStatusUpdates } from '../subscribers/taskSubscriber';
import { setupWorkflowController } from './workflowController';
@@ -74,11 +75,33 @@ const createTeamStore = (initialState = {}) => {
startWorkflow: async (inputs) => {
// Start the first task or set all to 'TODO' initially
logger.info(`🚀 Team *${get().name}* is starting to work.`);
get().resetWorkflowStateAction();

if(inputs){
get().setInputs(inputs);
}
get().resetWorkflowStateAction();
get().setTeamWorkflowStatus(WORKFLOW_STATUS_enum.RUNNING);

// Create a log entry to mark the initiation of the workflow
const initialLog = {
task: null,
agent: null,
timestamp: Date.now(),
logDescription: `Workflow initiated for team *${get().name}*.`,
workflowStatus: WORKFLOW_STATUS_enum.RUNNING, // Using RUNNING as the initial status
metadata: {
message: "Workflow has been initialized with input settings.",
inputs: inputs // Assuming you want to log the inputs used to start the workflow
},
logType: 'WorkflowStatusUpdate'
};

// Update state with the new log
set(state => ({
...state,
workflowLogs: [...state.workflowLogs, initialLog],
teamWorkflowStatus: WORKFLOW_STATUS_enum.RUNNING
}));

const tasks = get().tasks;
if (tasks.length > 0 && tasks[0].status === TASK_STATUS_enum.TODO) {
get().updateTaskStatus(tasks[0].id, TASK_STATUS_enum.DOING);
@@ -115,6 +138,7 @@

// New function to handle finishing workflow
finishWorkflowAction: () => {
const stats = get().getWorkflowStats();
const tasks = get().tasks;
const deliverableTask = tasks.slice().reverse().find(task => task.isDeliverable);
const lastTaskResult = tasks[tasks.length - 1].result;
@@ -130,7 +154,11 @@
logDescription: `Workflow finished with result: ${deliverableTask ? deliverableTask.result : lastTaskResult}`,
workflowStatus: WORKFLOW_STATUS_enum.FINISHED,
metadata: {
- result: deliverableTask ? deliverableTask.result : lastTaskResult
+ result: deliverableTask ? deliverableTask.result : lastTaskResult,
+ ...stats,
+ teamName: get().name,
+ taskCount: tasks.length,
+ agentCount: get().agents.length
},
logType: 'WorkflowStatusUpdate'
};
@@ -262,8 +290,61 @@

return newLog;
},
- clearAll: () => set({ agents: [], tasks: [], inputs: {}, workflowLogs: [], workflowContext: '', workflowResult: null, teamWorkflowStatus: WORKFLOW_STATUS_enum.INITIAL})
+ clearAll: () => set({ agents: [], tasks: [], inputs: {}, workflowLogs: [], workflowContext: '', workflowResult: null, teamWorkflowStatus: WORKFLOW_STATUS_enum.INITIAL}),
getWorkflowStats() {
const endTime = Date.now(); // Consider the current time as the end time
const workflowLogs = get().workflowLogs;
const lastWorkflowRunningLog = workflowLogs.slice().reverse().find(log => log.logType === "WorkflowStatusUpdate" && log.workflowStatus === "RUNNING");

const startTime = lastWorkflowRunningLog ? lastWorkflowRunningLog.timestamp : Date.now();
const duration = (endTime - startTime) / 1000; // Calculate duration in seconds
let modelUsageStats = {};

let llmUsageStats = {
inputTokens: 0,
outputTokens: 0,
callsCount: 0,
callsErrorCount: 0,
parsingErrors: 0
};
let iterationCount = 0;

// Iterate over logs for usage stats, starting from the timestamp of the last 'RUNNING' workflow log
workflowLogs.forEach(log => {
if (log.logType === 'AgentStatusUpdate' && log.timestamp >= startTime) {
if (log.agentStatus === AGENT_STATUS_enum.THINKING_END) {
const modelCode = log.agent.llmConfig.model;
if (!modelUsageStats[modelCode]) {
modelUsageStats[modelCode] = {inputTokens: 0, outputTokens: 0, callsCount: 0};
}
modelUsageStats[modelCode].inputTokens += log.metadata.output.llmUsageStats.inputTokens;
modelUsageStats[modelCode].outputTokens += log.metadata.output.llmUsageStats.outputTokens;
modelUsageStats[modelCode].callsCount += 1; // Each log entry counts as a call
llmUsageStats.inputTokens += log.metadata.output.llmUsageStats.inputTokens;
llmUsageStats.outputTokens += log.metadata.output.llmUsageStats.outputTokens;
llmUsageStats.callsCount += 1;
} else if (log.agentStatus === AGENT_STATUS_enum.THINKING_ERROR) {
llmUsageStats.callsErrorCount += 1;
} else if (log.agentStatus === AGENT_STATUS_enum.ISSUES_PARSING_LLM_OUTPUT) {
llmUsageStats.parsingErrors += 1;
} else if (log.agentStatus === AGENT_STATUS_enum.ITERATION_END) {
iterationCount += 1;
}
}
});

// Calculate total costs based on the model usage stats
const costDetails = calculateTotalWorkflowCost(modelUsageStats);

return {
startTime,
endTime,
duration,
llmUsageStats,
iterationCount,
costDetails
};
}
}), "teamStore"))
);

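
Once a workflow reaches FINISHED, the stats computed by getWorkflowStats are spread into that log's metadata alongside teamName, taskCount, and agentCount. A sketch of reading them from a store instance via zustand's getState; the initial state and setup shown here are hypothetical and only illustrate the new API surface:

// Assumes a team store created elsewhere in the app with at least a name in its initial state
const store = createTeamStore({ name: 'Demo Team' });
const { duration, llmUsageStats, iterationCount, costDetails } = store.getState().getWorkflowStats();
console.log(`Duration: ${duration}s, LLM calls: ${llmUsageStats.callsCount}, total cost: $${costDetails.totalCost}`);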
2 changes: 2 additions & 0 deletions src/subscribers/taskSubscriber.js
@@ -20,8 +20,10 @@ const subscribeTaskStatusUpdates = (useStore) => {
iterationCount: newLog.metadata.iterationCount,
duration: newLog.metadata.duration,
agentName: newLog.agent.name,
agentModel: newLog.agent.llmConfig.model,
taskTitle: getTaskTitleForLogs(newLog.task),
currentTaskNumber,
costDetails: newLog.metadata.costDetails,
totalTasks
});
break;
2 changes: 1 addition & 1 deletion src/subscribers/teamSubscriber.js
@@ -42,7 +42,7 @@ const subscribeWorkflowStatusUpdates = (useStore) => {
status: 'Finished',
message: 'Workflow has successfully completed all tasks.'
});
- logPrettyWorkflowResult({...newLog.metadata});
+ logPrettyWorkflowResult({...newLog});
break;
case WORKFLOW_STATUS_enum.BLOCKED:
logPrettyWorkflowStatus({
140 changes: 140 additions & 0 deletions src/utils/llmCostCalculator.js
@@ -0,0 +1,140 @@
import {logger} from "./logger";

const modelsPricing = [
{
"modelCode": "gpt-4o-mini",
"provider": "openai",
"inputPricePerMillionTokens": 0.150,
"outputPricePerMillionTokens": 0.600,
"features": "Most cost-efficient with vision capabilities"
},
{
"modelCode": "gpt-3.5-turbo",
"provider": "openai",
"inputPricePerMillionTokens": 0.50,
"outputPricePerMillionTokens": 1.50,
"features": "Cost-effective option"
},
{
"modelCode": "gpt-3.5-turbo-0125",
"provider": "openai",
"inputPricePerMillionTokens": 0.50,
"outputPricePerMillionTokens": 1.50,
"features": "Cost-effective option"
},
{
"modelCode": "gpt-4o",
"provider": "openai",
"inputPricePerMillionTokens": 5.00,
"outputPricePerMillionTokens": 15.00,
"features": "Enhanced multimodal capabilities"
},
{
"modelCode": "gpt-4-turbo",
"provider": "openai",
"inputPricePerMillionTokens": 10.00,
"outputPricePerMillionTokens": 30.00,
"features": "High speed and power"
},
{
"modelCode": "gpt-4",
"provider": "openai",
"inputPricePerMillionTokens": 30.00,
"outputPricePerMillionTokens": 60.00,
"features": "Latest generation AI"
}
];

/**
* Calculates the approximate cost of using a specified AI model based on the number of input and output tokens.
* If the model code does not match any in the predefined list, it returns a standardized error response with costs set to -1.
*
* @param {string} modelCode - The unique code identifier for the AI model.
* @param {object} llmUsageStats - An object containing usage statistics including:
* - inputTokens: The number of input tokens processed.
* - outputTokens: The number of output tokens generated.
* - callsCount: The total number of calls made.
* - callsErrorCount: The number of failed calls.
* - parsingErrors: The number of errors encountered during parsing.
* @returns {object} An object containing:
* - costInputTokens: The calculated cost for the input tokens or -1 if the model is not found.
* - costOutputTokens: The calculated cost for the output tokens or -1 if the model is not found.
* - totalCost: The total cost combining input and output tokens or -1 if the model is not found.
*
* @example
* // Calculate costs for a known model with usage stats
* const llmUsageStats = {
* inputTokens: 500000,
* outputTokens: 200000,
* callsCount: 10,
* callsErrorCount: 1,
* parsingErrors: 0
* };
* const costDetails = calculateTaskCost('gpt-4o-mini', llmUsageStats);
* console.log(costDetails);
*/
function calculateTaskCost(modelCode, llmUsageStats) {
const model = modelsPricing.find(m => m.modelCode === modelCode);
if (!model) {
return {
costInputTokens: -1,
costOutputTokens: -1,
totalCost: -1
};
}

const inputCost = (llmUsageStats.inputTokens / 1000000) * model.inputPricePerMillionTokens;
const outputCost = (llmUsageStats.outputTokens / 1000000) * model.outputPricePerMillionTokens;
const totalCost = inputCost + outputCost;

return {
costInputTokens: parseFloat(inputCost.toFixed(4)),
costOutputTokens: parseFloat(outputCost.toFixed(4)),
totalCost: parseFloat(totalCost.toFixed(4))
};
}


function calculateTotalWorkflowCost(modelUsageStats) {
let totalInputCost = 0;
let totalOutputCost = 0;
let totalCost = 0;
let totalInputTokens = 0;
let totalOutputTokens = 0;
let allPricesAvailable = true;

Object.keys(modelUsageStats).forEach(modelCode => {
const stats = modelUsageStats[modelCode];
const model = modelsPricing.find(m => m.modelCode === modelCode);
if (!model) {
logger.warn(`No pricing information found for model ${modelCode}`);
allPricesAvailable = false;
return;
}
const inputCost = (stats.inputTokens / 1000000) * model.inputPricePerMillionTokens;
const outputCost = (stats.outputTokens / 1000000) * model.outputPricePerMillionTokens;
totalInputCost += inputCost;
totalOutputCost += outputCost;
totalCost += inputCost + outputCost;
totalInputTokens += stats.inputTokens;
totalOutputTokens += stats.outputTokens;
});

if (!allPricesAvailable) {
return {
costInputTokens: -1,
costOutputTokens: -1,
totalCost: -1
};
}

return {
costInputTokens: parseFloat(totalInputCost.toFixed(4)),
costOutputTokens: parseFloat(totalOutputCost.toFixed(4)),
totalCost: parseFloat(totalCost.toFixed(4))
};
}

export { calculateTaskCost, calculateTotalWorkflowCost };
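
A usage sketch for calculateTotalWorkflowCost, with a hypothetical per-model aggregation of the shape built by getWorkflowStats in teamStore.js; if any model code is missing from modelsPricing, every cost field comes back as -1:

// Hypothetical per-model usage gathered from workflow logs
const modelUsageStats = {
  'gpt-4o-mini': { inputTokens: 300000, outputTokens: 100000, callsCount: 4 },
  'gpt-4o': { inputTokens: 50000, outputTokens: 20000, callsCount: 1 }
};
const workflowCost = calculateTotalWorkflowCost(modelUsageStats);
// gpt-4o-mini: 0.3 * 0.15 + 0.1 * 0.60 = 0.045 + 0.06
// gpt-4o:      0.05 * 5.00 + 0.02 * 15.00 = 0.25 + 0.30
// => { costInputTokens: 0.295, costOutputTokens: 0.36, totalCost: 0.655 }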

