-
-
Notifications
You must be signed in to change notification settings - Fork 1.1k
fix: resolve architectural gaps in praisonai wrapper (fixes #1620) #1621
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Changes from all commits
9127ad0
3285441
275a985
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -232,10 +232,8 @@ def __init__(self, agent_file, framework, config_list, log_level=None, agent_cal | |
| self.tool_registry = ToolRegistry() | ||
| self.tool_registry.register_builtin_autogen_adapters() | ||
|
|
||
| # Get framework adapter and validate availability | ||
| # Get framework adapter (availability already validated at CLI entry) | ||
| self.framework_adapter = self._get_framework_adapter(framework) | ||
| if not self.framework_adapter.is_available(): | ||
| raise ImportError(f"Framework '{framework}' is not available. Please install the required dependencies.") | ||
|
|
||
| def _get_framework_adapter(self, framework: str) -> FrameworkAdapter: | ||
| """ | ||
|
|
@@ -597,10 +595,10 @@ def generate_crew_and_kickoff(self): | |
| self.framework = framework | ||
| self.framework_adapter = self._get_framework_adapter(framework) | ||
|
|
||
| # Final availability check | ||
| if not self.framework_adapter.is_available(): | ||
| raise ImportError(f"Framework '{framework}' is not available. Please install the required dependencies.") | ||
| # Validate framework availability for non-CLI callers | ||
| from .framework_adapters.validators import assert_framework_available | ||
| assert_framework_available(framework) | ||
|
|
||
| self.logger.info(f"Using framework: {framework}") | ||
| return self.framework_adapter.run( | ||
| config, | ||
|
|
@@ -892,10 +890,13 @@ def _resolve(key, env_var=None, default=None): | |
| api_type = _resolve("api_type", default="openai").lower() | ||
| model_name = _resolve("model", default="gpt-4o-mini") | ||
| api_key = _resolve("api_key", env_var="OPENAI_API_KEY") | ||
| # Use resolver for consistent env-var precedence as fallback | ||
| from praisonai.llm.env import resolve_llm_endpoint | ||
| ep = resolve_llm_endpoint() | ||
|
|
||
| base_url = (model_config.get("base_url") | ||
| or yaml_llm.get("base_url") | ||
| or os.environ.get("OPENAI_BASE_URL") | ||
| or os.environ.get("OPENAI_API_BASE")) | ||
| or ep.base_url) | ||
|
Comment on lines
892
to
+899
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Use the resolved endpoint as the single fallback tuple here. This only takes Suggested change api_type = _resolve("api_type", default="openai").lower()
- model_name = _resolve("model", default="gpt-4o-mini")
- api_key = _resolve("api_key", env_var="OPENAI_API_KEY")
- # Use resolver for consistent env-var precedence as fallback
from praisonai.llm.env import resolve_llm_endpoint
ep = resolve_llm_endpoint()
-
- base_url = (model_config.get("base_url")
- or yaml_llm.get("base_url")
- or ep.base_url)
+ model_name = _resolve("model", default=ep.model)
+ api_key = _resolve("api_key", env_var="OPENAI_API_KEY") or ep.api_key
+ base_url = (
+ yaml_llm.get("base_url")
+ or first_role_llm.get("base_url")
+ or model_config.get("base_url")
+ or ep.base_url
+ )🤖 Prompt for AI Agents |
||
|
|
||
| # Build LLMConfig — Bedrock needs no api_key | ||
| if api_type == "bedrock": | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -62,11 +62,22 @@ def realtime_connect( | |
| session_id = f"realtime-{uuid.uuid4().hex[:12]}" | ||
|
|
||
| # Build WebSocket URL | ||
| base = api_base or os.environ.get("OPENAI_API_BASE", "wss://api.openai.com") | ||
| if api_base: | ||
| base = api_base | ||
| else: | ||
| from praisonai.llm.env import resolve_llm_endpoint | ||
| ep = resolve_llm_endpoint(default_base="wss://api.openai.com") | ||
| base = ep.base_url | ||
|
Comment on lines
+68
to
+70
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more.
The realtime endpoint needs to restrict its lookup to the OpenAI-specific vars ( |
||
|
|
||
| if base.startswith("http"): | ||
| base = base.replace("https://", "wss://").replace("http://", "ws://") | ||
|
|
||
| url = f"{base.rstrip('/')}/v1/realtime?model={model}" | ||
| # Strip any existing /v1 path to avoid double /v1/v1/realtime | ||
| base = base.rstrip('/') | ||
| if base.endswith('/v1'): | ||
| base = base[:-3] | ||
|
|
||
| url = f"{base}/v1/realtime?model={model}" | ||
|
|
||
| return RealtimeSession( | ||
| id=session_id, | ||
|
|
||
| Original file line number | Diff line number | Diff line change | ||||
|---|---|---|---|---|---|---|
|
|
@@ -5,9 +5,12 @@ | |||||
| Usage: praisonai "prompt" --metrics | ||||||
| """ | ||||||
|
|
||||||
| import logging | ||||||
| from typing import Any, Dict, Tuple | ||||||
| from .base import FlagHandler | ||||||
|
|
||||||
| logger = logging.getLogger(__name__) | ||||||
|
|
||||||
|
|
||||||
| class MetricsHandler(FlagHandler): | ||||||
| """ | ||||||
|
|
@@ -133,33 +136,29 @@ def extract_metrics_from_agent(self, agent: Any) -> Dict[str, Any]: | |||||
| ctx = litellm._thread_context | ||||||
| if hasattr(ctx, 'usage'): | ||||||
| usage = ctx.usage | ||||||
| if hasattr(usage, 'prompt_tokens'): | ||||||
| metrics['prompt_tokens'] = usage.prompt_tokens | ||||||
| if hasattr(usage, 'completion_tokens'): | ||||||
| metrics['completion_tokens'] = usage.completion_tokens | ||||||
| if hasattr(usage, 'total_tokens'): | ||||||
| metrics['total_tokens'] = usage.total_tokens | ||||||
| except: | ||||||
| pass | ||||||
| metrics['prompt_tokens'] = getattr(usage, 'prompt_tokens', None) | ||||||
| metrics['completion_tokens'] = getattr(usage, 'completion_tokens', None) | ||||||
| metrics['total_tokens'] = getattr(usage, 'total_tokens', None) | ||||||
| except ImportError: | ||||||
| logger.debug("litellm not installed; skipping token metrics") | ||||||
| except AttributeError as e: | ||||||
| logger.debug("litellm internals changed (%s); skipping token metrics", e) | ||||||
|
|
||||||
| # Try to get cost from litellm cost tracking | ||||||
| try: | ||||||
| import litellm | ||||||
| from litellm import completion_cost | ||||||
| # If we have token counts, estimate cost | ||||||
| if 'prompt_tokens' in metrics and 'completion_tokens' in metrics: | ||||||
| if metrics.get('prompt_tokens') and metrics.get('completion_tokens'): | ||||||
|
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more.
Suggested change
|
||||||
| model = metrics.get('model', 'gpt-4o-mini') | ||||||
| try: | ||||||
| cost = completion_cost( | ||||||
| metrics['cost'] = completion_cost( | ||||||
| model=model, | ||||||
| prompt_tokens=metrics['prompt_tokens'], | ||||||
| completion_tokens=metrics['completion_tokens'] | ||||||
| completion_tokens=metrics['completion_tokens'], | ||||||
| ) | ||||||
| metrics['cost'] = cost | ||||||
| except: | ||||||
| pass | ||||||
| except: | ||||||
| pass | ||||||
| except Exception as e: | ||||||
| logger.debug("cost calc failed for model=%s: %s", model, e) | ||||||
| except ImportError: | ||||||
| logger.debug("litellm.completion_cost unavailable; skipping cost metrics") | ||||||
|
|
||||||
| return metrics | ||||||
|
|
||||||
|
|
||||||
| Original file line number | Diff line number | Diff line change | ||||||||||||||||||
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
|
|
@@ -272,29 +272,26 @@ def __init__(self, agent_file="agents.yaml", framework="", auto=False, init=Fals | |||||||||||||||||||
| self.agent_yaml = agent_yaml | ||||||||||||||||||||
| self._interactive_mode = False # Flag for interactive TUI mode | ||||||||||||||||||||
| # Create config_list with AutoGen compatibility | ||||||||||||||||||||
| # Support multiple environment variable patterns for better compatibility | ||||||||||||||||||||
| # Priority order: MODEL_NAME > OPENAI_MODEL_NAME for model selection | ||||||||||||||||||||
| model_name = os.environ.get("MODEL_NAME") or os.environ.get("OPENAI_MODEL_NAME", "gpt-4o-mini") | ||||||||||||||||||||
|
|
||||||||||||||||||||
| # Priority order for base_url: OPENAI_BASE_URL > OPENAI_API_BASE > OLLAMA_API_BASE | ||||||||||||||||||||
| # OPENAI_BASE_URL is the standard OpenAI SDK environment variable | ||||||||||||||||||||
| base_url = ( | ||||||||||||||||||||
| os.environ.get("OPENAI_BASE_URL") or | ||||||||||||||||||||
| os.environ.get("OPENAI_API_BASE") or | ||||||||||||||||||||
| os.environ.get("OLLAMA_API_BASE", "https://api.openai.com/v1") | ||||||||||||||||||||
| ) | ||||||||||||||||||||
| # Resolve LLM endpoint configuration from environment variables | ||||||||||||||||||||
| from praisonai.llm.env import resolve_llm_endpoint | ||||||||||||||||||||
| ep = resolve_llm_endpoint() | ||||||||||||||||||||
|
|
||||||||||||||||||||
| api_key = os.environ.get("OPENAI_API_KEY") | ||||||||||||||||||||
| self.config_list = [ | ||||||||||||||||||||
| { | ||||||||||||||||||||
| 'model': model_name, | ||||||||||||||||||||
| 'base_url': base_url, | ||||||||||||||||||||
| 'api_key': api_key, | ||||||||||||||||||||
| 'model': ep.model, | ||||||||||||||||||||
| 'base_url': ep.base_url, | ||||||||||||||||||||
| 'api_key': ep.api_key, | ||||||||||||||||||||
| 'api_type': 'openai' # AutoGen expects this field | ||||||||||||||||||||
| } | ||||||||||||||||||||
| ] | ||||||||||||||||||||
| self.agent_file = agent_file | ||||||||||||||||||||
| self.framework = framework | ||||||||||||||||||||
|
|
||||||||||||||||||||
| # Validate framework availability early to fail fast | ||||||||||||||||||||
| if self.framework: | ||||||||||||||||||||
| from praisonai.framework_adapters.validators import assert_framework_available | ||||||||||||||||||||
| assert_framework_available(self.framework) | ||||||||||||||||||||
|
Comment on lines
287
to
+293
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more.
Suggested change
|
||||||||||||||||||||
|
|
||||||||||||||||||||
| self.auto = auto | ||||||||||||||||||||
| self.init = init | ||||||||||||||||||||
| self.tools = tools or [] # Store tool class names as a list | ||||||||||||||||||||
|
|
@@ -383,6 +380,11 @@ def main(self): | |||||||||||||||||||
|
|
||||||||||||||||||||
| self.framework = args.framework or self.framework | ||||||||||||||||||||
|
|
||||||||||||||||||||
| # Validate framework availability early to fail fast | ||||||||||||||||||||
| if self.framework: | ||||||||||||||||||||
| from praisonai.framework_adapters.validators import assert_framework_available | ||||||||||||||||||||
| assert_framework_available(self.framework) | ||||||||||||||||||||
|
|
||||||||||||||||||||
| # Update config_list model if --model flag is provided | ||||||||||||||||||||
| if getattr(args, 'model', None): | ||||||||||||||||||||
| self.config_list[0]['model'] = args.model | ||||||||||||||||||||
|
|
||||||||||||||||||||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -52,10 +52,7 @@ def run( | |
| Returns: | ||
| Execution result as string | ||
| """ | ||
| if not self.is_available(): | ||
| raise ImportError("CrewAI is not available. Install with: pip install crewai") | ||
|
|
||
| # Import CrewAI only when needed | ||
| # Import CrewAI only when needed (availability already validated at CLI entry) | ||
| from crewai import Agent, Task, Crew | ||
| from crewai.telemetry import Telemetry | ||
|
|
||
|
Comment on lines
+55
to
58
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. 🧩 Analysis chain🏁 Script executed: #!/bin/bash
# Find all sites that instantiate CrewAIAdapter / call its run() directly,
# to confirm whether non-CLI paths exist that bypass cli/main.py validation.
rg -nP -C3 '\bCrewAIAdapter\s*\('
rg -nP -C3 '\.run\s*\(' --type=py -g '*adapter*'
# Confirm assert_framework_available is only invoked from cli/main.py
rg -nP -C2 '\bassert_framework_available\s*\('Repository: MervinPraison/PraisonAI Length of output: 6224 🏁 Script executed: # Read the full CrewAIAdapter implementation to see current state of run() method
cat -n src/praisonai/praisonai/framework_adapters/crewai_adapter.pyRepository: MervinPraison/PraisonAI Length of output: 5032 🏁 Script executed: # Search for any instantiation or direct usage of CrewAIAdapter outside CLI
rg -l 'CrewAIAdapter' --type=py | head -20Repository: MervinPraison/PraisonAI Length of output: 238 🏁 Script executed: # Look for any tests or programmatic usage patterns
rg -n 'from.*crewai_adapter|import.*CrewAIAdapter' --type=pyRepository: MervinPraison/PraisonAI Length of output: 278 🏁 Script executed: # Search for calls to .run() method on adapters
rg -n '\.run\(' --type=py | grep -i adapterRepository: MervinPraison/PraisonAI Length of output: 1182 🏁 Script executed: # Check test files for any direct adapter.run() calls
fd -e test.py -e tests.py | head -20
rg -n 'adapter.*\.run\|run.*adapter' --type=py -A2 -B2Repository: MervinPraison/PraisonAI Length of output: 49 🏁 Script executed: # Check how the adapter is instantiated and used in the framework
rg -n 'get_adapter|get_framework_adapter' --type=py -A3Repository: MervinPraison/PraisonAI Length of output: 8419 🏁 Script executed: # Check AgentsGenerator and how it validates framework
sed -n '230,310p' src/praisonai/praisonai/agents_generator.pyRepository: MervinPraison/PraisonAI Length of output: 3526 🏁 Script executed: # Check _get_framework_adapter implementation
sed -n '238,260p' src/praisonai/praisonai/agents_generator.pyRepository: MervinPraison/PraisonAI Length of output: 959 🏁 Script executed: # Check where AgentsGenerator is instantiated
rg -n 'AgentsGenerator\(' --type=py -B2 -A2Repository: MervinPraison/PraisonAI Length of output: 11135 🏁 Script executed: # Check if AgentsGenerator validation really happens only at CLI
rg -n 'class AgentsGenerator' --type=py -A30Repository: MervinPraison/PraisonAI Length of output: 3452 Add framework availability guard to Non-CLI callers exist that bypass Adding a lightweight 🤖 Prompt for AI Agents |
||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,36 @@ | ||
"""
Framework availability validators.

Provides early validation of framework availability to fail fast at CLI entry
rather than inside run() methods after expensive setup work.
"""

from .registry import FrameworkAdapterRegistry


# Actionable install commands for the frameworks we ship adapters for.
_INSTALL_HINTS = {
    "crewai": "pip install 'praisonai[crewai]' # or: pip install crewai",
    "autogen": "pip install 'praisonai[autogen]' # or: pip install pyautogen",
    "praisonai": "pip install praisonaiagents",
}


def assert_framework_available(name: str) -> None:
    """
    Raise ImportError immediately if the chosen framework is missing.

    Args:
        name: Framework name to validate

    Raises:
        ImportError: If framework is not available, with an actionable install hint
    """
    # Fast path: nothing to do when the adapter registry reports the
    # framework as importable.
    if FrameworkAdapterRegistry.get_instance().is_available(name):
        return

    # Fall back to a generic pip command for frameworks we have no
    # curated hint for.
    hint = _INSTALL_HINTS.get(name, f"pip install {name}")
    raise ImportError(
        f"Framework '{name}' was requested but is not installed.\n"
        f"Install it with:\n {hint}"
    )
Uh oh!
There was an error while loading. Please reload this page.