dify/scripts/stress-test/setup/configure_openai_plugin.py (new file, 97 lines)
@@ -0,0 +1,97 @@
#!/usr/bin/env python3

import sys
from pathlib import Path

sys.path.append(str(Path(__file__).parent.parent))

import httpx
from common import Logger, config_helper


def configure_openai_plugin() -> None:
    """Configure OpenAI plugin with mock server credentials."""

    log = Logger("ConfigPlugin")
    log.header("Configuring OpenAI Plugin")

    # Read token from config
    access_token = config_helper.get_token()
    if not access_token:
        log.error("No access token found in config")
        log.info("Please run login_admin.py first to get access token")
        return

    log.step("Configuring OpenAI plugin with mock server...")

    # API endpoint for plugin configuration
    base_url = "http://localhost:5001"
    config_endpoint = f"{base_url}/console/api/workspaces/current/model-providers/langgenius/openai/openai/credentials"

    # Configuration payload with mock server
    config_payload = {
        "credentials": {
            "openai_api_key": "apikey",
            "openai_organization": None,
            "openai_api_base": "http://host.docker.internal:5004",
        }
    }

    headers = {
        "Accept": "*/*",
        "Accept-Language": "en-US,en;q=0.9",
        "Cache-Control": "no-cache",
        "Connection": "keep-alive",
        "DNT": "1",
        "Origin": "http://localhost:3000",
        "Pragma": "no-cache",
        "Referer": "http://localhost:3000/",
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "same-site",
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/139.0.0.0 Safari/537.36",
        "authorization": f"Bearer {access_token}",
        "content-type": "application/json",
        "sec-ch-ua": '"Not;A=Brand";v="99", "Google Chrome";v="139", "Chromium";v="139"',
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": '"macOS"',
    }

    cookies = {"locale": "en-US"}

    try:
        # Make the configuration request
        with httpx.Client() as client:
            response = client.post(
                config_endpoint,
                json=config_payload,
                headers=headers,
                cookies=cookies,
            )

            if response.status_code == 200:
                log.success("OpenAI plugin configured successfully!")
                log.key_value("API Base", config_payload["credentials"]["openai_api_base"])
                log.key_value("API Key", config_payload["credentials"]["openai_api_key"])

            elif response.status_code == 201:
                log.success("OpenAI plugin credentials created successfully!")
                log.key_value("API Base", config_payload["credentials"]["openai_api_base"])
                log.key_value("API Key", config_payload["credentials"]["openai_api_key"])

            elif response.status_code == 401:
                log.error("Configuration failed: Unauthorized")
                log.info("Token may have expired. Please run login_admin.py again")
            else:
                log.error(f"Configuration failed with status code: {response.status_code}")
                log.debug(f"Response: {response.text}")

    except httpx.ConnectError:
        log.error("Could not connect to Dify API at http://localhost:5001")
        log.info("Make sure the API server is running with: ./dev/start-api")
    except Exception as e:
        log.error(f"An error occurred: {e}")


if __name__ == "__main__":
    configure_openai_plugin()

dify/scripts/stress-test/setup/create_api_key.py (new file, 113 lines)
@@ -0,0 +1,113 @@
#!/usr/bin/env python3

import sys
from pathlib import Path

sys.path.append(str(Path(__file__).parent.parent))

import json

import httpx
from common import Logger, config_helper


def create_api_key() -> None:
    """Create API key for the imported app."""

    log = Logger("CreateAPIKey")
    log.header("Creating API Key")

    # Read token from config
    access_token = config_helper.get_token()
    if not access_token:
        log.error("No access token found in config")
        return

    # Read app_id from config
    app_id = config_helper.get_app_id()
    if not app_id:
        log.error("No app_id found in config")
        log.info("Please run import_workflow_app.py first to import the app")
        return

    log.step(f"Creating API key for app: {app_id}")

    # API endpoint for creating API key
    base_url = "http://localhost:5001"
    api_key_endpoint = f"{base_url}/console/api/apps/{app_id}/api-keys"

    headers = {
        "Accept": "*/*",
        "Accept-Language": "en-US,en;q=0.9",
        "Cache-Control": "no-cache",
        "Connection": "keep-alive",
        "Content-Length": "0",
        "DNT": "1",
        "Origin": "http://localhost:3000",
        "Pragma": "no-cache",
        "Referer": "http://localhost:3000/",
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "same-site",
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/139.0.0.0 Safari/537.36",
        "authorization": f"Bearer {access_token}",
        "content-type": "application/json",
        "sec-ch-ua": '"Not;A=Brand";v="99", "Google Chrome";v="139", "Chromium";v="139"',
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": '"macOS"',
    }

    cookies = {"locale": "en-US"}

    try:
        # Make the API key creation request
        with httpx.Client() as client:
            response = client.post(
                api_key_endpoint,
                headers=headers,
                cookies=cookies,
            )

            if response.status_code == 200 or response.status_code == 201:
                response_data = response.json()

                api_key_id = response_data.get("id")
                api_key_token = response_data.get("token")

                if api_key_token:
                    log.success("API key created successfully!")
                    log.key_value("Key ID", api_key_id)
                    log.key_value("Token", api_key_token)
                    log.key_value("Type", response_data.get("type"))

                    # Save API key to config
                    api_key_config = {
                        "id": api_key_id,
                        "token": api_key_token,
                        "type": response_data.get("type"),
                        "app_id": app_id,
                        "created_at": response_data.get("created_at"),
                    }

                    if config_helper.write_config("api_key_config", api_key_config):
                        log.info(f"API key saved to: {config_helper.get_config_path('benchmark_state')}")
                else:
                    log.error("No API token received")
                    log.debug(f"Response: {json.dumps(response_data, indent=2)}")

            elif response.status_code == 401:
                log.error("API key creation failed: Unauthorized")
                log.info("Token may have expired. Please run login_admin.py again")
            else:
                log.error(f"API key creation failed with status code: {response.status_code}")
                log.debug(f"Response: {response.text}")

    except httpx.ConnectError:
        log.error("Could not connect to Dify API at http://localhost:5001")
        log.info("Make sure the API server is running with: ./dev/start-api")
    except Exception as e:
        log.error(f"An error occurred: {e}")


if __name__ == "__main__":
    create_api_key()

dify/scripts/stress-test/setup/dsl/workflow_llm.yml (new file, 176 lines)
@@ -0,0 +1,176 @@
app:
  description: ''
  icon: 🤖
  icon_background: '#FFEAD5'
  mode: workflow
  name: workflow_llm
  use_icon_as_answer_icon: false
dependencies:
- current_identifier: null
  type: marketplace
  value:
    marketplace_plugin_unique_identifier: langgenius/openai:0.2.5@373362a028986aae53a7baf73a7f11991ba3c22c69eaf97d6cde048cfd4a9f98
kind: app
version: 0.4.0
workflow:
  conversation_variables: []
  environment_variables: []
  features:
    file_upload:
      allowed_file_extensions:
      - .JPG
      - .JPEG
      - .PNG
      - .GIF
      - .WEBP
      - .SVG
      allowed_file_types:
      - image
      allowed_file_upload_methods:
      - local_file
      - remote_url
      enabled: false
      fileUploadConfig:
        audio_file_size_limit: 50
        batch_count_limit: 5
        file_size_limit: 15
        image_file_size_limit: 10
        video_file_size_limit: 100
        workflow_file_upload_limit: 10
      image:
        enabled: false
        number_limits: 3
        transfer_methods:
        - local_file
        - remote_url
      number_limits: 3
    opening_statement: ''
    retriever_resource:
      enabled: true
    sensitive_word_avoidance:
      enabled: false
    speech_to_text:
      enabled: false
    suggested_questions: []
    suggested_questions_after_answer:
      enabled: false
    text_to_speech:
      enabled: false
      language: ''
      voice: ''
  graph:
    edges:
    - data:
        isInIteration: false
        isInLoop: false
        sourceType: start
        targetType: llm
      id: 1757611990947-source-1757611992921-target
      source: '1757611990947'
      sourceHandle: source
      target: '1757611992921'
      targetHandle: target
      type: custom
      zIndex: 0
    - data:
        isInIteration: false
        isInLoop: false
        sourceType: llm
        targetType: end
      id: 1757611992921-source-1757611996447-target
      source: '1757611992921'
      sourceHandle: source
      target: '1757611996447'
      targetHandle: target
      type: custom
      zIndex: 0
    nodes:
    - data:
        desc: ''
        selected: false
        title: Start
        type: start
        variables:
        - label: question
          max_length: null
          options: []
          required: true
          type: text-input
          variable: question
      height: 90
      id: '1757611990947'
      position:
        x: 30
        y: 245
      positionAbsolute:
        x: 30
        y: 245
      selected: false
      sourcePosition: right
      targetPosition: left
      type: custom
      width: 244
    - data:
        context:
          enabled: false
          variable_selector: []
        desc: ''
        model:
          completion_params:
            temperature: 0.7
          mode: chat
          name: gpt-4o
          provider: langgenius/openai/openai
        prompt_template:
        - id: c165fcb6-f1f0-42f2-abab-e81982434deb
          role: system
          text: ''
        - role: user
          text: '{{#1757611990947.question#}}'
        selected: false
        title: LLM
        type: llm
        variables: []
        vision:
          enabled: false
      height: 90
      id: '1757611992921'
      position:
        x: 334
        y: 245
      positionAbsolute:
        x: 334
        y: 245
      selected: false
      sourcePosition: right
      targetPosition: left
      type: custom
      width: 244
    - data:
        desc: ''
        outputs:
        - value_selector:
          - '1757611992921'
          - text
          value_type: string
          variable: answer
        selected: false
        title: End
        type: end
      height: 90
      id: '1757611996447'
      position:
        x: 638
        y: 245
      positionAbsolute:
        x: 638
        y: 245
      selected: true
      sourcePosition: right
      targetPosition: left
      type: custom
      width: 244
    viewport:
      x: 0
      y: 0
      zoom: 0.7

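The DSL above defines a minimal three-node graph (Start → LLM → End) and pins the marketplace OpenAI plugin as a dependency. Before importing, a sanity check along the lines of the sketch below can confirm the graph shape; it assumes PyYAML is available and that the file sits next to the setup scripts (the check itself is not part of this commit):

#!/usr/bin/env python3
# Hypothetical check, not part of this commit: verify the DSL graph before importing it.
from pathlib import Path

import yaml  # assumes PyYAML is installed in the environment

dsl = yaml.safe_load((Path(__file__).parent / "dsl" / "workflow_llm.yml").read_text())
graph = dsl["workflow"]["graph"]

# Expect ['start', 'llm', 'end'] and the 2 edges wiring them together
print([node["data"]["type"] for node in graph["nodes"]])
print(len(graph["edges"]), "edges")
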
dify/scripts/stress-test/setup/import_workflow_app.py (new file, 128 lines)
@@ -0,0 +1,128 @@
#!/usr/bin/env python3

import sys
from pathlib import Path

sys.path.append(str(Path(__file__).parent.parent))

import json

import httpx
from common import Logger, config_helper  # type: ignore[import]


def import_workflow_app() -> None:
    """Import workflow app from DSL file and save app_id."""

    log = Logger("ImportApp")
    log.header("Importing Workflow Application")

    # Read token from config
    access_token = config_helper.get_token()
    if not access_token:
        log.error("No access token found in config")
        log.info("Please run login_admin.py first to get access token")
        return

    # Read workflow DSL file
    dsl_path = Path(__file__).parent / "dsl" / "workflow_llm.yml"

    if not dsl_path.exists():
        log.error(f"DSL file not found: {dsl_path}")
        return

    with open(dsl_path) as f:
        yaml_content = f.read()

    log.step("Importing workflow app from DSL...")
    log.key_value("DSL file", dsl_path.name)

    # API endpoint for app import
    base_url = "http://localhost:5001"
    import_endpoint = f"{base_url}/console/api/apps/imports"

    # Import payload
    import_payload = {"mode": "yaml-content", "yaml_content": yaml_content}

    headers = {
        "Accept": "*/*",
        "Accept-Language": "en-US,en;q=0.9",
        "Cache-Control": "no-cache",
        "Connection": "keep-alive",
        "DNT": "1",
        "Origin": "http://localhost:3000",
        "Pragma": "no-cache",
        "Referer": "http://localhost:3000/",
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "same-site",
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/139.0.0.0 Safari/537.36",
        "authorization": f"Bearer {access_token}",
        "content-type": "application/json",
        "sec-ch-ua": '"Not;A=Brand";v="99", "Google Chrome";v="139", "Chromium";v="139"',
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": '"macOS"',
    }

    cookies = {"locale": "en-US"}

    try:
        # Make the import request
        with httpx.Client() as client:
            response = client.post(
                import_endpoint,
                json=import_payload,
                headers=headers,
                cookies=cookies,
            )

            if response.status_code == 200:
                response_data = response.json()

                # Check import status
                if response_data.get("status") == "completed":
                    app_id = response_data.get("app_id")

                    if app_id:
                        log.success("Workflow app imported successfully!")
                        log.key_value("App ID", app_id)
                        log.key_value("App Mode", response_data.get("app_mode"))
                        log.key_value("DSL Version", response_data.get("imported_dsl_version"))

                        # Save app_id to config
                        app_config = {
                            "app_id": app_id,
                            "app_mode": response_data.get("app_mode"),
                            "app_name": "workflow_llm",
                            "dsl_version": response_data.get("imported_dsl_version"),
                        }

                        if config_helper.write_config("app_config", app_config):
                            log.info(f"App config saved to: {config_helper.get_config_path('benchmark_state')}")
                    else:
                        log.error("Import completed but no app_id received")
                        log.debug(f"Response: {json.dumps(response_data, indent=2)}")

                elif response_data.get("status") == "failed":
                    log.error("Import failed")
                    log.error(f"Error: {response_data.get('error')}")
                else:
                    log.warning(f"Import status: {response_data.get('status')}")
                    log.debug(f"Response: {json.dumps(response_data, indent=2)}")

            elif response.status_code == 401:
                log.error("Import failed: Unauthorized")
                log.info("Token may have expired. Please run login_admin.py again")
            else:
                log.error(f"Import failed with status code: {response.status_code}")
                log.debug(f"Response: {response.text}")

    except httpx.ConnectError:
        log.error("Could not connect to Dify API at http://localhost:5001")
        log.info("Make sure the API server is running with: ./dev/start-api")
    except Exception as e:
        log.error(f"An error occurred: {e}")


if __name__ == "__main__":
    import_workflow_app()

dify/scripts/stress-test/setup/install_openai_plugin.py (new file, 157 lines)
@@ -0,0 +1,157 @@
#!/usr/bin/env python3

import sys
from pathlib import Path

sys.path.append(str(Path(__file__).parent.parent))

import time

import httpx
from common import Logger, config_helper


def install_openai_plugin() -> None:
    """Install OpenAI plugin using saved access token."""

    log = Logger("InstallPlugin")
    log.header("Installing OpenAI Plugin")

    # Read token from config
    access_token = config_helper.get_token()
    if not access_token:
        log.error("No access token found in config")
        log.info("Please run login_admin.py first to get access token")
        return

    log.step("Installing OpenAI plugin...")

    # API endpoint for plugin installation
    base_url = "http://localhost:5001"
    install_endpoint = f"{base_url}/console/api/workspaces/current/plugin/install/marketplace"

    # Plugin identifier
    plugin_payload = {
        "plugin_unique_identifiers": [
            "langgenius/openai:0.2.5@373362a028986aae53a7baf73a7f11991ba3c22c69eaf97d6cde048cfd4a9f98"
        ]
    }

    headers = {
        "Accept": "*/*",
        "Accept-Language": "en-US,en;q=0.9",
        "Cache-Control": "no-cache",
        "Connection": "keep-alive",
        "DNT": "1",
        "Origin": "http://localhost:3000",
        "Pragma": "no-cache",
        "Referer": "http://localhost:3000/",
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "same-site",
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/139.0.0.0 Safari/537.36",
        "authorization": f"Bearer {access_token}",
        "content-type": "application/json",
        "sec-ch-ua": '"Not;A=Brand";v="99", "Google Chrome";v="139", "Chromium";v="139"',
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": '"macOS"',
    }

    cookies = {"locale": "en-US"}

    try:
        # Make the installation request
        with httpx.Client() as client:
            response = client.post(
                install_endpoint,
                json=plugin_payload,
                headers=headers,
                cookies=cookies,
            )

            if response.status_code == 200:
                response_data = response.json()
                task_id = response_data.get("task_id")

                if not task_id:
                    log.error("No task ID received from installation request")
                    return

                log.progress(f"Installation task created: {task_id}")
                log.info("Polling for task completion...")

                # Poll for task completion
                task_endpoint = f"{base_url}/console/api/workspaces/current/plugin/tasks/{task_id}"

                max_attempts = 30  # 30 attempts with 2 second delay = 60 seconds max
                attempt = 0

                log.spinner_start("Installing plugin")

                while attempt < max_attempts:
                    attempt += 1
                    time.sleep(2)  # Wait 2 seconds between polls

                    task_response = client.get(
                        task_endpoint,
                        headers=headers,
                        cookies=cookies,
                    )

                    if task_response.status_code != 200:
                        log.spinner_stop(
                            success=False,
                            message=f"Failed to get task status: {task_response.status_code}",
                        )
                        return

                    task_data = task_response.json()
                    task_info = task_data.get("task", {})
                    status = task_info.get("status")

                    if status == "success":
                        log.spinner_stop(success=True, message="Plugin installed!")
                        log.success("OpenAI plugin installed successfully!")

                        # Display plugin info
                        plugins = task_info.get("plugins", [])
                        if plugins:
                            plugin_info = plugins[0]
                            log.key_value("Plugin ID", plugin_info.get("plugin_id"))
                            log.key_value("Message", plugin_info.get("message"))
                        break

                    elif status == "failed":
                        log.spinner_stop(success=False, message="Installation failed")
                        log.error("Plugin installation failed")
                        plugins = task_info.get("plugins", [])
                        if plugins:
                            for plugin in plugins:
                                log.list_item(f"{plugin.get('plugin_id')}: {plugin.get('message')}")
                        break

                    # Continue polling if status is "pending" or other

                else:
                    log.spinner_stop(success=False, message="Installation timed out")
                    log.error("Installation timed out after 60 seconds")

            elif response.status_code == 401:
                log.error("Installation failed: Unauthorized")
                log.info("Token may have expired. Please run login_admin.py again")
            elif response.status_code == 409:
                log.warning("Plugin may already be installed")
                log.debug(f"Response: {response.text}")
            else:
                log.error(f"Installation failed with status code: {response.status_code}")
                log.debug(f"Response: {response.text}")

    except httpx.ConnectError:
        log.error("Could not connect to Dify API at http://localhost:5001")
        log.info("Make sure the API server is running with: ./dev/start-api")
    except Exception as e:
        log.error(f"An error occurred: {e}")


if __name__ == "__main__":
    install_openai_plugin()

dify/scripts/stress-test/setup/login_admin.py (new file, 101 lines)
@@ -0,0 +1,101 @@
#!/usr/bin/env python3

import sys
from pathlib import Path

sys.path.append(str(Path(__file__).parent.parent))

import json

import httpx
from common import Logger, config_helper


def login_admin() -> None:
    """Login with admin account and save access token."""

    log = Logger("Login")
    log.header("Admin Login")

    # Read admin credentials from config
    admin_config = config_helper.read_config("admin_config")

    if not admin_config:
        log.error("Admin config not found")
        log.info("Please run setup_admin.py first to create the admin account")
        return

    log.info(f"Logging in with email: {admin_config['email']}")

    # API login endpoint
    base_url = "http://localhost:5001"
    login_endpoint = f"{base_url}/console/api/login"

    # Prepare login payload
    login_payload = {
        "email": admin_config["email"],
        "password": admin_config["password"],
        "remember_me": True,
    }

    try:
        # Make the login request
        with httpx.Client() as client:
            response = client.post(
                login_endpoint,
                json=login_payload,
                headers={"Content-Type": "application/json"},
            )

            if response.status_code == 200:
                log.success("Login successful!")

                # Extract token from response
                response_data = response.json()

                # Check if login was successful
                if response_data.get("result") != "success":
                    log.error(f"Login failed: {response_data}")
                    return

                # Extract tokens from data field
                token_data = response_data.get("data", {})
                access_token = token_data.get("access_token", "")
                refresh_token = token_data.get("refresh_token", "")

                if not access_token:
                    log.error("No access token found in response")
                    log.debug(f"Full response: {json.dumps(response_data, indent=2)}")
                    return

                # Save token to config file
                token_config = {
                    "email": admin_config["email"],
                    "access_token": access_token,
                    "refresh_token": refresh_token,
                }

                # Save token config
                if config_helper.write_config("token_config", token_config):
                    log.info(f"Token saved to: {config_helper.get_config_path('benchmark_state')}")

                    # Show truncated token for verification
                    token_display = f"{access_token[:20]}..." if len(access_token) > 20 else "Token saved"
                    log.key_value("Access token", token_display)

            elif response.status_code == 401:
                log.error("Login failed: Invalid credentials")
                log.debug(f"Response: {response.text}")
            else:
                log.error(f"Login failed with status code: {response.status_code}")
                log.debug(f"Response: {response.text}")

    except httpx.ConnectError:
        log.error("Could not connect to Dify API at http://localhost:5001")
        log.info("Make sure the API server is running with: ./dev/start-api")
    except Exception as e:
        log.error(f"An error occurred: {e}")


if __name__ == "__main__":
    login_admin()

dify/scripts/stress-test/setup/mock_openai_server.py (new file, 205 lines)
@@ -0,0 +1,205 @@
#!/usr/bin/env python3

import json
import time
import uuid
from collections.abc import Iterator
from typing import Any

from flask import Flask, Response, jsonify, request

app = Flask(__name__)

# Mock models list
MODELS = [
    {
        "id": "gpt-3.5-turbo",
        "object": "model",
        "created": 1677649963,
        "owned_by": "openai",
    },
    {"id": "gpt-4", "object": "model", "created": 1687882411, "owned_by": "openai"},
    {
        "id": "text-embedding-ada-002",
        "object": "model",
        "created": 1671217299,
        "owned_by": "openai-internal",
    },
]


@app.route("/v1/models", methods=["GET"])
def list_models() -> Any:
    """List available models."""
    return jsonify({"object": "list", "data": MODELS})


@app.route("/v1/chat/completions", methods=["POST"])
def chat_completions() -> Any:
    """Handle chat completions."""
    data = request.json or {}
    model = data.get("model", "gpt-3.5-turbo")
    messages = data.get("messages", [])
    stream = data.get("stream", False)

    # Generate mock response
    response_content = "This is a mock response from the OpenAI server."
    if messages:
        last_message = messages[-1].get("content", "")
        response_content = f"Mock response to: {last_message[:100]}..."

    if stream:
        # Streaming response
        def generate() -> Iterator[str]:
            # Send initial chunk
            chunk = {
                "id": f"chatcmpl-{uuid.uuid4().hex[:8]}",
                "object": "chat.completion.chunk",
                "created": int(time.time()),
                "model": model,
                "choices": [
                    {
                        "index": 0,
                        "delta": {"role": "assistant", "content": ""},
                        "finish_reason": None,
                    }
                ],
            }
            yield f"data: {json.dumps(chunk)}\n\n"

            # Send content in chunks
            words = response_content.split()
            for word in words:
                chunk = {
                    "id": f"chatcmpl-{uuid.uuid4().hex[:8]}",
                    "object": "chat.completion.chunk",
                    "created": int(time.time()),
                    "model": model,
                    "choices": [
                        {
                            "index": 0,
                            "delta": {"content": word + " "},
                            "finish_reason": None,
                        }
                    ],
                }
                yield f"data: {json.dumps(chunk)}\n\n"
                time.sleep(0.05)  # Simulate streaming delay

            # Send final chunk
            chunk = {
                "id": f"chatcmpl-{uuid.uuid4().hex[:8]}",
                "object": "chat.completion.chunk",
                "created": int(time.time()),
                "model": model,
                "choices": [{"index": 0, "delta": {}, "finish_reason": "stop"}],
            }
            yield f"data: {json.dumps(chunk)}\n\n"
            yield "data: [DONE]\n\n"

        return Response(generate(), mimetype="text/event-stream")
    else:
        # Non-streaming response
        return jsonify(
            {
                "id": f"chatcmpl-{uuid.uuid4().hex[:8]}",
                "object": "chat.completion",
                "created": int(time.time()),
                "model": model,
                "choices": [
                    {
                        "index": 0,
                        "message": {"role": "assistant", "content": response_content},
                        "finish_reason": "stop",
                    }
                ],
                "usage": {
                    "prompt_tokens": len(str(messages)),
                    "completion_tokens": len(response_content.split()),
                    "total_tokens": len(str(messages)) + len(response_content.split()),
                },
            }
        )


@app.route("/v1/completions", methods=["POST"])
def completions() -> Any:
    """Handle text completions."""
    data = request.json or {}
    model = data.get("model", "gpt-3.5-turbo-instruct")
    prompt = data.get("prompt", "")

    response_text = f"Mock completion for prompt: {prompt[:100]}..."

    return jsonify(
        {
            "id": f"cmpl-{uuid.uuid4().hex[:8]}",
            "object": "text_completion",
            "created": int(time.time()),
            "model": model,
            "choices": [
                {
                    "text": response_text,
                    "index": 0,
                    "logprobs": None,
                    "finish_reason": "stop",
                }
            ],
            "usage": {
                "prompt_tokens": len(prompt.split()),
                "completion_tokens": len(response_text.split()),
                "total_tokens": len(prompt.split()) + len(response_text.split()),
            },
        }
    )


@app.route("/v1/embeddings", methods=["POST"])
def embeddings() -> Any:
    """Handle embeddings requests."""
    data = request.json or {}
    model = data.get("model", "text-embedding-ada-002")
    input_text = data.get("input", "")

    # Generate mock embedding (1536 dimensions for ada-002)
    mock_embedding = [0.1] * 1536

    return jsonify(
        {
            "object": "list",
            "data": [{"object": "embedding", "embedding": mock_embedding, "index": 0}],
            "model": model,
            "usage": {
                "prompt_tokens": len(input_text.split()),
                "total_tokens": len(input_text.split()),
            },
        }
    )


@app.route("/v1/models/<model_id>", methods=["GET"])
def get_model(model_id: str) -> tuple[Any, int] | Any:
    """Get specific model details."""
    for model in MODELS:
        if model["id"] == model_id:
            return jsonify(model)

    return jsonify({"error": "Model not found"}), 404


@app.route("/health", methods=["GET"])
def health() -> Any:
    """Health check endpoint."""
    return jsonify({"status": "healthy"})


if __name__ == "__main__":
    print("🚀 Starting Mock OpenAI Server on http://localhost:5004")
    print("Available endpoints:")
    print(" - GET /v1/models")
    print(" - POST /v1/chat/completions")
    print(" - POST /v1/completions")
    print(" - POST /v1/embeddings")
    print(" - GET /v1/models/<model_id>")
    print(" - GET /health")
    app.run(host="0.0.0.0", port=5004, debug=True)

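The mock server reproduces just enough of the OpenAI API surface for the stress test; configure_openai_plugin.py points the installed plugin at it via http://host.docker.internal:5004. A quick smoke test against a locally running instance might look like the sketch below (illustrative only, not part of this commit):

#!/usr/bin/env python3
# Illustrative smoke test for the mock server, not part of this commit.
import httpx

base = "http://localhost:5004"

# Health check and model listing
print(httpx.get(f"{base}/health").json())  # {'status': 'healthy'}
print([m["id"] for m in httpx.get(f"{base}/v1/models").json()["data"]])

# Non-streaming chat completion returns the canned mock answer
resp = httpx.post(
    f"{base}/v1/chat/completions",
    json={"model": "gpt-4", "messages": [{"role": "user", "content": "ping"}]},
)
print(resp.json()["choices"][0]["message"]["content"])
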
dify/scripts/stress-test/setup/publish_workflow.py (new file, 105 lines)
@@ -0,0 +1,105 @@
#!/usr/bin/env python3

import sys
from pathlib import Path

sys.path.append(str(Path(__file__).parent.parent))

import json

import httpx
from common import Logger, config_helper


def publish_workflow() -> None:
    """Publish the imported workflow app."""

    log = Logger("PublishWorkflow")
    log.header("Publishing Workflow")

    # Read token from config
    access_token = config_helper.get_token()
    if not access_token:
        log.error("No access token found in config")
        return

    # Read app_id from config
    app_id = config_helper.get_app_id()
    if not app_id:
        log.error("No app_id found in config")
        return

    log.step(f"Publishing workflow for app: {app_id}")

    # API endpoint for publishing workflow
    base_url = "http://localhost:5001"
    publish_endpoint = f"{base_url}/console/api/apps/{app_id}/workflows/publish"

    # Publish payload
    publish_payload = {"marked_name": "", "marked_comment": ""}

    headers = {
        "Accept": "*/*",
        "Accept-Language": "en-US,en;q=0.9",
        "Cache-Control": "no-cache",
        "Connection": "keep-alive",
        "DNT": "1",
        "Origin": "http://localhost:3000",
        "Pragma": "no-cache",
        "Referer": "http://localhost:3000/",
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "same-site",
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/139.0.0.0 Safari/537.36",
        "authorization": f"Bearer {access_token}",
        "content-type": "application/json",
        "sec-ch-ua": '"Not;A=Brand";v="99", "Google Chrome";v="139", "Chromium";v="139"',
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": '"macOS"',
    }

    cookies = {"locale": "en-US"}

    try:
        # Make the publish request
        with httpx.Client() as client:
            response = client.post(
                publish_endpoint,
                json=publish_payload,
                headers=headers,
                cookies=cookies,
            )

            if response.status_code == 200 or response.status_code == 201:
                log.success("Workflow published successfully!")
                log.key_value("App ID", app_id)

                # Try to parse response if it has JSON content
                if response.text:
                    try:
                        response_data = response.json()
                        if response_data:
                            log.debug(f"Response: {json.dumps(response_data, indent=2)}")
                    except json.JSONDecodeError:
                        # Response might be empty or non-JSON
                        pass

            elif response.status_code == 401:
                log.error("Workflow publish failed: Unauthorized")
                log.info("Token may have expired. Please run login_admin.py again")
            elif response.status_code == 404:
                log.error("Workflow publish failed: App not found")
                log.info("Make sure the app was imported successfully")
            else:
                log.error(f"Workflow publish failed with status code: {response.status_code}")
                log.debug(f"Response: {response.text}")

    except httpx.ConnectError:
        log.error("Could not connect to Dify API at http://localhost:5001")
        log.info("Make sure the API server is running with: ./dev/start-api")
    except Exception as e:
        log.error(f"An error occurred: {e}")


if __name__ == "__main__":
    publish_workflow()

dify/scripts/stress-test/setup/run_workflow.py (new file, 161 lines)
@@ -0,0 +1,161 @@
#!/usr/bin/env python3

import sys
from pathlib import Path

sys.path.append(str(Path(__file__).parent.parent))

import json

import httpx
from common import Logger, config_helper


def run_workflow(question: str = "fake question", streaming: bool = True) -> None:
    """Run the workflow app with a question."""

    log = Logger("RunWorkflow")
    log.header("Running Workflow")

    # Read API key from config
    api_token = config_helper.get_api_key()
    if not api_token:
        log.error("No API token found in config")
        log.info("Please run create_api_key.py first to create an API key")
        return

    log.key_value("Question", question)
    log.key_value("Mode", "Streaming" if streaming else "Blocking")
    log.separator()

    # API endpoint for running workflow
    base_url = "http://localhost:5001"
    run_endpoint = f"{base_url}/v1/workflows/run"

    # Run payload
    run_payload = {
        "inputs": {"question": question},
        "user": "default user",
        "response_mode": "streaming" if streaming else "blocking",
    }

    headers = {
        "Authorization": f"Bearer {api_token}",
        "Content-Type": "application/json",
    }

    try:
        # Make the run request
        with httpx.Client(timeout=30.0) as client:
            if streaming:
                # Handle streaming response
                with client.stream(
                    "POST",
                    run_endpoint,
                    json=run_payload,
                    headers=headers,
                ) as response:
                    if response.status_code == 200:
                        log.success("Workflow started successfully!")
                        log.separator()
                        log.step("Streaming response:")

                        for line in response.iter_lines():
                            if line.startswith("data: "):
                                data_str = line[6:]  # Remove "data: " prefix
                                if data_str == "[DONE]":
                                    log.success("Workflow completed!")
                                    break
                                try:
                                    data = json.loads(data_str)
                                    event = data.get("event")

                                    if event == "workflow_started":
                                        log.progress(f"Workflow started: {data.get('data', {}).get('id')}")
                                    elif event == "node_started":
                                        node_data = data.get("data", {})
                                        log.progress(
                                            f"Node started: {node_data.get('node_type')} - {node_data.get('title')}"
                                        )
                                    elif event == "node_finished":
                                        node_data = data.get("data", {})
                                        log.progress(
                                            f"Node finished: {node_data.get('node_type')} - {node_data.get('title')}"
                                        )

                                        # Print output if it's the LLM node
                                        outputs = node_data.get("outputs", {})
                                        if outputs.get("text"):
                                            log.separator()
                                            log.info("💬 LLM Response:")
                                            log.info(outputs.get("text"), indent=2)
                                            log.separator()

                                    elif event == "workflow_finished":
                                        workflow_data = data.get("data", {})
                                        outputs = workflow_data.get("outputs", {})
                                        if outputs.get("answer"):
                                            log.separator()
                                            log.info("📤 Final Answer:")
                                            log.info(outputs.get("answer"), indent=2)
                                            log.separator()
                                            log.key_value(
                                                "Total tokens",
                                                str(workflow_data.get("total_tokens", 0)),
                                            )
                                            log.key_value(
                                                "Total steps",
                                                str(workflow_data.get("total_steps", 0)),
                                            )

                                    elif event == "error":
                                        log.error(f"Error: {data.get('message')}")

                                except json.JSONDecodeError:
                                    # Some lines might not be JSON
                                    pass
                    else:
                        log.error(f"Workflow run failed with status code: {response.status_code}")
                        log.debug(f"Response: {response.text}")
            else:
                # Handle blocking response
                response = client.post(
                    run_endpoint,
                    json=run_payload,
                    headers=headers,
                )

                if response.status_code == 200:
                    log.success("Workflow completed successfully!")
                    response_data = response.json()

                    log.separator()
                    log.debug(f"Full response: {json.dumps(response_data, indent=2)}")

                    # Extract the answer if available
                    outputs = response_data.get("data", {}).get("outputs", {})
                    if outputs.get("answer"):
                        log.separator()
                        log.info("📤 Final Answer:")
                        log.info(outputs.get("answer"), indent=2)
                else:
                    log.error(f"Workflow run failed with status code: {response.status_code}")
                    log.debug(f"Response: {response.text}")

    except httpx.ConnectError:
        log.error("Could not connect to Dify API at http://localhost:5001")
        log.info("Make sure the API server is running with: ./dev/start-api")
    except httpx.TimeoutException:
        log.error("Request timed out")
    except Exception as e:
        log.error(f"An error occurred: {e}")


if __name__ == "__main__":
    # Allow passing question as command line argument
    if len(sys.argv) > 1:
        question = " ".join(sys.argv[1:])
    else:
        question = "What is the capital of France?"

    run_workflow(question=question, streaming=True)

dify/scripts/stress-test/setup/setup_admin.py (new file, 71 lines)
@@ -0,0 +1,71 @@
#!/usr/bin/env python3

import sys
from pathlib import Path

sys.path.append(str(Path(__file__).parent.parent))

import httpx
from common import Logger, config_helper


def setup_admin_account() -> None:
    """Setup Dify API with an admin account."""

    log = Logger("SetupAdmin")
    log.header("Setting up Admin Account")

    # Admin account credentials
    admin_config = {
        "email": "test@dify.ai",
        "username": "dify",
        "password": "password123",
    }

    # Save credentials to config file
    if config_helper.write_config("admin_config", admin_config):
        log.info(f"Admin credentials saved to: {config_helper.get_config_path('benchmark_state')}")

    # API setup endpoint
    base_url = "http://localhost:5001"
    setup_endpoint = f"{base_url}/console/api/setup"

    # Prepare setup payload
    setup_payload = {
        "email": admin_config["email"],
        "name": admin_config["username"],
        "password": admin_config["password"],
    }

    log.step("Configuring Dify with admin account...")

    try:
        # Make the setup request
        with httpx.Client() as client:
            response = client.post(
                setup_endpoint,
                json=setup_payload,
                headers={"Content-Type": "application/json"},
            )

            if response.status_code == 201:
                log.success("Admin account created successfully!")
                log.key_value("Email", admin_config["email"])
                log.key_value("Username", admin_config["username"])

            elif response.status_code == 400:
                log.warning("Setup may have already been completed or invalid data provided")
                log.debug(f"Response: {response.text}")
            else:
                log.error(f"Setup failed with status code: {response.status_code}")
                log.debug(f"Response: {response.text}")

    except httpx.ConnectError:
        log.error("Could not connect to Dify API at http://localhost:5001")
        log.info("Make sure the API server is running with: ./dev/start-api")
    except Exception as e:
        log.error(f"An error occurred: {e}")


if __name__ == "__main__":
    setup_admin_account()

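Taken together, the scripts' own prerequisites ("please run setup_admin.py first", "please run login_admin.py first", and so on) imply a fixed execution order. A minimal driver along these lines could chain them once the Dify API (port 5001) and the mock OpenAI server (port 5004) are up; the driver is a sketch and not part of this commit:

#!/usr/bin/env python3
# Sketch of a one-shot setup driver, not part of this commit.
import subprocess
import sys
from pathlib import Path

SETUP_DIR = Path(__file__).parent / "setup"  # assumed location of the scripts above

# Order implied by the "please run ... first" messages in the scripts.
STEPS = [
    "setup_admin.py",              # create the admin account
    "login_admin.py",              # obtain the console access token
    "install_openai_plugin.py",    # install the marketplace OpenAI plugin
    "configure_openai_plugin.py",  # point the plugin at the mock server
    "import_workflow_app.py",      # import dsl/workflow_llm.yml
    "publish_workflow.py",         # publish the imported workflow
    "create_api_key.py",           # create the app API key
    "run_workflow.py",             # smoke-test the published workflow
]

for step in STEPS:
    print(f"==> {step}")
    subprocess.run([sys.executable, str(SETUP_DIR / step)], check=True)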